author     Pavan Deolasee    2017-06-15 07:41:07 +0000
committer  Pavan Deolasee    2017-06-15 07:41:07 +0000
commit     0ffa504a17f58f2bc045b0039f40e4917ee50d20 (patch)
tree       c629c449bcfcc45de1d03b2586e89932d546e8ba
parent     36ccc8d64e61fe9d77bb7ac62267945f7c146baa (diff)
parent     e800656d9a9b40b2f55afabe76354ab6d93353b3 (diff)
Merge 'remotes/PGSQL/master' into xl10devel
Merge upstream master branch up to e800656d9a9b40b2f55afabe76354ab6d93353b3. Code compiles and the regression suite runs, though with many failures.
-rw-r--r-- contrib/hstore_plperl/expected/create_transform.out | 6
-rw-r--r-- contrib/isn/expected/isn.out | 32
-rw-r--r-- contrib/postgres_fdw/connection.c | 361
-rw-r--r-- contrib/postgres_fdw/expected/postgres_fdw.out | 84
-rw-r--r-- contrib/sepgsql/database.c | 2
-rw-r--r-- contrib/sepgsql/expected/misc.out | 2
-rw-r--r-- contrib/sepgsql/proc.c | 4
-rw-r--r-- contrib/sepgsql/relation.c | 8
-rw-r--r-- contrib/sepgsql/schema.c | 2
-rw-r--r-- doc/src/sgml/brin.sgml | 2
-rw-r--r-- doc/src/sgml/catalogs.sgml | 8
-rw-r--r-- doc/src/sgml/config.sgml | 2
-rwxr-xr-x doc/src/sgml/ddl.sgml | 2
-rw-r--r-- doc/src/sgml/dfunc.sgml | 26
-rw-r--r-- doc/src/sgml/ecpg.sgml | 5
-rw-r--r-- doc/src/sgml/external-projects.sgml | 48
-rw-r--r-- doc/src/sgml/func.sgml | 2
-rw-r--r-- doc/src/sgml/high-availability.sgml | 4
-rw-r--r-- doc/src/sgml/information_schema.sgml | 26
-rw-r--r-- doc/src/sgml/keywords.sgml | 44
-rw-r--r-- doc/src/sgml/libpq.sgml | 8
-rw-r--r-- doc/src/sgml/monitoring.sgml | 4
-rw-r--r-- doc/src/sgml/parallel.sgml | 2
-rw-r--r-- doc/src/sgml/pgstattuple.sgml | 14
-rw-r--r-- doc/src/sgml/postgres-fdw.sgml | 2
-rw-r--r-- doc/src/sgml/protocol.sgml | 4
-rw-r--r-- doc/src/sgml/ref/alter_aggregate.sgml | 3
-rw-r--r-- doc/src/sgml/ref/alter_publication.sgml | 37
-rw-r--r-- doc/src/sgml/ref/alter_sequence.sgml | 15
-rw-r--r-- doc/src/sgml/ref/alter_subscription.sgml | 41
-rw-r--r-- doc/src/sgml/ref/alter_system.sgml | 3
-rwxr-xr-x doc/src/sgml/ref/alter_table.sgml | 6
-rw-r--r-- doc/src/sgml/ref/alter_type.sgml | 3
-rw-r--r-- doc/src/sgml/ref/copy.sgml | 2
-rw-r--r-- doc/src/sgml/ref/create_access_method.sgml | 3
-rw-r--r-- doc/src/sgml/ref/create_publication.sgml | 3
-rw-r--r-- doc/src/sgml/ref/create_subscription.sgml | 16
-rwxr-xr-x doc/src/sgml/ref/create_table.sgml | 72
-rw-r--r-- doc/src/sgml/ref/drop_aggregate.sgml | 3
-rw-r--r-- doc/src/sgml/ref/drop_policy.sgml | 3
-rw-r--r-- doc/src/sgml/ref/drop_publication.sgml | 4
-rw-r--r-- doc/src/sgml/ref/drop_statistics.sgml | 4
-rw-r--r-- doc/src/sgml/ref/drop_subscription.sgml | 26
-rw-r--r-- doc/src/sgml/ref/import_foreign_schema.sgml | 4
-rw-r--r-- doc/src/sgml/ref/initdb.sgml | 4
-rw-r--r-- doc/src/sgml/ref/insert.sgml | 3
-rw-r--r-- doc/src/sgml/ref/pg_dump.sgml | 28
-rw-r--r-- doc/src/sgml/ref/pg_dumpall.sgml | 16
-rw-r--r-- doc/src/sgml/ref/pg_recvlogical.sgml | 52
-rw-r--r-- doc/src/sgml/ref/pgbench.sgml | 20
-rw-r--r-- doc/src/sgml/ref/pgupgrade.sgml | 26
-rw-r--r-- doc/src/sgml/ref/psql-ref.sgml | 89
-rwxr-xr-x doc/src/sgml/ref/set_transaction.sgml | 6
-rw-r--r-- doc/src/sgml/release-10.sgml | 57
-rw-r--r-- doc/src/sgml/xfunc.sgml | 89
-rw-r--r-- src/Makefile.global.in | 4
-rw-r--r-- src/backend/access/brin/brin.c | 3
-rw-r--r-- src/backend/access/brin/brin_pageops.c | 20
-rw-r--r-- src/backend/access/brin/brin_revmap.c | 13
-rw-r--r-- src/backend/access/brin/brin_validate.c | 36
-rw-r--r-- src/backend/access/gin/ginvalidate.c | 32
-rw-r--r-- src/backend/access/gist/gistvalidate.c | 32
-rw-r--r-- src/backend/access/hash/hashvalidate.c | 40
-rw-r--r-- src/backend/access/nbtree/nbtvalidate.c | 36
-rw-r--r-- src/backend/access/spgist/spgvalidate.c | 36
-rw-r--r-- src/backend/access/transam/parallel.c | 35
-rw-r--r-- src/backend/access/transam/recovery.conf.sample | 10
-rw-r--r-- src/backend/access/transam/subtrans.c | 1
-rw-r--r-- src/backend/access/transam/twophase.c | 87
-rw-r--r-- src/backend/access/transam/xact.c | 68
-rw-r--r-- src/backend/access/transam/xlog.c | 5
-rw-r--r-- src/backend/access/transam/xlogfuncs.c | 4
-rw-r--r-- src/backend/bootstrap/bootparse.y | 2
-rw-r--r-- src/backend/bootstrap/bootstrap.c | 5
-rw-r--r-- src/backend/catalog/aclchk.c | 4
-rw-r--r-- src/backend/catalog/catalog.c | 16
-rw-r--r-- src/backend/catalog/heap.c | 11
-rw-r--r-- src/backend/catalog/index.c | 4
-rw-r--r-- src/backend/catalog/information_schema.sql | 8
-rw-r--r-- src/backend/catalog/namespace.c | 2
-rw-r--r-- src/backend/catalog/objectaddress.c | 11
-rw-r--r-- src/backend/catalog/partition.c | 6
-rw-r--r-- src/backend/catalog/pg_subscription.c | 15
-rw-r--r-- src/backend/commands/collationcmds.c | 20
-rw-r--r-- src/backend/commands/copy.c | 21
-rw-r--r-- src/backend/commands/extension.c | 9
-rw-r--r-- src/backend/commands/indexcmds.c | 13
-rw-r--r-- src/backend/commands/policy.c | 3
-rw-r--r-- src/backend/commands/sequence.c | 168
-rw-r--r-- src/backend/commands/statscmds.c | 5
-rw-r--r-- src/backend/commands/subscriptioncmds.c | 43
-rw-r--r-- src/backend/commands/tablecmds.c | 96
-rw-r--r-- src/backend/commands/vacuumlazy.c | 12
-rw-r--r-- src/backend/commands/variable.c | 2
-rw-r--r-- src/backend/common.mk | 2
-rw-r--r-- src/backend/executor/execMain.c | 138
-rw-r--r-- src/backend/executor/execParallel.c | 16
-rw-r--r-- src/backend/executor/functions.c | 1
-rw-r--r-- src/backend/executor/nodeBitmapHeapscan.c | 2
-rw-r--r-- src/backend/executor/nodeCustom.c | 2
-rw-r--r-- src/backend/executor/nodeForeignscan.c | 2
-rw-r--r-- src/backend/executor/nodeIndexonlyscan.c | 2
-rw-r--r-- src/backend/executor/nodeIndexscan.c | 2
-rw-r--r-- src/backend/executor/nodeModifyTable.c | 42
-rw-r--r-- src/backend/executor/nodeSeqscan.c | 2
-rw-r--r-- src/backend/libpq/auth-scram.c | 67
-rw-r--r-- src/backend/libpq/auth.c | 15
-rw-r--r-- src/backend/libpq/pqmq.c | 4
-rw-r--r-- src/backend/nodes/copyfuncs.c | 3
-rw-r--r-- src/backend/nodes/equalfuncs.c | 2
-rw-r--r-- src/backend/nodes/nodeFuncs.c | 30
-rw-r--r-- src/backend/nodes/outfuncs.c | 11
-rw-r--r-- src/backend/nodes/readfuncs.c | 11
-rw-r--r-- src/backend/optimizer/geqo/geqo_cx.c | 3
-rw-r--r-- src/backend/optimizer/geqo/geqo_erx.c | 3
-rw-r--r-- src/backend/optimizer/geqo/geqo_main.c | 4
-rw-r--r-- src/backend/optimizer/geqo/geqo_mutation.c | 4
-rw-r--r-- src/backend/optimizer/geqo/geqo_ox1.c | 3
-rw-r--r-- src/backend/optimizer/geqo/geqo_ox2.c | 3
-rw-r--r-- src/backend/optimizer/geqo/geqo_pmx.c | 3
-rw-r--r-- src/backend/optimizer/geqo/geqo_px.c | 3
-rw-r--r-- src/backend/optimizer/geqo/geqo_recombination.c | 5
-rw-r--r-- src/backend/optimizer/path/costsize.c | 30
-rw-r--r-- src/backend/optimizer/path/indxpath.c | 11
-rw-r--r-- src/backend/optimizer/plan/createplan.c | 7
-rw-r--r-- src/backend/optimizer/util/plancat.c | 6
-rw-r--r-- src/backend/optimizer/util/predtest.c | 152
-rw-r--r-- src/backend/parser/gram.y | 14
-rw-r--r-- src/backend/parser/parse_agg.c | 8
-rw-r--r-- src/backend/parser/parse_clause.c | 38
-rw-r--r-- src/backend/parser/parse_expr.c | 79
-rw-r--r-- src/backend/parser/parse_func.c | 41
-rw-r--r-- src/backend/parser/parse_oper.c | 14
-rw-r--r-- src/backend/parser/parse_utilcmd.c | 24
-rw-r--r-- src/backend/postmaster/bgworker.c | 4
-rw-r--r-- src/backend/postmaster/postmaster.c | 7
-rw-r--r-- src/backend/replication/libpqwalreceiver/libpqwalreceiver.c | 31
-rw-r--r-- src/backend/replication/logical/launcher.c | 92
-rw-r--r-- src/backend/replication/logical/relation.c | 2
-rw-r--r-- src/backend/replication/logical/snapbuild.c | 14
-rw-r--r-- src/backend/replication/logical/tablesync.c | 345
-rw-r--r-- src/backend/replication/logical/worker.c | 127
-rw-r--r-- src/backend/replication/slot.c | 2
-rw-r--r-- src/backend/replication/walsender.c | 199
-rw-r--r-- src/backend/rewrite/rewriteHandler.c | 9
-rw-r--r-- src/backend/snowball/Makefile | 3
-rw-r--r-- src/backend/storage/ipc/latch.c | 2
-rw-r--r-- src/backend/storage/ipc/procarray.c | 11
-rw-r--r-- src/backend/storage/ipc/procsignal.c | 4
-rw-r--r-- src/backend/storage/ipc/shm_mq.c | 4
-rw-r--r-- src/backend/storage/ipc/shm_toc.c | 50
-rw-r--r-- src/backend/storage/lmgr/condition_variable.c | 6
-rw-r--r-- src/backend/storage/lmgr/predicate.c | 28
-rw-r--r-- src/backend/storage/page/bufpage.c | 4
-rw-r--r-- src/backend/tcop/postgres.c | 44
-rw-r--r-- src/backend/tcop/utility.c | 1
-rw-r--r-- src/backend/utils/adt/json.c | 4
-rw-r--r-- src/backend/utils/adt/jsonb.c | 6
-rw-r--r-- src/backend/utils/adt/ruleutils.c | 2
-rw-r--r-- src/backend/utils/adt/selfuncs.c | 173
-rw-r--r-- src/backend/utils/cache/attoptcache.c | 2
-rw-r--r-- src/backend/utils/cache/evtcache.c | 2
-rw-r--r-- src/backend/utils/cache/syscache.c | 106
-rw-r--r-- src/backend/utils/init/globals.c | 1
-rw-r--r-- src/backend/utils/misc/pg_rusage.c | 2
-rw-r--r-- src/backend/utils/misc/postgresql.conf.sample | 6
-rw-r--r-- src/backend/utils/time/snapmgr.c | 119
-rw-r--r-- src/bin/pg_basebackup/pg_basebackup.c | 4
-rw-r--r-- src/bin/pg_basebackup/pg_recvlogical.c | 4
-rw-r--r-- src/bin/pg_basebackup/receivelog.c | 2
-rw-r--r-- src/bin/pg_dump/pg_dump.c | 5
-rw-r--r-- src/bin/pg_dump/pg_dumpall.c | 4
-rw-r--r-- src/bin/pg_upgrade/.gitignore | 2
-rw-r--r-- src/bin/pg_upgrade/Makefile | 6
-rw-r--r-- src/bin/pg_upgrade/TESTING | 48
-rw-r--r-- src/bin/pg_upgrade/test.sh | 26
-rw-r--r-- src/bin/pg_waldump/pg_waldump.c | 49
-rw-r--r-- src/bin/pgbench/pgbench.c | 4
-rw-r--r-- src/bin/psql/describe.c | 27
-rw-r--r-- src/bin/psql/help.c | 14
-rw-r--r-- src/bin/psql/tab-complete.c | 42
-rw-r--r-- src/include/Makefile | 2
-rw-r--r-- src/include/catalog/catversion.h | 2
-rw-r--r-- src/include/catalog/pg_collation.h | 6
-rw-r--r-- src/include/catalog/pg_proc.h | 4
-rw-r--r-- src/include/catalog/pg_subscription_rel.h | 2
-rw-r--r-- src/include/commands/defrem.h | 1
-rw-r--r-- src/include/lib/simplehash.h | 10
-rw-r--r-- src/include/miscadmin.h | 5
-rw-r--r-- src/include/nodes/parsenodes.h | 15
-rw-r--r-- src/include/optimizer/geqo.h | 2
-rw-r--r-- src/include/optimizer/predtest.h | 8
-rw-r--r-- src/include/parser/parse_func.h | 5
-rw-r--r-- src/include/parser/parse_node.h | 5
-rw-r--r-- src/include/parser/parse_oper.h | 2
-rw-r--r-- src/include/postgres.h | 8
-rw-r--r-- src/include/replication/logicallauncher.h | 2
-rw-r--r-- src/include/replication/logicalworker.h | 2
-rw-r--r-- src/include/replication/walsender.h | 2
-rw-r--r-- src/include/replication/worker_internal.h | 4
-rw-r--r-- src/include/storage/predicate.h | 4
-rw-r--r-- src/include/storage/procarray.h | 2
-rw-r--r-- src/include/storage/procsignal.h | 2
-rw-r--r-- src/include/storage/shm_toc.h | 17
-rw-r--r-- src/include/utils/syscache.h | 10
-rw-r--r-- src/interfaces/ecpg/ecpglib/pg_type.h | 28
-rw-r--r-- src/interfaces/libpq/fe-auth.c | 19
-rw-r--r-- src/interfaces/libpq/fe-connect.c | 137
-rw-r--r-- src/interfaces/libpq/test/README | 2
-rw-r--r-- src/makefiles/Makefile.linux | 7
-rw-r--r-- src/makefiles/Makefile.netbsd | 4
-rw-r--r-- src/makefiles/Makefile.openbsd | 4
-rw-r--r-- src/pl/plpgsql/src/pl_comp.c | 6
-rw-r--r-- src/test/isolation/expected/sequence-ddl.out | 30
-rw-r--r-- src/test/isolation/specs/sequence-ddl.spec | 19
-rw-r--r-- src/test/modules/test_extensions/expected/test_extensions.out | 14
-rw-r--r-- src/test/modules/test_extensions/sql/test_extensions.sql | 4
-rw-r--r-- src/test/modules/test_shm_mq/worker.c | 6
-rw-r--r-- src/test/modules/worker_spi/worker_spi.c | 2
-rw-r--r-- src/test/regress/expected/alter_table.out | 19
-rw-r--r-- src/test/regress/expected/create_table.out | 2
-rw-r--r-- src/test/regress/expected/foreign_data.out | 43
-rw-r--r-- src/test/regress/expected/insert.out | 15
-rw-r--r-- src/test/regress/expected/join.out | 50
-rw-r--r-- src/test/regress/expected/plpgsql.out | 45
-rw-r--r-- src/test/regress/expected/rangefuncs.out | 12
-rw-r--r-- src/test/regress/expected/rowsecurity.out | 440
-rw-r--r-- src/test/regress/expected/subscription.out | 2
-rw-r--r-- src/test/regress/expected/triggers.out | 29
-rw-r--r-- src/test/regress/expected/tsrf.out | 32
-rw-r--r-- src/test/regress/expected/updatable_views.out | 36
-rw-r--r-- src/test/regress/sql/alter_table.sql | 17
-rw-r--r-- src/test/regress/sql/insert.sql | 10
-rw-r--r-- src/test/regress/sql/plpgsql.sql | 39
-rw-r--r-- src/test/regress/sql/rangefuncs.sql | 9
-rw-r--r-- src/test/regress/sql/rowsecurity.sql | 156
-rw-r--r-- src/test/regress/sql/subscription.sql | 2
-rw-r--r-- src/test/regress/sql/triggers.sql | 31
-rw-r--r-- src/test/regress/sql/tsrf.sql | 8
-rw-r--r-- src/test/regress/sql/updatable_views.sql | 10
-rw-r--r-- src/test/subscription/t/001_rep_changes.pl | 2
-rw-r--r-- src/tools/msvc/Solution.pm | 39
-rw-r--r-- src/tools/msvc/config_default.pl | 1
-rw-r--r-- src/tools/msvc/vcregress.pl | 12
244 files changed, 4039 insertions, 2164 deletions
diff --git a/contrib/hstore_plperl/expected/create_transform.out b/contrib/hstore_plperl/expected/create_transform.out
index 02dc62af0d..dc72395376 100644
--- a/contrib/hstore_plperl/expected/create_transform.out
+++ b/contrib/hstore_plperl/expected/create_transform.out
@@ -44,7 +44,7 @@ DROP FUNCTION plperl_to_hstore(val internal);
CREATE EXTENSION hstore_plperl;
\dx+ hstore_plperl
Objects in extension "hstore_plperl"
- Object Description
+ Object description
--------------------------------------
function hstore_to_plperl(internal)
function plperl_to_hstore(internal)
@@ -54,7 +54,7 @@ CREATE EXTENSION hstore_plperl;
ALTER EXTENSION hstore_plperl DROP TRANSFORM FOR hstore LANGUAGE plperl;
\dx+ hstore_plperl
Objects in extension "hstore_plperl"
- Object Description
+ Object description
-------------------------------------
function hstore_to_plperl(internal)
function plperl_to_hstore(internal)
@@ -63,7 +63,7 @@ Objects in extension "hstore_plperl"
ALTER EXTENSION hstore_plperl ADD TRANSFORM FOR hstore LANGUAGE plperl;
\dx+ hstore_plperl
Objects in extension "hstore_plperl"
- Object Description
+ Object description
--------------------------------------
function hstore_to_plperl(internal)
function plperl_to_hstore(internal)
diff --git a/contrib/isn/expected/isn.out b/contrib/isn/expected/isn.out
index ef9d3a61e7..18fe37a82c 100644
--- a/contrib/isn/expected/isn.out
+++ b/contrib/isn/expected/isn.out
@@ -10,22 +10,22 @@ FROM (SELECT amname, opcname, opc.oid
WHERE opc.oid >= 16384
ORDER BY 1, 2 OFFSET 0) ss
WHERE NOT amvalidate(oid);
-INFO: btree operator family "isn_ops" is missing cross-type operator(s)
-INFO: btree operator family "isn_ops" is missing cross-type operator(s)
-INFO: btree operator family "isn_ops" is missing cross-type operator(s)
-INFO: btree operator family "isn_ops" is missing cross-type operator(s)
-INFO: btree operator family "isn_ops" is missing cross-type operator(s)
-INFO: btree operator family "isn_ops" is missing cross-type operator(s)
-INFO: btree operator family "isn_ops" is missing cross-type operator(s)
-INFO: btree operator family "isn_ops" is missing cross-type operator(s)
-INFO: hash operator family "isn_ops" is missing cross-type operator(s)
-INFO: hash operator family "isn_ops" is missing cross-type operator(s)
-INFO: hash operator family "isn_ops" is missing cross-type operator(s)
-INFO: hash operator family "isn_ops" is missing cross-type operator(s)
-INFO: hash operator family "isn_ops" is missing cross-type operator(s)
-INFO: hash operator family "isn_ops" is missing cross-type operator(s)
-INFO: hash operator family "isn_ops" is missing cross-type operator(s)
-INFO: hash operator family "isn_ops" is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method btree is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method btree is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method btree is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method btree is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method btree is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method btree is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method btree is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method btree is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method hash is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method hash is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method hash is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method hash is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method hash is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method hash is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method hash is missing cross-type operator(s)
+INFO: operator family "isn_ops" of access method hash is missing cross-type operator(s)
amname | opcname
--------+------------
btree | ean13_ops
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index c6e3d44515..1b691fb05e 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -14,6 +14,8 @@
#include "postgres_fdw.h"
+#include "access/htup_details.h"
+#include "catalog/pg_user_mapping.h"
#include "access/xact.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
@@ -21,6 +23,7 @@
#include "storage/latch.h"
#include "utils/hsearch.h"
#include "utils/memutils.h"
+#include "utils/syscache.h"
/*
@@ -49,6 +52,7 @@ typedef struct ConnCacheEntry
* one level of subxact open, etc */
bool have_prep_stmt; /* have we prepared any stmts in this xact? */
bool have_error; /* have any subxacts aborted in this xact? */
+ bool changing_xact_state; /* xact state change in process */
} ConnCacheEntry;
/*
@@ -74,6 +78,12 @@ static void pgfdw_subxact_callback(SubXactEvent event,
SubTransactionId mySubid,
SubTransactionId parentSubid,
void *arg);
+static void pgfdw_reject_incomplete_xact_state_change(ConnCacheEntry *entry);
+static bool pgfdw_cancel_query(PGconn *conn);
+static bool pgfdw_exec_cleanup_query(PGconn *conn, const char *query,
+ bool ignore_errors);
+static bool pgfdw_get_cleanup_result(PGconn *conn, TimestampTz endtime,
+ PGresult **result);
/*
@@ -139,8 +149,12 @@ GetConnection(UserMapping *user, bool will_prep_stmt)
entry->xact_depth = 0;
entry->have_prep_stmt = false;
entry->have_error = false;
+ entry->changing_xact_state = false;
}
+ /* Reject further use of connections which failed abort cleanup. */
+ pgfdw_reject_incomplete_xact_state_change(entry);
+
/*
* We don't check the health of cached connection here, because it would
* require some overhead. Broken connection will be detected when the
@@ -343,7 +357,9 @@ do_sql_command(PGconn *conn, const char *sql)
{
PGresult *res;
- res = PQexec(conn, sql);
+ if (!PQsendQuery(conn, sql))
+ pgfdw_report_error(ERROR, NULL, conn, false, sql);
+ res = pgfdw_get_result(conn, sql);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
pgfdw_report_error(ERROR, res, conn, true, sql);
PQclear(res);
@@ -376,8 +392,10 @@ begin_remote_xact(ConnCacheEntry *entry)
sql = "START TRANSACTION ISOLATION LEVEL SERIALIZABLE";
else
sql = "START TRANSACTION ISOLATION LEVEL REPEATABLE READ";
+ entry->changing_xact_state = true;
do_sql_command(entry->conn, sql);
entry->xact_depth = 1;
+ entry->changing_xact_state = false;
}
/*
@@ -390,8 +408,10 @@ begin_remote_xact(ConnCacheEntry *entry)
char sql[64];
snprintf(sql, sizeof(sql), "SAVEPOINT s%d", entry->xact_depth + 1);
+ entry->changing_xact_state = true;
do_sql_command(entry->conn, sql);
entry->xact_depth++;
+ entry->changing_xact_state = false;
}
}
@@ -604,6 +624,8 @@ pgfdw_xact_callback(XactEvent event, void *arg)
/* If it has an open remote transaction, try to close it */
if (entry->xact_depth > 0)
{
+ bool abort_cleanup_failure = false;
+
elog(DEBUG3, "closing remote transaction on connection %p",
entry->conn);
@@ -611,8 +633,17 @@ pgfdw_xact_callback(XactEvent event, void *arg)
{
case XACT_EVENT_PARALLEL_PRE_COMMIT:
case XACT_EVENT_PRE_COMMIT:
+
+ /*
+ * If abort cleanup previously failed for this connection,
+ * we can't issue any more commands against it.
+ */
+ pgfdw_reject_incomplete_xact_state_change(entry);
+
/* Commit all remote transactions during pre-commit */
+ entry->changing_xact_state = true;
do_sql_command(entry->conn, "COMMIT TRANSACTION");
+ entry->changing_xact_state = false;
/*
* If there were any errors in subtransactions, and we
@@ -660,6 +691,27 @@ pgfdw_xact_callback(XactEvent event, void *arg)
break;
case XACT_EVENT_PARALLEL_ABORT:
case XACT_EVENT_ABORT:
+
+ /*
+ * Don't try to clean up the connection if we're already
+ * in error recursion trouble.
+ */
+ if (in_error_recursion_trouble())
+ entry->changing_xact_state = true;
+
+ /*
+ * If connection is already unsalvageable, don't touch it
+ * further.
+ */
+ if (entry->changing_xact_state)
+ break;
+
+ /*
+ * Mark this connection as in the process of changing
+ * transaction state.
+ */
+ entry->changing_xact_state = true;
+
/* Assume we might have lost track of prepared statements */
entry->have_error = true;
@@ -670,40 +722,35 @@ pgfdw_xact_callback(XactEvent event, void *arg)
* command is still being processed by the remote server,
* and if so, request cancellation of the command.
*/
- if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
+ if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE &&
+ !pgfdw_cancel_query(entry->conn))
{
- PGcancel *cancel;
- char errbuf[256];
-
- if ((cancel = PQgetCancel(entry->conn)))
- {
- if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
- ereport(WARNING,
- (errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("could not send cancel request: %s",
- errbuf)));
- PQfreeCancel(cancel);
- }
+ /* Unable to cancel running query. */
+ abort_cleanup_failure = true;
+ }
+ else if (!pgfdw_exec_cleanup_query(entry->conn,
+ "ABORT TRANSACTION",
+ false))
+ {
+ /* Unable to abort remote transaction. */
+ abort_cleanup_failure = true;
+ }
+ else if (entry->have_prep_stmt && entry->have_error &&
+ !pgfdw_exec_cleanup_query(entry->conn,
+ "DEALLOCATE ALL",
+ true))
+ {
+ /* Trouble clearing prepared statements. */
+ abort_cleanup_failure = true;
}
-
- /* If we're aborting, abort all remote transactions too */
- res = PQexec(entry->conn, "ABORT TRANSACTION");
- /* Note: can't throw ERROR, it would be infinite loop */
- if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(WARNING, res, entry->conn, true,
- "ABORT TRANSACTION");
else
{
- PQclear(res);
- /* As above, make sure to clear any prepared stmts */
- if (entry->have_prep_stmt && entry->have_error)
- {
- res = PQexec(entry->conn, "DEALLOCATE ALL");
- PQclear(res);
- }
entry->have_prep_stmt = false;
entry->have_error = false;
}
+
+ /* Disarm changing_xact_state if it all worked. */
+ entry->changing_xact_state = abort_cleanup_failure;
break;
}
}
@@ -716,11 +763,13 @@ pgfdw_xact_callback(XactEvent event, void *arg)
* recover. Next GetConnection will open a new connection.
*/
if (PQstatus(entry->conn) != CONNECTION_OK ||
- PQtransactionStatus(entry->conn) != PQTRANS_IDLE)
+ PQtransactionStatus(entry->conn) != PQTRANS_IDLE ||
+ entry->changing_xact_state)
{
elog(DEBUG3, "discarding connection %p", entry->conn);
PQfinish(entry->conn);
entry->conn = NULL;
+ entry->changing_xact_state = false;
}
}
@@ -763,7 +812,6 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
hash_seq_init(&scan, ConnectionHash);
while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
{
- PGresult *res;
char sql[100];
/*
@@ -779,12 +827,33 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
if (event == SUBXACT_EVENT_PRE_COMMIT_SUB)
{
+ /*
+ * If abort cleanup previously failed for this connection, we
+ * can't issue any more commands against it.
+ */
+ pgfdw_reject_incomplete_xact_state_change(entry);
+
/* Commit all remote subtransactions during pre-commit */
snprintf(sql, sizeof(sql), "RELEASE SAVEPOINT s%d", curlevel);
+ entry->changing_xact_state = true;
do_sql_command(entry->conn, sql);
+ entry->changing_xact_state = false;
}
- else
+ else if (in_error_recursion_trouble())
+ {
+ /*
+ * Don't try to clean up the connection if we're already in error
+ * recursion trouble.
+ */
+ entry->changing_xact_state = true;
+ }
+ else if (!entry->changing_xact_state)
{
+ bool abort_cleanup_failure = false;
+
+ /* Remember that abort cleanup is in progress. */
+ entry->changing_xact_state = true;
+
/* Assume we might have lost track of prepared statements */
entry->have_error = true;
@@ -795,34 +864,220 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
* processed by the remote server, and if so, request cancellation
* of the command.
*/
- if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
+ if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE &&
+ !pgfdw_cancel_query(entry->conn))
+ abort_cleanup_failure = true;
+ else
{
- PGcancel *cancel;
- char errbuf[256];
-
- if ((cancel = PQgetCancel(entry->conn)))
- {
- if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
- ereport(WARNING,
- (errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("could not send cancel request: %s",
- errbuf)));
- PQfreeCancel(cancel);
- }
+ /* Rollback all remote subtransactions during abort */
+ snprintf(sql, sizeof(sql),
+ "ROLLBACK TO SAVEPOINT s%d; RELEASE SAVEPOINT s%d",
+ curlevel, curlevel);
+ if (!pgfdw_exec_cleanup_query(entry->conn, sql, false))
+ abort_cleanup_failure = true;
}
- /* Rollback all remote subtransactions during abort */
- snprintf(sql, sizeof(sql),
- "ROLLBACK TO SAVEPOINT s%d; RELEASE SAVEPOINT s%d",
- curlevel, curlevel);
- res = PQexec(entry->conn, sql);
- if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(WARNING, res, entry->conn, true, sql);
- else
- PQclear(res);
+ /* Disarm changing_xact_state if it all worked. */
+ entry->changing_xact_state = abort_cleanup_failure;
}
/* OK, we're outta that level of subtransaction */
entry->xact_depth--;
}
}
+
+/*
+ * Raise an error if the given connection cache entry is marked as being
+ * in the middle of an xact state change.  This should be called at points
+ * where no such change is expected to be in progress; if one is found to be
+ * in progress, it means that we aborted in the middle of a previous state
+ * change and now don't know what the remote transaction state actually is.
+ * Such connections can't safely be further used. Re-establishing the
+ * connection would change the snapshot and roll back any writes already
+ * performed, so that's not an option, either. Thus, we must abort.
+ */
+static void
+pgfdw_reject_incomplete_xact_state_change(ConnCacheEntry *entry)
+{
+ HeapTuple tup;
+ Form_pg_user_mapping umform;
+ ForeignServer *server;
+
+ if (!entry->changing_xact_state)
+ return;
+
+ tup = SearchSysCache1(USERMAPPINGOID,
+ ObjectIdGetDatum(entry->key));
+ if (!HeapTupleIsValid(tup))
+ elog(ERROR, "cache lookup failed for user mapping %u", entry->key);
+ umform = (Form_pg_user_mapping) GETSTRUCT(tup);
+ server = GetForeignServer(umform->umserver);
+ ReleaseSysCache(tup);
+
+ ereport(ERROR,
+ (errcode(ERRCODE_CONNECTION_EXCEPTION),
+ errmsg("connection to server \"%s\" was lost",
+ server->servername)));
+}
+
+/*
+ * Cancel the currently-in-progress query (whose query text we do not have)
+ * and ignore the result. Returns true if we successfully cancel the query
+ * and discard any pending result, and false if not.
+ */
+static bool
+pgfdw_cancel_query(PGconn *conn)
+{
+ PGcancel *cancel;
+ char errbuf[256];
+ PGresult *result = NULL;
+ TimestampTz endtime;
+
+ /*
+ * If it takes too long to cancel the query and discard the result, assume
+ * the connection is dead.
+ */
+ endtime = TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 30000);
+
+ /*
+ * Issue cancel request. Unfortunately, there's no good way to limit the
+ * amount of time that we might block inside PQgetCancel().
+ */
+ if ((cancel = PQgetCancel(conn)))
+ {
+ if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
+ {
+ ereport(WARNING,
+ (errcode(ERRCODE_CONNECTION_FAILURE),
+ errmsg("could not send cancel request: %s",
+ errbuf)));
+ PQfreeCancel(cancel);
+ return false;
+ }
+ PQfreeCancel(cancel);
+ }
+
+ /* Get and discard the result of the query. */
+ if (pgfdw_get_cleanup_result(conn, endtime, &result))
+ return false;
+ PQclear(result);
+
+ return true;
+}
+
+/*
+ * Submit a query during (sub)abort cleanup and wait up to 30 seconds for the
+ * result. If the query is executed without error, the return value is true.
+ * If the query is executed successfully but returns an error, the return
+ * value is true if and only if ignore_errors is set. If the query can't be
+ * sent or times out, the return value is false.
+ */
+static bool
+pgfdw_exec_cleanup_query(PGconn *conn, const char *query, bool ignore_errors)
+{
+ PGresult *result = NULL;
+ TimestampTz endtime;
+
+ /*
+ * If it takes too long to execute a cleanup query, assume the connection
+ * is dead. It's fairly likely that this is why we aborted in the first
+ * place (e.g. statement timeout, user cancel), so the timeout shouldn't
+ * be too long.
+ */
+ endtime = TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 30000);
+
+ /*
+ * Submit a query. Since we don't use non-blocking mode, this also can
+ * block. But its risk is relatively small, so we ignore that for now.
+ */
+ if (!PQsendQuery(conn, query))
+ {
+ pgfdw_report_error(WARNING, NULL, conn, false, query);
+ return false;
+ }
+
+ /* Get the result of the query. */
+ if (pgfdw_get_cleanup_result(conn, endtime, &result))
+ return false;
+
+ /* Issue a warning if not successful. */
+ if (PQresultStatus(result) != PGRES_COMMAND_OK)
+ {
+ pgfdw_report_error(WARNING, result, conn, true, query);
+ return ignore_errors;
+ }
+
+ return true;
+}
+
+/*
+ * Get, during abort cleanup, the result of a query that is in progress. This
+ * might be a query that is being interrupted by transaction abort, or it might
+ * be a query that was initiated as part of transaction abort to get the remote
+ * side back to the appropriate state.
+ *
+ * It's not a huge problem if we throw an ERROR here, but if we get into error
+ * recursion trouble, we'll end up slamming the connection shut, which will
+ * necessitate failing the entire toplevel transaction even if subtransactions
+ * were used. Try to use WARNING where we can.
+ *
+ * endtime is the time at which we should give up and assume the remote
+ * side is dead. Returns true if the timeout expired, otherwise false.
+ * Sets *result except in case of a timeout.
+ */
+static bool
+pgfdw_get_cleanup_result(PGconn *conn, TimestampTz endtime, PGresult **result)
+{
+ PGresult *last_res = NULL;
+
+ for (;;)
+ {
+ PGresult *res;
+
+ while (PQisBusy(conn))
+ {
+ int wc;
+ TimestampTz now = GetCurrentTimestamp();
+ long secs;
+ int microsecs;
+ long cur_timeout;
+
+ /* If timeout has expired, give up, else get sleep time. */
+ if (now >= endtime)
+ return true;
+ TimestampDifference(now, endtime, &secs, &microsecs);
+
+ /* To protect against clock skew, limit sleep to one minute. */
+ cur_timeout = Min(60000, secs * 1000 + microsecs / 1000);
+
+ /* Sleep until there's something to do */
+ wc = WaitLatchOrSocket(MyLatch,
+ WL_LATCH_SET | WL_SOCKET_READABLE | WL_TIMEOUT,
+ PQsocket(conn),
+ cur_timeout, PG_WAIT_EXTENSION);
+ ResetLatch(MyLatch);
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Data available in socket */
+ if (wc & WL_SOCKET_READABLE)
+ {
+ if (!PQconsumeInput(conn))
+ {
+ *result = NULL;
+ return false;
+ }
+ }
+ }
+
+ res = PQgetResult(conn);
+ if (res == NULL)
+ break; /* query is complete */
+
+ PQclear(last_res);
+ last_res = res;
+ }
+
+ *result = last_res;
+ return false;
+}
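
[Note on the connection.c changes above] The thread running through this file is the new changing_xact_state flag: every remote transaction-control command is bracketed by setting the flag before the command is issued and clearing it only after the command demonstrably succeeded, so an error in between leaves the entry condemned. The standalone C sketch below distills that bracketing pattern outside the FDW; Entry, run_command(), and the messages are illustrative stand-ins for the patch's ConnCacheEntry and do_sql_command(), not code from the patch.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the real connection-cache entry. */
    typedef struct Entry
    {
        bool changing_xact_state;   /* last state change still in flight? */
    } Entry;

    /* Illustrative stub; in the patch this is do_sql_command() over libpq. */
    static bool
    run_command(Entry *entry, const char *sql)
    {
        printf("remote: %s\n", sql);
        return true;
    }

    /* Refuse to reuse an entry whose last state change never completed. */
    static void
    reject_incomplete_state_change(Entry *entry)
    {
        if (entry->changing_xact_state)
        {
            fprintf(stderr, "remote transaction state unknown; discarding connection\n");
            exit(1);
        }
    }

    /*
     * Set the flag before issuing a transaction-control command and clear it
     * only on success; a failure in between leaves the flag set, so the next
     * reject_incomplete_state_change() call discards the connection instead
     * of issuing more commands against an unknown remote state.
     */
    static void
    change_remote_xact_state(Entry *entry, const char *sql)
    {
        reject_incomplete_state_change(entry);
        entry->changing_xact_state = true;
        if (run_command(entry, sql))
            entry->changing_xact_state = false;
    }

    int
    main(void)
    {
        Entry e = {false};

        change_remote_xact_state(&e, "START TRANSACTION");
        change_remote_xact_state(&e, "COMMIT TRANSACTION");
        return 0;
    }

Note the deliberate asymmetry in the patch's abort path: rather than clearing the flag unconditionally, it assigns changing_xact_state = abort_cleanup_failure, so a failed cancel/ABORT/DEALLOCATE sequence leaves the entry condemned and pgfdw_xact_callback later drops the connection.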
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 4d86ab54dd..b112c197ab 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -181,7 +181,7 @@ ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
\det+
List of foreign tables
- Schema | Table | Server | FDW Options | Description
+ Schema | Table | Server | FDW options | Description
--------+------------+-----------+--------------------------------------------------+-------------
public | ft1 | loopback | (schema_name 'S 1', table_name 'T 1') |
public | ft2 | loopback | (schema_name 'S 1', table_name 'T 1') |
@@ -6948,7 +6948,7 @@ CREATE SCHEMA import_dest1;
IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest1;
\det+ import_dest1.*
List of foreign tables
- Schema | Table | Server | FDW Options | Description
+ Schema | Table | Server | FDW options | Description
--------------+-------+----------+-------------------------------------------------+-------------
import_dest1 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
import_dest1 | t2 | loopback | (schema_name 'import_source', table_name 't2') |
@@ -6960,51 +6960,51 @@ IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest1;
\d import_dest1.*
Foreign table "import_dest1.t1"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-------------------+-----------+----------+---------+--------------------
c1 | integer | | | | (column_name 'c1')
c2 | character varying | | not null | | (column_name 'c2')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't1')
+FDW options: (schema_name 'import_source', table_name 't1')
Foreign table "import_dest1.t2"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-------------------+-----------+----------+---------+--------------------
c1 | integer | | | | (column_name 'c1')
c2 | character varying | | | | (column_name 'c2')
c3 | text | POSIX | | | (column_name 'c3')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't2')
+FDW options: (schema_name 'import_source', table_name 't2')
Foreign table "import_dest1.t3"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+--------------------------+-----------+----------+---------+--------------------
c1 | timestamp with time zone | | | | (column_name 'c1')
c2 | typ1 | | | | (column_name 'c2')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't3')
+FDW options: (schema_name 'import_source', table_name 't3')
Foreign table "import_dest1.t4"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+---------+-----------+----------+---------+--------------------
c1 | integer | | | | (column_name 'c1')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't4')
+FDW options: (schema_name 'import_source', table_name 't4')
Foreign table "import_dest1.x 4"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-----------------------+-----------+----------+---------+---------------------
c1 | double precision | | | | (column_name 'c1')
C 2 | text | | | | (column_name 'C 2')
c3 | character varying(42) | | | | (column_name 'c3')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 'x 4')
+FDW options: (schema_name 'import_source', table_name 'x 4')
Foreign table "import_dest1.x 5"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+------+-----------+----------+---------+-------------
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 'x 5')
+FDW options: (schema_name 'import_source', table_name 'x 5')
-- Options
CREATE SCHEMA import_dest2;
@@ -7012,7 +7012,7 @@ IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest2
OPTIONS (import_default 'true');
\det+ import_dest2.*
List of foreign tables
- Schema | Table | Server | FDW Options | Description
+ Schema | Table | Server | FDW options | Description
--------------+-------+----------+-------------------------------------------------+-------------
import_dest2 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
import_dest2 | t2 | loopback | (schema_name 'import_source', table_name 't2') |
@@ -7024,58 +7024,58 @@ IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest2
\d import_dest2.*
Foreign table "import_dest2.t1"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-------------------+-----------+----------+---------+--------------------
c1 | integer | | | | (column_name 'c1')
c2 | character varying | | not null | | (column_name 'c2')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't1')
+FDW options: (schema_name 'import_source', table_name 't1')
Foreign table "import_dest2.t2"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-------------------+-----------+----------+---------+--------------------
c1 | integer | | | 42 | (column_name 'c1')
c2 | character varying | | | | (column_name 'c2')
c3 | text | POSIX | | | (column_name 'c3')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't2')
+FDW options: (schema_name 'import_source', table_name 't2')
Foreign table "import_dest2.t3"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+--------------------------+-----------+----------+---------+--------------------
c1 | timestamp with time zone | | | now() | (column_name 'c1')
c2 | typ1 | | | | (column_name 'c2')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't3')
+FDW options: (schema_name 'import_source', table_name 't3')
Foreign table "import_dest2.t4"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+---------+-----------+----------+---------+--------------------
c1 | integer | | | | (column_name 'c1')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't4')
+FDW options: (schema_name 'import_source', table_name 't4')
Foreign table "import_dest2.x 4"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-----------------------+-----------+----------+---------+---------------------
c1 | double precision | | | | (column_name 'c1')
C 2 | text | | | | (column_name 'C 2')
c3 | character varying(42) | | | | (column_name 'c3')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 'x 4')
+FDW options: (schema_name 'import_source', table_name 'x 4')
Foreign table "import_dest2.x 5"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+------+-----------+----------+---------+-------------
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 'x 5')
+FDW options: (schema_name 'import_source', table_name 'x 5')
CREATE SCHEMA import_dest3;
IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3
OPTIONS (import_collate 'false', import_not_null 'false');
\det+ import_dest3.*
List of foreign tables
- Schema | Table | Server | FDW Options | Description
+ Schema | Table | Server | FDW options | Description
--------------+-------+----------+-------------------------------------------------+-------------
import_dest3 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
import_dest3 | t2 | loopback | (schema_name 'import_source', table_name 't2') |
@@ -7087,51 +7087,51 @@ IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3
\d import_dest3.*
Foreign table "import_dest3.t1"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-------------------+-----------+----------+---------+--------------------
c1 | integer | | | | (column_name 'c1')
c2 | character varying | | | | (column_name 'c2')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't1')
+FDW options: (schema_name 'import_source', table_name 't1')
Foreign table "import_dest3.t2"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-------------------+-----------+----------+---------+--------------------
c1 | integer | | | | (column_name 'c1')
c2 | character varying | | | | (column_name 'c2')
c3 | text | | | | (column_name 'c3')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't2')
+FDW options: (schema_name 'import_source', table_name 't2')
Foreign table "import_dest3.t3"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+--------------------------+-----------+----------+---------+--------------------
c1 | timestamp with time zone | | | | (column_name 'c1')
c2 | typ1 | | | | (column_name 'c2')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't3')
+FDW options: (schema_name 'import_source', table_name 't3')
Foreign table "import_dest3.t4"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+---------+-----------+----------+---------+--------------------
c1 | integer | | | | (column_name 'c1')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 't4')
+FDW options: (schema_name 'import_source', table_name 't4')
Foreign table "import_dest3.x 4"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+-----------------------+-----------+----------+---------+---------------------
c1 | double precision | | | | (column_name 'c1')
C 2 | text | | | | (column_name 'C 2')
c3 | character varying(42) | | | | (column_name 'c3')
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 'x 4')
+FDW options: (schema_name 'import_source', table_name 'x 4')
Foreign table "import_dest3.x 5"
- Column | Type | Collation | Nullable | Default | FDW Options
+ Column | Type | Collation | Nullable | Default | FDW options
--------+------+-----------+----------+---------+-------------
Server: loopback
-FDW Options: (schema_name 'import_source', table_name 'x 5')
+FDW options: (schema_name 'import_source', table_name 'x 5')
-- Check LIMIT TO and EXCEPT
CREATE SCHEMA import_dest4;
@@ -7139,7 +7139,7 @@ IMPORT FOREIGN SCHEMA import_source LIMIT TO (t1, nonesuch)
FROM SERVER loopback INTO import_dest4;
\det+ import_dest4.*
List of foreign tables
- Schema | Table | Server | FDW Options | Description
+ Schema | Table | Server | FDW options | Description
--------------+-------+----------+------------------------------------------------+-------------
import_dest4 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
(1 row)
@@ -7148,7 +7148,7 @@ IMPORT FOREIGN SCHEMA import_source EXCEPT (t1, "x 4", nonesuch)
FROM SERVER loopback INTO import_dest4;
\det+ import_dest4.*
List of foreign tables
- Schema | Table | Server | FDW Options | Description
+ Schema | Table | Server | FDW options | Description
--------------+-------+----------+-------------------------------------------------+-------------
import_dest4 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
import_dest4 | t2 | loopback | (schema_name 'import_source', table_name 't2') |
diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c
index 69dd290a77..8fc5a87e00 100644
--- a/contrib/sepgsql/database.c
+++ b/contrib/sepgsql/database.c
@@ -88,7 +88,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
SnapshotSelf, 1, &skey);
tuple = systable_getnext(sscan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "catalog lookup failed for database %u", databaseId);
+ elog(ERROR, "could not find tuple for database %u", databaseId);
datForm = (Form_pg_database) GETSTRUCT(tuple);
diff --git a/contrib/sepgsql/expected/misc.out b/contrib/sepgsql/expected/misc.out
index 7b55142653..98f8005a60 100644
--- a/contrib/sepgsql/expected/misc.out
+++ b/contrib/sepgsql/expected/misc.out
@@ -32,9 +32,7 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
(6 rows)
SELECT * FROM t1p WHERE o > 50 AND p like '%64%';
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4le(integer,integer)"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4le(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4le(integer,integer)"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p"
diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c
index 4ccf4a5e60..73564edaa7 100644
--- a/contrib/sepgsql/proc.c
+++ b/contrib/sepgsql/proc.c
@@ -68,7 +68,7 @@ sepgsql_proc_post_create(Oid functionId)
tuple = systable_getnext(sscan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "catalog lookup failed for proc %u", functionId);
+ elog(ERROR, "could not find tuple for function %u", functionId);
proForm = (Form_pg_proc) GETSTRUCT(tuple);
@@ -261,7 +261,7 @@ sepgsql_proc_setattr(Oid functionId)
SnapshotSelf, 1, &skey);
newtup = systable_getnext(sscan);
if (!HeapTupleIsValid(newtup))
- elog(ERROR, "catalog lookup failed for function %u", functionId);
+ elog(ERROR, "could not find tuple for function %u", functionId);
newform = (Form_pg_proc) GETSTRUCT(newtup);
/*
diff --git a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c
index 59a6d9be6e..228869a520 100644
--- a/contrib/sepgsql/relation.c
+++ b/contrib/sepgsql/relation.c
@@ -83,7 +83,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
tuple = systable_getnext(sscan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "catalog lookup failed for column %d of relation %u",
+ elog(ERROR, "could not find tuple for column %d of relation %u",
attnum, relOid);
attForm = (Form_pg_attribute) GETSTRUCT(tuple);
@@ -271,7 +271,7 @@ sepgsql_relation_post_create(Oid relOid)
tuple = systable_getnext(sscan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "catalog lookup failed for relation %u", relOid);
+ elog(ERROR, "could not find tuple for relation %u", relOid);
classForm = (Form_pg_class) GETSTRUCT(tuple);
@@ -623,7 +623,7 @@ sepgsql_relation_setattr(Oid relOid)
newtup = systable_getnext(sscan);
if (!HeapTupleIsValid(newtup))
- elog(ERROR, "catalog lookup failed for relation %u", relOid);
+ elog(ERROR, "could not find tuple for relation %u", relOid);
newform = (Form_pg_class) GETSTRUCT(newtup);
/*
@@ -700,7 +700,7 @@ sepgsql_relation_setattr_extra(Relation catalog,
SnapshotSelf, 1, &skey);
tuple = systable_getnext(sscan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "catalog lookup failed for object %u in catalog \"%s\"",
+ elog(ERROR, "could not find tuple for object %u in catalog \"%s\"",
extra_oid, RelationGetRelationName(catalog));
datum = heap_getattr(tuple, anum_relation_id,
diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c
index 940384bf40..d418577b75 100644
--- a/contrib/sepgsql/schema.c
+++ b/contrib/sepgsql/schema.c
@@ -67,7 +67,7 @@ sepgsql_schema_post_create(Oid namespaceId)
SnapshotSelf, 1, &skey);
tuple = systable_getnext(sscan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "catalog lookup failed for namespace %u", namespaceId);
+ elog(ERROR, "could not find tuple for namespace %u", namespaceId);
nspForm = (Form_pg_namespace) GETSTRUCT(tuple);
nsp_name = NameStr(nspForm->nspname);
diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml
index ad11109775..8dcc29925b 100644
--- a/doc/src/sgml/brin.sgml
+++ b/doc/src/sgml/brin.sgml
@@ -81,7 +81,7 @@
occur. (This last trigger is disabled by default and can be enabled
with the <literal>autosummarize</literal> parameter.)
Conversely, a range can be de-summarized using the
- <function>brin_desummarize_range(regclass, bigint)</function> range,
+ <function>brin_desummarize_range(regclass, bigint)</function> function,
which is useful when the index tuple is no longer a very good
representation because the existing values have changed.
</para>
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 7b7ac9ba6f..407ea2018c 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -3816,7 +3816,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><literal><link linkend="catalog-pg-collation"><structname>pg_collation</structname></link>.oid</literal></entry>
<entry>
For each column in the index key, this contains the OID of the
- collation to use for the index.
+ collation to use for the index, or zero if the column is not
+ of a collatable data type.
</entry>
</row>
@@ -4803,7 +4804,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><literal><link linkend="catalog-pg-opclass"><structname>pg_opclass</structname></link>.oid</literal></entry>
<entry>
For each column in the partition key, this contains the OID of the
- the collation to use for partitioning.
+ collation to use for partitioning, or zero if the column is not
+ of a collatable data type.
</entry>
</row>
@@ -6642,7 +6644,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
This catalog only contains tables known to the subscription after running
either <command>CREATE SUBSCRIPTION</command> or
- <command>ALTER SUBSCRIPTION ... REFRESH</command>.
+ <command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command>.
</para>
<table>
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index b7c3ca8412..89eecb4758 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -3521,7 +3521,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
of these configuration parameters to force the optimizer to
choose a different plan.
Better ways to improve the quality of the
- plans chosen by the optimizer include adjusting the planer cost
+ plans chosen by the optimizer include adjusting the planner cost
constants (see <xref linkend="runtime-config-query-constants">),
running <xref linkend="sql-analyze"> manually, increasing
the value of the <xref
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index 71339bf81d..a70047b340 100755
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -3435,7 +3435,7 @@ CREATE INDEX ON measurement_y2008m01 (logdate);
<para>
Normally the set of partitions established when initially defining the
- the table are not intended to remain static. It is common to want to
+ table are not intended to remain static. It is common to want to
remove old partitions of data and periodically add new partitions for
new data. One of the most important advantages of partitioning is
precisely that it allows this otherwise painful task to be executed
diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml
index 6a4b7d6e97..23af270e32 100644
--- a/doc/src/sgml/dfunc.sgml
+++ b/doc/src/sgml/dfunc.sgml
@@ -63,10 +63,10 @@
<listitem>
<para>
The compiler flag to create <acronym>PIC</acronym> is
- <option>-fpic</option>. To create shared libraries the compiler
+ <option>-fPIC</option>. To create shared libraries the compiler
flag is <option>-shared</option>.
<programlisting>
-gcc -fpic -c foo.c
+gcc -fPIC -c foo.c
gcc -shared -o foo.so foo.o
</programlisting>
This is applicable as of version 3.0 of
@@ -84,14 +84,14 @@ gcc -shared -o foo.so foo.o
<para>
The compiler flag of the system compiler to create
<acronym>PIC</acronym> is <option>+z</option>. When using
- <application>GCC</application> it's <option>-fpic</option>. The
+ <application>GCC</application> it's <option>-fPIC</option>. The
linker flag for shared libraries is <option>-b</option>. So:
<programlisting>
cc +z -c foo.c
</programlisting>
or:
<programlisting>
-gcc -fpic -c foo.c
+gcc -fPIC -c foo.c
</programlisting>
and then:
<programlisting>
@@ -112,13 +112,11 @@ ld -b -o foo.sl foo.o
<listitem>
<para>
The compiler flag to create <acronym>PIC</acronym> is
- <option>-fpic</option>. On some platforms in some situations
- <option>-fPIC</option> must be used if <option>-fpic</option>
- does not work. Refer to the GCC manual for more information.
+ <option>-fPIC</option>.
The compiler flag to create a shared library is
<option>-shared</option>. A complete example looks like this:
<programlisting>
-cc -fpic -c foo.c
+cc -fPIC -c foo.c
cc -shared -o foo.so foo.o
</programlisting>
</para>
@@ -149,12 +147,12 @@ cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o
<listitem>
<para>
The compiler flag to create <acronym>PIC</acronym> is
- <option>-fpic</option>. For <acronym>ELF</acronym> systems, the
+ <option>-fPIC</option>. For <acronym>ELF</acronym> systems, the
compiler with the flag <option>-shared</option> is used to link
shared libraries. On the older non-ELF systems, <literal>ld
-Bshareable</literal> is used.
<programlisting>
-gcc -fpic -c foo.c
+gcc -fPIC -c foo.c
gcc -shared -o foo.so foo.o
</programlisting>
</para>
@@ -169,10 +167,10 @@ gcc -shared -o foo.so foo.o
<listitem>
<para>
The compiler flag to create <acronym>PIC</acronym> is
- <option>-fpic</option>. <literal>ld -Bshareable</literal> is
+ <option>-fPIC</option>. <literal>ld -Bshareable</literal> is
used to link shared libraries.
<programlisting>
-gcc -fpic -c foo.c
+gcc -fPIC -c foo.c
ld -Bshareable -o foo.so foo.o
</programlisting>
</para>
@@ -188,7 +186,7 @@ ld -Bshareable -o foo.so foo.o
<para>
The compiler flag to create <acronym>PIC</acronym> is
<option>-KPIC</option> with the Sun compiler and
- <option>-fpic</option> with <application>GCC</>. To
+ <option>-fPIC</option> with <application>GCC</>. To
link shared libraries, the compiler option is
<option>-G</option> with either compiler or alternatively
<option>-shared</option> with <application>GCC</>.
@@ -198,7 +196,7 @@ cc -G -o foo.so foo.o
</programlisting>
or
<programlisting>
-gcc -fpic -c foo.c
+gcc -fPIC -c foo.c
gcc -G -o foo.so foo.o
</programlisting>
</para>
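
[Note on the dfunc.sgml changes above] For context on what those command lines compile: a minimal foo.c for a dynamically loadable module follows the version-1 calling convention documented in xfunc.sgml. The sketch below is illustrative (the function name add_one is made up) and assumes the server header directory reported by pg_config --includedir-server is on the include path.

    #include "postgres.h"
    #include "fmgr.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(add_one);

    /* SQL-callable function: returns its int4 argument plus one. */
    Datum
    add_one(PG_FUNCTION_ARGS)
    {
        int32   arg = PG_GETARG_INT32(0);

        PG_RETURN_INT32(arg + 1);
    }

With the flags shown in the hunks this builds as, e.g., gcc -fPIC -I$(pg_config --includedir-server) -c foo.c followed by gcc -shared -o foo.so foo.o, after which CREATE FUNCTION add_one(integer) RETURNS integer AS '/path/to/foo', 'add_one' LANGUAGE C STRICT makes it callable.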
diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml
index dead4c3f86..f13a0e999f 100644
--- a/doc/src/sgml/ecpg.sgml
+++ b/doc/src/sgml/ecpg.sgml
@@ -240,8 +240,7 @@ EXEC SQL AT <replaceable>connection-name</replaceable> SELECT ...;
<para>
If your application uses multiple threads of execution, they cannot share a
connection concurrently. You must either explicitly control access to the connection
- (using mutexes) or use a connection for each thread. If each thread uses its own connection,
- you will need to use the AT clause to specify which connection the thread will use.
+ (using mutexes) or use a connection for each thread.
</para>
<para>
@@ -251,7 +250,7 @@ EXEC SQL AT <replaceable>connection-name</replaceable> SELECT ...;
EXEC SQL SET CONNECTION <replaceable>connection-name</replaceable>;
</programlisting>
This option is particularly convenient if many statements are to be
- executed on the same connection. It is not thread-aware.
+ executed on the same connection.
</para>
<para>
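
[Note on the ecpg.sgml changes above] One possible shape for the one-connection-per-thread approach the revised text recommends: each thread opens its own named connection and directs statements to it with the AT clause. This is a hedged sketch, not taken from the docs; the database name testdb and the connection names are invented, error checking is omitted, and it assumes an ecpg/libecpg built with thread safety enabled (preprocess the .pgc with ecpg, then compile the generated C with -pthread and link with -lecpg).

    #include <pthread.h>
    #include <stdio.h>

    EXEC SQL INCLUDE sqlca;

    /* Each thread opens, uses, and closes its own named connection. */
    static void *
    worker(void *arg)
    {
        EXEC SQL BEGIN DECLARE SECTION;
        char   *conn = (char *) arg;        /* invented names: "conn0", "conn1" */
        int     result;
        EXEC SQL END DECLARE SECTION;

        EXEC SQL CONNECT TO testdb AS :conn;            /* hypothetical database */
        EXEC SQL AT :conn SELECT 41 + 1 INTO :result;
        printf("%s: %d\n", conn, result);
        EXEC SQL DISCONNECT :conn;
        return NULL;
    }

    int
    main(void)
    {
        pthread_t   t0, t1;

        pthread_create(&t0, NULL, worker, "conn0");
        pthread_create(&t1, NULL, worker, "conn1");
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);
        return 0;
    }

Giving each connection a distinct name and always qualifying statements with AT avoids any reliance on the per-thread "current connection" state, which is the sort of sharing the revised paragraph warns against.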
diff --git a/doc/src/sgml/external-projects.sgml b/doc/src/sgml/external-projects.sgml
index 8457426545..82aaad4e4b 100644
--- a/doc/src/sgml/external-projects.sgml
+++ b/doc/src/sgml/external-projects.sgml
@@ -70,7 +70,7 @@
<row>
<entry>JDBC</entry>
- <entry>JDBC</entry>
+ <entry>Java</entry>
<entry>Type 4 JDBC driver</entry>
<entry><ulink url="https://jdbc.postgresql.org/"></ulink></entry>
</row>
@@ -83,6 +83,13 @@
</row>
<row>
+ <entry>node-postgres</entry>
+ <entry>JavaScript</entry>
+ <entry>Node.js driver</entry>
+ <entry><ulink url="https://node-postgres.com/"></ulink></entry>
+ </row>
+
+ <row>
<entry>Npgsql</entry>
<entry>.NET</entry>
<entry>.NET data provider</entry>
@@ -97,6 +104,13 @@
</row>
<row>
+ <entry>pq</entry>
+ <entry>Go</entry>
+ <entry>Pure Go driver for Go's database/sql</entry>
+ <entry><ulink url="https://github.com/lib/pq"></ulink></entry>
+ </row>
+
+ <row>
<entry>psqlODBC</entry>
<entry>ODBC</entry>
<entry>ODBC driver</entry>
@@ -172,19 +186,13 @@
<row>
<entry>PL/Java</entry>
<entry>Java</entry>
- <entry><ulink url="https://github.com/tada/pljava"></ulink></entry>
+ <entry><ulink url="https://tada.github.io/pljava/"></ulink></entry>
</row>
<row>
- <entry>PL/PHP</entry>
- <entry>PHP</entry>
- <entry><ulink url="https://public.commandprompt.com/projects/plphp"></ulink></entry>
- </row>
-
- <row>
- <entry>PL/Py</entry>
- <entry>Python</entry>
- <entry><ulink url="http://python.projects.postgresql.org/backend/"></ulink></entry>
+ <entry>PL/Lua</entry>
+ <entry>Lua</entry>
+ <entry><ulink url="https://github.com/pllua/pllua"></ulink></entry>
</row>
<row>
@@ -194,22 +202,16 @@
</row>
<row>
- <entry>PL/Ruby</entry>
- <entry>Ruby</entry>
- <entry><ulink url="http://raa.ruby-lang.org/project/pl-ruby/"></ulink></entry>
- </row>
-
- <row>
- <entry>PL/Scheme</entry>
- <entry>Scheme</entry>
- <entry><ulink url="http://plscheme.projects.postgresql.org/"></ulink></entry>
- </row>
-
- <row>
<entry>PL/sh</entry>
<entry>Unix shell</entry>
<entry><ulink url="https://github.com/petere/plsh"></ulink></entry>
</row>
+
+ <row>
+ <entry>PL/v8</entry>
+ <entry>JavaScript</entry>
+ <entry><ulink url="https://github.com/plv8/plv8"></ulink></entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index 7c5cbab2a2..58c1858121 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -4361,7 +4361,7 @@ SELECT (regexp_match('foobarbequebaz', 'bar.*que'))[1];
<para>
Some examples:
<programlisting>
- SELECT regexp_matches('foo', 'not there');
+SELECT regexp_matches('foo', 'not there');
regexp_matches
----------------
(0 rows)
diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml
index 48d5525b92..01f46d39b6 100644
--- a/doc/src/sgml/high-availability.sgml
+++ b/doc/src/sgml/high-availability.sgml
@@ -1175,7 +1175,7 @@ synchronous_standby_names = 'FIRST 2 (s1, s2, s3)'
An example of <varname>synchronous_standby_names</> for
a quorum-based multiple synchronous standbys is:
<programlisting>
- synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
+synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
</programlisting>
In this example, if four standby servers <literal>s1</>, <literal>s2</>,
<literal>s3</> and <literal>s4</> are running, transaction commits will
@@ -1829,7 +1829,7 @@ if (!triggered)
<para>
In normal operation, <quote>read-only</> transactions are allowed to
- update sequences and to use <command>LISTEN</>, <command>UNLISTEN</>, and
+ use <command>LISTEN</>, <command>UNLISTEN</>, and
<command>NOTIFY</>, so Hot Standby sessions operate under slightly tighter
restrictions than ordinary read-only sessions. It is possible that some
of these restrictions might be loosened in a future release.
diff --git a/doc/src/sgml/information_schema.sgml b/doc/src/sgml/information_schema.sgml
index 02f7927436..b85849b258 100644
--- a/doc/src/sgml/information_schema.sgml
+++ b/doc/src/sgml/information_schema.sgml
@@ -1602,31 +1602,47 @@
<row>
<entry><literal>identity_start</literal></entry>
<entry><type>character_data</type></entry>
- <entry>Applies to a feature not available in <productname>PostgreSQL</></entry>
+ <entry>
+ If the column is an identity column, then the start value of the
+ internal sequence, else null.
+ </entry>
</row>
<row>
<entry><literal>identity_increment</literal></entry>
<entry><type>character_data</type></entry>
- <entry>Applies to a feature not available in <productname>PostgreSQL</></entry>
+ <entry>
+ If the column is an identity column, then the increment of the internal
+ sequence, else null.
+ </entry>
</row>
<row>
<entry><literal>identity_maximum</literal></entry>
<entry><type>character_data</type></entry>
- <entry>Applies to a feature not available in <productname>PostgreSQL</></entry>
+ <entry>
+ If the column is an identity column, then the maximum value of the
+ internal sequence, else null.
+ </entry>
</row>
<row>
<entry><literal>identity_minimum</literal></entry>
<entry><type>character_data</type></entry>
- <entry>Applies to a feature not available in <productname>PostgreSQL</></entry>
+ <entry>
+ If the column is an identity column, then the minimum value of the
+ internal sequence, else null.
+ </entry>
</row>
<row>
<entry><literal>identity_cycle</literal></entry>
<entry><type>yes_or_no</type></entry>
- <entry>Applies to a feature not available in <productname>PostgreSQL</></entry>
+ <entry>
+ If the column is an identity column, then <literal>YES</literal> if the
+ internal sequence cycles or <literal>NO</literal> if it does not;
+ otherwise null.
+ </entry>
</row>
<row>
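   For illustration, the identity-column fields described above can be
   inspected like this; the table is a made-up example:
<programlisting>
CREATE TABLE items (
    id   integer GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 5),
    name text
);

SELECT identity_start, identity_increment, identity_cycle
  FROM information_schema.columns
 WHERE table_name = 'items' AND column_name = 'id';
</programlisting>
   This should report 10, 5, and NO for the column's internal sequence.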
diff --git a/doc/src/sgml/keywords.sgml b/doc/src/sgml/keywords.sgml
index 7b1f8a149c..a369a5b92b 100644
--- a/doc/src/sgml/keywords.sgml
+++ b/doc/src/sgml/keywords.sgml
@@ -794,7 +794,7 @@
</row>
<row>
<entry><token>COLUMNS</token></entry>
- <entry></entry>
+ <entry>non-reserved</entry>
<entry>non-reserved</entry>
<entry>non-reserved</entry>
<entry></entry>
@@ -2013,7 +2013,7 @@
</row>
<row>
<entry><token>GENERATED</token></entry>
- <entry></entry>
+ <entry>non-reserved</entry>
<entry>non-reserved</entry>
<entry>non-reserved</entry>
<entry></entry>
@@ -2601,13 +2601,6 @@
<entry></entry>
</row>
<row>
- <entry><token>LIST</token></entry>
- <entry>non-reserved</entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
<entry><token>LISTEN</token></entry>
<entry>non-reserved</entry>
<entry></entry>
@@ -2946,7 +2939,7 @@
</row>
<row>
<entry><token>NEW</token></entry>
- <entry></entry>
+ <entry>non-reserved</entry>
<entry>reserved</entry>
<entry>reserved</entry>
<entry></entry>
@@ -3170,7 +3163,7 @@
</row>
<row>
<entry><token>OLD</token></entry>
- <entry></entry>
+ <entry>non-reserved</entry>
<entry>reserved</entry>
<entry>reserved</entry>
<entry></entry>
@@ -3296,7 +3289,7 @@
</row>
<row>
<entry><token>OVERRIDING</token></entry>
- <entry></entry>
+ <entry>non-reserved</entry>
<entry>non-reserved</entry>
<entry>non-reserved</entry>
<entry></entry>
@@ -3631,6 +3624,13 @@
<entry>reserved</entry>
</row>
<row>
+ <entry><token>PUBLICATION</token></entry>
+ <entry>non-reserved</entry>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
<entry><token>QUOTE</token></entry>
<entry>non-reserved</entry>
<entry></entry>
@@ -3716,7 +3716,7 @@
</row>
<row>
<entry><token>REFERENCING</token></entry>
- <entry></entry>
+ <entry>non-reserved</entry>
<entry>reserved</entry>
<entry>reserved</entry>
<entry></entry>
@@ -4082,6 +4082,13 @@
<entry>reserved</entry>
</row>
<row>
+ <entry><token>SCHEMAS</token></entry>
+ <entry>non-reserved</entry>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
<entry><token>SCHEMA_NAME</token></entry>
<entry></entry>
<entry>non-reserved</entry>
@@ -4523,6 +4530,13 @@
<entry></entry>
</row>
<row>
+ <entry><token>SUBSCRIPTION</token></entry>
+ <entry>non-reserved</entry>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
<entry><token>SUBSTRING</token></entry>
<entry>non-reserved (cannot be function or type)</entry>
<entry>reserved</entry>
@@ -5357,7 +5371,7 @@
</row>
<row>
<entry><token>XMLNAMESPACES</token></entry>
- <entry></entry>
+ <entry>non-reserved (cannot be function or type)</entry>
<entry>reserved</entry>
<entry>reserved</entry>
<entry></entry>
@@ -5406,7 +5420,7 @@
</row>
<row>
<entry><token>XMLTABLE</token></entry>
- <entry></entry>
+ <entry>non-reserved (cannot be function or type)</entry>
<entry>reserved</entry>
<entry>reserved</entry>
<entry></entry>
diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml
index 67647e1f35..62a4303a6b 100644
--- a/doc/src/sgml/libpq.sgml
+++ b/doc/src/sgml/libpq.sgml
@@ -5947,12 +5947,12 @@ char *PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user,
<listitem>
<para>
Prepares the md5-encrypted form of a <productname>PostgreSQL</> password.
- <synopsis>
+<synopsis>
char *PQencryptPassword(const char *passwd, const char *user);
- </synopsis>
- <function>PQencryptPassword</> is an older, deprecated version of
+</synopsis>
+ <function>PQencryptPassword</> is an older, deprecated version of
<function>PQencryptPasswordConn</>. The difference is that
- <function>PQencryptPassword</> does not
+ <function>PQencryptPassword</> does not
require a connection object, and <literal>md5</> is always used as the
encryption algorithm.
</para>
diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index 79ca45a156..9ff5eea038 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -802,7 +802,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<row>
<entry><structfield>backend_type</structfield></entry>
<entry><type>text</type></entry>
- <entry>Type of current backend. Possible types are
+ <entry>Type of current backend. Possible types are
<literal>autovacuum launcher</>, <literal>autovacuum worker</>,
<literal>background worker</>, <literal>background writer</>,
<literal>client backend</>, <literal>checkpointer</>,
@@ -1827,7 +1827,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
the standby to catch up with the sending server assuming the current
rate of replay. Such a system would show similar times while new WAL is
being generated, but would differ when the sender becomes idle. In
- particular, when the standby has caught up completely,
+ particular, when the standby has caught up completely,
<structname>pg_stat_replication</structname> shows the time taken to
write, flush and replay the most recent reported WAL location rather than
zero as some users might expect. This is consistent with the goal of
diff --git a/doc/src/sgml/parallel.sgml b/doc/src/sgml/parallel.sgml
index a65129078c..ff31e7537e 100644
--- a/doc/src/sgml/parallel.sgml
+++ b/doc/src/sgml/parallel.sgml
@@ -275,7 +275,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
<para>
In a <emphasis>parallel sequential scan</>, the table's blocks will
be divided among the cooperating processes. Blocks are handed out one
- at a time, so that access to the table remains sequential.
+ at a time, so that access to the table remains sequential.
</para>
</listitem>
<listitem>
diff --git a/doc/src/sgml/pgstattuple.sgml b/doc/src/sgml/pgstattuple.sgml
index 63412b03ef..d2bc7916d9 100644
--- a/doc/src/sgml/pgstattuple.sgml
+++ b/doc/src/sgml/pgstattuple.sgml
@@ -124,13 +124,13 @@ free_percent | 1.95
</table>
<note>
- <para>
- The <literal>table_len</literal> will always be greater than the sum
- of the <literal>tuple_len</literal>, <literal>dead_tuple_len</literal>
- and <literal>free_space</literal>. The difference is accounted for by
- fixed page overhead, the per-page table of pointers to tuples, and
- padding to ensure that tuples are correctly aligned.
- </para>
+ <para>
+ The <literal>table_len</literal> will always be greater than the sum
+ of the <literal>tuple_len</literal>, <literal>dead_tuple_len</literal>
+ and <literal>free_space</literal>. The difference is accounted for by
+ fixed page overhead, the per-page table of pointers to tuples, and
+ padding to ensure that tuples are correctly aligned.
+ </para>
</note>
<para>
diff --git a/doc/src/sgml/postgres-fdw.sgml b/doc/src/sgml/postgres-fdw.sgml
index 3dfc0f84ed..23558e7ec0 100644
--- a/doc/src/sgml/postgres-fdw.sgml
+++ b/doc/src/sgml/postgres-fdw.sgml
@@ -566,7 +566,7 @@
</para>
</listitem>
</itemizedlist>
- These are less likely to be problematic than <varname>search_path</>, but
+ These are less likely to be problematic than <varname>search_path</>, but
can be handled with function <literal>SET</> options if the need arises.
</para>
diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml
index 4837be5016..a7a3d3b2f9 100644
--- a/doc/src/sgml/protocol.sgml
+++ b/doc/src/sgml/protocol.sgml
@@ -1352,7 +1352,7 @@ general, while the next subsection gives more details on SCRAM-SHA-256.
<title>SASL Authentication Message Flow</title>
<step id="sasl-auth-begin">
-<para>
+<para>
To begin a SASL authentication exchange, the server sends an
AuthenticationSASL message. It includes a list of SASL authentication
mechanisms that the server can accept, in the server's preferred order.
@@ -1401,7 +1401,7 @@ ErrorMessage.
<para>
<firstterm>SCRAM-SHA-256</> (called just <firstterm>SCRAM</> from now on) is
the only implemented SASL mechanism, at the moment. It is described in detail
- in RFC 7677 and RFC 5802.
+ in RFC 7677 and RFC 5802.
</para>
<para>
diff --git a/doc/src/sgml/ref/alter_aggregate.sgml b/doc/src/sgml/ref/alter_aggregate.sgml
index 3aa7c259da..7b7616ca01 100644
--- a/doc/src/sgml/ref/alter_aggregate.sgml
+++ b/doc/src/sgml/ref/alter_aggregate.sgml
@@ -179,8 +179,7 @@ ALTER AGGREGATE mypercentile(float8 ORDER BY integer) SET SCHEMA myschema;
This will work too:
<programlisting>
ALTER AGGREGATE mypercentile(float8, integer) SET SCHEMA myschema;
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/alter_publication.sgml b/doc/src/sgml/ref/alter_publication.sgml
index 7b8f114f54..f064ec5f32 100644
--- a/doc/src/sgml/ref/alter_publication.sgml
+++ b/doc/src/sgml/ref/alter_publication.sgml
@@ -34,28 +34,40 @@ ALTER PUBLICATION <replaceable class="PARAMETER">name</replaceable> RENAME TO <r
<title>Description</title>
<para>
- The first variant of this command listed in the synopsis can change
+ The command <command>ALTER PUBLICATION</command> can change the attributes
+ of a publication.
+ </para>
+
+ <para>
+ The first three variants change which tables are part of the publication.
+ The <literal>SET TABLE</literal> clause will replace the list of tables in
+ the publication with the specified one. The <literal>ADD TABLE</literal>
+ and <literal>DROP TABLE</literal> clauses will add and remove one or more
+ tables from the publication. Note that adding tables to a publication that
+ is already subscribed to will require an <literal>ALTER SUBSCRIPTION
+ ... REFRESH PUBLICATION</literal> action on the subscribing side in order
+ to become effective.
+ </para>
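   A sketch of the add-then-refresh sequence described above, with made-up
   publication and subscription names:
<programlisting>
-- on the publishing server
ALTER PUBLICATION mypublication ADD TABLE orders;

-- on each subscriber, to make the addition take effect
ALTER SUBSCRIPTION mysub REFRESH PUBLICATION;
</programlisting>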
+
+ <para>
+ The fourth variant of this command listed in the synopsis can change
all of the publication properties specified in
<xref linkend="sql-createpublication">. Properties not mentioned in the
command retain their previous settings.
</para>
<para>
+ The remaining variants change the owner and the name of the publication.
+ </para>
+
+ <para>
+ You must own the publication to use <command>ALTER PUBLICATION</command>.
To alter the owner, you must also be a direct or indirect member of the new
owning role. The new owner must have <literal>CREATE</literal> privilege on
the database. Also, the new owner of a <literal>FOR ALL TABLES</literal>
publication must be a superuser. However, a superuser can change the
ownership of a publication while circumventing these restrictions.
</para>
-
- <para>
- The other variants of this command deal with the table membership of the
- publication. The <literal>SET TABLE</literal> clause will replace the
- list of tables in the publication with the specified one.
- The <literal>ADD TABLE</literal> and
- <literal>DROP TABLE</literal> will add and remove one or more tables from
- the publication.
- </para>
</refsect1>
<refsect1>
@@ -128,8 +140,7 @@ ALTER PUBLICATION noinsert SET (publish = 'update, delete');
Add some tables to the publication:
<programlisting>
ALTER PUBLICATION mypublication ADD TABLE users, departments;
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
@@ -147,6 +158,8 @@ ALTER PUBLICATION mypublication ADD TABLE users, departments;
<simplelist type="inline">
<member><xref linkend="sql-createpublication"></member>
<member><xref linkend="sql-droppublication"></member>
+ <member><xref linkend="sql-createsubscription"></member>
+ <member><xref linkend="sql-altersubscription"></member>
</simplelist>
</refsect1>
</refentry>
diff --git a/doc/src/sgml/ref/alter_sequence.sgml b/doc/src/sgml/ref/alter_sequence.sgml
index 30e5316b8c..3a04d07ecc 100644
--- a/doc/src/sgml/ref/alter_sequence.sgml
+++ b/doc/src/sgml/ref/alter_sequence.sgml
@@ -171,7 +171,7 @@ ALTER SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</replaceable> S
<para>
The optional clause <literal>RESTART [ WITH <replaceable
class="parameter">restart</replaceable> ]</literal> changes the
- current value of the sequence. This is equivalent to calling the
+ current value of the sequence. This is similar to calling the
<function>setval</> function with <literal>is_called</literal> =
<literal>false</>: the specified value will be returned by the
<emphasis>next</> call of <function>nextval</>.
@@ -182,11 +182,11 @@ ALTER SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</replaceable> S
</para>
<para>
- Like a <function>setval</function> call, a <literal>RESTART</literal>
- operation on a sequence is never rolled back, to avoid blocking of
- concurrent transactions that obtain numbers from the same sequence.
- (The other clauses cause ordinary catalog updates that can be rolled
- back.)
+ In contrast to a <function>setval</function> call,
+ a <literal>RESTART</literal> operation on a sequence is transactional
+ and blocks concurrent transactions from obtaining numbers from the
+ same sequence. If that's not the desired mode of
+ operation, <function>setval</> should be used.
</para>
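   For illustration, the two statements below leave the (made-up) sequence
   in a similar state, so that the next <function>nextval</> call returns
   105; per the paragraph above, they differ in their transactional
   behavior:
<programlisting>
ALTER SEQUENCE serial RESTART WITH 105;

SELECT setval('serial', 105, false);
</programlisting>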
</listitem>
</varlistentry>
@@ -307,8 +307,7 @@ ALTER SEQUENCE [ IF EXISTS ] <replaceable class="parameter">name</replaceable> S
<para>
<command>ALTER SEQUENCE</command> blocks
concurrent <function>nextval</function>, <function>currval</function>,
- <function>lastval</function>, and <command>setval</command> calls, except
- if only the <literal>RESTART</literal> clause is used.
+ <function>lastval</function>, and <command>setval</command> calls.
</para>
<para>
diff --git a/doc/src/sgml/ref/alter_subscription.sgml b/doc/src/sgml/ref/alter_subscription.sgml
index 113e32bfd0..b1b7765d76 100644
--- a/doc/src/sgml/ref/alter_subscription.sgml
+++ b/doc/src/sgml/ref/alter_subscription.sgml
@@ -22,7 +22,7 @@ PostgreSQL documentation
<refsynopsisdiv>
<synopsis>
ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> CONNECTION '<replaceable>conninfo</replaceable>'
-ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> SET PUBLICATION <replaceable class="PARAMETER">publication_name</replaceable> [, ...] { REFRESH [ WITH ( <replaceable class="PARAMETER">refresh_option</replaceable> [= <replaceable class="PARAMETER">value</replaceable>] [, ... ] ) ] | SKIP REFRESH }
+ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> SET PUBLICATION <replaceable class="PARAMETER">publication_name</replaceable> [, ...] [ WITH ( <replaceable class="PARAMETER">set_publication_option</replaceable> [= <replaceable class="PARAMETER">value</replaceable>] [, ... ] ) ]
ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> REFRESH PUBLICATION [ WITH ( <replaceable class="PARAMETER">refresh_option</replaceable> [= <replaceable class="PARAMETER">value</replaceable>] [, ... ] ) ]
ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> ENABLE
ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> DISABLE
@@ -42,8 +42,11 @@ ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> RENAME TO <
</para>
<para>
+ You must own the subscription to use <command>ALTER SUBSCRIPTION</>.
To alter the owner, you must also be a direct or indirect member of the
new owning role. The new owner has to be a superuser.
+ (Currently, all subscription owners must be superusers, so the owner checks
+ will be bypassed in practice. But this might change in the future.)
</para>
</refsect1>
@@ -77,18 +80,29 @@ ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> RENAME TO <
<para>
Changes list of subscribed publications. See
<xref linkend="SQL-CREATESUBSCRIPTION"> for more information.
+ By default this command will also act like <literal>REFRESH
+ PUBLICATION</literal>.
</para>
<para>
- When <literal>REFRESH</literal> is specified, this command will also act
- like <literal>REFRESH
- PUBLICATION</literal>. <literal>refresh_option</literal> specifies
- additional options for the refresh operation, as described
- under <literal>REFRESH PUBLICATION</literal>. When
- <literal>SKIP REFRESH</literal> is specified, the command will not try
- to refresh table information. Note that
- either <literal>REFRESH</literal> or <literal>SKIP REFRESH</literal>
- must be specified.
+ <replaceable>set_publication_option</replaceable> specifies additional
+ options for this operation. The supported options are:
+
+ <variablelist>
+ <varlistentry>
+ <term><literal>refresh</literal> (<type>boolean</type>)</term>
+ <listitem>
+ <para>
+ When false, the command will not try to refresh table information.
+ <literal>REFRESH PUBLICATION</literal> should then be executed separately.
+ The default is <literal>true</literal>.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ Additionally, refresh options as described
+ under <literal>REFRESH PUBLICATION</literal> may be specified.
</para>
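   A sketch of deferring the refresh with the option described above; the
   subscription and publication names are made up:
<programlisting>
ALTER SUBSCRIPTION mysub SET PUBLICATION pub1, pub2
    WITH (refresh = false);

-- pick up the table information separately, at a convenient time
ALTER SUBSCRIPTION mysub REFRESH PUBLICATION;
</programlisting>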
</listitem>
</varlistentry>
@@ -104,7 +118,7 @@ ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> RENAME TO <
</para>
<para>
- <literal>refresh_option</literal> specifies additional options for the
+ <replaceable>refresh_option</replaceable> specifies additional options for the
refresh operation. The supported options are:
<variablelist>
@@ -182,7 +196,7 @@ ALTER SUBSCRIPTION <replaceable class="PARAMETER">name</replaceable> RENAME TO <
Change the publication subscribed by a subscription to
<literal>insert_only</literal>:
<programlisting>
-ALTER SUBSCRIPTION mysub SET PUBLICATION insert_only REFRESH;
+ALTER SUBSCRIPTION mysub SET PUBLICATION insert_only;
</programlisting>
</para>
@@ -190,8 +204,7 @@ ALTER SUBSCRIPTION mysub SET PUBLICATION insert_only REFRESH;
Disable (stop) the subscription:
<programlisting>
ALTER SUBSCRIPTION mysub DISABLE;
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/alter_system.sgml b/doc/src/sgml/ref/alter_system.sgml
index c1d27b6564..b234793f3e 100644
--- a/doc/src/sgml/ref/alter_system.sgml
+++ b/doc/src/sgml/ref/alter_system.sgml
@@ -119,8 +119,7 @@ ALTER SYSTEM SET wal_level = replica;
in <filename>postgresql.conf</>:
<programlisting>
ALTER SYSTEM RESET wal_level;
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index 1dfbf6d3c8..c2a484186f 100755
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -185,7 +185,7 @@ ALTER TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
table. Even if there is no <literal>NOT NULL</> constraint on the
parent, such a constraint can still be added to individual partitions,
if desired; that is, the children can disallow nulls even if the parent
- allows them, but not the other way around.
+ allows them, but not the other way around.
</para>
</listitem>
</varlistentry>
@@ -617,7 +617,7 @@ ALTER TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
</para>
<para>
- <literal>SHARE UPDATE EXCLUSIVE</literal> lock will be taken for
+ <literal>SHARE UPDATE EXCLUSIVE</literal> lock will be taken for
fillfactor and autovacuum storage parameters, as well as the
following planner related parameters:
effective_io_concurrency, parallel_workers, seq_page_cost
@@ -1605,7 +1605,7 @@ ALTER TABLE cities
<para>
Detach a partition from partitioned table:
<programlisting>
-ALTER TABLE cities
+ALTER TABLE measurement
DETACH PARTITION measurement_y2015m12;
</programlisting></para>
diff --git a/doc/src/sgml/ref/alter_type.sgml b/doc/src/sgml/ref/alter_type.sgml
index fdb4f3367d..d65f70f674 100644
--- a/doc/src/sgml/ref/alter_type.sgml
+++ b/doc/src/sgml/ref/alter_type.sgml
@@ -356,8 +356,7 @@ ALTER TYPE colors ADD VALUE 'orange' AFTER 'red';
To rename an enum value:
<programlisting>
ALTER TYPE colors RENAME VALUE 'purple' TO 'mauve';
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/copy.sgml b/doc/src/sgml/ref/copy.sgml
index 413e69f4c5..48f0c5c751 100644
--- a/doc/src/sgml/ref/copy.sgml
+++ b/doc/src/sgml/ref/copy.sgml
@@ -431,7 +431,7 @@ COPY <replaceable class="parameter">count</replaceable>
</para>
<para>
- If row-level security is enabled for the table, the relevant
+ If row-level security is enabled for the table, the relevant
<command>SELECT</command> policies will apply to <literal>COPY
<replaceable class="parameter">table</> TO</literal> statements.
Currently, <command>COPY FROM</command> is not supported for tables
diff --git a/doc/src/sgml/ref/create_access_method.sgml b/doc/src/sgml/ref/create_access_method.sgml
index 0a30e6ea3c..891926dba5 100644
--- a/doc/src/sgml/ref/create_access_method.sgml
+++ b/doc/src/sgml/ref/create_access_method.sgml
@@ -93,8 +93,7 @@ CREATE ACCESS METHOD <replaceable class="parameter">name</replaceable>
handler function <literal>heptree_handler</>:
<programlisting>
CREATE ACCESS METHOD heptree TYPE INDEX HANDLER heptree_handler;
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/create_publication.sgml b/doc/src/sgml/ref/create_publication.sgml
index 48be476374..c5299dd74e 100644
--- a/doc/src/sgml/ref/create_publication.sgml
+++ b/doc/src/sgml/ref/create_publication.sgml
@@ -191,8 +191,7 @@ CREATE PUBLICATION alltables FOR ALL TABLES;
<programlisting>
CREATE PUBLICATION insert_only FOR TABLE mydata
WITH (publish = 'insert');
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/create_subscription.sgml b/doc/src/sgml/ref/create_subscription.sgml
index 2c91eb6f50..77bf87681b 100644
--- a/doc/src/sgml/ref/create_subscription.sgml
+++ b/doc/src/sgml/ref/create_subscription.sgml
@@ -226,6 +226,19 @@ CREATE SUBSCRIPTION <replaceable class="PARAMETER">subscription_name</replaceabl
how to configure access control between the subscription and the
publication instance.
</para>
+
+ <para>
+ Creating a subscription that connects to the same database cluster (for
+ example, to replicate between databases in the same cluster or to replicate
+ within the same database) will only succeed if the replication slot is not
+ created as part of the same command. Otherwise, the <command>CREATE
+ SUBSCRIPTION</command> call will hang. To make this work, create the
+ replication slot separately (using the
+ function <function>pg_create_logical_replication_slot</function> with the
+ plugin name <literal>pgoutput</literal>) and create the subscription using
+ the parameter <literal>create_slot = false</literal>. This is an
+ implementation restriction that might be lifted in a future release.
+ </para>
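   A minimal sketch of that workaround, assuming a made-up subscription
   <literal>mysub</> for a publication <literal>mypub</> in database
   <literal>mydb</> on the same cluster:
<programlisting>
-- create the slot manually with the pgoutput plugin
SELECT pg_create_logical_replication_slot('mysub', 'pgoutput');

-- then create the subscription without creating a slot
CREATE SUBSCRIPTION mysub
    CONNECTION 'dbname=mydb'
    PUBLICATION mypub
    WITH (create_slot = false);
</programlisting>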
</refsect1>
<refsect1>
@@ -252,8 +265,7 @@ CREATE SUBSCRIPTION mysub
CONNECTION 'host=192.168.1.50 port=5432 user=foo dbname=foodb'
PUBLICATION insert_only
WITH (enabled = false);
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 8d1e4ee487..5a3821b25e 100755
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -98,8 +98,9 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
<phrase>and <replaceable class="PARAMETER">partition_bound_spec</replaceable> is:</phrase>
-{ IN ( { <replaceable class="PARAMETER">bound_literal</replaceable> | NULL } [, ...] ) |
- FROM ( { <replaceable class="PARAMETER">bound_literal</replaceable> | UNBOUNDED } [, ...] ) TO ( { <replaceable class="PARAMETER">bound_literal</replaceable> | UNBOUNDED } [, ...] ) }
+IN ( { <replaceable class="PARAMETER">numeric_literal</replaceable> | <replaceable class="PARAMETER">string_literal</replaceable> | NULL } [, ...] ) |
+FROM ( { <replaceable class="PARAMETER">numeric_literal</replaceable> | <replaceable class="PARAMETER">string_literal</replaceable> | UNBOUNDED } [, ...] )
+ TO ( { <replaceable class="PARAMETER">numeric_literal</replaceable> | <replaceable class="PARAMETER">string_literal</replaceable> | UNBOUNDED } [, ...] )
<phrase><replaceable class="PARAMETER">index_parameters</replaceable> in <literal>UNIQUE</literal>, <literal>PRIMARY KEY</literal>, and <literal>EXCLUDE</literal> constraints are:</phrase>
@@ -264,21 +265,34 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
<term><literal>PARTITION OF <replaceable class="PARAMETER">parent_table</replaceable> FOR VALUES <replaceable class="PARAMETER">partition_bound_spec</replaceable></literal></term>
<listitem>
<para>
- Creates the table as <firstterm>partition</firstterm> of the specified
+ Creates the table as a <firstterm>partition</firstterm> of the specified
parent table.
</para>
<para>
- The partition bound specification must correspond to the partitioning
- method and partition key of the parent table, and must not overlap with
- any existing partition of that parent.
+ The <replaceable class="PARAMETER">partition_bound_spec</replaceable>
+ must correspond to the partitioning method and partition key of the
+ parent table, and must not overlap with any existing partition of that
+ parent. The form with <literal>IN</> is used for list partitioning,
+ while the form with <literal>FROM</> and <literal>TO</> is used for
+ range partitioning.
</para>
<para>
- Each of the values specified in the partition bound specification is
+ Each of the values specified in
+ the <replaceable class="PARAMETER">partition_bound_spec</> is
a literal, <literal>NULL</literal>, or <literal>UNBOUNDED</literal>.
- A literal is either a numeric constant or a string constant that is
- coercible to the corresponding partition key column's type.
+ Each literal value must be either a numeric constant that is coercible
+ to the corresponding partition key column's type, or a string literal
+ that is valid input for that type.
+ </para>
+
+ <para>
+ When creating a list partition, <literal>NULL</literal> can be
+ specified to signify that the partition allows the partition key
+ column to be null. However, there cannot be more than one such
+ list partition for a given parent table. <literal>NULL</literal>
+ cannot be specified for range partitions.
</para>
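   For example (the table is made up), a list partition that accepts nulls
   in the key column could be declared as:
<programlisting>
CREATE TABLE cities (name text, population int)
    PARTITION BY LIST (name);

CREATE TABLE cities_unnamed PARTITION OF cities
    FOR VALUES IN (NULL);
</programlisting>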
<para>
@@ -286,30 +300,25 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
<literal>FROM</literal> is an inclusive bound, whereas the upper
bound specified with <literal>TO</literal> is an exclusive bound.
That is, the values specified in the <literal>FROM</literal> list
- are accepted values of the corresponding partition key columns in a
- given partition, whereas those in the <literal>TO</literal> list are
- not. To be precise, this applies only to the first of the partition
- key columns for which the corresponding values in the <literal>FROM</literal>
- and <literal>TO</literal> lists are not equal. All rows in a given
- partition contain the same values for all preceding columns, equal to
- those specified in <literal>FROM</literal> and <literal>TO</literal>
- lists. On the other hand, any subsequent columns are insignificant
- as far as implicit partition constraint is concerned.
+ are valid values of the corresponding partition key columns for this
+ partition, whereas those in the <literal>TO</literal> list are
+ not. Note that this statement must be understood according to the
+ rules of row-wise comparison (<xref linkend="row-wise-comparison">).
+ For example, given <literal>PARTITION BY RANGE (x,y)</>, a partition
+ bound <literal>FROM (1, 2) TO (3, 4)</literal>
+ allows <literal>x=1</> with any <literal>y&gt;=2</>,
+ <literal>x=2</> with any non-null <literal>y</>,
+ and <literal>x=3</> with any <literal>y&lt;4</>.
</para>
<para>
- Specifying <literal>UNBOUNDED</literal> in <literal>FROM</literal>
+ Writing <literal>UNBOUNDED</literal> in <literal>FROM</literal>
signifies <literal>-infinity</literal> as the lower bound of the
- corresponding column, whereas it signifies <literal>+infinity</literal>
- as the upper bound when specified in <literal>TO</literal>.
- </para>
-
- <para>
- When creating a list partition, <literal>NULL</literal> can be
- specified to signify that the partition allows the partition key
- column to be null. However, there cannot be more than one such
- list partition for a given parent table. <literal>NULL</literal>
- cannot be specified for range partitions.
+ corresponding column, whereas when written in <literal>TO</literal>,
+ it signifies <literal>+infinity</literal> as the upper bound.
+ All items following an <literal>UNBOUNDED</literal> item within
+ a <literal>FROM</literal> or <literal>TO</literal> list must also
+ be <literal>UNBOUNDED</literal>.
</para>
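   A sketch of the two-column range bounds discussed above, using made-up
   table names:
<programlisting>
CREATE TABLE measurement (x int, y int)
    PARTITION BY RANGE (x, y);

-- accepts x=1 with y &gt;= 2, x=2 with any non-null y, x=3 with y &lt; 4
CREATE TABLE measurement_mid PARTITION OF measurement
    FOR VALUES FROM (1, 2) TO (3, 4);

-- an open-ended lower partition using UNBOUNDED
CREATE TABLE measurement_low PARTITION OF measurement
    FOR VALUES FROM (UNBOUNDED, UNBOUNDED) TO (1, 2);
</programlisting>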
<para>
@@ -330,8 +339,9 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
<para>
Rows inserted into a partitioned table will be automatically routed to
the correct partition. If no suitable partition exists, an error will
- occur. Also, if updating a row in a given partition causes it to move
- to another partition due to the new partition key, an error will occur.
+ occur. Also, if updating a row in a given partition would require it
+ to move to another partition due to new partition key values, an error
+ will occur.
</para>
<para>
diff --git a/doc/src/sgml/ref/drop_aggregate.sgml b/doc/src/sgml/ref/drop_aggregate.sgml
index 631b578df7..dde1ea2444 100644
--- a/doc/src/sgml/ref/drop_aggregate.sgml
+++ b/doc/src/sgml/ref/drop_aggregate.sgml
@@ -160,8 +160,7 @@ DROP AGGREGATE myrank(VARIADIC "any" ORDER BY VARIADIC "any");
To remove multiple aggregate functions in one command:
<programlisting>
DROP AGGREGATE myavg(integer), myavg(bigint);
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/drop_policy.sgml b/doc/src/sgml/ref/drop_policy.sgml
index 69c87c0ade..f474692105 100644
--- a/doc/src/sgml/ref/drop_policy.sgml
+++ b/doc/src/sgml/ref/drop_policy.sgml
@@ -96,8 +96,7 @@ DROP POLICY [ IF EXISTS ] <replaceable class="parameter">name</replaceable> ON <
<programlisting>
DROP POLICY p1 ON my_table;
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/drop_publication.sgml b/doc/src/sgml/ref/drop_publication.sgml
index 1a1be579ad..517d142251 100644
--- a/doc/src/sgml/ref/drop_publication.sgml
+++ b/doc/src/sgml/ref/drop_publication.sgml
@@ -82,9 +82,7 @@ DROP PUBLICATION [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
Drop a publication:
<programlisting>
DROP PUBLICATION mypublication;
-</programlisting>
- </para>
-
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/drop_statistics.sgml b/doc/src/sgml/ref/drop_statistics.sgml
index ef659fca61..37fc402589 100644
--- a/doc/src/sgml/ref/drop_statistics.sgml
+++ b/doc/src/sgml/ref/drop_statistics.sgml
@@ -73,9 +73,7 @@ DROP STATISTICS [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
DROP STATISTICS IF EXISTS
accounting.users_uid_creation,
public.grants_user_role;
-</programlisting>
- </para>
-
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/drop_subscription.sgml b/doc/src/sgml/ref/drop_subscription.sgml
index 4f34a35eef..f535c000c4 100644
--- a/doc/src/sgml/ref/drop_subscription.sgml
+++ b/doc/src/sgml/ref/drop_subscription.sgml
@@ -74,15 +74,35 @@ DROP SUBSCRIPTION [ IF EXISTS ] <replaceable class="parameter">name</replaceable
</refsect1>
<refsect1>
+ <title>Notes</title>
+
+ <para>
+ When dropping a subscription that is associated with a replication slot on
+ the remote host (the normal state), <command>DROP SUBSCRIPTION</command>
+ will connect to the remote host and try to drop the replication slot as
+ part of its operation. This is necessary so that the resources allocated
+ for the subscription on the remote host are released. If this fails,
+ either because the remote host is not reachable or because the remote
+ replication slot cannot be dropped, does not exist, or never existed,
+ the <command>DROP SUBSCRIPTION</command> command will fail. To proceed in
+ this situation, disassociate the subscription from the replication slot by
+ executing <literal>ALTER SUBSCRIPTION ... SET (slot_name = NONE)</literal>.
+ After that, <command>DROP SUBSCRIPTION</command> will no longer attempt any
+ actions on a remote host. Note that if the remote replication slot still
+ exists, it should then be dropped manually; otherwise it will continue to
+ reserve WAL and might eventually cause the disk to fill up. See
+ also <xref linkend="logical-replication-subscription-slot">.
+ </para>
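   A sketch of that recovery sequence; the subscription name is made up,
   and the slot is assumed to have the default name:
<programlisting>
-- on the subscriber: detach the subscription from its slot and drop it
ALTER SUBSCRIPTION mysub DISABLE;
ALTER SUBSCRIPTION mysub SET (slot_name = NONE);
DROP SUBSCRIPTION mysub;

-- on the remote host, remove the slot manually if it still exists
SELECT pg_drop_replication_slot('mysub');
</programlisting>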
+ </refsect1>
+
+ <refsect1>
<title>Examples</title>
<para>
Drop a subscription:
<programlisting>
DROP SUBSCRIPTION mysub;
-</programlisting>
- </para>
-
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/import_foreign_schema.sgml b/doc/src/sgml/ref/import_foreign_schema.sgml
index 331b362db7..b73dee9439 100644
--- a/doc/src/sgml/ref/import_foreign_schema.sgml
+++ b/doc/src/sgml/ref/import_foreign_schema.sgml
@@ -141,9 +141,7 @@ IMPORT FOREIGN SCHEMA foreign_films
<programlisting>
IMPORT FOREIGN SCHEMA foreign_films LIMIT TO (actors, directors)
FROM SERVER film_server INTO films;
-</programlisting>
- </para>
-
+</programlisting></para>
</refsect1>
<refsect1 id="SQL-IMPORTFOREIGNSCHEMA-compatibility">
diff --git a/doc/src/sgml/ref/initdb.sgml b/doc/src/sgml/ref/initdb.sgml
index 4aed43068e..e5b0490999 100644
--- a/doc/src/sgml/ref/initdb.sgml
+++ b/doc/src/sgml/ref/initdb.sgml
@@ -295,8 +295,8 @@ Datanode. A database
</varlistentry>
<varlistentry>
- <term><option>-T <replaceable>CFG</></option></term>
- <term><option>--text-search-config=<replaceable>CFG</></option></term>
+ <term><option>-T <replaceable>config</></option></term>
+ <term><option>--text-search-config=<replaceable>config</></option></term>
<listitem>
<para>
Sets the default text search configuration.
diff --git a/doc/src/sgml/ref/insert.sgml b/doc/src/sgml/ref/insert.sgml
index 95aa77b907..94dad00870 100644
--- a/doc/src/sgml/ref/insert.sgml
+++ b/doc/src/sgml/ref/insert.sgml
@@ -725,8 +725,7 @@ INSERT INTO distributors (did, dname) VALUES (9, 'Antwerp Design')
-- just use a regular unique constraint on "did"
INSERT INTO distributors (did, dname) VALUES (10, 'Conrad International')
ON CONFLICT (did) WHERE is_active DO NOTHING;
-</programlisting>
- </para>
+</programlisting></para>
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index bb0bf5d566..bafa031e1a 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -817,6 +817,20 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
+ <term><option>--no-sync</option></term>
+ <listitem>
+ <para>
+ By default, <command>pg_dump</command> will wait for all files
+ to be written safely to disk. This option causes
+ <command>pg_dump</command> to return without waiting, which is
+ faster, but means that a subsequent operating system crash can leave
+ the dump corrupt. Generally, this option is useful for testing
+ but should not be used when dumping data from a production installation.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>--no-synchronized-snapshots</></term>
<listitem>
<para>
@@ -857,20 +871,6 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>--no-sync</option></term>
- <listitem>
- <para>
- By default, <command>pg_dump</command> will wait for all files
- to be written safely to disk. This option causes
- <command>pg_dump</command> to return without waiting, which is
- faster, but means that a subsequent operating system crash can leave
- the dump corrupt. Generally, this option is useful for testing
- but should not be used when dumping data from production installation.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>--quote-all-identifiers</></term>
<listitem>
<para>
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index b45e813486..2ab570ad4c 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -333,23 +333,23 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>--no-role-passwords</option></term>
+ <term><option>--no-publications</option></term>
<listitem>
<para>
- Do not dump passwords for roles. When restored, roles will have a NULL
- password and authentication will always fail until the password is reset.
- Since password values aren't needed when this option is specified we
- use the catalog view pg_roles in preference to pg_authid, since access
- to pg_authid may be restricted by security policy.
+ Do not dump publications.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><option>--no-publications</option></term>
+ <term><option>--no-role-passwords</option></term>
<listitem>
<para>
- Do not dump publications.
+ Do not dump passwords for roles. When restored, roles will have a NULL
+ password and authentication will always fail until the password is reset.
+ Since password values aren't needed when this option is specified we
+ use the catalog view pg_roles in preference to pg_authid, since access
+ to pg_authid may be restricted by security policy.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/pg_recvlogical.sgml b/doc/src/sgml/ref/pg_recvlogical.sgml
index eaea94df8b..9c7bb1907b 100644
--- a/doc/src/sgml/ref/pg_recvlogical.sgml
+++ b/doc/src/sgml/ref/pg_recvlogical.sgml
@@ -114,6 +114,32 @@ PostgreSQL documentation
<variablelist>
<varlistentry>
+ <term><option>-E <replaceable>lsn</replaceable></option></term>
+ <term><option>--endpos=<replaceable>lsn</replaceable></option></term>
+ <listitem>
+ <para>
+ In <option>--start</option> mode, automatically stop replication
+ and exit with normal exit status 0 when receiving reaches the
+ specified LSN. If specified when not in <option>--start</option>
+ mode, an error is raised.
+ </para>
+
+ <para>
+ If there's a record with LSN exactly equal to <replaceable>lsn</>,
+ the record will be output.
+ </para>
+
+ <para>
+ The <option>--endpos</option> option is not aware of transaction
+ boundaries and may truncate output partway through a transaction.
+ Any partially output transaction will not be consumed and will be
+ replayed again when the slot is next read from. Individual messages
+ are never truncated.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>-f <replaceable>filename</replaceable></option></term>
<term><option>--file=<replaceable>filename</replaceable></option></term>
<listitem>
@@ -163,32 +189,6 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>-E <replaceable>lsn</replaceable></option></term>
- <term><option>--endpos=<replaceable>lsn</replaceable></option></term>
- <listitem>
- <para>
- In <option>--start</option> mode, automatically stop replication
- and exit with normal exit status 0 when receiving reaches the
- specified LSN. If specified when not in <option>--start</option>
- mode, an error is raised.
- </para>
-
- <para>
- If there's a record with LSN exactly equal to <replaceable>lsn</>,
- the record will be output.
- </para>
-
- <para>
- The <option>--endpos</option> option is not aware of transaction
- boundaries and may truncate output partway through a transaction.
- Any partially output transaction will not be consumed and will be
- replayed again when the slot is next read from. Individual messages
- are never truncated.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>--if-not-exists</option></term>
<listitem>
<para>
diff --git a/doc/src/sgml/ref/pgbench.sgml b/doc/src/sgml/ref/pgbench.sgml
index c3c8371c69..20912879b8 100644
--- a/doc/src/sgml/ref/pgbench.sgml
+++ b/doc/src/sgml/ref/pgbench.sgml
@@ -605,6 +605,16 @@ pgbench <optional> <replaceable>options</> </optional> <replaceable>dbname</>
</varlistentry>
<varlistentry>
+ <term><option>--log-prefix=<replaceable>prefix</></option></term>
+ <listitem>
+ <para>
+ Set the filename prefix for the log files created by
+ <option>--log</>. The default is <literal>pgbench_log</>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>--progress-timestamp</option></term>
<listitem>
<para>
@@ -635,16 +645,6 @@ pgbench <optional> <replaceable>options</> </optional> <replaceable>dbname</>
</listitem>
</varlistentry>
- <varlistentry>
- <term><option>--log-prefix=<replaceable>prefix</></option></term>
- <listitem>
- <para>
- Set the filename prefix for the log files created by
- <option>--log</>. The default is <literal>pgbench_log</>.
- </para>
- </listitem>
- </varlistentry>
-
</variablelist>
</para>
diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml
index 9e3e98865c..26b6ba14ba 100644
--- a/doc/src/sgml/ref/pgupgrade.sgml
+++ b/doc/src/sgml/ref/pgupgrade.sgml
@@ -44,9 +44,9 @@
<application>pg_upgrade</> (formerly called <application>pg_migrator</>) allows data
stored in <productname>PostgreSQL</> data files to be upgraded to a later <productname>PostgreSQL</>
major version without the data dump/reload typically required for
- major version upgrades, e.g. from 8.4.7 to the current major release
+ major version upgrades, e.g. from 9.6.3 to the current major release
of <productname>PostgreSQL</>. It is not required for minor version upgrades, e.g. from
- 9.0.1 to 9.0.4.
+ 9.6.2 to 9.6.3.
</para>
<para>
@@ -72,7 +72,7 @@
<para>
pg_upgrade supports upgrades from 8.4.X and later to the current
- major release of <productname>PostgreSQL</>, including snapshot and alpha releases.
+ major release of <productname>PostgreSQL</>, including snapshot and beta releases.
</para>
</refsect1>
@@ -213,7 +213,7 @@
<para>
If you are using a version-specific installation directory, e.g.
- <filename>/opt/PostgreSQL/9.1</>, you do not need to move the old cluster. The
+ <filename>/opt/PostgreSQL/&majorversion;</>, you do not need to move the old cluster. The
graphical installers all use version-specific installation directories.
</para>
@@ -304,15 +304,15 @@ make prefix=/usr/local/pgsql.new install
Make sure both database servers are stopped using, on Unix, e.g.:
<programlisting>
-pg_ctl -D /opt/PostgreSQL/8.4 stop
-pg_ctl -D /opt/PostgreSQL/9.0 stop
+pg_ctl -D /opt/PostgreSQL/9.6 stop
+pg_ctl -D /opt/PostgreSQL/&majorversion; stop
</programlisting>
or on Windows, using the proper service names:
<programlisting>
-NET STOP postgresql-8.4
-NET STOP postgresql-9.0
+NET STOP postgresql-9.6
+NET STOP postgresql-&majorversion;
</programlisting>
</para>
@@ -372,17 +372,17 @@ NET STOP postgresql-9.0
<programlisting>
RUNAS /USER:postgres "CMD.EXE"
-SET PATH=%PATH%;C:\Program Files\PostgreSQL\9.0\bin;
+SET PATH=%PATH%;C:\Program Files\PostgreSQL\&majorversion;\bin;
</programlisting>
and then run <application>pg_upgrade</> with quoted directories, e.g.:
<programlisting>
pg_upgrade.exe
- --old-datadir "C:/Program Files/PostgreSQL/8.4/data"
- --new-datadir "C:/Program Files/PostgreSQL/9.0/data"
- --old-bindir "C:/Program Files/PostgreSQL/8.4/bin"
- --new-bindir "C:/Program Files/PostgreSQL/9.0/bin"
+ --old-datadir "C:/Program Files/PostgreSQL/9.6/data"
+ --new-datadir "C:/Program Files/PostgreSQL/&majorversion;/data"
+ --old-bindir "C:/Program Files/PostgreSQL/9.6/bin"
+ --new-bindir "C:/Program Files/PostgreSQL/&majorversion;/bin"
</programlisting>
Once started, <command>pg_upgrade</> will verify the two clusters are compatible
diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml
index 3b86612862..e6eba21eda 100644
--- a/doc/src/sgml/ref/psql-ref.sgml
+++ b/doc/src/sgml/ref/psql-ref.sgml
@@ -1251,6 +1251,23 @@ testdb=&gt;
<varlistentry>
+ <term><literal>\dD[S+] [ <link linkend="APP-PSQL-patterns"><replaceable class="parameter">pattern</replaceable></link> ]</literal></term>
+ <listitem>
+ <para>
+ Lists domains. If <replaceable
+ class="parameter">pattern</replaceable>
+ is specified, only domains whose names match the pattern are shown.
+ By default, only user-created objects are shown; supply a
+ pattern or the <literal>S</literal> modifier to include system
+ objects.
+ If <literal>+</literal> is appended to the command name, each object
+ is listed with its associated permissions and description.
+ </para>
+ </listitem>
+ </varlistentry>
+
+
+ <varlistentry>
<term><literal>\ddp [ <link linkend="APP-PSQL-patterns"><replaceable class="parameter">pattern</replaceable></link> ]</literal></term>
<listitem>
<para>
@@ -1273,23 +1290,6 @@ testdb=&gt;
<varlistentry>
- <term><literal>\dD[S+] [ <link linkend="APP-PSQL-patterns"><replaceable class="parameter">pattern</replaceable></link> ]</literal></term>
- <listitem>
- <para>
- Lists domains. If <replaceable
- class="parameter">pattern</replaceable>
- is specified, only domains whose names match the pattern are shown.
- By default, only user-created objects are shown; supply a
- pattern or the <literal>S</literal> modifier to include system
- objects.
- If <literal>+</literal> is appended to the command name, each object
- is listed with its associated permissions and description.
- </para>
- </listitem>
- </varlistentry>
-
-
- <varlistentry>
<term><literal>\dE[S+] [ <link linkend="APP-PSQL-patterns"><replaceable class="parameter">pattern</replaceable></link> ]</literal></term>
<term><literal>\di[S+] [ <link linkend="APP-PSQL-patterns"><replaceable class="parameter">pattern</replaceable></link> ]</literal></term>
<term><literal>\dm[S+] [ <link linkend="APP-PSQL-patterns"><replaceable class="parameter">pattern</replaceable></link> ]</literal></term>
@@ -1945,18 +1945,6 @@ Tue Oct 26 21:40:57 CEST 1999
<varlistentry>
- <term><literal>\gx [ <replaceable class="parameter">filename</replaceable> ]</literal></term>
- <term><literal>\gx [ |<replaceable class="parameter">command</replaceable> ]</literal></term>
- <listitem>
- <para>
- <literal>\gx</literal> is equivalent to <literal>\g</literal>, but
- forces expanded output mode for this query. See <literal>\x</literal>.
- </para>
- </listitem>
- </varlistentry>
-
-
- <varlistentry>
<term><literal>\gexec</literal></term>
<listitem>
@@ -2046,6 +2034,19 @@ hello 10
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term><literal>\gx [ <replaceable class="parameter">filename</replaceable> ]</literal></term>
+ <term><literal>\gx [ |<replaceable class="parameter">command</replaceable> ]</literal></term>
+ <listitem>
+ <para>
+ <literal>\gx</literal> is equivalent to <literal>\g</literal>, but
+ forces expanded output mode for this query. See <literal>\x</literal>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+
<varlistentry>
<term><literal>\h</literal> or <literal>\help</literal> <literal>[ <replaceable class="parameter">command</replaceable> ]</literal></term>
<listitem>
@@ -2118,21 +2119,6 @@ hello 10
<varlistentry>
- <term><literal>\ir</literal> or <literal>\include_relative</literal> <replaceable class="parameter">filename</replaceable></term>
- <listitem>
- <para>
- The <literal>\ir</> command is similar to <literal>\i</>, but resolves
- relative file names differently. When executing in interactive mode,
- the two commands behave identically. However, when invoked from a
- script, <literal>\ir</literal> interprets file names relative to the
- directory in which the script is located, rather than the current
- working directory.
- </para>
- </listitem>
- </varlistentry>
-
-
- <varlistentry>
<term><literal>\if</literal> <replaceable class="parameter">expression</replaceable></term>
<term><literal>\elif</literal> <replaceable class="parameter">expression</replaceable></term>
<term><literal>\else</literal></term>
@@ -2222,6 +2208,21 @@ SELECT
<varlistentry>
+ <term><literal>\ir</literal> or <literal>\include_relative</literal> <replaceable class="parameter">filename</replaceable></term>
+ <listitem>
+ <para>
+ The <literal>\ir</> command is similar to <literal>\i</>, but resolves
+ relative file names differently. When executing in interactive mode,
+ the two commands behave identically. However, when invoked from a
+ script, <literal>\ir</literal> interprets file names relative to the
+ directory in which the script is located, rather than the current
+ working directory.
+ </para>
+ </listitem>
+ </varlistentry>
+
+
+ <varlistentry>
<term><literal>\l[+]</literal> or <literal>\list[+] [ <link linkend="APP-PSQL-patterns"><replaceable class="parameter">pattern</replaceable></link> ]</literal></term>
<listitem>
<para>
diff --git a/doc/src/sgml/ref/set_transaction.sgml b/doc/src/sgml/ref/set_transaction.sgml
index 11c3bfefcc..e67c29e2ec 100755
--- a/doc/src/sgml/ref/set_transaction.sgml
+++ b/doc/src/sgml/ref/set_transaction.sgml
@@ -226,8 +226,8 @@ SET SESSION CHARACTERISTICS AS TRANSACTION <replaceable class="parameter">transa
BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT pg_export_snapshot();
pg_export_snapshot
---------------------
- 000003A1-1
+---------------------
+ 00000003-0000001B-1
(1 row)
</programlisting>
@@ -237,7 +237,7 @@ SELECT pg_export_snapshot();
<programlisting>
BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-SET TRANSACTION SNAPSHOT '000003A1-1';
+SET TRANSACTION SNAPSHOT '00000003-0000001B-1';
</programlisting></para>
</refsect1>
diff --git a/doc/src/sgml/release-10.sgml b/doc/src/sgml/release-10.sgml
index 3ccac676ad..1918149333 100644
--- a/doc/src/sgml/release-10.sgml
+++ b/doc/src/sgml/release-10.sgml
@@ -129,18 +129,29 @@
<listitem>
<!--
+2017-01-16 [d43a619c6] Fix check_srf_call_placement() to handle VALUES cases co
2017-01-18 [69f4b9c85] Move targetlist SRF handling from expression evaluation
2017-01-18 [f13a1277a] Doc: improve documentation of new SRF-in-tlist behavior.
+2017-06-13 [0436f6bde] Disallow set-returning functions inside CASE or COALESCE
-->
<para>
- Allow <literal>COALESCE</> and <literal>CASE</> to return multiple
- rows when evaluating set-returning functions (Andres Freund).
+ Change the implementation of set-returning functions appearing in
+ a query's <literal>SELECT</> list (Andres Freund)
</para>
<para>
- This also prevents conditionals like <literal>CASE</> from
- controlling the execution of set-returning functions because
- set-returning functions are now executed earlier.
+ Set-returning functions are now evaluated before evaluation of scalar
+ expressions in the <literal>SELECT</> list, much as though they had
+ been placed in a <literal>LATERAL FROM</>-clause item. This allows
+ saner semantics for cases where multiple set-returning functions are
+ present. If they return different numbers of rows, the shorter results
+ are extended to match the longest result by adding nulls. Previously
+ the results were cycled until they all terminated at the same time,
+ producing a number of rows equal to the least common multiple of the
+ functions' periods. In addition, set-returning functions are now
+ disallowed within <literal>CASE</> and <literal>COALESCE</> constructs.
+ For more information
+ see <xref linkend="xfunc-sql-functions-returning-set">.
</para>
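   A small illustration of the new padding behavior, with the output shape
   inferred from the description above:
<programlisting>
SELECT generate_series(1,2) AS a, generate_series(1,3) AS b;
-- PostgreSQL 10: three rows, a is null-extended to match b:
--   (1,1), (2,2), (null,3)
-- previous releases: six rows, the least common multiple of 2 and 3
</programlisting>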
</listitem>
@@ -165,6 +176,26 @@
<listitem>
<!--
+2017-02-13 [8df9bd0b4] Change logical replication pg_hba.conf use
+-->
+ <para>
+ Change how logical replication
+ uses <link linkend="auth-pg-hba-conf"><filename>pg_hba.conf</filename></link>.
+ </para>
+
+ <para>
+ In previous releases, a logical replication connection required
+ the <literal>replication</literal> keyword in the database column. As
+ of this release, logical replication matches a normal entry with a
+ database name or keywords such as <literal>all</literal>. Physical
+ replication continues to use the <literal>replication</literal> keyword.
+ Since built-in logical replication is new in this release as well, this
+ change only affects users of third-party logical replication plugins.
+ </para>
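   A sketch of matching <filename>pg_hba.conf</filename> entries under the
   new rule; the database, user, and network values are made up:
<programlisting>
# logical replication now matches an ordinary database entry:
host    mydb           repuser    192.168.1.0/24    md5
# physical replication still uses the replication keyword:
host    replication    repuser    192.168.1.0/24    md5
</programlisting>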
+ </listitem>
+
+ <listitem>
+<!--
2017-01-14 [05cd12ed5] pg_ctl: Change default to wait for all actions
-->
<para>
@@ -505,7 +536,7 @@
</para>
<para>
- Specifically, a new <link linkend="SQL-ALTERINDEX"><command>CREATE
+ Specifically, a new <link linkend="SQL-CREATEINDEX"><command>CREATE
INDEX</></> option allows auto-summarization of the
previous <acronym>BRIN</> page range when a new page
range is created.
@@ -1681,20 +1712,6 @@
</para>
</listitem>
- <listitem>
-<!--
-2017-01-16 [d43a619c6] Fix check_srf_call_placement() to handle VALUES cases co
--->
- <para>
- Fix <function>check_srf_call_placement()</> to handle
- <command>VALUES</> cases correctly (Tom Lane)
- </para>
-
- <para>
- NEED TEXT.
- </para>
- </listitem>
-
</itemizedlist>
</sect3>
diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml
index bbe7475fb5..8acdb0500e 100644
--- a/doc/src/sgml/xfunc.sgml
+++ b/doc/src/sgml/xfunc.sgml
@@ -302,7 +302,7 @@ SELECT add_em(1, 2) AS answer;
bank account:
<programlisting>
-CREATE FUNCTION tf1 (accountno integer, debit numeric) RETURNS integer AS $$
+CREATE FUNCTION tf1 (accountno integer, debit numeric) RETURNS numeric AS $$
UPDATE bank
SET balance = balance - debit
WHERE accountno = tf1.accountno;
@@ -333,7 +333,7 @@ SELECT tf1(17, 100.0);
is:
<programlisting>
-CREATE FUNCTION tf1 (accountno integer, debit numeric) RETURNS integer AS $$
+CREATE FUNCTION tf1 (accountno integer, debit numeric) RETURNS numeric AS $$
UPDATE bank
SET balance = balance - debit
WHERE accountno = tf1.accountno;
@@ -345,7 +345,7 @@ $$ LANGUAGE SQL;
The same thing could be done in one command using <literal>RETURNING</>:
<programlisting>
-CREATE FUNCTION tf1 (accountno integer, debit numeric) RETURNS integer AS $$
+CREATE FUNCTION tf1 (accountno integer, debit numeric) RETURNS numeric AS $$
UPDATE bank
SET balance = balance - debit
WHERE accountno = tf1.accountno
@@ -1004,6 +1004,29 @@ SELECT name, listchildren(name) FROM nodes;
</para>
<para>
+ <productname>PostgreSQL</>'s behavior for a set-returning function in a
+ query's select list is almost exactly the same as if the set-returning
+ function had been written in a <literal>LATERAL FROM</>-clause item
+ instead. For example,
+<programlisting>
+SELECT x, generate_series(1,5) AS g FROM tab;
+</programlisting>
+ is almost equivalent to
+<programlisting>
+SELECT x, g FROM tab, LATERAL generate_series(1,5) AS g;
+</programlisting>
+ It would be exactly the same, except that in this specific example,
+ the planner could choose to put <structname>g</> on the outside of the
+ nestloop join, since <structname>g</> has no actual lateral dependency
+ on <structname>tab</>. That would result in a different output row
+ order. Set-returning functions in the select list are always evaluated
+ as though they are on the inside of a nestloop join with the rest of
+ the <literal>FROM</> clause, so that the function(s) are run to
+ completion before the next row from the <literal>FROM</> clause is
+ considered.
+ </para>
+
+ <para>
If there is more than one set-returning function in the query's select
list, the behavior is similar to what you get from putting the functions
into a single <literal>LATERAL ROWS FROM( ... )</> <literal>FROM</>-clause
@@ -1034,32 +1057,19 @@ SELECT srf1(srf2(x), srf3(y)), srf4(srf5(z)) FROM tab;
</para>
<para>
- This behavior also means that set-returning functions will be evaluated
- even when it might appear that they should be skipped because of a
- conditional-evaluation construct, such as <literal>CASE</>
- or <literal>COALESCE</>. For example, consider
+ Set-returning functions cannot be used within conditional-evaluation
+ constructs, such as <literal>CASE</> or <literal>COALESCE</>. For
+ example, consider
<programlisting>
SELECT x, CASE WHEN x &gt; 0 THEN generate_series(1, 5) ELSE 0 END FROM tab;
</programlisting>
- It might seem that this should produce five repetitions of input
- rows that have <literal>x &gt; 0</>, and a single repetition of those
- that do not; but actually it will produce five repetitions of every
- input row. This is because <function>generate_series()</> is run first,
- and then the <literal>CASE</> expression is applied to its result rows.
- The behavior is thus comparable to
-<programlisting>
-SELECT x, CASE WHEN x &gt; 0 THEN g ELSE 0 END
- FROM tab, LATERAL generate_series(1,5) AS g;
-</programlisting>
- It would be exactly the same, except that in this specific example,
- the planner could choose to put <structname>g</> on the outside of the
- nestloop join, since <structname>g</> has no actual lateral dependency
- on <structname>tab</>. That would result in a different output row
- order. Set-returning functions in the select list are always evaluated
- as though they are on the inside of a nestloop join with the rest of
- the <literal>FROM</> clause, so that the function(s) are run to
- completion before the next row from the <literal>FROM</> clause is
- considered.
+ It might seem that this should produce five repetitions of input rows
+ that have <literal>x &gt; 0</>, and a single repetition of those that do
+ not; but actually, because <function>generate_series(1, 5)</> would be
+ run in an implicit <literal>LATERAL FROM</> item before
+ the <literal>CASE</> expression is ever evaluated, it would produce five
+ repetitions of every input row. To reduce confusion, such cases produce
+ a parse-time error instead.
</para>
<note>
@@ -1084,11 +1094,34 @@ SELECT x, CASE WHEN x &gt; 0 THEN g ELSE 0 END
functions. Also, nested set-returning functions did not work as
described above; instead, a set-returning function could have at most
one set-returning argument, and each nest of set-returning functions
- was run independently. The behavior for conditional execution
- (set-returning functions inside <literal>CASE</> etc) was different too.
+ was run independently. In addition, conditional execution (set-returning
+ functions inside <literal>CASE</> etc) was previously allowed, which
+ complicated matters even more.
Use of the <literal>LATERAL</> syntax is recommended when writing
queries that need to work in older <productname>PostgreSQL</> versions,
because that will give consistent results across different versions.
+ If you have a query that relies on conditional execution of a
+ set-returning function, you may be able to fix it by moving the
+ conditional test into a custom set-returning function. For example,
+<programlisting>
+SELECT x, CASE WHEN y &gt; 0 THEN generate_series(1, z) ELSE 5 END FROM tab;
+</programlisting>
+ could become
+<programlisting>
+CREATE FUNCTION case_generate_series(cond bool, start int, fin int, els int)
+ RETURNS SETOF int AS $$
+BEGIN
+ IF cond THEN
+ RETURN QUERY SELECT generate_series(start, fin);
+ ELSE
+ RETURN QUERY SELECT els;
+ END IF;
+END$$ LANGUAGE plpgsql;
+
+SELECT x, case_generate_series(y &gt; 0, 1, z, 5) FROM tab;
+</programlisting>
+ This formulation will work the same in all versions
+ of <productname>PostgreSQL</>.
</para>
</note>
</sect2>
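The ROWS FROM() equivalence described in this section can be checked directly; a minimal sketch, assuming a v10 server (not part of the patch):

    -- These two queries behave identically in v10: results of unequal
    -- length are padded with NULLs, as in a ROWS FROM() construct.
    SELECT generate_series(1, 2), generate_series(1, 3);
    SELECT g1, g2
      FROM ROWS FROM (generate_series(1, 2), generate_series(1, 3)) AS t(g1, g2);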
diff --git a/src/Makefile.global.in b/src/Makefile.global.in
index dc7b801dff..fb06141e24 100644
--- a/src/Makefile.global.in
+++ b/src/Makefile.global.in
@@ -233,6 +233,8 @@ PTHREAD_LIBS = @PTHREAD_LIBS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
+override CPPFLAGS := $(CPPFLAGS) $(ICU_CFLAGS)
+
ifdef PGXS
override CPPFLAGS := -I$(includedir_server) -I$(includedir_internal) $(CPPFLAGS)
else # not PGXS
@@ -343,7 +345,7 @@ PROVE = @PROVE@
# extra perl modules in their own directory.
PG_PROVE_FLAGS = -I $(top_srcdir)/src/test/perl/ -I $(srcdir)
# User-supplied prove flags such as --verbose can be provided in PROVE_FLAGS.
-
+PROVE_FLAGS =
# prepend to path if already set, else just set it
define add_to_path
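With PROVE_FLAGS now empty by default, user-supplied prove flags go on the make command line instead; a hedged example (target directory illustrative):

    make -C src/bin/pg_dump check PROVE_FLAGS='--timer --verbose'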
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 442a46140d..b0e89ace5e 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -190,7 +190,8 @@ brininsert(Relation idxRel, Datum *values, bool *nulls,
AutoVacuumRequestWork(AVW_BRINSummarizeRange,
RelationGetRelid(idxRel),
lastPageRange);
- brin_free_tuple(lastPageTuple);
+ else
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
}
brtup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off,
diff --git a/src/backend/access/brin/brin_pageops.c b/src/backend/access/brin/brin_pageops.c
index 1725591b05..3609c8ae7c 100644
--- a/src/backend/access/brin/brin_pageops.c
+++ b/src/backend/access/brin/brin_pageops.c
@@ -73,10 +73,8 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange,
{
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) newsz,
- (unsigned long) BrinMaxItemSize,
- RelationGetRelationName(idxrel))));
+ errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
+ newsz, BrinMaxItemSize, RelationGetRelationName(idxrel))));
return false; /* keep compiler quiet */
}
@@ -357,10 +355,8 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange,
{
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) itemsz,
- (unsigned long) BrinMaxItemSize,
- RelationGetRelationName(idxrel))));
+ errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
+ itemsz, BrinMaxItemSize, RelationGetRelationName(idxrel))));
return InvalidOffsetNumber; /* keep compiler quiet */
}
@@ -669,7 +665,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz,
BlockNumber oldblk;
BlockNumber newblk;
Page page;
- int freespace;
+ Size freespace;
/* callers must have checked */
Assert(itemsz <= BrinMaxItemSize);
@@ -825,10 +821,8 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz,
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) itemsz,
- (unsigned long) freespace,
- RelationGetRelationName(irel))));
+ errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
+ itemsz, freespace, RelationGetRelationName(irel))));
return InvalidBuffer; /* keep compiler quiet */
}
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index fc8b10ab39..e778cbcacd 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -179,13 +179,16 @@ brinSetHeapBlockItemptr(Buffer buf, BlockNumber pagesPerRange,
/*
* Fetch the BrinTuple for a given heap block.
*
- * The buffer containing the tuple is locked, and returned in *buf. As an
- * optimization, the caller can pass a pinned buffer *buf on entry, which will
- * avoid a pin-unpin cycle when the next tuple is on the same page as a
- * previous one.
+ * The buffer containing the tuple is locked, and returned in *buf. The
+ * returned tuple points to the shared buffer and must not be freed; if caller
+ * wants to use it after releasing the buffer lock, it must create its own
+ * palloc'ed copy. As an optimization, the caller can pass a pinned buffer
+ * *buf on entry, which will avoid a pin-unpin cycle when the next tuple is on
+ * the same page as a previous one.
*
* If no tuple is found for the given heap range, returns NULL. In that case,
- * *buf might still be updated, but it's not locked.
+ * *buf might still be updated (and pin must be released by caller), but it's
+ * not locked.
*
* The output tuple offset within the buffer is returned in *off, and its size
* is returned in *size.
diff --git a/src/backend/access/brin/brin_validate.c b/src/backend/access/brin/brin_validate.c
index dc23e00e89..b4acf2b6f3 100644
--- a/src/backend/access/brin/brin_validate.c
+++ b/src/backend/access/brin/brin_validate.c
@@ -113,8 +113,8 @@ brinvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator family \"%s\" contains function %s with invalid support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with invalid support number %d",
+ opfamilyname, "brin",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -129,8 +129,8 @@ brinvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator family \"%s\" contains function %s with wrong signature for support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d",
+ opfamilyname, "brin",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -151,8 +151,8 @@ brinvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator family \"%s\" contains operator %s with invalid strategy number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d",
+ opfamilyname, "brin",
format_operator(oprform->amopopr),
oprform->amopstrategy)));
result = false;
@@ -180,8 +180,8 @@ brinvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator family \"%s\" contains invalid ORDER BY specification for operator %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s",
+ opfamilyname, "brin",
format_operator(oprform->amopopr))));
result = false;
}
@@ -193,8 +193,8 @@ brinvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator family \"%s\" contains operator %s with wrong signature",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with wrong signature",
+ opfamilyname, "brin",
format_operator(oprform->amopopr))));
result = false;
}
@@ -231,8 +231,8 @@ brinvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator family \"%s\" is missing operator(s) for types %s and %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s is missing operator(s) for types %s and %s",
+ opfamilyname, "brin",
format_type_be(thisgroup->lefttype),
format_type_be(thisgroup->righttype))));
result = false;
@@ -241,8 +241,8 @@ brinvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator family \"%s\" is missing support function(s) for types %s and %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s is missing support function(s) for types %s and %s",
+ opfamilyname, "brin",
format_type_be(thisgroup->lefttype),
format_type_be(thisgroup->righttype))));
result = false;
@@ -254,8 +254,8 @@ brinvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator class \"%s\" is missing operator(s)",
- opclassname)));
+ errmsg("operator class \"%s\" of access method %s is missing operator(s)",
+ opclassname, "brin")));
result = false;
}
for (i = 1; i <= BRIN_MANDATORY_NPROCS; i++)
@@ -265,8 +265,8 @@ brinvalidate(Oid opclassoid)
continue; /* got it */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator class \"%s\" is missing support function %d",
- opclassname, i)));
+ errmsg("operator class \"%s\" of access method %s is missing support function %d",
+ opclassname, "brin", i)));
result = false;
}
diff --git a/src/backend/access/gin/ginvalidate.c b/src/backend/access/gin/ginvalidate.c
index 0d2847456e..4c8e563545 100644
--- a/src/backend/access/gin/ginvalidate.c
+++ b/src/backend/access/gin/ginvalidate.c
@@ -90,8 +90,8 @@ ginvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator family \"%s\" contains support procedure %s with cross-type registration",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains support procedure %s with different left and right input types",
+ opfamilyname, "gin",
format_procedure(procform->amproc))));
result = false;
}
@@ -146,8 +146,8 @@ ginvalidate(Oid opclassoid)
default:
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator family \"%s\" contains function %s with invalid support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with invalid support number %d",
+ opfamilyname, "gin",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -158,8 +158,8 @@ ginvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator family \"%s\" contains function %s with wrong signature for support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d",
+ opfamilyname, "gin",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -177,8 +177,8 @@ ginvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator family \"%s\" contains operator %s with invalid strategy number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d",
+ opfamilyname, "gin",
format_operator(oprform->amopopr),
oprform->amopstrategy)));
result = false;
@@ -190,8 +190,8 @@ ginvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator family \"%s\" contains invalid ORDER BY specification for operator %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s",
+ opfamilyname, "gin",
format_operator(oprform->amopopr))));
result = false;
}
@@ -203,8 +203,8 @@ ginvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator family \"%s\" contains operator %s with wrong signature",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with wrong signature",
+ opfamilyname, "gin",
format_operator(oprform->amopopr))));
result = false;
}
@@ -244,8 +244,8 @@ ginvalidate(Oid opclassoid)
continue; /* don't need both, see check below loop */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator class \"%s\" is missing support function %d",
- opclassname, i)));
+ errmsg("operator class \"%s\" of access method %s is missing support function %d",
+ opclassname, "gin", i)));
result = false;
}
if (!opclassgroup ||
@@ -254,8 +254,8 @@ ginvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator class \"%s\" is missing support function %d or %d",
- opclassname,
+ errmsg("operator class \"%s\" of access method %s is missing support function %d or %d",
+ opclassname, "gin",
GIN_CONSISTENT_PROC, GIN_TRICONSISTENT_PROC)));
result = false;
}
diff --git a/src/backend/access/gist/gistvalidate.c b/src/backend/access/gist/gistvalidate.c
index 585c92be26..42254c5f15 100644
--- a/src/backend/access/gist/gistvalidate.c
+++ b/src/backend/access/gist/gistvalidate.c
@@ -90,8 +90,8 @@ gistvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator family \"%s\" contains support procedure %s with cross-type registration",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains support procedure %s with different left and right input types",
+ opfamilyname, "gist",
format_procedure(procform->amproc))));
result = false;
}
@@ -143,8 +143,8 @@ gistvalidate(Oid opclassoid)
default:
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator family \"%s\" contains function %s with invalid support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with invalid support number %d",
+ opfamilyname, "gist",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -155,8 +155,8 @@ gistvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator family \"%s\" contains function %s with wrong signature for support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d",
+ opfamilyname, "gist",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -175,8 +175,8 @@ gistvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator family \"%s\" contains operator %s with invalid strategy number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d",
+ opfamilyname, "gist",
format_operator(oprform->amopopr),
oprform->amopstrategy)));
result = false;
@@ -193,8 +193,8 @@ gistvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator family \"%s\" contains unsupported ORDER BY specification for operator %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains unsupported ORDER BY specification for operator %s",
+ opfamilyname, "gist",
format_operator(oprform->amopopr))));
result = false;
}
@@ -204,8 +204,8 @@ gistvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator family \"%s\" contains incorrect ORDER BY opfamily specification for operator %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains incorrect ORDER BY opfamily specification for operator %s",
+ opfamilyname, "gist",
format_operator(oprform->amopopr))));
result = false;
}
@@ -223,8 +223,8 @@ gistvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator family \"%s\" contains operator %s with wrong signature",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with wrong signature",
+ opfamilyname, "gist",
format_operator(oprform->amopopr))));
result = false;
}
@@ -262,8 +262,8 @@ gistvalidate(Oid opclassoid)
continue; /* optional methods */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator class \"%s\" is missing support function %d",
- opclassname, i)));
+ errmsg("operator class \"%s\" of access method %s is missing support function %d",
+ opclassname, "gist", i)));
result = false;
}
diff --git a/src/backend/access/hash/hashvalidate.c b/src/backend/access/hash/hashvalidate.c
index f914c015bd..30b29cb100 100644
--- a/src/backend/access/hash/hashvalidate.c
+++ b/src/backend/access/hash/hashvalidate.c
@@ -96,8 +96,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" contains support procedure %s with cross-type registration",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains support procedure %s with different left and right input types",
+ opfamilyname, "hash",
format_procedure(procform->amproc))));
result = false;
}
@@ -111,8 +111,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" contains function %s with wrong signature for support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d",
+ opfamilyname, "hash",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -128,8 +128,8 @@ hashvalidate(Oid opclassoid)
default:
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" contains function %s with invalid support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with invalid support number %d",
+ opfamilyname, "hash",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -149,8 +149,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" contains operator %s with invalid strategy number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d",
+ opfamilyname, "hash",
format_operator(oprform->amopopr),
oprform->amopstrategy)));
result = false;
@@ -162,8 +162,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" contains invalid ORDER BY specification for operator %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s",
+ opfamilyname, "hash",
format_operator(oprform->amopopr))));
result = false;
}
@@ -175,8 +175,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" contains operator %s with wrong signature",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with wrong signature",
+ opfamilyname, "hash",
format_operator(oprform->amopopr))));
result = false;
}
@@ -187,8 +187,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" lacks support function for operator %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s lacks support function for operator %s",
+ opfamilyname, "hash",
format_operator(oprform->amopopr))));
result = false;
}
@@ -215,8 +215,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" is missing operator(s) for types %s and %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s is missing operator(s) for types %s and %s",
+ opfamilyname, "hash",
format_type_be(thisgroup->lefttype),
format_type_be(thisgroup->righttype))));
result = false;
@@ -229,8 +229,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator class \"%s\" is missing operator(s)",
- opclassname)));
+ errmsg("operator class \"%s\" of access method %s is missing operator(s)",
+ opclassname, "hash")));
result = false;
}
@@ -245,8 +245,8 @@ hashvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" is missing cross-type operator(s)",
- opfamilyname)));
+ errmsg("operator family \"%s\" of access method %s is missing cross-type operator(s)",
+ opfamilyname, "hash")));
result = false;
}
diff --git a/src/backend/access/nbtree/nbtvalidate.c b/src/backend/access/nbtree/nbtvalidate.c
index 88e33f54cd..5aae53ac68 100644
--- a/src/backend/access/nbtree/nbtvalidate.c
+++ b/src/backend/access/nbtree/nbtvalidate.c
@@ -98,8 +98,8 @@ btvalidate(Oid opclassoid)
default:
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator family \"%s\" contains function %s with invalid support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with invalid support number %d",
+ opfamilyname, "btree",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -110,8 +110,8 @@ btvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator family \"%s\" contains function %s with wrong signature for support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d",
+ opfamilyname, "btree",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -130,8 +130,8 @@ btvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator family \"%s\" contains operator %s with invalid strategy number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d",
+ opfamilyname, "btree",
format_operator(oprform->amopopr),
oprform->amopstrategy)));
result = false;
@@ -143,8 +143,8 @@ btvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator family \"%s\" contains invalid ORDER BY specification for operator %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s",
+ opfamilyname, "btree",
format_operator(oprform->amopopr))));
result = false;
}
@@ -156,8 +156,8 @@ btvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator family \"%s\" contains operator %s with wrong signature",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with wrong signature",
+ opfamilyname, "btree",
format_operator(oprform->amopopr))));
result = false;
}
@@ -198,8 +198,8 @@ btvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator family \"%s\" is missing operator(s) for types %s and %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s is missing operator(s) for types %s and %s",
+ opfamilyname, "btree",
format_type_be(thisgroup->lefttype),
format_type_be(thisgroup->righttype))));
result = false;
@@ -208,8 +208,8 @@ btvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator family \"%s\" is missing support function for types %s and %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s is missing support function for types %s and %s",
+ opfamilyname, "btree",
format_type_be(thisgroup->lefttype),
format_type_be(thisgroup->righttype))));
result = false;
@@ -222,8 +222,8 @@ btvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator class \"%s\" is missing operator(s)",
- opclassname)));
+ errmsg("operator class \"%s\" of access method %s is missing operator(s)",
+ opclassname, "btree")));
result = false;
}
@@ -239,8 +239,8 @@ btvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operator family \"%s\" is missing cross-type operator(s)",
- opfamilyname)));
+ errmsg("operator family \"%s\" of access method %s is missing cross-type operator(s)",
+ opfamilyname, "btree")));
result = false;
}
diff --git a/src/backend/access/spgist/spgvalidate.c b/src/backend/access/spgist/spgvalidate.c
index 1bc5bce72e..157cf2a028 100644
--- a/src/backend/access/spgist/spgvalidate.c
+++ b/src/backend/access/spgist/spgvalidate.c
@@ -90,8 +90,8 @@ spgvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator family \"%s\" contains support procedure %s with cross-type registration",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains support procedure %s with different left and right input types",
+ opfamilyname, "spgist",
format_procedure(procform->amproc))));
result = false;
}
@@ -113,8 +113,8 @@ spgvalidate(Oid opclassoid)
default:
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator family \"%s\" contains function %s with invalid support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with invalid support number %d",
+ opfamilyname, "spgist",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -125,8 +125,8 @@ spgvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator family \"%s\" contains function %s with wrong signature for support number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d",
+ opfamilyname, "spgist",
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
@@ -144,8 +144,8 @@ spgvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator family \"%s\" contains operator %s with invalid strategy number %d",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d",
+ opfamilyname, "spgist",
format_operator(oprform->amopopr),
oprform->amopstrategy)));
result = false;
@@ -157,8 +157,8 @@ spgvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator family \"%s\" contains invalid ORDER BY specification for operator %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s",
+ opfamilyname, "spgist",
format_operator(oprform->amopopr))));
result = false;
}
@@ -170,8 +170,8 @@ spgvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator family \"%s\" contains operator %s with wrong signature",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s contains operator %s with wrong signature",
+ opfamilyname, "spgist",
format_operator(oprform->amopopr))));
result = false;
}
@@ -198,8 +198,8 @@ spgvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator family \"%s\" is missing operator(s) for types %s and %s",
- opfamilyname,
+ errmsg("operator family \"%s\" of access method %s is missing operator(s) for types %s and %s",
+ opfamilyname, "spgist",
format_type_be(thisgroup->lefttype),
format_type_be(thisgroup->righttype))));
result = false;
@@ -218,8 +218,8 @@ spgvalidate(Oid opclassoid)
continue; /* got it */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator family \"%s\" is missing support function %d for type %s",
- opfamilyname, i,
+ errmsg("operator family \"%s\" of access method %s is missing support function %d for type %s",
+ opfamilyname, "spgist", i,
format_type_be(thisgroup->lefttype))));
result = false;
}
@@ -231,8 +231,8 @@ spgvalidate(Oid opclassoid)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("spgist operator class \"%s\" is missing operator(s)",
- opclassname)));
+ errmsg("operator class \"%s\" of access method %s is missing operator(s)",
+ opclassname, "spgist")));
result = false;
}
diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index d3585c8449..afb54ada9f 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -393,12 +393,12 @@ ReinitializeParallelDSM(ParallelContext *pcxt)
}
/* Reset a few bits of fixed parallel state to a clean state. */
- fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
+ fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
fps->last_xlog_end = 0;
/* Recreate error queues. */
error_queue_space =
- shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
+ shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, false);
for (i = 0; i < pcxt->nworkers; ++i)
{
char *start;
@@ -528,16 +528,16 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
if (!anyone_alive)
break;
- WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1,
+ WaitLatch(MyLatch, WL_LATCH_SET, -1,
WAIT_EVENT_PARALLEL_FINISH);
- ResetLatch(&MyProc->procLatch);
+ ResetLatch(MyLatch);
}
if (pcxt->toc != NULL)
{
FixedParallelState *fps;
- fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
+ fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
if (fps->last_xlog_end > XactLastRecEnd)
XactLastRecEnd = fps->last_xlog_end;
}
@@ -974,8 +974,7 @@ ParallelWorkerMain(Datum main_arg)
errmsg("invalid magic number in dynamic shared memory segment")));
/* Look up fixed parallel state. */
- fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
- Assert(fps != NULL);
+ fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED, false);
MyFixedParallelState = fps;
/*
@@ -984,7 +983,7 @@ ParallelWorkerMain(Datum main_arg)
* errors that happen here will not be reported back to the process that
* requested that this worker be launched.
*/
- error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
+ error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE, false);
mq = (shm_mq *) (error_queue_space +
ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
shm_mq_set_sender(mq, MyProc);
@@ -1028,8 +1027,7 @@ ParallelWorkerMain(Datum main_arg)
* this before restoring GUCs, because the libraries might define custom
* variables.
*/
- libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
- Assert(libraryspace != NULL);
+ libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY, false);
RestoreLibraryState(libraryspace);
/*
@@ -1037,8 +1035,7 @@ ParallelWorkerMain(Datum main_arg)
* loading an additional library, though most likely the entry point is in
* the core backend or in a library we just loaded.
*/
- entrypointstate = shm_toc_lookup(toc, PARALLEL_KEY_ENTRYPOINT);
- Assert(entrypointstate != NULL);
+ entrypointstate = shm_toc_lookup(toc, PARALLEL_KEY_ENTRYPOINT, false);
library_name = entrypointstate;
function_name = entrypointstate + strlen(library_name) + 1;
@@ -1060,30 +1057,26 @@ ParallelWorkerMain(Datum main_arg)
SetClientEncoding(GetDatabaseEncoding());
/* Restore GUC values from launching backend. */
- gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
- Assert(gucspace != NULL);
+ gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC, false);
StartTransactionCommand();
RestoreGUCState(gucspace);
CommitTransactionCommand();
/* Crank up a transaction state appropriate to a parallel worker. */
- tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
+ tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE, false);
StartParallelWorkerTransaction(tstatespace);
/* Restore combo CID state. */
- combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
- Assert(combocidspace != NULL);
+ combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID, false);
RestoreComboCIDState(combocidspace);
/* Restore transaction snapshot. */
- tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
- Assert(tsnapspace != NULL);
+ tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, false);
RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
fps->parallel_master_pgproc);
/* Restore active snapshot. */
- asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
- Assert(asnapspace != NULL);
+ asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
PushActiveSnapshot(RestoreSnapshot(asnapspace));
/*
diff --git a/src/backend/access/transam/recovery.conf.sample b/src/backend/access/transam/recovery.conf.sample
index acb81afd66..37fbaedaa5 100644
--- a/src/backend/access/transam/recovery.conf.sample
+++ b/src/backend/access/transam/recovery.conf.sample
@@ -66,11 +66,11 @@
# If you want to stop rollforward at a specific point, you
# must set a recovery target.
#
-# You may set a recovery target either by transactionId, by name,
-# or by timestamp or by WAL location (LSN) or by barrier. Recovery may either
-# include or exclude the transaction(s) with the recovery target value (ie,
-# stop either just after or just before the given target, respectively). In
-# case of barrier, the recovery stops exactly at that point.
+# You may set a recovery target by transactionId, by name, by timestamp,
+# by WAL location (LSN), or by barrier. Recovery may either include or
+# exclude the transaction(s) with the recovery target value (i.e.,
+# stop either just after or just before the given target,
+# respectively). In the case of a barrier, recovery stops exactly at that point.
#
#
#recovery_target_name = '' # e.g. 'daily backup 2011-01-26'
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index a0390bf25b..cd452c5139 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -10,6 +10,7 @@
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
*
+ * This code is based on xact.c, but the robustness requirements
* are completely different from pg_xact, because we only need to remember
* pg_subtrans information for currently-open transactions. Thus, there is
* no need to preserve data over a crash and restart.
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index f6986d37db..3c1df5166e 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -204,7 +204,10 @@ typedef struct TwoPhaseStateData
static TwoPhaseStateData *TwoPhaseState;
/*
- * Global transaction entry currently locked by us, if any.
+ * Global transaction entry currently locked by us, if any. Note that any
+ * access to the entry pointed to by this variable must be protected by
+ * TwoPhaseStateLock, though obviously the pointer itself doesn't need to be
+ * (since it's just local memory).
*/
static GlobalTransaction MyLockedGxact = NULL;
@@ -347,18 +350,13 @@ AtAbort_Twophase(void)
* resources held by the transaction yet. In those cases, the in-memory
* state can be wrong, but it's too late to back out.
*/
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
if (!MyLockedGxact->valid)
- {
RemoveGXact(MyLockedGxact);
- }
else
- {
- LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
-
MyLockedGxact->locking_backend = InvalidBackendId;
+ LWLockRelease(TwoPhaseStateLock);
- LWLockRelease(TwoPhaseStateLock);
- }
MyLockedGxact = NULL;
}
@@ -463,6 +461,8 @@ MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid, const char *gid,
PGXACT *pgxact;
int i;
+ Assert(LWLockHeldByMeInMode(TwoPhaseStateLock, LW_EXCLUSIVE));
+
Assert(gxact != NULL);
proc = &ProcGlobal->allProcs[gxact->pgprocno];
pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
@@ -539,15 +539,19 @@ GXactLoadSubxactData(GlobalTransaction gxact, int nsubxacts,
/*
* MarkAsPrepared
* Mark the GXACT as fully valid, and enter it into the global ProcArray.
+ *
+ * lock_held indicates whether caller already holds TwoPhaseStateLock.
*/
static void
-MarkAsPrepared(GlobalTransaction gxact)
+MarkAsPrepared(GlobalTransaction gxact, bool lock_held)
{
/* Lock here may be overkill, but I'm not convinced of that ... */
- LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
+ if (!lock_held)
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
Assert(!gxact->valid);
gxact->valid = true;
- LWLockRelease(TwoPhaseStateLock);
+ if (!lock_held)
+ LWLockRelease(TwoPhaseStateLock);
/*
* Put it into the global ProcArray so TransactionIdIsInProgress considers
@@ -652,7 +656,7 @@ RemoveGXact(GlobalTransaction gxact)
{
int i;
- LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
+ Assert(LWLockHeldByMeInMode(TwoPhaseStateLock, LW_EXCLUSIVE));
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
@@ -666,14 +670,10 @@ RemoveGXact(GlobalTransaction gxact)
gxact->next = TwoPhaseState->freeGXacts;
TwoPhaseState->freeGXacts = gxact;
- LWLockRelease(TwoPhaseStateLock);
-
return;
}
}
- LWLockRelease(TwoPhaseStateLock);
-
elog(ERROR, "failed to find %p in GlobalTransaction array", gxact);
}
@@ -1147,7 +1147,7 @@ EndPrepare(GlobalTransaction gxact)
* the xact crashed. Instead we have a window where the same XID appears
* twice in ProcArray, which is OK.
*/
- MarkAsPrepared(gxact);
+ MarkAsPrepared(gxact, false);
/*
* Now we can mark ourselves as out of the commit critical section: a
@@ -1540,7 +1540,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
if (gxact->ondisk)
RemoveTwoPhaseFile(xid, true);
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
RemoveGXact(gxact);
+ LWLockRelease(TwoPhaseStateLock);
MyLockedGxact = NULL;
pfree(buf);
@@ -1768,6 +1770,7 @@ restoreTwoPhaseData(void)
struct dirent *clde;
cldir = AllocateDir(TWOPHASE_DIR);
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
while ((clde = ReadDir(cldir, TWOPHASE_DIR)) != NULL)
{
if (strlen(clde->d_name) == 8 &&
@@ -1786,6 +1789,7 @@ restoreTwoPhaseData(void)
PrepareRedoAdd(buf, InvalidXLogRecPtr, InvalidXLogRecPtr);
}
}
+ LWLockRelease(TwoPhaseStateLock);
FreeDir(cldir);
}
@@ -1826,7 +1830,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
int allocsize = 0;
int i;
- LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
TransactionId xid;
@@ -1901,7 +1905,7 @@ StandbyRecoverPreparedTransactions(void)
{
int i;
- LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
TransactionId xid;
@@ -1927,7 +1931,8 @@ StandbyRecoverPreparedTransactions(void)
* Scan the shared memory entries of TwoPhaseState and reload the state for
* each prepared transaction (reacquire locks, etc).
*
- * This is run during database startup.
+ * This is run at the end of recovery, but before we allow backends to write
+ * WAL.
*
* At the end of recovery the way we take snapshots will change. We now need
* to mark all running transactions with their full SubTransSetParent() info
@@ -1941,9 +1946,7 @@ RecoverPreparedTransactions(void)
{
int i;
- /*
- * Don't need a lock in the recovery phase.
- */
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
TransactionId xid;
@@ -1989,7 +1992,6 @@ RecoverPreparedTransactions(void)
* Recreate its GXACT and dummy PGPROC. But, check whether it was
* added in redo and already has a shmem entry for it.
*/
- LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
MarkAsPreparingGuts(gxact, xid, gid,
hdr->prepared_at,
hdr->owner, hdr->database);
@@ -1997,13 +1999,13 @@ RecoverPreparedTransactions(void)
/* recovered, so reset the flag for entries generated by redo */
gxact->inredo = false;
- LWLockRelease(TwoPhaseStateLock);
-
GXactLoadSubxactData(gxact, hdr->nsubxacts, subxids);
- MarkAsPrepared(gxact);
+ MarkAsPrepared(gxact, true);
+
+ LWLockRelease(TwoPhaseStateLock);
/*
- * Recover other state (notably locks) using resource managers
+ * Recover other state (notably locks) using resource managers.
*/
ProcessRecords(bufptr, xid, twophase_recover_callbacks);
@@ -2022,7 +2024,11 @@ RecoverPreparedTransactions(void)
PostPrepare_Twophase();
pfree(buf);
+
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
}
+
+ LWLockRelease(TwoPhaseStateLock);
}
/*
@@ -2048,6 +2054,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
TwoPhaseFileHeader *hdr;
int i;
+ Assert(LWLockHeldByMeInMode(TwoPhaseStateLock, LW_EXCLUSIVE));
+
if (!fromdisk)
Assert(prepare_start_lsn != InvalidXLogRecPtr);
@@ -2064,8 +2072,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
else
{
ereport(WARNING,
- (errmsg("removing stale two-phase state from"
- " shared memory for \"%u\"", xid)));
+ (errmsg("removing stale two-phase state from shared memory for \"%u\"",
+ xid)));
PrepareRedoRemove(xid, true);
}
return NULL;
@@ -2342,6 +2350,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
const char *gid;
GlobalTransaction gxact;
+ Assert(LWLockHeldByMeInMode(TwoPhaseStateLock, LW_EXCLUSIVE));
Assert(RecoveryInProgress());
bufptr = buf + MAXALIGN(sizeof(TwoPhaseFileHeader));
@@ -2358,7 +2367,6 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
* that it got added in the redo phase
*/
- LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
/* Get a free gxact from the freelist */
if (TwoPhaseState->freeGXacts == NULL)
ereport(ERROR,
@@ -2384,17 +2392,17 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
Assert(TwoPhaseState->numPrepXacts < max_prepared_xacts);
TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;
- LWLockRelease(TwoPhaseStateLock);
-
- elog(DEBUG2, "Adding 2PC data to shared memory %u", gxact->xid);
+ elog(DEBUG2, "added 2PC data in shared memory for transaction %u", gxact->xid);
}
/*
* PrepareRedoRemove
*
- * Remove the corresponding gxact entry from TwoPhaseState. Also
- * remove the 2PC file if a prepared transaction was saved via
- * an earlier checkpoint.
+ * Remove the corresponding gxact entry from TwoPhaseState. Also remove
+ * the 2PC file if a prepared transaction was saved via an earlier checkpoint.
+ *
+ * Caller must hold TwoPhaseStateLock in exclusive mode, because TwoPhaseState
+ * is updated.
*/
void
PrepareRedoRemove(TransactionId xid, bool giveWarning)
@@ -2403,9 +2411,9 @@ PrepareRedoRemove(TransactionId xid, bool giveWarning)
int i;
bool found = false;
+ Assert(LWLockHeldByMeInMode(TwoPhaseStateLock, LW_EXCLUSIVE));
Assert(RecoveryInProgress());
- LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
gxact = TwoPhaseState->prepXacts[i];
@@ -2417,7 +2425,6 @@ PrepareRedoRemove(TransactionId xid, bool giveWarning)
break;
}
}
- LWLockRelease(TwoPhaseStateLock);
/*
* Just leave if there is nothing, this is expected during WAL replay.
@@ -2428,7 +2435,7 @@ PrepareRedoRemove(TransactionId xid, bool giveWarning)
/*
* And now we can clean up any files we may have left.
*/
- elog(DEBUG2, "Removing 2PC data from shared memory %u", xid);
+ elog(DEBUG2, "removing 2PC data for transaction %u", xid);
if (gxact->ondisk)
RemoveTwoPhaseFile(xid, giveWarning);
RemoveGXact(gxact);
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 77666c4b80..481231d13a 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -6215,6 +6215,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
int i;
TimestampTz commit_time;
+ Assert(TransactionIdIsValid(xid));
+
max_xid = TransactionIdLatest(xid, parsed->nsubxacts, parsed->subxacts);
/*
@@ -6382,6 +6384,8 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
int i;
TransactionId max_xid;
+ Assert(TransactionIdIsValid(xid));
+
/*
* Make sure nextXid is beyond any XID mentioned in the record.
*
@@ -6462,51 +6466,49 @@ xact_redo(XLogReaderState *record)
/* Backup blocks are not used in xact records */
Assert(!XLogRecHasAnyBlockRefs(record));
- if (info == XLOG_XACT_COMMIT || info == XLOG_XACT_COMMIT_PREPARED)
+ if (info == XLOG_XACT_COMMIT)
{
xl_xact_commit *xlrec = (xl_xact_commit *) XLogRecGetData(record);
xl_xact_parsed_commit parsed;
- ParseCommitRecord(XLogRecGetInfo(record), xlrec,
- &parsed);
+ ParseCommitRecord(XLogRecGetInfo(record), xlrec, &parsed);
+ xact_redo_commit(&parsed, XLogRecGetXid(record),
+ record->EndRecPtr, XLogRecGetOrigin(record));
+ }
+ else if (info == XLOG_XACT_COMMIT_PREPARED)
+ {
+ xl_xact_commit *xlrec = (xl_xact_commit *) XLogRecGetData(record);
+ xl_xact_parsed_commit parsed;
- if (info == XLOG_XACT_COMMIT)
- {
- Assert(!TransactionIdIsValid(parsed.twophase_xid));
- xact_redo_commit(&parsed, XLogRecGetXid(record),
- record->EndRecPtr, XLogRecGetOrigin(record));
- }
- else
- {
- Assert(TransactionIdIsValid(parsed.twophase_xid));
- xact_redo_commit(&parsed, parsed.twophase_xid,
- record->EndRecPtr, XLogRecGetOrigin(record));
+ ParseCommitRecord(XLogRecGetInfo(record), xlrec, &parsed);
+ xact_redo_commit(&parsed, parsed.twophase_xid,
+ record->EndRecPtr, XLogRecGetOrigin(record));
- /* Delete TwoPhaseState gxact entry and/or 2PC file. */
- PrepareRedoRemove(parsed.twophase_xid, false);
- }
+ /* Delete TwoPhaseState gxact entry and/or 2PC file. */
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
+ PrepareRedoRemove(parsed.twophase_xid, false);
+ LWLockRelease(TwoPhaseStateLock);
}
- else if (info == XLOG_XACT_ABORT || info == XLOG_XACT_ABORT_PREPARED)
+ else if (info == XLOG_XACT_ABORT)
{
xl_xact_abort *xlrec = (xl_xact_abort *) XLogRecGetData(record);
xl_xact_parsed_abort parsed;
- ParseAbortRecord(XLogRecGetInfo(record), xlrec,
- &parsed);
+ ParseAbortRecord(XLogRecGetInfo(record), xlrec, &parsed);
+ xact_redo_abort(&parsed, XLogRecGetXid(record));
+ }
+ else if (info == XLOG_XACT_ABORT_PREPARED)
+ {
+ xl_xact_abort *xlrec = (xl_xact_abort *) XLogRecGetData(record);
+ xl_xact_parsed_abort parsed;
- if (info == XLOG_XACT_ABORT)
- {
- Assert(!TransactionIdIsValid(parsed.twophase_xid));
- xact_redo_abort(&parsed, XLogRecGetXid(record));
- }
- else
- {
- Assert(TransactionIdIsValid(parsed.twophase_xid));
- xact_redo_abort(&parsed, parsed.twophase_xid);
+ ParseAbortRecord(XLogRecGetInfo(record), xlrec, &parsed);
+ xact_redo_abort(&parsed, parsed.twophase_xid);
- /* Delete TwoPhaseState gxact entry and/or 2PC file. */
- PrepareRedoRemove(parsed.twophase_xid, false);
- }
+ /* Delete TwoPhaseState gxact entry and/or 2PC file. */
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
+ PrepareRedoRemove(parsed.twophase_xid, false);
+ LWLockRelease(TwoPhaseStateLock);
}
else if (info == XLOG_XACT_PREPARE)
{
@@ -6514,9 +6516,11 @@ xact_redo(XLogReaderState *record)
* Store xid and start/end pointers of the WAL record in TwoPhaseState
* gxact entry.
*/
+ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
PrepareRedoAdd(XLogRecGetData(record),
record->ReadRecPtr,
record->EndRecPtr);
+ LWLockRelease(TwoPhaseStateLock);
}
else if (info == XLOG_XACT_ASSIGNMENT)
{
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b29f283e6a..a07bb572ea 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -8402,6 +8402,11 @@ ShutdownXLOG(int code, Datum arg)
(errmsg("shutting down")));
/*
+ * Signal walsenders to move to stopping state.
+ */
+ WalSndInitStopping();
+
+ /*
* Wait for WAL senders to be in stopping state. This prevents commands
* from writing new WAL.
*/
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index b3223d691d..fb905c0a1c 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -449,7 +449,7 @@ pg_last_wal_replay_lsn(PG_FUNCTION_ARGS)
/*
* Compute an xlog file name and decimal byte offset given a WAL location,
- * such as is returned by pg_stop_backup() or pg_xlog_switch().
+ * such as is returned by pg_stop_backup() or pg_switch_wal().
*
* Note that a location exactly at a segment boundary is taken to be in
* the previous segment. This is usually the right thing, since the
@@ -515,7 +515,7 @@ pg_walfile_name_offset(PG_FUNCTION_ARGS)
/*
* Compute an xlog file name given a WAL location,
- * such as is returned by pg_stop_backup() or pg_xlog_switch().
+ * such as is returned by pg_stop_backup() or pg_switch_wal().
*/
Datum
pg_walfile_name(PG_FUNCTION_ARGS)
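A quick way to exercise the renamed functions named in these comments, assuming suitable privileges on a v10 server (illustrative only):

    -- pg_switch_wal() returns a WAL location; pg_walfile_name() maps it
    -- to the corresponding WAL file name.
    SELECT pg_walfile_name(pg_switch_wal());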
diff --git a/src/backend/bootstrap/bootparse.y b/src/backend/bootstrap/bootparse.y
index de3695c7e0..2e1fef0350 100644
--- a/src/backend/bootstrap/bootparse.y
+++ b/src/backend/bootstrap/bootparse.y
@@ -323,6 +323,7 @@ Boot_DeclareIndexStmt:
$4,
false,
false,
+ false,
true, /* skip_build */
false);
do_end();
@@ -366,6 +367,7 @@ Boot_DeclareUniqueIndexStmt:
$5,
false,
false,
+ false,
true, /* skip_build */
false);
do_end();
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index c2274ae2ff..7be0e30a74 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -878,6 +878,11 @@ InsertOneNull(int i)
{
elog(DEBUG4, "inserting column %d NULL", i);
Assert(i >= 0 && i < MAXATTR);
+ if (boot_reldesc->rd_att->attrs[i]->attnotnull)
+ elog(ERROR,
+ "NULL value specified for not-null column \"%s\" of relation \"%s\"",
+ NameStr(boot_reldesc->rd_att->attrs[i]->attname),
+ RelationGetRelationName(boot_reldesc));
values[i] = PointerGetDatum(NULL);
Nulls[i] = true;
}
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 387a3be701..304e3c4bc3 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -2738,7 +2738,7 @@ ExecGrant_Largeobject(InternalGrant *istmt)
tuple = systable_getnext(scan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "cache lookup failed for large object %u", loid);
+ elog(ERROR, "could not find tuple for large object %u", loid);
form_lo_meta = (Form_pg_largeobject_metadata) GETSTRUCT(tuple);
@@ -5503,7 +5503,7 @@ recordExtObjInitPriv(Oid objoid, Oid classoid)
tuple = systable_getnext(scan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "cache lookup failed for large object %u", objoid);
+ elog(ERROR, "could not find tuple for large object %u", objoid);
aclDatum = heap_getattr(tuple,
Anum_pg_largeobject_metadata_lomacl,
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 2e8cd10ebb..0f7ffef6d0 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -39,6 +39,7 @@
#include "catalog/pg_shseclabel.h"
#include "catalog/pg_subscription.h"
#include "catalog/pg_tablespace.h"
+#include "catalog/pg_type.h"
#include "catalog/toasting.h"
#include "catalog/pgxc_node.h"
#include "catalog/pgxc_group.h"
@@ -357,6 +358,14 @@ GetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn)
ScanKeyData key;
bool collides;
+ /*
+ * We should never be asked to generate a new pg_type OID during
+ * pg_upgrade; doing so would risk collisions with the OIDs it wants to
+ * assign. Hitting this assert means there's some path where we failed to
+ * ensure that a type OID is determined by commands in the dump script.
+ */
+ Assert(!IsBinaryUpgrade || RelationGetRelid(relation) != TypeRelationId);
+
InitDirtySnapshot(SnapshotDirty);
/* Generate new OIDs until we find one not in the table */
@@ -408,6 +417,13 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
bool collides;
BackendId backend;
+ /*
+ * If we ever get here during pg_upgrade, there's something wrong; all
+ * relfilenode assignments during a binary-upgrade run should be
+ * determined by commands in the dump script.
+ */
+ Assert(!IsBinaryUpgrade);
+
switch (relpersistence)
{
case RELPERSISTENCE_TEMP:
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index ea3d2ade21..a1b7bd2f72 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -3541,9 +3541,14 @@ StorePartitionKey(Relation rel,
recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
- referenced.classId = CollationRelationId;
- referenced.objectId = partcollation[i];
- referenced.objectSubId = 0;
+ /* The default collation is pinned, so don't bother recording it */
+ if (OidIsValid(partcollation[i]) &&
+ partcollation[i] != DEFAULT_COLLATION_OID)
+ {
+ referenced.classId = CollationRelationId;
+ referenced.objectId = partcollation[i];
+ referenced.objectSubId = 0;
+ }
recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
}
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 9104855ce2..d0d208e98d 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -3477,8 +3477,8 @@ reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
ereport(INFO,
(errmsg("index \"%s\" was reindexed",
get_rel_name(indexId)),
- errdetail("%s.",
- pg_rusage_show(&ru0))));
+ errdetail_internal("%s",
+ pg_rusage_show(&ru0))));
/* Close rels, but keep locks */
index_close(iRel, NoLock);
diff --git a/src/backend/catalog/information_schema.sql b/src/backend/catalog/information_schema.sql
index cbcd6cfbc1..98bcfa08c6 100644
--- a/src/backend/catalog/information_schema.sql
+++ b/src/backend/catalog/information_schema.sql
@@ -2936,12 +2936,14 @@ CREATE VIEW user_mapping_options AS
SELECT authorization_identifier,
foreign_server_catalog,
foreign_server_name,
- CAST((pg_options_to_table(um.umoptions)).option_name AS sql_identifier) AS option_name,
+ CAST(opts.option_name AS sql_identifier) AS option_name,
CAST(CASE WHEN (umuser <> 0 AND authorization_identifier = current_user)
OR (umuser = 0 AND pg_has_role(srvowner, 'USAGE'))
- OR (SELECT rolsuper FROM pg_authid WHERE rolname = current_user) THEN (pg_options_to_table(um.umoptions)).option_value
+ OR (SELECT rolsuper FROM pg_authid WHERE rolname = current_user)
+ THEN opts.option_value
ELSE NULL END AS character_data) AS option_value
- FROM _pg_user_mappings um;
+ FROM _pg_user_mappings um,
+ pg_options_to_table(um.umoptions) opts;
GRANT SELECT ON user_mapping_options TO PUBLIC;
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index d7f6075b13..a29a232e8b 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -3898,7 +3898,7 @@ InitTempTableNamespace(void)
if (IsParallelWorker())
ereport(ERROR,
(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
- errmsg("cannot create temporary tables in parallel mode")));
+ errmsg("cannot create temporary tables during a parallel operation")));
#ifdef XCP
/*
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 6a365dceec..05096959de 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -1849,8 +1849,13 @@ get_object_address_defacl(List *object, bool missing_ok)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized default ACL object type %c", objtype),
- errhint("Valid object types are \"r\", \"S\", \"f\", \"T\" and \"s\".")));
+ errmsg("unrecognized default ACL object type \"%c\"", objtype),
+ errhint("Valid object types are \"%c\", \"%c\", \"%c\", \"%c\", \"%c\".",
+ DEFACLOBJ_RELATION,
+ DEFACLOBJ_SEQUENCE,
+ DEFACLOBJ_FUNCTION,
+ DEFACLOBJ_TYPE,
+ DEFACLOBJ_NAMESPACE)));
}
/*
@@ -3345,7 +3350,7 @@ getObjectDescription(const ObjectAddress *object)
tuple = systable_getnext(sscan);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "cache lookup failed for policy %u",
+ elog(ERROR, "could not find tuple for policy %u",
object->objectId);
form_policy = (Form_pg_policy) GETSTRUCT(tuple);
diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c
index 37fa1458be..a7c9b9a46c 100644
--- a/src/backend/catalog/partition.c
+++ b/src/backend/catalog/partition.c
@@ -454,6 +454,7 @@ RelationBuildPartitionDesc(Relation rel)
palloc0(sizeof(PartitionBoundInfoData));
boundinfo->strategy = key->strategy;
boundinfo->ndatums = ndatums;
+ boundinfo->null_index = -1;
boundinfo->datums = (Datum **) palloc0(ndatums * sizeof(Datum *));
/* Initialize mapping array with invalid values */
@@ -503,8 +504,6 @@ RelationBuildPartitionDesc(Relation rel)
mapping[null_index] = next_index++;
boundinfo->null_index = mapping[null_index];
}
- else
- boundinfo->null_index = -1;
/* All partitions must now have a valid mapping */
Assert(next_index == nparts);
@@ -874,7 +873,8 @@ get_partition_parent(Oid relid)
NULL, 2, key);
tuple = systable_getnext(scan);
- Assert(HeapTupleIsValid(tuple));
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "could not find tuple for parent of relation %u", relid);
form = (Form_pg_inherits) GETSTRUCT(tuple);
result = form->inhparent;
diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c
index ab5f3719fc..c69c461b62 100644
--- a/src/backend/catalog/pg_subscription.c
+++ b/src/backend/catalog/pg_subscription.c
@@ -227,17 +227,22 @@ textarray_to_stringlist(ArrayType *textarray)
/*
* Set the state of a subscription table.
*
+ * If update_only is true and the record for the given table doesn't exist,
+ * do nothing. This can be used to avoid inserting a new record that was
+ * deleted by someone else. Generally, subscription DDL commands should use
+ * false; workers should use true.
+ *
* The insert-or-update logic in this function is not concurrency safe so it
* might raise an error in rare circumstances. But if we took a stronger lock
* such as ShareRowExclusiveLock, we would risk more deadlocks.
*/
Oid
SetSubscriptionRelState(Oid subid, Oid relid, char state,
- XLogRecPtr sublsn)
+ XLogRecPtr sublsn, bool update_only)
{
Relation rel;
HeapTuple tup;
- Oid subrelid;
+ Oid subrelid = InvalidOid;
bool nulls[Natts_pg_subscription_rel];
Datum values[Natts_pg_subscription_rel];
@@ -252,7 +257,7 @@ SetSubscriptionRelState(Oid subid, Oid relid, char state,
* If the record for the given table does not exist yet, create a new one;
* otherwise update the existing record.
*/
- if (!HeapTupleIsValid(tup))
+ if (!HeapTupleIsValid(tup) && !update_only)
{
/* Form the tuple. */
memset(values, 0, sizeof(values));
@@ -272,7 +277,7 @@ SetSubscriptionRelState(Oid subid, Oid relid, char state,
heap_freetuple(tup);
}
- else
+ else if (HeapTupleIsValid(tup))
{
bool replaces[Natts_pg_subscription_rel];
@@ -396,7 +401,7 @@ RemoveSubscriptionRel(Oid subid, Oid relid)
scan = heap_beginscan_catalog(rel, nkeys, skey);
while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
{
- simple_heap_delete(rel, &tup->t_self);
+ CatalogTupleDelete(rel, &tup->t_self);
}
heap_endscan(scan);
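
For context, a minimal sketch of the two kinds of callers the new update_only flag anticipates (hypothetical call sites; the subrel state constants and InvalidXLogRecPtr are the ones used elsewhere in this patch, and lsn is a placeholder):

    /* DDL path (e.g. CREATE SUBSCRIPTION): insert the record if missing */
    SetSubscriptionRelState(subid, relid, SUBREL_STATE_INIT,
                            InvalidXLogRecPtr, false);

    /* worker path: only update an existing record; if a concurrent DROP
     * already removed it, do nothing rather than resurrect it */
    SetSubscriptionRelState(subid, relid, SUBREL_STATE_READY,
                            lsn, true);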
diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c
index 110fb7ef65..91b65b174d 100644
--- a/src/backend/commands/collationcmds.c
+++ b/src/backend/commands/collationcmds.c
@@ -120,6 +120,18 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e
collprovider = ((Form_pg_collation) GETSTRUCT(tp))->collprovider;
ReleaseSysCache(tp);
+
+ /*
+ * Copying the "default" collation is not allowed because most code
+ * checks for DEFAULT_COLLATION_OID instead of COLLPROVIDER_DEFAULT,
+ * and so having a second collation with COLLPROVIDER_DEFAULT would
+ * not work and could potentially confuse or crash some code. This
+ * could be fixed with some legwork.
+ */
+ if (collprovider == COLLPROVIDER_DEFAULT)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("collation \"default\" cannot be copied")));
}
if (localeEl)
@@ -411,10 +423,10 @@ get_icu_locale_comment(const char *localename)
Datum
pg_import_system_collations(PG_FUNCTION_ARGS)
{
-#if defined(HAVE_LOCALE_T) && !defined(WIN32)
bool if_not_exists = PG_GETARG_BOOL(0);
Oid nspid = PG_GETARG_OID(1);
+#if defined(HAVE_LOCALE_T) && !defined(WIN32)
FILE *locale_a_handle;
char localebuf[NAMEDATALEN]; /* we assume ASCII so this is fine */
int count = 0;
@@ -431,6 +443,12 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to import system collations"))));
+#if !(defined(HAVE_LOCALE_T) && !defined(WIN32)) && !defined(USE_ICU)
+ /* silence compiler warnings */
+ (void) if_not_exists;
+ (void) nspid;
+#endif
+
#if defined(HAVE_LOCALE_T) && !defined(WIN32)
locale_a_handle = OpenPipeStream("locale -a", "r");
if (locale_a_handle == NULL)
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 5d5e409c7d..3ae52116f4 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -1622,7 +1622,7 @@ BeginCopy(ParseState *pstate,
}
/* plan the query */
- plan = pg_plan_query(query, 0, NULL);
+ plan = pg_plan_query(query, CURSOR_OPT_PARALLEL_OK, NULL);
/*
* With row level security and a user using "COPY relation TO", we
@@ -2827,9 +2827,24 @@ CopyFrom(CopyState cstate)
}
else
{
+ /*
+ * We always check the partition constraint, including when
+ * the tuple got here via tuple-routing. However we don't
+ * need to in the latter case if no BR trigger is defined on
+ * the partition. Note that a BR trigger might modify the
+ * tuple such that the partition constraint is no longer
+ * satisfied, so we need to check in that case.
+ */
+ bool check_partition_constr =
+ (resultRelInfo->ri_PartitionCheck != NIL);
+
+ if (saved_resultRelInfo != NULL &&
+ !(resultRelInfo->ri_TrigDesc &&
+ resultRelInfo->ri_TrigDesc->trig_insert_before_row))
+ check_partition_constr = false;
+
/* Check the constraints of the tuple */
- if (cstate->rel->rd_att->constr ||
- resultRelInfo->ri_PartitionCheck)
+ if (cstate->rel->rd_att->constr || check_partition_constr)
ExecConstraints(resultRelInfo, slot, estate);
if (useHeapMultiInsert)
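
The same skip rule recurs in ExecInsert() later in this patch; condensed into one hypothetical predicate over the fields referenced above (a sketch, not code from the patch):

    /* Check the partition constraint unless the tuple arrived via tuple
     * routing and the partition has no BEFORE ROW INSERT trigger: routing
     * itself proves the constraint, but a BR trigger may break it again. */
    static bool
    must_check_partition_constr(ResultRelInfo *rri, bool tuple_was_routed)
    {
        if (rri->ri_PartitionCheck == NIL)
            return false;   /* nothing to check */
        if (tuple_was_routed &&
            !(rri->ri_TrigDesc && rri->ri_TrigDesc->trig_insert_before_row))
            return false;   /* routing already validated the tuple */
        return true;
    }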
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 80c352ed0c..197955624f 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -2390,7 +2390,7 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
extTup = systable_getnext(extScan);
if (!HeapTupleIsValid(extTup)) /* should not happen */
- elog(ERROR, "extension with oid %u does not exist",
+ elog(ERROR, "could not find tuple for extension %u",
CurrentExtensionObject);
memset(repl_val, 0, sizeof(repl_val));
@@ -2538,7 +2538,7 @@ extension_config_remove(Oid extensionoid, Oid tableoid)
extTup = systable_getnext(extScan);
if (!HeapTupleIsValid(extTup)) /* should not happen */
- elog(ERROR, "extension with oid %u does not exist",
+ elog(ERROR, "could not find tuple for extension %u",
extensionoid);
/* Search extconfig for the tableoid */
@@ -2739,7 +2739,8 @@ AlterExtensionNamespace(const char *extensionName, const char *newschema, Oid *o
extTup = systable_getnext(extScan);
if (!HeapTupleIsValid(extTup)) /* should not happen */
- elog(ERROR, "extension with oid %u does not exist", extensionOid);
+ elog(ERROR, "could not find tuple for extension %u",
+ extensionOid);
/* Copy tuple so we can modify it below */
extTup = heap_copytuple(extTup);
@@ -3060,7 +3061,7 @@ ApplyExtensionUpdates(Oid extensionOid,
extTup = systable_getnext(extScan);
if (!HeapTupleIsValid(extTup)) /* should not happen */
- elog(ERROR, "extension with oid %u does not exist",
+ elog(ERROR, "could not find tuple for extension %u",
extensionOid);
extForm = (Form_pg_extension) GETSTRUCT(extTup);
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 87ff7faf48..f611e3e394 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -301,6 +301,9 @@ CheckIndexCompatible(Oid oldId,
* 'is_alter_table': this is due to an ALTER rather than a CREATE operation.
* 'check_rights': check for CREATE rights in namespace and tablespace. (This
* should be true except when ALTER is deleting/recreating an index.)
+ * 'check_not_in_use': check for table not already in use in current session.
+ * This should be true unless caller is holding the table open, in which
+ * case the caller had better have checked it earlier.
* 'skip_build': make the catalog entries but leave the index file empty;
* it will be filled later.
* 'quiet': suppress the NOTICE chatter ordinarily provided for constraints.
@@ -313,6 +316,7 @@ DefineIndex(Oid relationId,
Oid indexRelationId,
bool is_alter_table,
bool check_rights,
+ bool check_not_in_use,
bool skip_build,
bool quiet)
{
@@ -411,6 +415,15 @@ DefineIndex(Oid relationId,
errmsg("cannot create indexes on temporary tables of other sessions")));
/*
+ * Unless our caller vouches for having checked this already, insist that
+ * the table not be in use by our own session, either. Otherwise we might
+ * fail to make entries in the new index (for instance, if an INSERT or
+ * UPDATE is in progress and has already made its list of target indexes).
+ */
+ if (check_not_in_use)
+ CheckTableNotInUse(rel, "CREATE INDEX");
+
+ /*
* Verify we (still) have CREATE rights in the rel's namespace.
* (Presumably we did when the rel was created, but maybe not anymore.)
* Skip check if caller doesn't want it. Also skip check if
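
For illustration, a sketch of how an ordinary CREATE INDEX caller would now invoke DefineIndex() (the IndexStmt argument is assumed, since these hunks don't show it):

    address = DefineIndex(relationId,
                          stmt,         /* IndexStmt (assumed) */
                          InvalidOid,   /* no predefined OID */
                          false,        /* is_alter_table */
                          true,         /* check_rights */
                          true,         /* check_not_in_use: not holding it open */
                          false,        /* skip_build */
                          false);       /* quiet */

ATExecAddIndex(), by contrast, passes false for check_not_in_use (see the tablecmds.c hunk below), since ALTER TABLE has already run CheckTableNotInUse() on the relation it holds open.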
diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c
index 4a758426c3..dad31df517 100644
--- a/src/backend/commands/policy.c
+++ b/src/backend/commands/policy.c
@@ -474,7 +474,8 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
rel = relation_open(relid, AccessExclusiveLock);
- if (rel->rd_rel->relkind != RELKIND_RELATION)
+ if (rel->rd_rel->relkind != RELKIND_RELATION &&
+ rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table",
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index ed50208d51..13f818a036 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -142,12 +142,12 @@ static void create_seq_hashtable(void);
static void init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence_data read_seq_tuple(Relation rel,
Buffer *buf, HeapTuple seqdatatuple);
-static LOCKMODE alter_sequence_get_lock_level(List *options);
static void init_params(ParseState *pstate, List *options, bool for_identity,
bool isInit,
Form_pg_sequence seqform,
- bool *changed_seqform,
- Form_pg_sequence_data seqdataform, List **owned_by,
+ Form_pg_sequence_data seqdataform,
+ bool *need_seq_rewrite,
+ List **owned_by,
bool *is_restart);
static void do_setval(Oid relid, int64 next, bool iscalled);
static void process_owned_by(Relation seqrel, List *owned_by, bool for_identity);
@@ -162,7 +162,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq)
{
FormData_pg_sequence seqform;
FormData_pg_sequence_data seqdataform;
- bool changed_seqform = false; /* not used here */
+ bool need_seq_rewrite;
List *owned_by;
CreateStmt *stmt = makeNode(CreateStmt);
Oid seqoid;
@@ -210,8 +210,10 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq)
}
/* Check and set all option values */
- init_params(pstate, seq->options, seq->for_identity, true, &seqform,
- &changed_seqform, &seqdataform, &owned_by, &is_restart);
+ init_params(pstate, seq->options, seq->for_identity, true,
+ &seqform, &seqdataform,
+ &need_seq_rewrite, &owned_by,
+ &is_restart);
/*
* Create relation (and fill value[] and null[] for the tuple)
@@ -497,11 +499,10 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt)
SeqTable elm;
Relation seqrel;
Buffer buf;
- HeapTupleData seqdatatuple;
+ HeapTupleData datatuple;
Form_pg_sequence seqform;
- Form_pg_sequence_data seqdata;
- FormData_pg_sequence_data newseqdata;
- bool changed_seqform = false;
+ Form_pg_sequence_data newdataform;
+ bool need_seq_rewrite;
List *owned_by;
#ifdef PGXC
GTM_Sequence start_value;
@@ -514,11 +515,12 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt)
#endif
ObjectAddress address;
Relation rel;
- HeapTuple tuple;
+ HeapTuple seqtuple;
+ HeapTuple newdatatuple;
/* Open and lock sequence. */
relid = RangeVarGetRelid(stmt->sequence,
- alter_sequence_get_lock_level(stmt->options),
+ ShareRowExclusiveLock,
stmt->missing_ok);
if (relid == InvalidOid)
{
@@ -536,32 +538,33 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt)
stmt->sequence->relname);
rel = heap_open(SequenceRelationId, RowExclusiveLock);
- tuple = SearchSysCacheCopy1(SEQRELID,
- ObjectIdGetDatum(relid));
- if (!HeapTupleIsValid(tuple))
+ seqtuple = SearchSysCacheCopy1(SEQRELID,
+ ObjectIdGetDatum(relid));
+ if (!HeapTupleIsValid(seqtuple))
elog(ERROR, "cache lookup failed for sequence %u",
relid);
- seqform = (Form_pg_sequence) GETSTRUCT(tuple);
+ seqform = (Form_pg_sequence) GETSTRUCT(seqtuple);
/* lock page's buffer and read tuple into new sequence structure */
- seqdata = read_seq_tuple(seqrel, &buf, &seqdatatuple);
+ (void) read_seq_tuple(seqrel, &buf, &datatuple);
+
+ /* copy the existing sequence data tuple, so it can be modified locally */
+ newdatatuple = heap_copytuple(&datatuple);
+ newdataform = (Form_pg_sequence_data) GETSTRUCT(newdatatuple);
- /* Copy old sequence data into workspace */
- memcpy(&newseqdata, seqdata, sizeof(FormData_pg_sequence_data));
+ UnlockReleaseBuffer(buf);
/* Check and set new values */
- init_params(pstate, stmt->options, stmt->for_identity, false, seqform,
- &changed_seqform, &newseqdata, &owned_by, &is_restart);
+ init_params(pstate, stmt->options, stmt->for_identity, false,
+ seqform, newdataform,
+ &need_seq_rewrite, &owned_by,
+ &is_restart);
/* Clear local cache so that we don't think we have cached numbers */
/* Note that we do not change the currval() state */
elm->cached = elm->last;
- /* check the comment above nextval_internal()'s equivalent call. */
- if (RelationNeedsWAL(seqrel))
- GetTopTransactionId();
-
/* Now okay to update the on-disk tuple */
#ifdef PGXC
increment = seqform->seqincrement;
@@ -572,48 +575,40 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt)
cycle = seqform->seqcycle;
#endif
- START_CRIT_SECTION();
-
- memcpy(seqdata, &newseqdata, sizeof(FormData_pg_sequence_data));
-
- MarkBufferDirty(buf);
-
- /* XLOG stuff */
- if (RelationNeedsWAL(seqrel))
+ /* If needed, rewrite the sequence relation itself */
+ if (need_seq_rewrite)
{
- xl_seq_rec xlrec;
- XLogRecPtr recptr;
- Page page = BufferGetPage(buf);
-
- XLogBeginInsert();
- XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
-
- xlrec.node = seqrel->rd_node;
- XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
-
- XLogRegisterData((char *) seqdatatuple.t_data, seqdatatuple.t_len);
+ /* check the comment above nextval_internal()'s equivalent call. */
+ if (RelationNeedsWAL(seqrel))
+ GetTopTransactionId();
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG);
+ /*
+ * Create a new storage file for the sequence, making the state
+ * changes transactional. We want to keep the sequence's relfrozenxid
+ * at 0, since it won't contain any unfrozen XIDs. Same with
+ * relminmxid, since a sequence will never contain multixacts.
+ */
+ RelationSetNewRelfilenode(seqrel, seqrel->rd_rel->relpersistence,
+ InvalidTransactionId, InvalidMultiXactId);
- PageSetLSN(page, recptr);
+ /*
+ * Insert the modified tuple into the new storage file.
+ */
+ fill_seq_with_data(seqrel, newdatatuple);
}
- END_CRIT_SECTION();
-
- UnlockReleaseBuffer(buf);
-
/* process OWNED BY if given */
if (owned_by)
process_owned_by(seqrel, owned_by, stmt->for_identity);
+ /* update the pg_sequence tuple (we could skip this in some cases...) */
+ CatalogTupleUpdate(rel, &seqtuple->t_self, seqtuple);
+
InvokeObjectPostAlterHook(RelationRelationId, relid, 0);
ObjectAddressSet(address, RelationRelationId, relid);
- if (changed_seqform)
- CatalogTupleUpdate(rel, &tuple->t_self, tuple);
heap_close(rel, RowExclusiveLock);
-
relation_close(seqrel, NoLock);
#ifdef PGXC
@@ -1447,46 +1442,30 @@ read_seq_tuple(Relation rel, Buffer *buf, HeapTuple seqdatatuple)
}
/*
- * Check the sequence options list and return the appropriate lock level for
- * ALTER SEQUENCE.
- *
- * Most sequence option changes require a self-exclusive lock and should block
- * concurrent nextval() et al. But RESTART does not, because it's not
- * transactional. Also take a lower lock if no option at all is present.
- */
-static LOCKMODE
-alter_sequence_get_lock_level(List *options)
-{
- ListCell *option;
-
- foreach(option, options)
- {
- DefElem *defel = (DefElem *) lfirst(option);
-
- if (strcmp(defel->defname, "restart") != 0)
- return ShareRowExclusiveLock;
- }
-
- return RowExclusiveLock;
-}
-
-/*
* init_params: process the options list of CREATE or ALTER SEQUENCE, and
* store the values into appropriate fields of seqform, for changes that go
- * into the pg_sequence catalog, and seqdataform for changes to the sequence
- * relation itself. Set *changed_seqform to true if seqform was changed
- * (interesting for ALTER SEQUENCE). Also set *owned_by to any OWNED BY
- * option, or to NIL if there is none.
+ * into the pg_sequence catalog, and fields of seqdataform for changes to the
+ * sequence relation itself. Set *need_seq_rewrite to true if we changed any
+ * parameters that require rewriting the sequence's relation (interesting for
+ * ALTER SEQUENCE). Also set *owned_by to any OWNED BY option, or to NIL if
+ * there is none.
*
* If isInit is true, fill any unspecified options with default values;
* otherwise, do not change existing options that aren't explicitly overridden.
+ *
+ * Note: we force a sequence rewrite whenever we change parameters that affect
+ * generation of future sequence values, even if the seqdataform per se is not
+ * changed. This allows ALTER SEQUENCE to behave transactionally. Currently,
+ * the only option that doesn't cause that is OWNED BY. It's *necessary* for
+ * ALTER SEQUENCE OWNED BY to not rewrite the sequence, because that would
+ * break pg_upgrade by causing unwanted changes in the sequence's relfilenode.
*/
static void
init_params(ParseState *pstate, List *options, bool for_identity,
bool isInit,
Form_pg_sequence seqform,
- bool *changed_seqform,
Form_pg_sequence_data seqdataform,
+ bool *need_seq_rewrite,
List **owned_by,
bool *is_restart)
{
@@ -1506,6 +1485,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
*is_restart = false;
#endif
+ *need_seq_rewrite = false;
*owned_by = NIL;
foreach(option, options)
@@ -1520,6 +1500,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
errmsg("conflicting or redundant options"),
parser_errposition(pstate, defel->location)));
as_type = defel;
+ *need_seq_rewrite = true;
}
else if (strcmp(defel->defname, "increment") == 0)
{
@@ -1529,6 +1510,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
errmsg("conflicting or redundant options"),
parser_errposition(pstate, defel->location)));
increment_by = defel;
+ *need_seq_rewrite = true;
}
else if (strcmp(defel->defname, "start") == 0)
{
@@ -1538,6 +1520,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
errmsg("conflicting or redundant options"),
parser_errposition(pstate, defel->location)));
start_value = defel;
+ *need_seq_rewrite = true;
}
else if (strcmp(defel->defname, "restart") == 0)
{
@@ -1547,6 +1530,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
errmsg("conflicting or redundant options"),
parser_errposition(pstate, defel->location)));
restart_value = defel;
+ *need_seq_rewrite = true;
}
else if (strcmp(defel->defname, "maxvalue") == 0)
{
@@ -1556,6 +1540,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
errmsg("conflicting or redundant options"),
parser_errposition(pstate, defel->location)));
max_value = defel;
+ *need_seq_rewrite = true;
}
else if (strcmp(defel->defname, "minvalue") == 0)
{
@@ -1565,6 +1550,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
errmsg("conflicting or redundant options"),
parser_errposition(pstate, defel->location)));
min_value = defel;
+ *need_seq_rewrite = true;
}
else if (strcmp(defel->defname, "cache") == 0)
{
@@ -1574,6 +1560,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
errmsg("conflicting or redundant options"),
parser_errposition(pstate, defel->location)));
cache_value = defel;
+ *need_seq_rewrite = true;
}
else if (strcmp(defel->defname, "cycle") == 0)
{
@@ -1583,6 +1570,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
errmsg("conflicting or redundant options"),
parser_errposition(pstate, defel->location)));
is_cycled = defel;
+ *need_seq_rewrite = true;
}
else if (strcmp(defel->defname, "owned_by") == 0)
{
@@ -1610,8 +1598,6 @@ init_params(ParseState *pstate, List *options, bool for_identity,
defel->defname);
}
- *changed_seqform = false;
-
/*
* We must reset log_cnt when isInit or when changing any parameters that
* would affect future nextval allocations.
@@ -1652,19 +1638,16 @@ init_params(ParseState *pstate, List *options, bool for_identity,
}
seqform->seqtypid = newtypid;
- *changed_seqform = true;
}
else if (isInit)
{
seqform->seqtypid = INT8OID;
- *changed_seqform = true;
}
/* INCREMENT BY */
if (increment_by != NULL)
{
seqform->seqincrement = defGetInt64(increment_by);
- *changed_seqform = true;
if (seqform->seqincrement == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -1674,28 +1657,24 @@ init_params(ParseState *pstate, List *options, bool for_identity,
else if (isInit)
{
seqform->seqincrement = 1;
- *changed_seqform = true;
}
/* CYCLE */
if (is_cycled != NULL)
{
seqform->seqcycle = intVal(is_cycled->arg);
- *changed_seqform = true;
Assert(BoolIsValid(seqform->seqcycle));
seqdataform->log_cnt = 0;
}
else if (isInit)
{
seqform->seqcycle = false;
- *changed_seqform = true;
}
/* MAXVALUE (null arg means NO MAXVALUE) */
if (max_value != NULL && max_value->arg)
{
seqform->seqmax = defGetInt64(max_value);
- *changed_seqform = true;
seqdataform->log_cnt = 0;
}
else if (isInit || max_value != NULL || reset_max_value)
@@ -1712,7 +1691,6 @@ init_params(ParseState *pstate, List *options, bool for_identity,
}
else
seqform->seqmax = -1; /* descending seq */
- *changed_seqform = true;
seqdataform->log_cnt = 0;
}
@@ -1734,7 +1712,6 @@ init_params(ParseState *pstate, List *options, bool for_identity,
if (min_value != NULL && min_value->arg)
{
seqform->seqmin = defGetInt64(min_value);
- *changed_seqform = true;
seqdataform->log_cnt = 0;
}
else if (isInit || min_value != NULL || reset_min_value)
@@ -1751,7 +1728,6 @@ init_params(ParseState *pstate, List *options, bool for_identity,
}
else
seqform->seqmin = 1; /* ascending seq */
- *changed_seqform = true;
seqdataform->log_cnt = 0;
}
@@ -1787,7 +1763,6 @@ init_params(ParseState *pstate, List *options, bool for_identity,
if (start_value != NULL)
{
seqform->seqstart = defGetInt64(start_value);
- *changed_seqform = true;
}
else if (isInit)
{
@@ -1795,7 +1770,6 @@ init_params(ParseState *pstate, List *options, bool for_identity,
seqform->seqstart = seqform->seqmin; /* ascending seq */
else
seqform->seqstart = seqform->seqmax; /* descending seq */
- *changed_seqform = true;
}
/* crosscheck START */
@@ -1874,7 +1848,6 @@ init_params(ParseState *pstate, List *options, bool for_identity,
if (cache_value != NULL)
{
seqform->seqcache = defGetInt64(cache_value);
- *changed_seqform = true;
if (seqform->seqcache <= 0)
{
char buf[100];
@@ -1890,7 +1863,6 @@ init_params(ParseState *pstate, List *options, bool for_identity,
else if (isInit)
{
seqform->seqcache = 1;
- *changed_seqform = true;
}
}
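
The reworked ALTER SEQUENCE flow, condensed from the hunks above into a few lines (a sketch, not a verbatim excerpt):

    /* parse options; need_seq_rewrite is set if future values are affected */
    init_params(pstate, stmt->options, stmt->for_identity, false,
                seqform, newdataform,
                &need_seq_rewrite, &owned_by, &is_restart);

    if (need_seq_rewrite)
    {
        /* transactional rewrite: new storage file, then refill it */
        RelationSetNewRelfilenode(seqrel, seqrel->rd_rel->relpersistence,
                                  InvalidTransactionId, InvalidMultiXactId);
        fill_seq_with_data(seqrel, newdatatuple);
    }

    /* the pg_sequence catalog row is updated either way */
    CatalogTupleUpdate(rel, &seqtuple->t_self, seqtuple);

OWNED BY is deliberately the one option that never sets need_seq_rewrite, so ALTER SEQUENCE ... OWNED BY leaves the relfilenode alone and stays pg_upgrade-safe.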
diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c
index 2b3785f394..ea0a561401 100644
--- a/src/backend/commands/statscmds.c
+++ b/src/backend/commands/statscmds.c
@@ -301,8 +301,7 @@ CreateStatistics(CreateStatsStmt *stmt)
/* insert it into pg_statistic_ext */
statrel = heap_open(StatisticExtRelationId, RowExclusiveLock);
htup = heap_form_tuple(statrel->rd_att, values, nulls);
- CatalogTupleInsert(statrel, htup);
- statoid = HeapTupleGetOid(htup);
+ statoid = CatalogTupleInsert(statrel, htup);
heap_freetuple(htup);
relation_close(statrel, RowExclusiveLock);
@@ -372,7 +371,7 @@ RemoveStatisticsById(Oid statsOid)
CacheInvalidateRelcacheByRelid(relid);
- simple_heap_delete(relation, &tup->t_self);
+ CatalogTupleDelete(relation, &tup->t_self);
ReleaseSysCache(tup);
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index 86eb31df93..5aae7b6f91 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -64,12 +64,14 @@ static void
parse_subscription_options(List *options, bool *connect, bool *enabled_given,
bool *enabled, bool *create_slot,
bool *slot_name_given, char **slot_name,
- bool *copy_data, char **synchronous_commit)
+ bool *copy_data, char **synchronous_commit,
+ bool *refresh)
{
ListCell *lc;
bool connect_given = false;
bool create_slot_given = false;
bool copy_data_given = false;
+ bool refresh_given = false;
/* If connect is specified, the others also need to be. */
Assert(!connect || (enabled && create_slot && copy_data));
@@ -92,6 +94,8 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
*copy_data = true;
if (synchronous_commit)
*synchronous_commit = NULL;
+ if (refresh)
+ *refresh = true;
/* Parse options */
foreach(lc, options)
@@ -167,6 +171,16 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
PGC_BACKEND, PGC_S_TEST, GUC_ACTION_SET,
false, 0, false);
}
+ else if (strcmp(defel->defname, "refresh") == 0 && refresh)
+ {
+ if (refresh_given)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+
+ refresh_given = true;
+ *refresh = defGetBoolean(defel);
+ }
else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@@ -315,7 +329,8 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
*/
parse_subscription_options(stmt->options, &connect, &enabled_given,
&enabled, &create_slot, &slotname_given,
- &slotname, &copy_data, &synchronous_commit);
+ &slotname, &copy_data, &synchronous_commit,
+ NULL);
/*
* Since creating a replication slot is not transactional, rolling back
@@ -436,12 +451,9 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
rv->schemaname, rv->relname);
SetSubscriptionRelState(subid, relid, table_state,
- InvalidXLogRecPtr);
+ InvalidXLogRecPtr, false);
}
- ereport(NOTICE,
- (errmsg("synchronized table states")));
-
/*
* If requested, create permanent slot for the subscription. We
* won't use the initial snapshot for anything, so no need to
@@ -559,7 +571,7 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
{
SetSubscriptionRelState(sub->oid, relid,
copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY,
- InvalidXLogRecPtr);
+ InvalidXLogRecPtr, false);
ereport(NOTICE,
(errmsg("added subscription for table %s.%s",
quote_identifier(rv->schemaname),
@@ -585,6 +597,8 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
RemoveSubscriptionRel(sub->oid, relid);
+ logicalrep_worker_stop(sub->oid, relid);
+
namespace = get_namespace_name(get_rel_namespace(relid));
ereport(NOTICE,
(errmsg("removed subscription for table %s.%s",
@@ -645,7 +659,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
parse_subscription_options(stmt->options, NULL, NULL, NULL,
NULL, &slotname_given, &slotname,
- NULL, &synchronous_commit);
+ NULL, &synchronous_commit, NULL);
if (slotname_given)
{
@@ -680,7 +694,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
parse_subscription_options(stmt->options, NULL,
&enabled_given, &enabled, NULL,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
Assert(enabled_given);
if (!sub->slotname && enabled)
@@ -712,13 +726,13 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
break;
case ALTER_SUBSCRIPTION_PUBLICATION:
- case ALTER_SUBSCRIPTION_PUBLICATION_REFRESH:
{
bool copy_data;
+ bool refresh;
parse_subscription_options(stmt->options, NULL, NULL, NULL,
NULL, NULL, NULL, &copy_data,
- NULL);
+ NULL, &refresh);
values[Anum_pg_subscription_subpublications - 1] =
publicationListToArray(stmt->publication);
@@ -727,12 +741,13 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
update_tuple = true;
/* Refresh if user asked us to. */
- if (stmt->kind == ALTER_SUBSCRIPTION_PUBLICATION_REFRESH)
+ if (refresh)
{
if (!sub->enabled)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions")));
+ errmsg("ALTER SUBSCRIPTION with refresh is not allowed for disabled subscriptions"),
+ errhint("Use ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false).")));
/* Make sure refresh sees the new list of publications. */
sub->publications = stmt->publication;
@@ -754,7 +769,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
parse_subscription_options(stmt->options, NULL, NULL, NULL,
NULL, NULL, NULL, &copy_data,
- NULL);
+ NULL, NULL);
AlterSubscription_refresh(sub, copy_data);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 78ab65f600..7c260082bd 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -299,6 +299,14 @@ struct DropRelationCallbackState
#define ATT_COMPOSITE_TYPE 0x0010
#define ATT_FOREIGN_TABLE 0x0020
+/*
+ * Partition tables are expected to be dropped when the parent partitioned
+ * table gets dropped. Hence for partitioning we use an AUTO dependency;
+ * for regular inheritance we use a NORMAL dependency.
+ */
+#define child_dependency_type(child_is_partition) \
+ ((child_is_partition) ? DEPENDENCY_AUTO : DEPENDENCY_NORMAL)
+
static void truncate_check_rel(Relation rel);
static List *MergeAttributes(List *schema, List *supers, char relpersistence,
bool is_partition, List **supOids, List **supconstr,
@@ -455,7 +463,8 @@ static void ATExecEnableDisableRule(Relation rel, char *rulename,
static void ATPrepAddInherit(Relation child_rel);
static ObjectAddress ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode);
static ObjectAddress ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode);
-static void drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid);
+static void drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid,
+ DependencyType deptype);
static ObjectAddress ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode);
static void ATExecDropOf(Relation rel, LOCKMODE lockmode);
static void ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode);
@@ -2428,14 +2437,8 @@ StoreCatalogInheritance1(Oid relationId, Oid parentOid,
childobject.objectId = relationId;
childobject.objectSubId = 0;
- /*
- * Partition tables are expected to be dropped when the parent partitioned
- * table gets dropped.
- */
- if (child_is_partition)
- recordDependencyOn(&childobject, &parentobject, DEPENDENCY_AUTO);
- else
- recordDependencyOn(&childobject, &parentobject, DEPENDENCY_NORMAL);
+ recordDependencyOn(&childobject, &parentobject,
+ child_dependency_type(child_is_partition));
/*
* Post creation hook of this inheritance. Since object_access_hook
@@ -6904,6 +6907,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
InvalidOid, /* no predefined OID */
true, /* is_alter_table */
check_rights,
+ false, /* check_not_in_use - we did it already */
skip_build,
quiet);
@@ -11900,7 +11904,8 @@ RemoveInheritance(Relation child_rel, Relation parent_rel)
drop_parent_dependency(RelationGetRelid(child_rel),
RelationRelationId,
- RelationGetRelid(parent_rel));
+ RelationGetRelid(parent_rel),
+ child_dependency_type(child_is_partition));
/*
* Post alter hook of this inherits. Since object_access_hook doesn't take
@@ -11920,7 +11925,8 @@ RemoveInheritance(Relation child_rel, Relation parent_rel)
* through pg_depend.
*/
static void
-drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid)
+drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid,
+ DependencyType deptype)
{
Relation catalogRelation;
SysScanDesc scan;
@@ -11952,7 +11958,7 @@ drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid)
if (dep->refclassid == refclassid &&
dep->refobjid == refobjid &&
dep->refobjsubid == 0 &&
- dep->deptype == DEPENDENCY_NORMAL)
+ dep->deptype == deptype)
CatalogTupleDelete(catalogRelation, &depTuple->t_self);
}
@@ -12073,7 +12079,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
/* If the table was already typed, drop the existing dependency. */
if (rel->rd_rel->reloftype)
- drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);
+ drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype,
+ DEPENDENCY_NORMAL);
/* Record a dependency on the new type. */
tableobj.classId = RelationRelationId;
@@ -12126,7 +12133,8 @@ ATExecDropOf(Relation rel, LOCKMODE lockmode)
* table is presumed enough rights. No lock required on the type, either.
*/
- drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);
+ drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype,
+ DEPENDENCY_NORMAL);
/* Clear pg_class.reloftype */
relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
@@ -14227,7 +14235,6 @@ ComputePartitionAttrs(Relation rel, List *partParams, AttrNumber *partattrs,
static ObjectAddress
ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
{
- PartitionKey key = RelationGetPartitionKey(rel);
Relation attachRel,
catalog;
List *childrels;
@@ -14413,11 +14420,6 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
{
int num_check = attachRel_constr->num_check;
int i;
- Bitmapset *not_null_attrs = NULL;
- List *part_constr;
- ListCell *lc;
- bool partition_accepts_null = true;
- int partnatts;
if (attachRel_constr->has_not_null)
{
@@ -14447,7 +14449,6 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
ntest->argisrow = false;
ntest->location = -1;
existConstraint = lappend(existConstraint, ntest);
- not_null_attrs = bms_add_member(not_null_attrs, i);
}
}
}
@@ -14481,59 +14482,8 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
existConstraint = list_make1(make_ands_explicit(existConstraint));
/* And away we go ... */
- if (predicate_implied_by(partConstraint, existConstraint))
+ if (predicate_implied_by(partConstraint, existConstraint, true))
skip_validate = true;
-
- /*
- * We choose to err on the safer side, i.e., give up on skipping the
- * validation scan, if the partition key column doesn't have the NOT
- * NULL constraint and the table is to become a list partition that
- * does not accept nulls. In this case, the partition predicate
- * (partConstraint) does include an 'key IS NOT NULL' expression,
- * however, because of the way predicate_implied_by_simple_clause() is
- * designed to handle IS NOT NULL predicates in the absence of a IS
- * NOT NULL clause, we cannot rely on just the above proof.
- *
- * That is not an issue in case of a range partition, because if there
- * were no NOT NULL constraint defined on the key columns, an error
- * would be thrown before we get here anyway. That is not true,
- * however, if any of the partition keys is an expression, which is
- * handled below.
- */
- part_constr = linitial(partConstraint);
- part_constr = make_ands_implicit((Expr *) part_constr);
-
- /*
- * part_constr contains an IS NOT NULL expression, if this is a list
- * partition that does not accept nulls (in fact, also if this is a
- * range partition and some partition key is an expression, but we
- * never skip validation in that case anyway; see below)
- */
- foreach(lc, part_constr)
- {
- Node *expr = lfirst(lc);
-
- if (IsA(expr, NullTest) &&
- ((NullTest *) expr)->nulltesttype == IS_NOT_NULL)
- {
- partition_accepts_null = false;
- break;
- }
- }
-
- partnatts = get_partition_natts(key);
- for (i = 0; i < partnatts; i++)
- {
- AttrNumber partattno;
-
- partattno = get_partition_col_attnum(key, i);
-
- /* If partition key is an expression, must not skip validation */
- if (!partition_accepts_null &&
- (partattno == 0 ||
- !bms_is_member(partattno, not_null_attrs)))
- skip_validate = false;
- }
}
/* It's safe to skip the validation scan after all */
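
The deleted workaround is subsumed by the new third argument to predicate_implied_by(). Assuming that argument selects CHECK-constraint semantics, under which a NULL clause result counts as satisfied (an assumption; the new signature isn't shown in this diff), the whole proof reduces to:

    /* third arg = true: treat existConstraint as CHECK-style clauses, so
     * the hand-rolled IS NOT NULL bookkeeping removed above is unneeded */
    if (predicate_implied_by(partConstraint, existConstraint, true))
        skip_validate = true;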
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 56356de670..fc9c4f0fb1 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -1353,8 +1353,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
"%u pages are entirely empty.\n",
empty_pages),
empty_pages);
- appendStringInfo(&buf, _("%s."),
- pg_rusage_show(&ru0));
+ appendStringInfo(&buf, "%s.", pg_rusage_show(&ru0));
ereport(elevel,
(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
@@ -1429,8 +1428,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
(errmsg("\"%s\": removed %d row versions in %d pages",
RelationGetRelationName(onerel),
tupindex, npages),
- errdetail("%s.",
- pg_rusage_show(&ru0))));
+ errdetail_internal("%s", pg_rusage_show(&ru0))));
}
/*
@@ -1618,7 +1616,7 @@ lazy_vacuum_index(Relation indrel,
(errmsg("scanned index \"%s\" to remove %d row versions",
RelationGetRelationName(indrel),
vacrelstats->num_dead_tuples),
- errdetail("%s.", pg_rusage_show(&ru0))));
+ errdetail_internal("%s", pg_rusage_show(&ru0))));
}
/*
@@ -1828,8 +1826,8 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
(errmsg("\"%s\": truncated %u to %u pages",
RelationGetRelationName(onerel),
old_rel_pages, new_rel_pages),
- errdetail("%s.",
- pg_rusage_show(&ru0))));
+ errdetail_internal("%s",
+ pg_rusage_show(&ru0))));
old_rel_pages = new_rel_pages;
} while (new_rel_pages > vacrelstats->nonempty_pages &&
vacrelstats->lock_waiter_detected);
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index ed3b2484ae..717bc39c01 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -791,7 +791,7 @@ assign_client_encoding(const char *newval, void *extra)
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot change client_encoding in a parallel worker")));
+ errmsg("cannot change client_encoding during a parallel operation")));
}
/* We do not expect an error if PrepareClientEncoding succeeded */
diff --git a/src/backend/common.mk b/src/backend/common.mk
index 0b57543bc4..5d599dbd0c 100644
--- a/src/backend/common.mk
+++ b/src/backend/common.mk
@@ -8,8 +8,6 @@
# this directory and SUBDIRS to subdirectories containing more things
# to build.
-override CPPFLAGS := $(CPPFLAGS) $(ICU_CFLAGS)
-
ifdef PARTIAL_LINKING
# old style: linking using SUBSYS.o
subsysfilename = SUBSYS.o
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 7232b0911f..34cca85563 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -112,6 +112,8 @@ static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
Plan *planTree);
+static void ExecPartitionCheck(ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot, EState *estate);
/*
* Note that GetUpdatedColumns() also exists in commands/trigger.c. There does
@@ -1404,34 +1406,19 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_projectReturning = NULL;
/*
- * If partition_root has been specified, that means we are building the
- * ResultRelInfo for one of its leaf partitions. In that case, we need
- * *not* initialize the leaf partition's constraint, but rather the
- * partition_root's (if any). We must do that explicitly like this,
- * because implicit partition constraints are not inherited like user-
- * defined constraints and would fail to be enforced by ExecConstraints()
- * after a tuple is routed to a leaf partition.
+ * Partition constraint, which also includes the partition constraint of
+ * all the ancestors that are partitions. Note that it will be checked
+ * even in the case of tuple-routing where this table is the target leaf
+ * partition, if there are any BR triggers defined on the table. Although
+ * tuple-routing implicitly preserves the partition constraint of the
+ * target partition for a given row, a BR trigger may change the row
+ * such that the constraint is no longer satisfied, a failure we must
+ * detect by checking the constraint explicitly.
+ *
+ * If this is a partitioned table, the partition constraint (if any) of a
+ * given row will be checked just before performing tuple-routing.
*/
- if (partition_root)
- {
- /*
- * Root table itself may or may not be a partition; partition_check
- * would be NIL in the latter case.
- */
- partition_check = RelationGetPartitionQual(partition_root);
-
- /*
- * This is not our own partition constraint, but rather an ancestor's.
- * So any Vars in it bear the ancestor's attribute numbers. We must
- * switch them to our own. (dummy varno = 1)
- */
- if (partition_check != NIL)
- partition_check = map_partition_varattnos(partition_check, 1,
- resultRelationDesc,
- partition_root);
- }
- else
- partition_check = RelationGetPartitionQual(resultRelationDesc);
+ partition_check = RelationGetPartitionQual(resultRelationDesc);
resultRelInfo->ri_PartitionCheck = partition_check;
resultRelInfo->ri_PartitionRoot = partition_root;
@@ -1900,13 +1887,16 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
/*
* ExecPartitionCheck --- check that tuple meets the partition constraint.
- *
- * Note: This is called *iff* resultRelInfo is the main target table.
*/
-static bool
+static void
ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
EState *estate)
{
+ Relation rel = resultRelInfo->ri_RelationDesc;
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ Bitmapset *modifiedCols;
+ Bitmapset *insertedCols;
+ Bitmapset *updatedCols;
ExprContext *econtext;
/*
@@ -1934,7 +1924,44 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
* As in case of the catalogued constraints, we treat a NULL result as
* success here, not a failure.
*/
- return ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
+ if (!ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext))
+ {
+ char *val_desc;
+ Relation orig_rel = rel;
+
+ /* See the comment above. */
+ if (resultRelInfo->ri_PartitionRoot)
+ {
+ HeapTuple tuple = ExecFetchSlotTuple(slot);
+ TupleDesc old_tupdesc = RelationGetDescr(rel);
+ TupleConversionMap *map;
+
+ rel = resultRelInfo->ri_PartitionRoot;
+ tupdesc = RelationGetDescr(rel);
+ /* a reverse map */
+ map = convert_tuples_by_name(old_tupdesc, tupdesc,
+ gettext_noop("could not convert row type"));
+ if (map != NULL)
+ {
+ tuple = do_convert_tuple(tuple, map);
+ ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+ }
+ }
+
+ insertedCols = GetInsertedColumns(resultRelInfo, estate);
+ updatedCols = GetUpdatedColumns(resultRelInfo, estate);
+ modifiedCols = bms_union(insertedCols, updatedCols);
+ val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
+ slot,
+ tupdesc,
+ modifiedCols,
+ 64);
+ ereport(ERROR,
+ (errcode(ERRCODE_CHECK_VIOLATION),
+ errmsg("new row for relation \"%s\" violates partition constraint",
+ RelationGetRelationName(orig_rel)),
+ val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
+ }
}
/*
@@ -2062,47 +2089,11 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
}
}
- if (resultRelInfo->ri_PartitionCheck &&
- !ExecPartitionCheck(resultRelInfo, slot, estate))
- {
- char *val_desc;
- Relation orig_rel = rel;
-
- /* See the comment above. */
- if (resultRelInfo->ri_PartitionRoot)
- {
- HeapTuple tuple = ExecFetchSlotTuple(slot);
- TupleDesc old_tupdesc = RelationGetDescr(rel);
- TupleConversionMap *map;
-
- rel = resultRelInfo->ri_PartitionRoot;
- tupdesc = RelationGetDescr(rel);
- /* a reverse map */
- map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
- if (map != NULL)
- {
- tuple = do_convert_tuple(tuple, map);
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- }
- }
-
- insertedCols = GetInsertedColumns(resultRelInfo, estate);
- updatedCols = GetUpdatedColumns(resultRelInfo, estate);
- modifiedCols = bms_union(insertedCols, updatedCols);
- val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
- slot,
- tupdesc,
- modifiedCols,
- 64);
- ereport(ERROR,
- (errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("new row for relation \"%s\" violates partition constraint",
- RelationGetRelationName(orig_rel)),
- val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
- }
+ if (resultRelInfo->ri_PartitionCheck)
+ ExecPartitionCheck(resultRelInfo, slot, estate);
}
+
/*
* ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
* of the specified kind.
@@ -3387,6 +3378,13 @@ ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
PartitionDispatchData *failed_at;
TupleTableSlot *failed_slot;
+ /*
+ * First check the root table's partition constraint, if any. No point in
+ * routing the tuple if it doesn't belong in the root table itself.
+ */
+ if (resultRelInfo->ri_PartitionCheck)
+ ExecPartitionCheck(resultRelInfo, slot, estate);
+
result = get_partition_for_tuple(pd, slot, estate,
&failed_at, &failed_slot);
if (result < 0)
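
After this refactoring, ExecPartitionCheck() is a void function that raises ERRCODE_CHECK_VIOLATION itself, so both call sites reduce to the same guarded call (sketch):

    if (resultRelInfo->ri_PartitionCheck)
        ExecPartitionCheck(resultRelInfo, slot, estate);

    /* On failure, the tuple is first mapped back to the root table's
     * rowtype via ri_PartitionRoot, so the "Failing row contains ..."
     * detail shows the row as the user supplied it, then:
     *   ERROR: new row for relation "..." violates partition constraint */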
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 0610180016..1c02fa140b 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -341,7 +341,7 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
pcxt->nworkers));
else
- tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE);
+ tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, false);
/* Create the queues, and become the receiver for each. */
for (i = 0; i < pcxt->nworkers; ++i)
@@ -684,7 +684,7 @@ ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
char *mqspace;
shm_mq *mq;
- mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE);
+ mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE, false);
mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
mq = (shm_mq *) mqspace;
shm_mq_set_sender(mq, MyProc);
@@ -705,14 +705,14 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
char *queryString;
/* Get the query string from shared memory */
- queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT);
+ queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, false);
/* Reconstruct leader-supplied PlannedStmt. */
- pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT);
+ pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT, false);
pstmt = (PlannedStmt *) stringToNode(pstmtspace);
/* Reconstruct ParamListInfo. */
- paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS);
+ paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS, false);
paramLI = RestoreParamList(&paramspace);
/*
@@ -843,7 +843,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
/* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
receiver = ExecParallelGetReceiver(seg, toc);
- instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION);
+ instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
if (instrumentation != NULL)
instrument_options = instrumentation->instrument_options;
queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
@@ -858,7 +858,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
InstrStartParallelQuery();
/* Attach to the dynamic shared memory area. */
- area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA);
+ area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);
area = dsa_attach_in_place(area_space, seg);
/* Start up the executor */
@@ -875,7 +875,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
ExecutorFinish(queryDesc);
/* Report buffer usage during parallel execution. */
- buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE);
+ buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber]);
/* Report instrumentation data if any instrumentation options are set. */
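
The new third argument of shm_toc_lookup() (presumably a noError flag, judging by the call sites above) distinguishes required entries from optional ones: false makes a missing key an error, true makes it return NULL for the caller to handle, e.g.:

    /* optional: the leader may not have set up instrumentation at all */
    instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
    if (instrumentation != NULL)
        instrument_options = instrumentation->instrument_options;

    /* required: a missing DSA segment is an error, not a NULL result */
    area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);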
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index f1a71e26c8..bb5c609e54 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -392,6 +392,7 @@ sql_fn_post_column_ref(ParseState *pstate, ColumnRef *cref, Node *var)
param = ParseFuncOrColumn(pstate,
list_make1(subfield),
list_make1(param),
+ pstate->p_last_srf,
NULL,
cref->location);
}
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index c453362230..77f65db0ca 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -1005,7 +1005,7 @@ ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node, shm_toc *toc)
ParallelBitmapHeapState *pstate;
Snapshot snapshot;
- pstate = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ pstate = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
node->pstate = pstate;
snapshot = RestoreSnapshot(pstate->phs_snapshot_data);
diff --git a/src/backend/executor/nodeCustom.c b/src/backend/executor/nodeCustom.c
index 5d309828ef..69e27047f1 100644
--- a/src/backend/executor/nodeCustom.c
+++ b/src/backend/executor/nodeCustom.c
@@ -194,7 +194,7 @@ ExecCustomScanInitializeWorker(CustomScanState *node, shm_toc *toc)
int plan_node_id = node->ss.ps.plan->plan_node_id;
void *coordinate;
- coordinate = shm_toc_lookup(toc, plan_node_id);
+ coordinate = shm_toc_lookup(toc, plan_node_id, false);
methods->InitializeWorkerCustomScan(node, toc, coordinate);
}
}
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index 707db92178..2bb28a70ff 100644
--- a/src/backend/executor/nodeForeignscan.c
+++ b/src/backend/executor/nodeForeignscan.c
@@ -352,7 +352,7 @@ ExecForeignScanInitializeWorker(ForeignScanState *node, shm_toc *toc)
int plan_node_id = node->ss.ps.plan->plan_node_id;
void *coordinate;
- coordinate = shm_toc_lookup(toc, plan_node_id);
+ coordinate = shm_toc_lookup(toc, plan_node_id, false);
fdwroutine->InitializeWorkerForeignScan(node, toc, coordinate);
}
}
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 5550f6c0a4..fb3d3bb121 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -676,7 +676,7 @@ ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node, shm_toc *toc)
{
ParallelIndexScanDesc piscan;
- piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
node->ioss_ScanDesc =
index_beginscan_parallel(node->ss.ss_currentRelation,
node->ioss_RelationDesc,
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 5afd02e09d..0fb3fb5e7e 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -1714,7 +1714,7 @@ ExecIndexScanInitializeWorker(IndexScanState *node, shm_toc *toc)
{
ParallelIndexScanDesc piscan;
- piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
node->iss_ScanDesc =
index_beginscan_parallel(node->ss.ss_currentRelation,
node->iss_RelationDesc,
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 0ee82e3add..bdff68513b 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -415,6 +415,16 @@ ExecInsert(ModifyTableState *mtstate,
else
{
/*
+ * We always check the partition constraint, including when the tuple
+ * got here via tuple-routing. However, we don't need to in the latter
+ * case if no BR trigger is defined on the partition. Note that a BR
+ * trigger might modify the tuple such that the partition constraint
+ * is no longer satisfied, so we need to check in that case.
+ */
+ bool check_partition_constr =
+ (resultRelInfo->ri_PartitionCheck != NIL);
+
+ /*
* Constraints might reference the tableoid column, so initialize
* t_tableOid before evaluating them.
*/
@@ -431,9 +441,16 @@ ExecInsert(ModifyTableState *mtstate,
resultRelInfo, slot, estate);
/*
- * Check the constraints of the tuple
+ * No need, though, if the tuple has been routed and no BR trigger
+ * exists.
*/
- if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
+ if (saved_resultRelInfo != NULL &&
+ !(resultRelInfo->ri_TrigDesc &&
+ resultRelInfo->ri_TrigDesc->trig_insert_before_row))
+ check_partition_constr = false;
+
+ /* Check the constraints of the tuple */
+ if (resultRelationDesc->rd_att->constr || check_partition_constr)
ExecConstraints(resultRelInfo, slot, estate);
if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
@@ -1826,10 +1843,21 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
{
List *wcoList;
+ PlanState *plan;
- Assert(operation == CMD_INSERT);
- resultRelInfo = mtstate->mt_partitions;
+ /*
+ * In the case of INSERT on partitioned tables, there is only one plan.
+ * Likewise, there is only one WITH CHECK OPTION list, not one per
+ * partition. We make a copy of the WCO qual for each partition; note
+ * that, if there are SubPlans in there, they all end up attached to
+ * the one parent Plan node.
+ */
+ Assert(operation == CMD_INSERT &&
+ list_length(node->withCheckOptionLists) == 1 &&
+ mtstate->mt_nplans == 1);
wcoList = linitial(node->withCheckOptionLists);
+ plan = mtstate->mt_plans[0];
+ resultRelInfo = mtstate->mt_partitions;
for (i = 0; i < mtstate->mt_num_partitions; i++)
{
Relation partrel = resultRelInfo->ri_RelationDesc;
@@ -1843,9 +1871,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
partrel, rel);
foreach(ll, mapped_wcoList)
{
- WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
- ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
- mtstate->mt_plans[i]);
+ WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll));
+ ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
+ plan);
wcoExprs = lappend(wcoExprs, wcoExpr);
}
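Reassembled for readability, the two ExecInsert() hunks above amount to the following decision (identifiers as in the patch):

    bool check_partition_constr = (resultRelInfo->ri_PartitionCheck != NIL);

    /*
     * A routed tuple already satisfied the partition constraint at routing
     * time; only a BR trigger could have invalidated it since.
     */
    if (saved_resultRelInfo != NULL &&
        !(resultRelInfo->ri_TrigDesc &&
          resultRelInfo->ri_TrigDesc->trig_insert_before_row))
        check_partition_constr = false;

    if (resultRelationDesc->rd_att->constr || check_partition_constr)
        ExecConstraints(resultRelInfo, slot, estate);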
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 5680464fa2..c0e37dcd83 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -332,7 +332,7 @@ ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc)
{
ParallelHeapScanDesc pscan;
- pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
node->ss.ss_currentScanDesc =
heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);
}
diff --git a/src/backend/libpq/auth-scram.c b/src/backend/libpq/auth-scram.c
index 99feb0ce94..a6042b8013 100644
--- a/src/backend/libpq/auth-scram.c
+++ b/src/backend/libpq/auth-scram.c
@@ -195,7 +195,9 @@ pg_be_scram_init(const char *username, const char *shadow_pass)
* The password looked like a SCRAM verifier, but could not be
* parsed.
*/
- elog(LOG, "invalid SCRAM verifier for user \"%s\"", username);
+ ereport(LOG,
+ (errmsg("invalid SCRAM verifier for user \"%s\"",
+ username)));
got_verifier = false;
}
}
@@ -283,11 +285,13 @@ pg_be_scram_exchange(void *opaq, char *input, int inputlen,
if (inputlen == 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (empty message)"))));
+ errmsg("malformed SCRAM message"),
+ errdetail("The message is empty.")));
if (inputlen != strlen(input))
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (length mismatch)"))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Message length does not match input length.")));
switch (state->state)
{
@@ -319,7 +323,8 @@ pg_be_scram_exchange(void *opaq, char *input, int inputlen,
if (!verify_final_nonce(state))
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("invalid SCRAM response (nonce mismatch)"))));
+ errmsg("invalid SCRAM response"),
+ errdetail("Nonce does not match.")));
/*
* Now check the final nonce and the client proof.
@@ -391,14 +396,9 @@ pg_be_scram_build_verifier(const char *password)
/* Generate random salt */
if (!pg_backend_random(saltbuf, SCRAM_DEFAULT_SALT_LEN))
- {
- ereport(LOG,
+ ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("could not generate random salt")));
- if (prep_password)
- pfree(prep_password);
- return NULL;
- }
result = scram_build_verifier(saltbuf, SCRAM_DEFAULT_SALT_LEN,
SCRAM_DEFAULT_ITERATIONS, password);
@@ -435,7 +435,8 @@ scram_verify_plain_password(const char *username, const char *password,
/*
* The password looked like a SCRAM verifier, but could not be parsed.
*/
- elog(LOG, "invalid SCRAM verifier for user \"%s\"", username);
+ ereport(LOG,
+ (errmsg("invalid SCRAM verifier for user \"%s\"", username)));
return false;
}
@@ -443,7 +444,8 @@ scram_verify_plain_password(const char *username, const char *password,
saltlen = pg_b64_decode(encoded_salt, strlen(encoded_salt), salt);
if (saltlen == -1)
{
- elog(LOG, "invalid SCRAM verifier for user \"%s\"", username);
+ ereport(LOG,
+ (errmsg("invalid SCRAM verifier for user \"%s\"", username)));
return false;
}
@@ -582,14 +584,16 @@ read_attr_value(char **input, char attr)
if (*begin != attr)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (attribute '%c' expected, %s found)",
- attr, sanitize_char(*begin)))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Expected attribute '%c' but found %s.",
+ attr, sanitize_char(*begin))));
begin++;
if (*begin != '=')
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (expected = in attr %c)", attr))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Expected character = for attribute %c.", attr)));
begin++;
end = begin;
@@ -669,8 +673,9 @@ read_any_attr(char **input, char *attr_p)
(attr >= 'a' && attr <= 'z')))
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (attribute expected, invalid char %s found)",
- sanitize_char(attr)))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Attribute expected, but found invalid character %s.",
+ sanitize_char(attr))));
if (attr_p)
*attr_p = attr;
begin++;
@@ -678,7 +683,8 @@ read_any_attr(char **input, char *attr_p)
if (*begin != '=')
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (expected = in attr %c)", attr))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Expected character = for attribute %c.", attr)));
begin++;
end = begin;
@@ -795,14 +801,16 @@ read_client_first_message(scram_state *state, char *input)
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (unexpected channel-binding flag %s)",
- sanitize_char(*input)))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Unexpected channel-binding flag %s.",
+ sanitize_char(*input))));
}
if (*input != ',')
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("malformed SCRAM message (comma expected, got %s)",
- sanitize_char(*input))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Comma expected, but found character %s.",
+ sanitize_char(*input))));
input++;
/*
@@ -815,8 +823,9 @@ read_client_first_message(scram_state *state, char *input)
if (*input != ',')
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("malformed SCRAM message (unexpected attribute %s in client-first-message)",
- sanitize_char(*input))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Unexpected attribute %s in client-first-message.",
+ sanitize_char(*input))));
input++;
state->client_first_message_bare = pstrdup(input);
@@ -831,7 +840,7 @@ read_client_first_message(scram_state *state, char *input)
if (*input == 'm')
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("client requires mandatory SCRAM extension")));
+ errmsg("client requires an unsupported SCRAM extension")));
/*
* Read username. Note: this is ignored. We use the username from the
@@ -960,7 +969,7 @@ build_server_first_message(scram_state *state)
int encoded_len;
if (!pg_backend_random(raw_nonce, SCRAM_RAW_NONCE_LEN))
- ereport(COMMERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("could not generate random nonce")));
@@ -1044,14 +1053,16 @@ read_client_final_message(scram_state *state, char *input)
if (pg_b64_decode(value, strlen(value), client_proof) != SCRAM_KEY_LEN)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (malformed proof in client-final-message"))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Malformed proof in client-final-message.")));
memcpy(state->ClientProof, client_proof, SCRAM_KEY_LEN);
pfree(client_proof);
if (*p != '\0')
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- (errmsg("malformed SCRAM message (garbage at end of client-final-message)"))));
+ errmsg("malformed SCRAM message"),
+ errdetail("Garbage found at the end of client-final-message.")));
state->client_final_message_without_proof = palloc(proof - begin + 1);
memcpy(state->client_final_message_without_proof, input, proof - begin);
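Every auth-scram.c hunk applies the same message-style split: the primary errmsg stays short, lowercase, and unpunctuated, while the specifics move into a sentence-style errdetail. The pattern, as used above:

    ereport(ERROR,
            (errcode(ERRCODE_PROTOCOL_VIOLATION),
             errmsg("malformed SCRAM message"),     /* terse, no capital, no period */
             errdetail("The message is empty.")));  /* full sentence, with period */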
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 5b68e3b7a1..081c06a1e6 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -656,7 +656,7 @@ recv_password_packet(Port *port)
* log.
*/
if (mtype != EOF)
- ereport(COMMERROR,
+ ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected password response, got message type %d",
mtype)));
@@ -684,7 +684,7 @@ recv_password_packet(Port *port)
* StringInfo is guaranteed to have an appended '\0'.
*/
if (strlen(buf.data) + 1 != buf.len)
- ereport(COMMERROR,
+ ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid password packet size")));
@@ -897,11 +897,10 @@ CheckSCRAMAuth(Port *port, char *shadow_pass, char **logdetail)
/* Only log error if client didn't disconnect. */
if (mtype != EOF)
{
- ereport(COMMERROR,
+ ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected SASL response, got message type %d",
mtype)));
- return STATUS_ERROR;
}
else
return STATUS_EOF;
@@ -935,11 +934,9 @@ CheckSCRAMAuth(Port *port, char *shadow_pass, char **logdetail)
selected_mech = pq_getmsgrawstring(&buf);
if (strcmp(selected_mech, SCRAM_SHA256_NAME) != 0)
{
- ereport(COMMERROR,
+ ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("client selected an invalid SASL authentication mechanism")));
- pfree(buf.data);
- return STATUS_ERROR;
}
inputlen = pq_getmsgint(&buf, 4);
@@ -1144,7 +1141,7 @@ pg_GSS_recvauth(Port *port)
{
/* Only log error if client didn't disconnect. */
if (mtype != EOF)
- ereport(COMMERROR,
+ ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected GSS response, got message type %d",
mtype)));
@@ -1384,7 +1381,7 @@ pg_SSPI_recvauth(Port *port)
{
/* Only log error if client didn't disconnect. */
if (mtype != EOF)
- ereport(COMMERROR,
+ ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected SSPI response, got message type %d",
mtype)));
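Promoting these reports from COMMERROR to ERROR lets ereport()'s non-local exit perform the cleanup, which is why the explicit pfree(buf.data) and return STATUS_ERROR paths could be dropped above. Schematically (a condensation of the CheckSCRAMAuth hunk, not a new call site):

    if (strcmp(selected_mech, SCRAM_SHA256_NAME) != 0)
        ereport(ERROR,          /* does not return; no manual cleanup needed */
                (errcode(ERRCODE_PROTOCOL_VIOLATION),
                 errmsg("client selected an invalid SASL authentication mechanism")));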
diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c
index 96939327c3..8fbc03819d 100644
--- a/src/backend/libpq/pqmq.c
+++ b/src/backend/libpq/pqmq.c
@@ -172,9 +172,9 @@ mq_putmessage(char msgtype, const char *s, size_t len)
if (result != SHM_MQ_WOULD_BLOCK)
break;
- WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0,
+ WaitLatch(MyLatch, WL_LATCH_SET, 0,
WAIT_EVENT_MQ_PUT_MESSAGE);
- ResetLatch(&MyProc->procLatch);
+ ResetLatch(MyLatch);
CHECK_FOR_INTERRUPTS();
}
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index fc21909ea3..a1d056ff9f 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -2480,10 +2480,11 @@ _copyRangeTblEntry(const RangeTblEntry *from)
COPY_STRING_FIELD(ctename);
COPY_SCALAR_FIELD(ctelevelsup);
COPY_SCALAR_FIELD(self_reference);
- COPY_STRING_FIELD(enrname);
COPY_NODE_FIELD(coltypes);
COPY_NODE_FIELD(coltypmods);
COPY_NODE_FIELD(colcollations);
+ COPY_STRING_FIELD(enrname);
+ COPY_SCALAR_FIELD(enrtuples);
COPY_NODE_FIELD(alias);
COPY_NODE_FIELD(eref);
COPY_SCALAR_FIELD(lateral);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index c644aba4c1..14a8167b04 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -2664,6 +2664,8 @@ _equalRangeTblEntry(const RangeTblEntry *a, const RangeTblEntry *b)
COMPARE_NODE_FIELD(coltypes);
COMPARE_NODE_FIELD(coltypmods);
COMPARE_NODE_FIELD(colcollations);
+ COMPARE_STRING_FIELD(enrname);
+ COMPARE_SCALAR_FIELD(enrtuples);
COMPARE_NODE_FIELD(alias);
COMPARE_NODE_FIELD(eref);
COMPARE_SCALAR_FIELD(lateral);
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index eb3e1ce1c1..2496a9a43c 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -694,39 +694,11 @@ expression_returns_set_walker(Node *node, void *context)
/* else fall through to check args */
}
- /* Avoid recursion for some cases that can't return a set */
+ /* Avoid recursion for cases the parser already verifies cannot return a set */
if (IsA(node, Aggref))
return false;
if (IsA(node, WindowFunc))
return false;
- if (IsA(node, DistinctExpr))
- return false;
- if (IsA(node, NullIfExpr))
- return false;
- if (IsA(node, ScalarArrayOpExpr))
- return false;
- if (IsA(node, BoolExpr))
- return false;
- if (IsA(node, SubLink))
- return false;
- if (IsA(node, SubPlan))
- return false;
- if (IsA(node, AlternativeSubPlan))
- return false;
- if (IsA(node, ArrayExpr))
- return false;
- if (IsA(node, RowExpr))
- return false;
- if (IsA(node, RowCompareExpr))
- return false;
- if (IsA(node, CoalesceExpr))
- return false;
- if (IsA(node, MinMaxExpr))
- return false;
- if (IsA(node, SQLValueFunction))
- return false;
- if (IsA(node, XmlExpr))
- return false;
return expression_tree_walker(node, expression_returns_set_walker,
context);
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index be3413436a..b56b04a82f 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -4135,6 +4135,7 @@ _outRangeTblEntry(StringInfo str, const RangeTblEntry *node)
break;
case RTE_NAMEDTUPLESTORE:
WRITE_STRING_FIELD(enrname);
+ WRITE_FLOAT_FIELD(enrtuples, "%.0f");
WRITE_OID_FIELD(relid);
WRITE_NODE_FIELD(coltypes);
WRITE_NODE_FIELD(coltypmods);
@@ -4640,7 +4641,7 @@ _outPartitionElem(StringInfo str, const PartitionElem *node)
static void
_outPartitionSpec(StringInfo str, const PartitionSpec *node)
{
- WRITE_NODE_TYPE("PARTITIONBY");
+ WRITE_NODE_TYPE("PARTITIONSPEC");
WRITE_STRING_FIELD(strategy);
WRITE_NODE_FIELD(partParams);
@@ -4650,23 +4651,23 @@ _outPartitionSpec(StringInfo str, const PartitionSpec *node)
static void
_outPartitionBoundSpec(StringInfo str, const PartitionBoundSpec *node)
{
- WRITE_NODE_TYPE("PARTITIONBOUND");
+ WRITE_NODE_TYPE("PARTITIONBOUNDSPEC");
WRITE_CHAR_FIELD(strategy);
WRITE_NODE_FIELD(listdatums);
WRITE_NODE_FIELD(lowerdatums);
WRITE_NODE_FIELD(upperdatums);
- /* XXX somebody forgot location field; too late to change for v10 */
+ WRITE_LOCATION_FIELD(location);
}
static void
_outPartitionRangeDatum(StringInfo str, const PartitionRangeDatum *node)
{
- WRITE_NODE_TYPE("PARTRANGEDATUM");
+ WRITE_NODE_TYPE("PARTITIONRANGEDATUM");
WRITE_BOOL_FIELD(infinite);
WRITE_NODE_FIELD(value);
- /* XXX somebody forgot location field; too late to change for v10 */
+ WRITE_LOCATION_FIELD(location);
}
/*
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 5147eaa4d3..935bb196f7 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -1991,6 +1991,7 @@ _readRangeTblEntry(void)
break;
case RTE_NAMEDTUPLESTORE:
READ_STRING_FIELD(enrname);
+ READ_FLOAT_FIELD(enrtuples);
READ_OID_FIELD(relid);
READ_NODE_FIELD(coltypes);
READ_NODE_FIELD(coltypmods);
@@ -3761,8 +3762,7 @@ _readPartitionBoundSpec(void)
READ_NODE_FIELD(listdatums);
READ_NODE_FIELD(lowerdatums);
READ_NODE_FIELD(upperdatums);
- /* XXX somebody forgot location field; too late to change for v10 */
- local_node->location = -1;
+ READ_LOCATION_FIELD(location);
READ_DONE();
}
@@ -3777,8 +3777,7 @@ _readPartitionRangeDatum(void)
READ_BOOL_FIELD(infinite);
READ_NODE_FIELD(value);
- /* XXX somebody forgot location field; too late to change for v10 */
- local_node->location = -1;
+ READ_LOCATION_FIELD(location);
READ_DONE();
}
@@ -4029,9 +4028,9 @@ parseNodeString(void)
return_value = _readRemoteStmt();
else if (MATCH("SIMPLESORT", 10))
return_value = _readSimpleSort();
- else if (MATCH("PARTITIONBOUND", 14))
+ else if (MATCH("PARTITIONBOUNDSPEC", 18))
return_value = _readPartitionBoundSpec();
- else if (MATCH("PARTRANGEDATUM", 14))
+ else if (MATCH("PARTITIONRANGEDATUM", 19))
return_value = _readPartitionRangeDatum();
else
{
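The node-support hunks above follow the usual lockstep rule: a field added to RangeTblEntry (here enrtuples) must be handled identically in copyfuncs.c, equalfuncs.c, outfuncs.c, and readfuncs.c, and a renamed node label must keep the WRITE_NODE_TYPE string, the MATCH() string, and the MATCH() length argument in agreement:

    WRITE_NODE_TYPE("PARTITIONBOUNDSPEC");        /* outfuncs.c writes the label */

    else if (MATCH("PARTITIONBOUNDSPEC", 18))     /* readfuncs.c: 18 = label length */
        return_value = _readPartitionBoundSpec();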
diff --git a/src/backend/optimizer/geqo/geqo_cx.c b/src/backend/optimizer/geqo/geqo_cx.c
index 9f6d5e478a..c72081e81a 100644
--- a/src/backend/optimizer/geqo/geqo_cx.c
+++ b/src/backend/optimizer/geqo/geqo_cx.c
@@ -38,6 +38,7 @@
#include "optimizer/geqo_recombination.h"
#include "optimizer/geqo_random.h"
+#if defined(CX)
/* cx
*
@@ -119,3 +120,5 @@ cx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
return num_diffs;
}
+
+#endif /* defined(CX) */
diff --git a/src/backend/optimizer/geqo/geqo_erx.c b/src/backend/optimizer/geqo/geqo_erx.c
index 133fe32348..173be44409 100644
--- a/src/backend/optimizer/geqo/geqo_erx.c
+++ b/src/backend/optimizer/geqo/geqo_erx.c
@@ -35,6 +35,7 @@
#include "optimizer/geqo_recombination.h"
#include "optimizer/geqo_random.h"
+#if defined(ERX)
static int gimme_edge(PlannerInfo *root, Gene gene1, Gene gene2, Edge *edge_table);
static void remove_gene(PlannerInfo *root, Gene gene, Edge edge, Edge *edge_table);
@@ -466,3 +467,5 @@ edge_failure(PlannerInfo *root, Gene *gene, int index, Edge *edge_table, int num
elog(ERROR, "no edge found");
return 0; /* to keep the compiler quiet */
}
+
+#endif /* defined(ERX) */
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
index 52bd428187..86213ac5a0 100644
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -46,14 +46,14 @@ double Geqo_seed;
static int gimme_pool_size(int nr_rel);
static int gimme_number_generations(int pool_size);
-/* define edge recombination crossover [ERX] per default */
+/* complain if no recombination mechanism is #define'd */
#if !defined(ERX) && \
!defined(PMX) && \
!defined(CX) && \
!defined(PX) && \
!defined(OX1) && \
!defined(OX2)
-#define ERX
+#error "must choose one GEQO recombination mechanism in geqo.h"
#endif
diff --git a/src/backend/optimizer/geqo/geqo_mutation.c b/src/backend/optimizer/geqo/geqo_mutation.c
index 1a06d49775..c6af00a2a7 100644
--- a/src/backend/optimizer/geqo/geqo_mutation.c
+++ b/src/backend/optimizer/geqo/geqo_mutation.c
@@ -35,6 +35,8 @@
#include "optimizer/geqo_mutation.h"
#include "optimizer/geqo_random.h"
+#if defined(CX) /* currently used only in CX mode */
+
void
geqo_mutation(PlannerInfo *root, Gene *tour, int num_gene)
{
@@ -60,3 +62,5 @@ geqo_mutation(PlannerInfo *root, Gene *tour, int num_gene)
num_swaps -= 1;
}
}
+
+#endif /* defined(CX) */
diff --git a/src/backend/optimizer/geqo/geqo_ox1.c b/src/backend/optimizer/geqo/geqo_ox1.c
index fbf15282ad..891cfa2403 100644
--- a/src/backend/optimizer/geqo/geqo_ox1.c
+++ b/src/backend/optimizer/geqo/geqo_ox1.c
@@ -37,6 +37,7 @@
#include "optimizer/geqo_random.h"
#include "optimizer/geqo_recombination.h"
+#if defined(OX1)
/* ox1
*
@@ -90,3 +91,5 @@ ox1(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
}
}
+
+#endif /* defined(OX1) */
diff --git a/src/backend/optimizer/geqo/geqo_ox2.c b/src/backend/optimizer/geqo/geqo_ox2.c
index 01c55bea41..b43455d3eb 100644
--- a/src/backend/optimizer/geqo/geqo_ox2.c
+++ b/src/backend/optimizer/geqo/geqo_ox2.c
@@ -37,6 +37,7 @@
#include "optimizer/geqo_random.h"
#include "optimizer/geqo_recombination.h"
+#if defined(OX2)
/* ox2
*
@@ -107,3 +108,5 @@ ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
}
}
+
+#endif /* defined(OX2) */
diff --git a/src/backend/optimizer/geqo/geqo_pmx.c b/src/backend/optimizer/geqo/geqo_pmx.c
index deb0f7b353..e9485cc8b5 100644
--- a/src/backend/optimizer/geqo/geqo_pmx.c
+++ b/src/backend/optimizer/geqo/geqo_pmx.c
@@ -37,6 +37,7 @@
#include "optimizer/geqo_random.h"
#include "optimizer/geqo_recombination.h"
+#if defined(PMX)
/* pmx
*
@@ -219,3 +220,5 @@ pmx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene)
pfree(indx);
pfree(check_list);
}
+
+#endif /* defined(PMX) */
diff --git a/src/backend/optimizer/geqo/geqo_px.c b/src/backend/optimizer/geqo/geqo_px.c
index 99289bc11f..f7f615462c 100644
--- a/src/backend/optimizer/geqo/geqo_px.c
+++ b/src/backend/optimizer/geqo/geqo_px.c
@@ -37,6 +37,7 @@
#include "optimizer/geqo_random.h"
#include "optimizer/geqo_recombination.h"
+#if defined(PX)
/* px
*
@@ -105,3 +106,5 @@ px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
}
}
+
+#endif /* defined(PX) */
diff --git a/src/backend/optimizer/geqo/geqo_recombination.c b/src/backend/optimizer/geqo/geqo_recombination.c
index ef433e54e5..a61547c16d 100644
--- a/src/backend/optimizer/geqo/geqo_recombination.c
+++ b/src/backend/optimizer/geqo/geqo_recombination.c
@@ -58,6 +58,9 @@ init_tour(PlannerInfo *root, Gene *tour, int num_gene)
}
}
+/* city table is used in these recombination methods: */
+#if defined(CX) || defined(PX) || defined(OX1) || defined(OX2)
+
/* alloc_city_table
*
* allocate memory for city table
@@ -85,3 +88,5 @@ free_city_table(PlannerInfo *root, City *city_table)
{
pfree(city_table);
}
+
+#endif /* CX || PX || OX1 || OX2 */
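The guards above make each unselected recombination method compile to nothing, and the companion geqo_main.c hunk turns a missing selection into a build failure instead of silently defaulting to ERX. The selection itself is assumed to live in geqo.h (not shown in this diff), along these lines:

    /* geqo.h (assumed): pick exactly one recombination mechanism */
    #define ERX
    /* #define PMX */
    /* #define CX  */
    /* #define PX  */
    /* #define OX1 */
    /* #define OX2 */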
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 6e4808d51b..dfb1c973c5 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2220,6 +2220,7 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
Cost inner_run_cost = workspace->inner_run_cost;
Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
double outer_matched_rows;
+ double outer_unmatched_rows;
Selectivity inner_scan_frac;
/*
@@ -2232,6 +2233,7 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
* least 1, no such clamp is needed now.)
*/
outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
+ outer_unmatched_rows = outer_path_rows - outer_matched_rows;
inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
/*
@@ -2275,7 +2277,7 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
* of a nonempty scan. We consider that these are all rescans,
* since we used inner_run_cost once already.
*/
- run_cost += (outer_path_rows - outer_matched_rows) *
+ run_cost += outer_unmatched_rows *
inner_rescan_run_cost / inner_path_rows;
/*
@@ -2293,20 +2295,28 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
* difficult to estimate whether that will happen (and it could
* not happen if there are any unmatched outer rows!), so be
* conservative and always charge the whole first-scan cost once.
+ * We consider this charge to correspond to the first unmatched
+ * outer row, unless there isn't one in our estimate, in which
+ * case blame it on the first matched row.
*/
+
+ /* First, count all unmatched join tuples as being processed */
+ ntuples += outer_unmatched_rows * inner_path_rows;
+
+ /* Now add the forced full scan, and decrement appropriate count */
run_cost += inner_run_cost;
+ if (outer_unmatched_rows >= 1)
+ outer_unmatched_rows -= 1;
+ else
+ outer_matched_rows -= 1;
/* Add inner run cost for additional outer tuples having matches */
- if (outer_matched_rows > 1)
- run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
-
- /* Add inner run cost for unmatched outer tuples */
- run_cost += (outer_path_rows - outer_matched_rows) *
- inner_rescan_run_cost;
+ if (outer_matched_rows > 0)
+ run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
- /* And count the unmatched join tuples as being processed */
- ntuples += (outer_path_rows - outer_matched_rows) *
- inner_path_rows;
+ /* Add inner run cost for additional unmatched outer tuples */
+ if (outer_unmatched_rows > 0)
+ run_cost += outer_unmatched_rows * inner_rescan_run_cost;
}
}
else
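The restructured semi/anti-join charging is easiest to follow with illustrative numbers (assumed here, not taken from the patch). With outer_path_rows = 100 and outer_match_frac = 0.4:

    outer_matched_rows   = rint(100 * 0.4) = 40
    outer_unmatched_rows = 100 - 40        = 60

    ntuples  += 60 * inner_path_rows;               /* unmatched join tuples */
    run_cost += inner_run_cost;                     /* one full scan, blamed on
                                                     * the first unmatched row */
    run_cost += 59 * inner_rescan_run_cost;         /* remaining unmatched rescans */
    run_cost += 40 * inner_rescan_run_cost * inner_scan_frac;
                                                    /* matched rows stop early */

Under the old arrangement, the full first scan was implicitly blamed on a matched row (hence only 39 discounted rescans) while all 60 unmatched rows were still charged full rescans; the rewrite charges each outer row exactly once.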
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 607a8f97bf..07ab33902b 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -1210,10 +1210,10 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
all_clauses = list_concat(list_copy(clauses),
other_clauses);
- if (!predicate_implied_by(index->indpred, all_clauses))
+ if (!predicate_implied_by(index->indpred, all_clauses, false))
continue; /* can't use it at all */
- if (!predicate_implied_by(index->indpred, other_clauses))
+ if (!predicate_implied_by(index->indpred, other_clauses, false))
useful_predicate = true;
}
}
@@ -1519,7 +1519,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
{
Node *np = (Node *) lfirst(l);
- if (predicate_implied_by(list_make1(np), qualsofar))
+ if (predicate_implied_by(list_make1(np), qualsofar, false))
{
redundant = true;
break; /* out of inner foreach loop */
@@ -2871,7 +2871,8 @@ check_index_predicates(PlannerInfo *root, RelOptInfo *rel)
continue; /* ignore non-partial indexes here */
if (!index->predOK) /* don't repeat work if already proven OK */
- index->predOK = predicate_implied_by(index->indpred, clauselist);
+ index->predOK = predicate_implied_by(index->indpred, clauselist,
+ false);
/* If rel is an update target, leave indrestrictinfo as set above */
if (is_target_rel)
@@ -2886,7 +2887,7 @@ check_index_predicates(PlannerInfo *root, RelOptInfo *rel)
/* predicate_implied_by() assumes first arg is immutable */
if (contain_mutable_functions((Node *) rinfo->clause) ||
!predicate_implied_by(list_make1(rinfo->clause),
- index->indpred))
+ index->indpred, false))
index->indrestrictinfo = lappend(index->indrestrictinfo, rinfo);
}
}
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index af89e9d288..5c833e933d 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -2860,7 +2860,7 @@ create_indexscan_plan(PlannerInfo *root,
if (is_redundant_derived_clause(rinfo, indexquals))
continue; /* derived from same EquivalenceClass */
if (!contain_mutable_functions((Node *) rinfo->clause) &&
- predicate_implied_by(list_make1(rinfo->clause), indexquals))
+ predicate_implied_by(list_make1(rinfo->clause), indexquals, false))
continue; /* provably implied by indexquals */
qpqual = lappend(qpqual, rinfo);
}
@@ -3021,7 +3021,7 @@ create_bitmap_scan_plan(PlannerInfo *root,
if (rinfo->parent_ec && list_member_ptr(indexECs, rinfo->parent_ec))
continue; /* derived from same EquivalenceClass */
if (!contain_mutable_functions(clause) &&
- predicate_implied_by(list_make1(clause), indexquals))
+ predicate_implied_by(list_make1(clause), indexquals, false))
continue; /* provably implied by indexquals */
qpqual = lappend(qpqual, rinfo);
}
@@ -3252,7 +3252,8 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
* the conditions that got pushed into the bitmapqual. Avoid
* generating redundant conditions.
*/
- if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
+ if (!predicate_implied_by(list_make1(pred), ipath->indexclauses,
+ false))
{
*qual = lappend(*qual, pred);
*indexqual = lappend(*indexqual, pred);
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index aa8f6cf020..dec1589ec5 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -789,7 +789,7 @@ infer_arbiter_indexes(PlannerInfo *root)
*/
predExprs = RelationGetIndexPredicate(idxRel);
- if (!predicate_implied_by(predExprs, (List *) onconflict->arbiterWhere))
+ if (!predicate_implied_by(predExprs, (List *) onconflict->arbiterWhere, false))
goto next;
results = lappend_oid(results, idxForm->indexrelid);
@@ -1424,7 +1424,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
safe_restrictions = lappend(safe_restrictions, rinfo->clause);
}
- if (predicate_refuted_by(safe_restrictions, safe_restrictions))
+ if (predicate_refuted_by(safe_restrictions, safe_restrictions, false))
return true;
/* Only plain relations have constraints */
@@ -1463,7 +1463,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
* have volatile and nonvolatile subclauses, and it's OK to make
* deductions with the nonvolatile parts.
*/
- if (predicate_refuted_by(safe_constraints, rel->baserestrictinfo))
+ if (predicate_refuted_by(safe_constraints, rel->baserestrictinfo, false))
return true;
return false;
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index c4a04cfa95..06fce8458c 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -77,8 +77,10 @@ typedef struct PredIterInfoData
} while (0)
-static bool predicate_implied_by_recurse(Node *clause, Node *predicate);
-static bool predicate_refuted_by_recurse(Node *clause, Node *predicate);
+static bool predicate_implied_by_recurse(Node *clause, Node *predicate,
+ bool clause_is_check);
+static bool predicate_refuted_by_recurse(Node *clause, Node *predicate,
+ bool clause_is_check);
static PredClass predicate_classify(Node *clause, PredIterInfo info);
static void list_startup_fn(Node *clause, PredIterInfo info);
static Node *list_next_fn(PredIterInfo info);
@@ -90,8 +92,10 @@ static void arrayconst_cleanup_fn(PredIterInfo info);
static void arrayexpr_startup_fn(Node *clause, PredIterInfo info);
static Node *arrayexpr_next_fn(PredIterInfo info);
static void arrayexpr_cleanup_fn(PredIterInfo info);
-static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause);
-static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause);
+static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause,
+ bool clause_is_check);
+static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause,
+ bool clause_is_check);
static Node *extract_not_arg(Node *clause);
static Node *extract_strong_not_arg(Node *clause);
static bool list_member_strip(List *list, Expr *datum);
@@ -107,8 +111,11 @@ static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashv
/*
* predicate_implied_by
- * Recursively checks whether the clauses in restrictinfo_list imply
- * that the given predicate is true.
+ * Recursively checks whether the clauses in clause_list imply that the
+ * given predicate is true. If clause_is_check is true, assume that the
+ * clauses in clause_list are CHECK constraints (where null is
+ * effectively true) rather than WHERE clauses (where null is effectively
+ * false).
*
* The top-level List structure of each list corresponds to an AND list.
* We assume that eval_const_expressions() has been applied and so there
@@ -125,14 +132,15 @@ static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashv
* the plan and the time we execute the plan.
*/
bool
-predicate_implied_by(List *predicate_list, List *restrictinfo_list)
+predicate_implied_by(List *predicate_list, List *clause_list,
+ bool clause_is_check)
{
Node *p,
*r;
if (predicate_list == NIL)
return true; /* no predicate: implication is vacuous */
- if (restrictinfo_list == NIL)
+ if (clause_list == NIL)
return false; /* no restriction: implication must fail */
/*
@@ -145,19 +153,22 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
p = (Node *) linitial(predicate_list);
else
p = (Node *) predicate_list;
- if (list_length(restrictinfo_list) == 1)
- r = (Node *) linitial(restrictinfo_list);
+ if (list_length(clause_list) == 1)
+ r = (Node *) linitial(clause_list);
else
- r = (Node *) restrictinfo_list;
+ r = (Node *) clause_list;
/* And away we go ... */
- return predicate_implied_by_recurse(r, p);
+ return predicate_implied_by_recurse(r, p, clause_is_check);
}
/*
* predicate_refuted_by
- * Recursively checks whether the clauses in restrictinfo_list refute
- * the given predicate (that is, prove it false).
+ * Recursively checks whether the clauses in clause_list refute the given
+ * predicate (that is, prove it false). If clause_is_check is true, assume
+ * that the clauses in clause_list are CHECK constraints (where null is
+ * effectively true) rather than WHERE clauses (where null is effectively
+ * false).
*
* This is NOT the same as !(predicate_implied_by), though it is similar
* in the technique and structure of the code.
@@ -183,14 +194,15 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
* time we make the plan and the time we execute the plan.
*/
bool
-predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
+predicate_refuted_by(List *predicate_list, List *clause_list,
+ bool clause_is_check)
{
Node *p,
*r;
if (predicate_list == NIL)
return false; /* no predicate: no refutation is possible */
- if (restrictinfo_list == NIL)
+ if (clause_list == NIL)
return false; /* no restriction: refutation must fail */
/*
@@ -203,13 +215,13 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
p = (Node *) linitial(predicate_list);
else
p = (Node *) predicate_list;
- if (list_length(restrictinfo_list) == 1)
- r = (Node *) linitial(restrictinfo_list);
+ if (list_length(clause_list) == 1)
+ r = (Node *) linitial(clause_list);
else
- r = (Node *) restrictinfo_list;
+ r = (Node *) clause_list;
/* And away we go ... */
- return predicate_refuted_by_recurse(r, p);
+ return predicate_refuted_by_recurse(r, p, clause_is_check);
}
/*----------
@@ -248,7 +260,8 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
*----------
*/
static bool
-predicate_implied_by_recurse(Node *clause, Node *predicate)
+predicate_implied_by_recurse(Node *clause, Node *predicate,
+ bool clause_is_check)
{
PredIterInfoData clause_info;
PredIterInfoData pred_info;
@@ -275,7 +288,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(pitem, predicate, pred_info)
{
- if (!predicate_implied_by_recurse(clause, pitem))
+ if (!predicate_implied_by_recurse(clause, pitem,
+ clause_is_check))
{
result = false;
break;
@@ -294,7 +308,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
result = false;
iterate_begin(pitem, predicate, pred_info)
{
- if (predicate_implied_by_recurse(clause, pitem))
+ if (predicate_implied_by_recurse(clause, pitem,
+ clause_is_check))
{
result = true;
break;
@@ -311,7 +326,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
*/
iterate_begin(citem, clause, clause_info)
{
- if (predicate_implied_by_recurse(citem, predicate))
+ if (predicate_implied_by_recurse(citem, predicate,
+ clause_is_check))
{
result = true;
break;
@@ -328,7 +344,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
result = false;
iterate_begin(citem, clause, clause_info)
{
- if (predicate_implied_by_recurse(citem, predicate))
+ if (predicate_implied_by_recurse(citem, predicate,
+ clause_is_check))
{
result = true;
break;
@@ -355,7 +372,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
iterate_begin(pitem, predicate, pred_info)
{
- if (predicate_implied_by_recurse(citem, pitem))
+ if (predicate_implied_by_recurse(citem, pitem,
+ clause_is_check))
{
presult = true;
break;
@@ -382,7 +400,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(citem, clause, clause_info)
{
- if (!predicate_implied_by_recurse(citem, predicate))
+ if (!predicate_implied_by_recurse(citem, predicate,
+ clause_is_check))
{
result = false;
break;
@@ -404,7 +423,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(pitem, predicate, pred_info)
{
- if (!predicate_implied_by_recurse(clause, pitem))
+ if (!predicate_implied_by_recurse(clause, pitem,
+ clause_is_check))
{
result = false;
break;
@@ -421,7 +441,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
result = false;
iterate_begin(pitem, predicate, pred_info)
{
- if (predicate_implied_by_recurse(clause, pitem))
+ if (predicate_implied_by_recurse(clause, pitem,
+ clause_is_check))
{
result = true;
break;
@@ -437,7 +458,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
*/
return
predicate_implied_by_simple_clause((Expr *) predicate,
- clause);
+ clause,
+ clause_is_check);
}
break;
}
@@ -478,7 +500,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
*----------
*/
static bool
-predicate_refuted_by_recurse(Node *clause, Node *predicate)
+predicate_refuted_by_recurse(Node *clause, Node *predicate,
+ bool clause_is_check)
{
PredIterInfoData clause_info;
PredIterInfoData pred_info;
@@ -508,7 +531,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
result = false;
iterate_begin(pitem, predicate, pred_info)
{
- if (predicate_refuted_by_recurse(clause, pitem))
+ if (predicate_refuted_by_recurse(clause, pitem,
+ clause_is_check))
{
result = true;
break;
@@ -525,7 +549,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
*/
iterate_begin(citem, clause, clause_info)
{
- if (predicate_refuted_by_recurse(citem, predicate))
+ if (predicate_refuted_by_recurse(citem, predicate,
+ clause_is_check))
{
result = true;
break;
@@ -542,7 +567,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(pitem, predicate, pred_info)
{
- if (!predicate_refuted_by_recurse(clause, pitem))
+ if (!predicate_refuted_by_recurse(clause, pitem,
+ clause_is_check))
{
result = false;
break;
@@ -558,7 +584,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
*/
not_arg = extract_not_arg(predicate);
if (not_arg &&
- predicate_implied_by_recurse(clause, not_arg))
+ predicate_implied_by_recurse(clause, not_arg,
+ clause_is_check))
return true;
/*
@@ -567,7 +594,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
result = false;
iterate_begin(citem, clause, clause_info)
{
- if (predicate_refuted_by_recurse(citem, predicate))
+ if (predicate_refuted_by_recurse(citem, predicate,
+ clause_is_check))
{
result = true;
break;
@@ -589,7 +617,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(pitem, predicate, pred_info)
{
- if (!predicate_refuted_by_recurse(clause, pitem))
+ if (!predicate_refuted_by_recurse(clause, pitem,
+ clause_is_check))
{
result = false;
break;
@@ -611,7 +640,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
iterate_begin(pitem, predicate, pred_info)
{
- if (predicate_refuted_by_recurse(citem, pitem))
+ if (predicate_refuted_by_recurse(citem, pitem,
+ clause_is_check))
{
presult = true;
break;
@@ -634,7 +664,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
*/
not_arg = extract_not_arg(predicate);
if (not_arg &&
- predicate_implied_by_recurse(clause, not_arg))
+ predicate_implied_by_recurse(clause, not_arg,
+ clause_is_check))
return true;
/*
@@ -643,7 +674,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(citem, clause, clause_info)
{
- if (!predicate_refuted_by_recurse(citem, predicate))
+ if (!predicate_refuted_by_recurse(citem, predicate,
+ clause_is_check))
{
result = false;
break;
@@ -679,7 +711,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
result = false;
iterate_begin(pitem, predicate, pred_info)
{
- if (predicate_refuted_by_recurse(clause, pitem))
+ if (predicate_refuted_by_recurse(clause, pitem,
+ clause_is_check))
{
result = true;
break;
@@ -696,7 +729,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(pitem, predicate, pred_info)
{
- if (!predicate_refuted_by_recurse(clause, pitem))
+ if (!predicate_refuted_by_recurse(clause, pitem,
+ clause_is_check))
{
result = false;
break;
@@ -712,7 +746,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
*/
not_arg = extract_not_arg(predicate);
if (not_arg &&
- predicate_implied_by_recurse(clause, not_arg))
+ predicate_implied_by_recurse(clause, not_arg,
+ clause_is_check))
return true;
/*
@@ -720,7 +755,8 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
*/
return
predicate_refuted_by_simple_clause((Expr *) predicate,
- clause);
+ clause,
+ clause_is_check);
}
break;
}
@@ -1022,14 +1058,15 @@ arrayexpr_cleanup_fn(PredIterInfo info)
* functions in the expression are immutable, ie dependent only on their input
* arguments --- but this was checked for the predicate by the caller.)
*
- * When the predicate is of the form "foo IS NOT NULL", we can conclude that
- * the predicate is implied if the clause is a strict operator or function
- * that has "foo" as an input. In this case the clause must yield NULL when
- * "foo" is NULL, which we can take as equivalent to FALSE because we know
- * we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is
- * already known immutable, so the clause will certainly always fail.)
- * Also, if the clause is just "foo" (meaning it's a boolean variable),
- * the predicate is implied since the clause can't be true if "foo" is NULL.
+ * When clause_is_check is false, we know we are within an AND/OR
+ * subtree of a WHERE clause. So, if the predicate is of the form "foo IS
+ * NOT NULL", we can conclude that the predicate is implied if the clause is
+ * a strict operator or function that has "foo" as an input. In this case
+ * the clause must yield NULL when "foo" is NULL, which we can take as
+ * equivalent to FALSE given the context. (Again, "foo" is already known
+ * immutable, so the clause will certainly always fail.) Also, if the clause
+ * is just "foo" (meaning it's a boolean variable), the predicate is implied
+ * since the clause can't be true if "foo" is NULL.
*
* Finally, if both clauses are binary operator expressions, we may be able
* to prove something using the system's knowledge about operators; those
@@ -1037,7 +1074,8 @@ arrayexpr_cleanup_fn(PredIterInfo info)
*----------
*/
static bool
-predicate_implied_by_simple_clause(Expr *predicate, Node *clause)
+predicate_implied_by_simple_clause(Expr *predicate, Node *clause,
+ bool clause_is_check)
{
/* Allow interrupting long proof attempts */
CHECK_FOR_INTERRUPTS();
@@ -1053,7 +1091,7 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause)
Expr *nonnullarg = ((NullTest *) predicate)->arg;
/* row IS NOT NULL does not act in the simple way we have in mind */
- if (!((NullTest *) predicate)->argisrow)
+ if (!((NullTest *) predicate)->argisrow && !clause_is_check)
{
if (is_opclause(clause) &&
list_member_strip(((OpExpr *) clause)->args, nonnullarg) &&
@@ -1098,7 +1136,8 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause)
*----------
*/
static bool
-predicate_refuted_by_simple_clause(Expr *predicate, Node *clause)
+predicate_refuted_by_simple_clause(Expr *predicate, Node *clause,
+ bool clause_is_check)
{
/* Allow interrupting long proof attempts */
CHECK_FOR_INTERRUPTS();
@@ -1114,6 +1153,9 @@ predicate_refuted_by_simple_clause(Expr *predicate, Node *clause)
{
Expr *isnullarg = ((NullTest *) predicate)->arg;
+ if (clause_is_check)
+ return false;
+
/* row IS NULL does not act in the simple way we have in mind */
if (((NullTest *) predicate)->argisrow)
return false;
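The new clause_is_check flag changes what NULL means during a proof. For a hypothetical integer column x, with prepared node trees x_gt_zero ("x > 0") and x_not_null ("x IS NOT NULL") — both names assumed here for illustration:

    /* WHERE semantics: "x > 0" is a strict operator, so a NULL x makes
     * the clause effectively false; hence "x IS NOT NULL" is implied. */
    predicate_implied_by(list_make1(x_not_null), list_make1(x_gt_zero), false);
                                                            /* returns true  */

    /* CHECK semantics: a NULL x passes "x > 0", so the proof fails. */
    predicate_implied_by(list_make1(x_not_null), list_make1(x_gt_zero), true);
                                                            /* returns false */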
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 739ff10b07..ffa1ba6605 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -9563,24 +9563,14 @@ AlterSubscriptionStmt:
n->options = $6;
$$ = (Node *)n;
}
- | ALTER SUBSCRIPTION name SET PUBLICATION publication_name_list REFRESH opt_definition
- {
- AlterSubscriptionStmt *n =
- makeNode(AlterSubscriptionStmt);
- n->kind = ALTER_SUBSCRIPTION_PUBLICATION_REFRESH;
- n->subname = $3;
- n->publication = $6;
- n->options = $8;
- $$ = (Node *)n;
- }
- | ALTER SUBSCRIPTION name SET PUBLICATION publication_name_list SKIP REFRESH
+ | ALTER SUBSCRIPTION name SET PUBLICATION publication_name_list opt_definition
{
AlterSubscriptionStmt *n =
makeNode(AlterSubscriptionStmt);
n->kind = ALTER_SUBSCRIPTION_PUBLICATION;
n->subname = $3;
n->publication = $6;
- n->options = NIL;
+ n->options = $7;
$$ = (Node *)n;
}
| ALTER SUBSCRIPTION name ENABLE_P
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 9fc0371cb3..a95e349562 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -712,6 +712,14 @@ check_agg_arguments_walker(Node *node,
}
/* Continue and descend into subtree */
}
+ /* We can throw error on sight for a set-returning function */
+ if ((IsA(node, FuncExpr) &&((FuncExpr *) node)->funcretset) ||
+ (IsA(node, OpExpr) &&((OpExpr *) node)->opretset))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("aggregate function calls cannot contain set-returning function calls"),
+ errhint("You might be able to move the set-returning function into a LATERAL FROM item."),
+ parser_errposition(context->pstate, exprLocation(node))));
/* We can throw error on sight for a window function */
if (IsA(node, WindowFunc))
ereport(ERROR,
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 27dd49d301..3d5b20836f 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -572,6 +572,8 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
List *pair = (List *) lfirst(lc);
Node *fexpr;
List *coldeflist;
+ Node *newfexpr;
+ Node *last_srf;
/* Disassemble the function-call/column-def-list pairs */
Assert(list_length(pair) == 2);
@@ -618,13 +620,25 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
Node *arg = (Node *) lfirst(lc);
FuncCall *newfc;
+ last_srf = pstate->p_last_srf;
+
newfc = makeFuncCall(SystemFuncName("unnest"),
list_make1(arg),
fc->location);
- funcexprs = lappend(funcexprs,
- transformExpr(pstate, (Node *) newfc,
- EXPR_KIND_FROM_FUNCTION));
+ newfexpr = transformExpr(pstate, (Node *) newfc,
+ EXPR_KIND_FROM_FUNCTION);
+
+ /* nodeFunctionscan.c requires SRFs to be at top level */
+ if (pstate->p_last_srf != last_srf &&
+ pstate->p_last_srf != newfexpr)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-returning functions must appear at top level of FROM"),
+ parser_errposition(pstate,
+ exprLocation(pstate->p_last_srf))));
+
+ funcexprs = lappend(funcexprs, newfexpr);
funcnames = lappend(funcnames,
FigureColname((Node *) newfc));
@@ -638,9 +652,21 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
}
/* normal case ... */
- funcexprs = lappend(funcexprs,
- transformExpr(pstate, fexpr,
- EXPR_KIND_FROM_FUNCTION));
+ last_srf = pstate->p_last_srf;
+
+ newfexpr = transformExpr(pstate, fexpr,
+ EXPR_KIND_FROM_FUNCTION);
+
+ /* nodeFunctionscan.c requires SRFs to be at top level */
+ if (pstate->p_last_srf != last_srf &&
+ pstate->p_last_srf != newfexpr)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-returning functions must appear at top level of FROM"),
+ parser_errposition(pstate,
+ exprLocation(pstate->p_last_srf))));
+
+ funcexprs = lappend(funcexprs, newfexpr);
funcnames = lappend(funcnames,
FigureColname(fexpr));
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 8e2ae0e11c..958176c0ac 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -118,8 +118,7 @@ static Node *transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr *cexpr);
static Node *transformColumnRef(ParseState *pstate, ColumnRef *cref);
static Node *transformWholeRowRef(ParseState *pstate, RangeTblEntry *rte,
int location);
-static Node *transformIndirection(ParseState *pstate, Node *basenode,
- List *indirection);
+static Node *transformIndirection(ParseState *pstate, A_Indirection *ind);
static Node *transformTypeCast(ParseState *pstate, TypeCast *tc);
static Node *transformCollateClause(ParseState *pstate, CollateClause *c);
static Node *make_row_comparison_op(ParseState *pstate, List *opname,
@@ -192,14 +191,8 @@ transformExprRecurse(ParseState *pstate, Node *expr)
}
case T_A_Indirection:
- {
- A_Indirection *ind = (A_Indirection *) expr;
-
- result = transformExprRecurse(pstate, ind->arg);
- result = transformIndirection(pstate, result,
- ind->indirection);
- break;
- }
+ result = transformIndirection(pstate, (A_Indirection *) expr);
+ break;
case T_A_ArrayExpr:
result = transformArrayExpr(pstate, (A_ArrayExpr *) expr,
@@ -439,11 +432,12 @@ unknown_attribute(ParseState *pstate, Node *relref, char *attname,
}
static Node *
-transformIndirection(ParseState *pstate, Node *basenode, List *indirection)
+transformIndirection(ParseState *pstate, A_Indirection *ind)
{
- Node *result = basenode;
+ Node *last_srf = pstate->p_last_srf;
+ Node *result = transformExprRecurse(pstate, ind->arg);
List *subscripts = NIL;
- int location = exprLocation(basenode);
+ int location = exprLocation(result);
ListCell *i;
/*
@@ -451,7 +445,7 @@ transformIndirection(ParseState *pstate, Node *basenode, List *indirection)
* subscripting. Adjacent A_Indices nodes have to be treated as a single
* multidimensional subscript operation.
*/
- foreach(i, indirection)
+ foreach(i, ind->indirection)
{
Node *n = lfirst(i);
@@ -484,6 +478,7 @@ transformIndirection(ParseState *pstate, Node *basenode, List *indirection)
newresult = ParseFuncOrColumn(pstate,
list_make1(n),
list_make1(result),
+ last_srf,
NULL,
location);
if (newresult == NULL)
@@ -632,6 +627,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
node = ParseFuncOrColumn(pstate,
list_make1(makeString(colname)),
list_make1(node),
+ pstate->p_last_srf,
NULL,
cref->location);
}
@@ -678,6 +674,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
node = ParseFuncOrColumn(pstate,
list_make1(makeString(colname)),
list_make1(node),
+ pstate->p_last_srf,
NULL,
cref->location);
}
@@ -737,6 +734,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
node = ParseFuncOrColumn(pstate,
list_make1(makeString(colname)),
list_make1(node),
+ pstate->p_last_srf,
NULL,
cref->location);
}
@@ -927,6 +925,8 @@ transformAExprOp(ParseState *pstate, A_Expr *a)
else
{
/* Ordinary scalar operator */
+ Node *last_srf = pstate->p_last_srf;
+
lexpr = transformExprRecurse(pstate, lexpr);
rexpr = transformExprRecurse(pstate, rexpr);
@@ -934,6 +934,7 @@ transformAExprOp(ParseState *pstate, A_Expr *a)
a->name,
lexpr,
rexpr,
+ last_srf,
a->location);
}
@@ -1053,6 +1054,7 @@ transformAExprNullIf(ParseState *pstate, A_Expr *a)
a->name,
lexpr,
rexpr,
+ pstate->p_last_srf,
a->location);
/*
@@ -1063,6 +1065,12 @@ transformAExprNullIf(ParseState *pstate, A_Expr *a)
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("NULLIF requires = operator to yield boolean"),
parser_errposition(pstate, a->location)));
+ if (result->opretset)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ /* translator: %s is name of a SQL construct, eg NULLIF */
+ errmsg("%s must not return a set", "NULLIF"),
+ parser_errposition(pstate, a->location)));
/*
* ... but the NullIfExpr will yield the first operand's type.
@@ -1266,6 +1274,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
a->name,
copyObject(lexpr),
rexpr,
+ pstate->p_last_srf,
a->location);
}
@@ -1430,6 +1439,7 @@ transformBoolExpr(ParseState *pstate, BoolExpr *a)
static Node *
transformFuncCall(ParseState *pstate, FuncCall *fn)
{
+ Node *last_srf = pstate->p_last_srf;
List *targs;
ListCell *args;
@@ -1465,6 +1475,7 @@ transformFuncCall(ParseState *pstate, FuncCall *fn)
return ParseFuncOrColumn(pstate,
fn->funcname,
targs,
+ last_srf,
fn,
fn->location);
}
@@ -1620,7 +1631,8 @@ transformMultiAssignRef(ParseState *pstate, MultiAssignRef *maref)
static Node *
transformCaseExpr(ParseState *pstate, CaseExpr *c)
{
- CaseExpr *newc;
+ CaseExpr *newc = makeNode(CaseExpr);
+ Node *last_srf = pstate->p_last_srf;
Node *arg;
CaseTestExpr *placeholder;
List *newargs;
@@ -1629,8 +1641,6 @@ transformCaseExpr(ParseState *pstate, CaseExpr *c)
Node *defresult;
Oid ptype;
- newc = makeNode(CaseExpr);
-
/* transform the test expression, if any */
arg = transformExprRecurse(pstate, (Node *) c->arg);
@@ -1742,6 +1752,17 @@ transformCaseExpr(ParseState *pstate, CaseExpr *c)
"CASE/WHEN");
}
+ /* if any subexpression contained a SRF, complain */
+ if (pstate->p_last_srf != last_srf)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
+ errmsg("set-returning functions are not allowed in %s",
+ "CASE"),
+ errhint("You might be able to move the set-returning function into a LATERAL FROM item."),
+ parser_errposition(pstate,
+ exprLocation(pstate->p_last_srf))));
+
newc->location = c->location;
return (Node *) newc;
@@ -2178,6 +2199,7 @@ static Node *
transformCoalesceExpr(ParseState *pstate, CoalesceExpr *c)
{
CoalesceExpr *newc = makeNode(CoalesceExpr);
+ Node *last_srf = pstate->p_last_srf;
List *newargs = NIL;
List *newcoercedargs = NIL;
ListCell *args;
@@ -2206,6 +2228,17 @@ transformCoalesceExpr(ParseState *pstate, CoalesceExpr *c)
newcoercedargs = lappend(newcoercedargs, newe);
}
+ /* if any subexpression contained a SRF, complain */
+ if (pstate->p_last_srf != last_srf)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
+ errmsg("set-returning functions are not allowed in %s",
+ "COALESCE"),
+ errhint("You might be able to move the set-returning function into a LATERAL FROM item."),
+ parser_errposition(pstate,
+ exprLocation(pstate->p_last_srf))));
+
newc->args = newcoercedargs;
newc->location = c->location;
return (Node *) newc;
@@ -2800,7 +2833,8 @@ make_row_comparison_op(ParseState *pstate, List *opname,
Node *rarg = (Node *) lfirst(r);
OpExpr *cmp;
- cmp = castNode(OpExpr, make_op(pstate, opname, larg, rarg, location));
+ cmp = castNode(OpExpr, make_op(pstate, opname, larg, rarg,
+ pstate->p_last_srf, location));
/*
* We don't use coerce_to_boolean here because we insist on the
@@ -3007,12 +3041,19 @@ make_distinct_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree,
{
Expr *result;
- result = make_op(pstate, opname, ltree, rtree, location);
+ result = make_op(pstate, opname, ltree, rtree,
+ pstate->p_last_srf, location);
if (((OpExpr *) result)->opresulttype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("IS DISTINCT FROM requires = operator to yield boolean"),
parser_errposition(pstate, location)));
+ if (((OpExpr *) result)->opretset)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ /* translator: %s is name of a SQL construct, eg NULLIF */
+ errmsg("%s must not return a set", "IS DISTINCT FROM"),
+ parser_errposition(pstate, location)));
/*
* We rely on DistinctExpr and OpExpr being same struct
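Every parse_expr.c change instantiates a single pattern: snapshot pstate->p_last_srf before transforming sub-expressions, and if the pointer moved, a set-returning function was transformed inside a construct that cannot host one. Condensed, with the COALESCE wording used above:

    Node   *last_srf = pstate->p_last_srf;   /* snapshot before recursing */

    newe = transformExprRecurse(pstate, e);

    if (pstate->p_last_srf != last_srf)      /* an SRF appeared inside */
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-returning functions are not allowed in %s",
                        "COALESCE"),
                 errhint("You might be able to move the set-returning function into a LATERAL FROM item."),
                 parser_errposition(pstate,
                                    exprLocation(pstate->p_last_srf))));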
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index 55853c20bb..34f1cf82ee 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -64,10 +64,14 @@ static Node *ParseComplexProjection(ParseState *pstate, char *funcname,
*
* The argument expressions (in fargs) must have been transformed
* already. However, nothing in *fn has been transformed.
+ *
+ * last_srf should be a copy of pstate->p_last_srf from just before we
+ * started transforming fargs. If the caller knows that fargs couldn't
+ * contain any SRF calls, last_srf can just be pstate->p_last_srf.
*/
Node *
ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
- FuncCall *fn, int location)
+ Node *last_srf, FuncCall *fn, int location)
{
bool is_column = (fn == NULL);
List *agg_order = (fn ? fn->agg_order : NIL);
@@ -628,7 +632,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/* if it returns a set, check that's OK */
if (retset)
- check_srf_call_placement(pstate, location);
+ check_srf_call_placement(pstate, last_srf, location);
/* build the appropriate output structure */
if (fdresult == FUNCDETAIL_NORMAL)
@@ -759,6 +763,17 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
errmsg("FILTER is not implemented for non-aggregate window functions"),
parser_errposition(pstate, location)));
+ /*
+ * Window functions can neither take nor return sets
+ */
+ if (pstate->p_last_srf != last_srf)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("window function calls cannot contain set-returning function calls"),
+ errhint("You might be able to move the set-returning function into a LATERAL FROM item."),
+ parser_errposition(pstate,
+ exprLocation(pstate->p_last_srf))));
+
if (retset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
@@ -771,6 +786,10 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
retval = (Node *) wfunc;
}
+ /* if it returns a set, remember it for error checks at higher levels */
+ if (retset)
+ pstate->p_last_srf = retval;
+
return retval;
}
@@ -2083,9 +2102,13 @@ LookupAggWithArgs(ObjectWithArgs *agg, bool noError)
* and throw a nice error if not.
*
* A side-effect is to set pstate->p_hasTargetSRFs true if appropriate.
+ *
+ * last_srf should be a copy of pstate->p_last_srf from just before we
+ * started transforming the function's arguments. This allows detection
+ * of whether the SRF's arguments contain any SRFs.
*/
void
-check_srf_call_placement(ParseState *pstate, int location)
+check_srf_call_placement(ParseState *pstate, Node *last_srf, int location)
{
const char *err;
bool errkind;
@@ -2121,7 +2144,15 @@ check_srf_call_placement(ParseState *pstate, int location)
errkind = true;
break;
case EXPR_KIND_FROM_FUNCTION:
- /* okay ... but we can't check nesting here */
+ /* okay, but we don't allow nested SRFs here */
+ /* errmsg is chosen to match transformRangeFunction() */
+ /* errposition should point to the inner SRF */
+ if (pstate->p_last_srf != last_srf)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-returning functions must appear at top level of FROM"),
+ parser_errposition(pstate,
+ exprLocation(pstate->p_last_srf))));
break;
case EXPR_KIND_WHERE:
errkind = true;
@@ -2202,7 +2233,7 @@ check_srf_call_placement(ParseState *pstate, int location)
err = _("set-returning functions are not allowed in trigger WHEN conditions");
break;
case EXPR_KIND_PARTITION_EXPRESSION:
- err = _("set-returning functions are not allowed in partition key expression");
+ err = _("set-returning functions are not allowed in partition key expressions");
break;
/*
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index e40b10d4f6..4b1db76e19 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -735,12 +735,14 @@ op_error(ParseState *pstate, List *op, char oprkind,
* Transform operator expression ensuring type compatibility.
* This is where some type conversion happens.
*
- * As with coerce_type, pstate may be NULL if no special unknown-Param
- * processing is wanted.
+ * last_srf should be a copy of pstate->p_last_srf from just before we
+ * started transforming the operator's arguments; this is used for nested-SRF
+ * detection. If the caller will throw an error anyway for a set-returning
+ * expression, it's okay to cheat and just pass pstate->p_last_srf.
*/
Expr *
make_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree,
- int location)
+ Node *last_srf, int location)
{
Oid ltypeId,
rtypeId;
@@ -843,7 +845,11 @@ make_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree,
/* if it returns a set, check that's OK */
if (result->opretset)
- check_srf_call_placement(pstate, location);
+ {
+ check_srf_call_placement(pstate, last_srf, location);
+ /* ... and remember it for error checks at higher levels */
+ pstate->p_last_srf = (Node *) result;
+ }
ReleaseSysCache(tup);
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index c04e77775e..708188f300 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -486,6 +486,15 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
return result;
}
+/*
+ * generateSerialExtraStmts
+ * Generate CREATE SEQUENCE and ALTER SEQUENCE ... OWNED BY statements
+ * to create the sequence for a serial or identity column.
+ *
+ * This includes determining the name the sequence will have. The caller
+ * can ask to get back the name components by passing non-null pointers
+ * for snamespace_p and sname_p.
+ */
static void
generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
Oid seqtypid, List *seqoptions, bool for_identity,
@@ -514,7 +523,6 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
* problem, especially since few people would need two serial columns in
* one table.
*/
-
foreach(option, seqoptions)
{
DefElem *defel = lfirst_node(DefElem, option);
@@ -534,7 +542,17 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));
snamespace = rv->schemaname;
+ if (!snamespace)
+ {
+ /* Given unqualified SEQUENCE NAME, select namespace */
+ if (cxt->rel)
+ snamespaceid = RelationGetNamespace(cxt->rel);
+ else
+ snamespaceid = RangeVarGetCreationNamespace(cxt->relation);
+ snamespace = get_namespace_name(snamespaceid);
+ }
sname = rv->relname;
+ /* Remove the SEQUENCE NAME item from seqoptions */
seqoptions = list_delete_ptr(seqoptions, nameEl);
}
else
@@ -574,7 +592,9 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
* not our synthetic one.
*/
if (seqtypid)
- seqstmt->options = lcons(makeDefElem("as", (Node *) makeTypeNameFromOid(seqtypid, -1), -1),
+ seqstmt->options = lcons(makeDefElem("as",
+ (Node *) makeTypeNameFromOid(seqtypid, -1),
+ -1),
seqstmt->options);
/*
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index c3454276bf..712d700481 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -1144,7 +1144,7 @@ WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle)
if (status == BGWH_STOPPED)
break;
- rc = WaitLatch(&MyProc->procLatch,
+ rc = WaitLatch(MyLatch,
WL_LATCH_SET | WL_POSTMASTER_DEATH, 0,
WAIT_EVENT_BGWORKER_SHUTDOWN);
@@ -1154,7 +1154,7 @@ WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle)
break;
}
- ResetLatch(&MyProc->procLatch);
+ ResetLatch(MyLatch);
}
return status;
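Beyond swapping &MyProc->procLatch for the process-local MyLatch, the wait loops converted throughout this patch converge on one idiom: bail out on postmaster death, and reset the latch only when it was actually set, immediately followed by an interrupt check. A sketch of that shape, assembled from the flags and wait-event constants used in the hunks themselves:

	for (;;)
	{
		int			rc;

		rc = WaitLatch(MyLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
					   1000L, WAIT_EVENT_BGWORKER_STARTUP);

		/* emergency bailout if postmaster has died */
		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);

		if (rc & WL_LATCH_SET)
		{
			ResetLatch(MyLatch);
			CHECK_FOR_INTERRUPTS();
		}

		/* ... re-check whatever condition the loop is waiting for ... */
	}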
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index f6f920e493..1e511f4c1e 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -3080,7 +3080,7 @@ reaper(SIGNAL_ARGS)
* Waken walsenders for the last time. No regular backends
* should be around anymore.
*/
- SignalChildren(SIGINT);
+ SignalChildren(SIGUSR2);
pmState = PM_SHUTDOWN_2;
@@ -3876,9 +3876,7 @@ PostmasterStateMachine(void)
/*
* If we get here, we are proceeding with normal shutdown. All
* the regular children are gone, and it's time to tell the
- * checkpointer to do a shutdown checkpoint. All WAL senders
- * are told to switch to a stopping state so that the shutdown
- * checkpoint can go ahead.
+ * checkpointer to do a shutdown checkpoint.
*/
Assert(Shutdown > NoShutdown);
/* Start the checkpointer if not running */
@@ -3887,7 +3885,6 @@ PostmasterStateMachine(void)
/* And tell it to shut down */
if (CheckpointerPID != 0)
{
- SignalSomeChildren(SIGUSR2, BACKEND_TYPE_WALSND);
signal_child(CheckpointerPID, SIGUSR2);
pmState = PM_SHUTDOWN;
}
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index ebe9c91e98..7509b4fe60 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -176,7 +176,7 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname,
? WL_SOCKET_READABLE
: WL_SOCKET_WRITEABLE);
- rc = WaitLatchOrSocket(&MyProc->procLatch,
+ rc = WaitLatchOrSocket(MyLatch,
WL_POSTMASTER_DEATH |
WL_LATCH_SET | io_flag,
PQsocket(conn->streamConn),
@@ -190,7 +190,7 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname,
/* Interrupted? */
if (rc & WL_LATCH_SET)
{
- ResetLatch(&MyProc->procLatch);
+ ResetLatch(MyLatch);
CHECK_FOR_INTERRUPTS();
}
@@ -574,21 +574,22 @@ libpqrcv_PQexec(PGconn *streamConn, const char *query)
* the signal arrives in the middle of establishment of
* replication connection.
*/
- ResetLatch(&MyProc->procLatch);
- rc = WaitLatchOrSocket(&MyProc->procLatch,
+ rc = WaitLatchOrSocket(MyLatch,
WL_POSTMASTER_DEATH | WL_SOCKET_READABLE |
WL_LATCH_SET,
PQsocket(streamConn),
0,
WAIT_EVENT_LIBPQWALRECEIVER);
+
+ /* Emergency bailout? */
if (rc & WL_POSTMASTER_DEATH)
exit(1);
- /* interrupted */
+ /* Interrupted? */
if (rc & WL_LATCH_SET)
{
+ ResetLatch(MyLatch);
CHECK_FOR_INTERRUPTS();
- continue;
}
if (PQconsumeInput(streamConn) == 0)
return NULL; /* trouble */
@@ -681,12 +682,25 @@ libpqrcv_receive(WalReceiverConn *conn, char **buffer,
{
PQclear(res);
- /* Verify that there are no more results */
+ /* Verify that there are no more results. */
res = PQgetResult(conn->streamConn);
if (res != NULL)
+ {
+ PQclear(res);
+
+ /*
+ * If the other side closed the connection in an orderly manner
+ * (otherwise we'd have seen an error, or PGRES_COPY_IN), don't
+ * report an error here, but let callers deal with it.
+ */
+ if (PQstatus(conn->streamConn) == CONNECTION_BAD)
+ return -1;
+
ereport(ERROR,
(errmsg("unexpected result after CommandComplete: %s",
PQerrorMessage(conn->streamConn))));
+ }
+
return -1;
}
else if (PQresultStatus(res) == PGRES_COPY_IN)
@@ -806,11 +820,10 @@ libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres,
/* Make sure we got expected number of fields. */
if (nfields != nRetTypes)
ereport(ERROR,
- (errmsg("invalid query responser"),
+ (errmsg("invalid query response"),
errdetail("Expected %d fields, got %d fields.",
nRetTypes, nfields)));
-
walres->tuplestore = tuplestore_begin_heap(true, false, work_mem);
/* Create tuple descriptor corresponding to expected result. */
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index b956052014..15dac00ffa 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -80,8 +80,7 @@ static void logicalrep_worker_detach(void);
static void logicalrep_worker_cleanup(LogicalRepWorker *worker);
/* Flags set by signal handlers */
-volatile sig_atomic_t got_SIGHUP = false;
-volatile sig_atomic_t got_SIGTERM = false;
+static volatile sig_atomic_t got_SIGHUP = false;
static bool on_commit_launcher_wakeup = false;
@@ -208,10 +207,15 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
1000L, WAIT_EVENT_BGWORKER_STARTUP);
+ /* emergency bailout if postmaster has died */
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
- ResetLatch(MyLatch);
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
}
return;
@@ -440,10 +444,8 @@ logicalrep_worker_stop(Oid subid, Oid relid)
LWLockRelease(LogicalRepWorkerLock);
- CHECK_FOR_INTERRUPTS();
-
/* Wait for signal. */
- rc = WaitLatch(&MyProc->procLatch,
+ rc = WaitLatch(MyLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
1000L, WAIT_EVENT_BGWORKER_STARTUP);
@@ -451,7 +453,11 @@ logicalrep_worker_stop(Oid subid, Oid relid)
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
- ResetLatch(&MyProc->procLatch);
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
/* Check worker status. */
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
@@ -492,7 +498,7 @@ logicalrep_worker_stop(Oid subid, Oid relid)
CHECK_FOR_INTERRUPTS();
/* Wait for more work. */
- rc = WaitLatch(&MyProc->procLatch,
+ rc = WaitLatch(MyLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
1000L, WAIT_EVENT_BGWORKER_SHUTDOWN);
@@ -500,7 +506,11 @@ logicalrep_worker_stop(Oid subid, Oid relid)
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
- ResetLatch(&MyProc->procLatch);
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
}
}
@@ -614,26 +624,18 @@ logicalrep_launcher_onexit(int code, Datum arg)
static void
logicalrep_worker_onexit(int code, Datum arg)
{
- logicalrep_worker_detach();
-}
-
-/* SIGTERM: set flag to exit at next convenient time */
-void
-logicalrep_worker_sigterm(SIGNAL_ARGS)
-{
- int save_errno = errno;
+ /* Disconnect gracefully from the remote side. */
+ if (wrconn)
+ walrcv_disconnect(wrconn);
- got_SIGTERM = true;
-
- /* Waken anything waiting on the process latch */
- SetLatch(MyLatch);
+ logicalrep_worker_detach();
- errno = save_errno;
+ ApplyLauncherWakeup();
}
/* SIGHUP: set flag to reload configuration at next convenient time */
-void
-logicalrep_worker_sighup(SIGNAL_ARGS)
+static void
+logicalrep_launcher_sighup(SIGNAL_ARGS)
{
int save_errno = errno;
@@ -792,17 +794,14 @@ ApplyLauncherMain(Datum main_arg)
before_shmem_exit(logicalrep_launcher_onexit, (Datum) 0);
+ Assert(LogicalRepCtx->launcher_pid == 0);
+ LogicalRepCtx->launcher_pid = MyProcPid;
+
/* Establish signal handlers. */
- pqsignal(SIGHUP, logicalrep_worker_sighup);
- pqsignal(SIGTERM, logicalrep_worker_sigterm);
+ pqsignal(SIGHUP, logicalrep_launcher_sighup);
+ pqsignal(SIGTERM, die);
BackgroundWorkerUnblockSignals();
- /* Make it easy to identify our processes. */
- SetConfigOption("application_name", MyBgworkerEntry->bgw_name,
- PGC_USERSET, PGC_S_SESSION);
-
- LogicalRepCtx->launcher_pid = MyProcPid;
-
/*
* Establish connection to nailed catalogs (we only ever access
* pg_subscription).
@@ -810,7 +809,7 @@ ApplyLauncherMain(Datum main_arg)
BackgroundWorkerInitializeConnection(NULL, NULL);
/* Enter main loop */
- while (!got_SIGTERM)
+ for (;;)
{
int rc;
List *sublist;
@@ -820,6 +819,8 @@ ApplyLauncherMain(Datum main_arg)
TimestampTz now;
long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
+ CHECK_FOR_INTERRUPTS();
+
now = GetCurrentTimestamp();
/* Limit the start retry to once a wal_retrieve_retry_interval */
@@ -874,7 +875,7 @@ ApplyLauncherMain(Datum main_arg)
}
/* Wait for more work. */
- rc = WaitLatch(&MyProc->procLatch,
+ rc = WaitLatch(MyLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
wait_time,
WAIT_EVENT_LOGICAL_LAUNCHER_MAIN);
@@ -883,22 +884,29 @@ ApplyLauncherMain(Datum main_arg)
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
+
if (got_SIGHUP)
{
got_SIGHUP = false;
ProcessConfigFile(PGC_SIGHUP);
}
-
- ResetLatch(&MyProc->procLatch);
}
- LogicalRepCtx->launcher_pid = 0;
-
- /* ... and if it returns, we're done */
- ereport(DEBUG1,
- (errmsg("logical replication launcher shutting down")));
+ /* Not reachable */
+}
- proc_exit(0);
+/*
+ * Is the current process the logical replication launcher?
+ */
+bool
+IsLogicalLauncher(void)
+{
+ return LogicalRepCtx->launcher_pid == MyProcPid;
}
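With the launcher now installing the stock die handler for SIGTERM (see the pqsignal hunk above), its graceful shutdown message has to come from generic interrupt processing instead of a local loop exit. A sketch of the check that IsLogicalLauncher() enables; the actual call site (presumably in ProcessInterrupts()) is outside this excerpt:

	/* Hypothetical placement inside ProcessInterrupts(), on a pending die. */
	if (IsLogicalLauncher())
	{
		ereport(DEBUG1,
				(errmsg("logical replication launcher shutting down")));

		/* The launcher may exit quietly rather than reporting FATAL. */
		proc_exit(1);
	}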
/*
diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c
index e65f2865dd..2bd1d9f792 100644
--- a/src/backend/replication/logical/relation.c
+++ b/src/backend/replication/logical/relation.c
@@ -283,7 +283,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
continue;
attnum = logicalrep_rel_att_by_name(remoterel,
- NameStr(desc->attrs[i]->attname));
+ NameStr(desc->attrs[i]->attname));
entry->attrmap[i] = attnum;
if (attnum >= 0)
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 8848f5b4ec..e06aa0992a 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -262,7 +262,7 @@ static bool ExportInProgress = false;
static void SnapBuildPurgeCommittedTxn(SnapBuild *builder);
/* snapshot building/manipulation/distribution functions */
-static Snapshot SnapBuildBuildSnapshot(SnapBuild *builder, TransactionId xid);
+static Snapshot SnapBuildBuildSnapshot(SnapBuild *builder);
static void SnapBuildFreeSnapshot(Snapshot snap);
@@ -463,7 +463,7 @@ SnapBuildSnapDecRefcount(Snapshot snap)
* and ->subxip/subxcnt values.
*/
static Snapshot
-SnapBuildBuildSnapshot(SnapBuild *builder, TransactionId xid)
+SnapBuildBuildSnapshot(SnapBuild *builder)
{
Snapshot snapshot;
Size ssize;
@@ -562,7 +562,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder)
if (TransactionIdIsValid(MyPgXact->xmin))
elog(ERROR, "cannot build an initial slot snapshot when MyPgXact->xmin already is valid");
- snap = SnapBuildBuildSnapshot(builder, GetTopTransactionId());
+ snap = SnapBuildBuildSnapshot(builder);
/*
* We know that snap->xmin is alive, enforced by the logical xmin
@@ -679,7 +679,7 @@ SnapBuildGetOrBuildSnapshot(SnapBuild *builder, TransactionId xid)
/* only build a new snapshot if we don't have a prebuilt one */
if (builder->snapshot == NULL)
{
- builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
+ builder->snapshot = SnapBuildBuildSnapshot(builder);
/* increase refcount for the snapshot builder */
SnapBuildSnapIncRefcount(builder->snapshot);
}
@@ -743,7 +743,7 @@ SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn)
/* only build a new snapshot if we don't have a prebuilt one */
if (builder->snapshot == NULL)
{
- builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
+ builder->snapshot = SnapBuildBuildSnapshot(builder);
/* increase refcount for the snapshot builder */
SnapBuildSnapIncRefcount(builder->snapshot);
}
@@ -1061,7 +1061,7 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
if (builder->snapshot)
SnapBuildSnapDecRefcount(builder->snapshot);
- builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
+ builder->snapshot = SnapBuildBuildSnapshot(builder);
/* we might need to execute invalidations, add snapshot */
if (!ReorderBufferXidHasBaseSnapshot(builder->reorder, xid))
@@ -1831,7 +1831,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
{
SnapBuildSnapDecRefcount(builder->snapshot);
}
- builder->snapshot = SnapBuildBuildSnapshot(builder, InvalidTransactionId);
+ builder->snapshot = SnapBuildBuildSnapshot(builder);
SnapBuildSnapIncRefcount(builder->snapshot);
ReorderBufferSetRestartPoint(builder->reorder, lsn);
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index fe45fb8820..3ff08bfb2b 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -12,70 +12,72 @@
* logical replication.
*
* The initial data synchronization is done separately for each table,
- * in separate apply worker that only fetches the initial snapshot data
- * from the publisher and then synchronizes the position in stream with
+ * in a separate apply worker that only fetches the initial snapshot data
+ * from the publisher and then synchronizes the position in the stream with
* the main apply worker.
*
- * The are several reasons for doing the synchronization this way:
+ * There are several reasons for doing the synchronization this way:
* - It allows us to parallelize the initial data synchronization
* which lowers the time needed for it to happen.
* - The initial synchronization does not have to hold the xid and LSN
* for the time it takes to copy data of all tables, causing less
* bloat and lower disk consumption compared to doing the
- * synchronization in single process for whole database.
- * - It allows us to synchronize the tables added after the initial
+ * synchronization in a single process for the whole database.
+ * - It allows us to synchronize any tables added after the initial
* synchronization has finished.
*
* The stream position synchronization works in multiple steps.
- * - Sync finishes copy and sets table state as SYNCWAIT and waits
- * for state to change in a loop.
+ * - Sync finishes copy and sets worker state as SYNCWAIT and waits for
+ * state to change in a loop.
* - Apply periodically checks tables that are synchronizing for SYNCWAIT.
- * When the desired state appears it will compare its position in the
- * stream with the SYNCWAIT position and based on that changes the
- * state to based on following rules:
- * - if the apply is in front of the sync in the WAL stream the new
- * state is set to CATCHUP and apply loops until the sync process
- * catches up to the same LSN as apply
- * - if the sync is in front of the apply in the WAL stream the new
- * state is set to SYNCDONE
- * - if both apply and sync are at the same position in the WAL stream
- * the state of the table is set to READY
- * - If the state was set to CATCHUP sync will read the stream and
- * apply changes until it catches up to the specified stream
- * position and then sets state to READY and signals apply that it
- * can stop waiting and exits, if the state was set to something
- * else than CATCHUP the sync process will simply end.
- * - If the state was set to SYNCDONE by apply, the apply will
- * continue tracking the table until it reaches the SYNCDONE stream
- * position at which point it sets state to READY and stops tracking.
+ * When the desired state appears, it sets the worker state to
+ * CATCHUP and starts loop-waiting until either the table state is set
+ * to SYNCDONE or the sync worker exits.
+ * - After the sync worker has seen the state change to CATCHUP, it will
+ * read the stream and apply changes (acting like an apply worker) until
+ * it catches up to the specified stream position. Then it sets the
+ * state to SYNCDONE. There might be zero changes applied between
+ * CATCHUP and SYNCDONE, because the sync worker might be ahead of the
+ * apply worker.
+ * - Once the state is set to SYNCDONE, the apply will continue tracking
+ * the table until it reaches the SYNCDONE stream position, at which
+ * point it sets state to READY and stops tracking. Again, there might
+ * be zero changes in between.
+ *
+ * So the state progression is always: INIT -> DATASYNC -> SYNCWAIT -> CATCHUP ->
+ * SYNCDONE -> READY.
*
* The catalog pg_subscription_rel is used to keep information about
- * subscribed tables and their state and some transient state during
- * data synchronization is kept in shared memory.
+ * subscribed tables and their state. Some transient state during data
+ * synchronization is kept in shared memory. The states SYNCWAIT and
+ * CATCHUP only appear in memory.
*
* Example flows look like this:
* - Apply is in front:
* sync:8
- * -> set SYNCWAIT
+ * -> set in memory SYNCWAIT
* apply:10
- * -> set CATCHUP
+ * -> set in memory CATCHUP
* -> enter wait-loop
* sync:10
- * -> set READY
+ * -> set in catalog SYNCDONE
* -> exit
* apply:10
* -> exit wait-loop
* -> continue rep
+ * apply:11
+ * -> set in catalog READY
* - Sync in front:
* sync:10
- * -> set SYNCWAIT
+ * -> set in memory SYNCWAIT
* apply:8
- * -> set SYNCDONE
+ * -> set in memory CATCHUP
* -> continue per-table filtering
* sync:10
+ * -> set in catalog SYNCDONE
* -> exit
* apply:10
- * -> set READY
+ * -> set in catalog READY
* -> stop per-table filtering
* -> continue rep
*-------------------------------------------------------------------------
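Two helpers added further down in this file implement the handshake this header describes. As a quick orientation (a summary sketch, not verbatim code from the patch), each side waits like this:

	/*
	 * Apply worker: after flipping the sync worker's in-memory state to
	 * CATCHUP, wait for the catalog state to reach SYNCDONE.
	 */
	wait_for_relation_state_change(rstate->relid, SUBREL_STATE_SYNCDONE);

	/*
	 * Sync worker: after setting its in-memory state to SYNCWAIT, wait
	 * for the apply worker to flip it to CATCHUP.
	 */
	wait_for_worker_state_change(SUBREL_STATE_CATCHUP);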
@@ -100,6 +102,7 @@
#include "replication/walreceiver.h"
#include "replication/worker_internal.h"
+#include "utils/snapmgr.h"
#include "storage/ipc.h"
#include "utils/builtins.h"
@@ -130,61 +133,119 @@ finish_sync_worker(void)
/* And flush all writes. */
XLogFlush(GetXLogWriteRecPtr());
- /* Find the main apply worker and signal it. */
- logicalrep_worker_wakeup(MyLogicalRepWorker->subid, InvalidOid);
-
StartTransactionCommand();
ereport(LOG,
(errmsg("logical replication table synchronization worker for subscription \"%s\", table \"%s\" has finished",
- MySubscription->name, get_rel_name(MyLogicalRepWorker->relid))));
+ MySubscription->name,
+ get_rel_name(MyLogicalRepWorker->relid))));
CommitTransactionCommand();
+ /* Find the main apply worker and signal it. */
+ logicalrep_worker_wakeup(MyLogicalRepWorker->subid, InvalidOid);
+
/* Stop gracefully */
- walrcv_disconnect(wrconn);
proc_exit(0);
}
/*
- * Wait until the table synchronization change.
+ * Wait until the relation synchronization state is set in the catalog to the
+ * expected one.
+ *
+ * Used when transitioning from CATCHUP state to SYNCDONE.
*
- * Returns false if the relation subscription state disappeared.
+ * Returns false if the synchronization worker has disappeared or the table state
+ * has been reset.
*/
static bool
-wait_for_sync_status_change(Oid relid, char origstate)
+wait_for_relation_state_change(Oid relid, char expected_state)
{
int rc;
- char state = origstate;
+ char state;
- while (!got_SIGTERM)
+ for (;;)
{
LogicalRepWorker *worker;
+ XLogRecPtr statelsn;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* XXX use cache invalidation here to improve performance? */
+ PushActiveSnapshot(GetLatestSnapshot());
+ state = GetSubscriptionRelState(MyLogicalRepWorker->subid,
+ relid, &statelsn, true);
+ PopActiveSnapshot();
+ if (state == SUBREL_STATE_UNKNOWN)
+ return false;
+
+ if (state == expected_state)
+ return true;
+
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+
+ /* Check if the opposite worker is still running and bail if not. */
worker = logicalrep_worker_find(MyLogicalRepWorker->subid,
- relid, false);
+ am_tablesync_worker() ? InvalidOid : relid,
+ false);
+ LWLockRelease(LogicalRepWorkerLock);
if (!worker)
- {
- LWLockRelease(LogicalRepWorkerLock);
return false;
- }
- state = worker->relstate;
- LWLockRelease(LogicalRepWorkerLock);
- if (state == SUBREL_STATE_UNKNOWN)
+ rc = WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+ 1000L, WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE);
+
+ /* emergency bailout if postmaster has died */
+ if (rc & WL_POSTMASTER_DEATH)
+ proc_exit(1);
+
+ ResetLatch(MyLatch);
+ }
+
+ return false;
+}
+
+/*
+ * Wait until the apply worker changes the state of our synchronization
+ * worker to the expected one.
+ *
+ * Used when transitioning from SYNCWAIT state to CATCHUP.
+ *
+ * Returns false if the apply worker has disappeared or the table state has been
+ * reset.
+ */
+static bool
+wait_for_worker_state_change(char expected_state)
+{
+ int rc;
+
+ for (;;)
+ {
+ LogicalRepWorker *worker;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Bail if the apply has died. */
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+ worker = logicalrep_worker_find(MyLogicalRepWorker->subid,
+ InvalidOid, false);
+ LWLockRelease(LogicalRepWorkerLock);
+ if (!worker)
return false;
- if (state != origstate)
+ if (MyLogicalRepWorker->relstate == expected_state)
return true;
- rc = WaitLatch(&MyProc->procLatch,
+ rc = WaitLatch(MyLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- 10000L, WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE);
+ 1000L, WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE);
/* emergency bailout if postmaster has died */
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
- ResetLatch(&MyProc->procLatch);
+ ResetLatch(MyLatch);
}
return false;
@@ -203,10 +264,9 @@ invalidate_syncing_table_states(Datum arg, int cacheid, uint32 hashvalue)
* Handle table synchronization cooperation from the synchronization
* worker.
*
- * If the sync worker is in catch up mode and reached the predetermined
- * synchronization point in the WAL stream, mark the table as READY and
- * finish. If it caught up too far, set to SYNCDONE and finish. Things will
- * then proceed in the "sync in front" scenario.
+ * If the sync worker is in CATCHUP state and reached (or passed) the
+ * predetermined synchronization point in the WAL stream, mark the table as
+ * SYNCDONE and finish.
*/
static void
process_syncing_tables_for_sync(XLogRecPtr current_lsn)
@@ -220,10 +280,7 @@ process_syncing_tables_for_sync(XLogRecPtr current_lsn)
{
TimeLineID tli;
- MyLogicalRepWorker->relstate =
- (current_lsn == MyLogicalRepWorker->relstate_lsn)
- ? SUBREL_STATE_READY
- : SUBREL_STATE_SYNCDONE;
+ MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCDONE;
MyLogicalRepWorker->relstate_lsn = current_lsn;
SpinLockRelease(&MyLogicalRepWorker->relmutex);
@@ -231,7 +288,8 @@ process_syncing_tables_for_sync(XLogRecPtr current_lsn)
SetSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate,
- MyLogicalRepWorker->relstate_lsn);
+ MyLogicalRepWorker->relstate_lsn,
+ true);
walrcv_endstreaming(wrconn, &tli);
finish_sync_worker();
@@ -255,17 +313,11 @@ process_syncing_tables_for_sync(XLogRecPtr current_lsn)
* at least wal_retrieve_retry_interval.
*
* For tables that are being synchronized already, check if sync workers
- * either need action from the apply worker or have finished.
+ * either need action from the apply worker or have finished. This is the
+ * SYNCWAIT to CATCHUP transition.
*
- * The usual scenario is that the apply got ahead of the sync while the sync
- * ran, and then the action needed by apply is to mark a table for CATCHUP and
- * wait for the catchup to happen. In the less common case that sync worker
- * got in front of the apply worker, the table is marked as SYNCDONE but not
- * ready yet, as it needs to be tracked until apply reaches the same position
- * to which it was synced.
- *
- * If the synchronization position is reached, then the table can be marked as
- * READY and is no longer tracked.
+ * If the synchronization position is reached (SYNCDONE), then the table can
+ * be marked as READY and is no longer tracked.
*/
static void
process_syncing_tables_for_apply(XLogRecPtr current_lsn)
@@ -282,7 +334,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
Assert(!IsTransactionState());
- /* We need up to date sync state info for subscription tables here. */
+ /* We need up-to-date sync state info for subscription tables here. */
if (!table_states_valid)
{
MemoryContext oldctx;
@@ -314,7 +366,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
}
/*
- * Prepare hash table for tracking last start times of workers, to avoid
+ * Prepare a hash table for tracking last start times of workers, to avoid
* immediate restarts. We don't need it if there are no tables that need
* syncing.
*/
@@ -339,7 +391,9 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
last_start_times = NULL;
}
- /* Process all tables that are being synchronized. */
+ /*
+ * Process all tables that are being synchronized.
+ */
foreach(lc, table_states)
{
SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc);
@@ -348,8 +402,8 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
{
/*
* Apply has caught up to the position where the table sync has
- * finished. Time to mark the table as ready so that apply will
- * just continue to replicate it normally.
+ * finished. Mark the table as ready so that the apply will just
+ * continue to replicate it normally.
*/
if (current_lsn >= rstate->lsn)
{
@@ -362,7 +416,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
}
SetSubscriptionRelState(MyLogicalRepWorker->subid,
rstate->relid, rstate->state,
- rstate->lsn);
+ rstate->lsn, true);
}
}
else
@@ -383,9 +437,9 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
else
/*
- * If no sync worker for this table yet, count running sync
- * workers for this subscription, while we have the lock, for
- * later.
+ * If there is no sync worker for this table yet, count
+ * running sync workers for this subscription, while we have
+ * the lock, for later.
*/
nsyncworkers = logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
LWLockRelease(LogicalRepWorkerLock);
@@ -397,50 +451,34 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
if (syncworker && rstate->state == SUBREL_STATE_SYNCWAIT)
{
/*
- * There are three possible synchronization situations here.
- *
- * a) Apply is in front of the table sync: We tell the table
- * sync to CATCHUP.
- *
- * b) Apply is behind the table sync: We tell the table sync
- * to mark the table as SYNCDONE and finish.
- *
- * c) Apply and table sync are at the same position: We tell
- * table sync to mark the table as READY and finish.
- *
- * In any case we'll need to wait for table sync to change the
- * state in catalog and only then continue ourselves.
+ * Tell the sync worker it can catch up now. We'll wait for it so
+ * it does not get lost.
*/
- if (current_lsn > rstate->lsn)
- {
- rstate->state = SUBREL_STATE_CATCHUP;
- rstate->lsn = current_lsn;
- }
- else if (current_lsn == rstate->lsn)
- {
- rstate->state = SUBREL_STATE_READY;
- rstate->lsn = current_lsn;
- }
- else
- rstate->state = SUBREL_STATE_SYNCDONE;
-
SpinLockAcquire(&syncworker->relmutex);
- syncworker->relstate = rstate->state;
- syncworker->relstate_lsn = rstate->lsn;
+ syncworker->relstate = SUBREL_STATE_CATCHUP;
+ syncworker->relstate_lsn =
+ Max(syncworker->relstate_lsn, current_lsn);
SpinLockRelease(&syncworker->relmutex);
/* Signal the sync worker, as it may be waiting for us. */
logicalrep_worker_wakeup_ptr(syncworker);
/*
- * Enter busy loop and wait for synchronization status change.
+ * Enter busy loop and wait for synchronization worker to
+ * reach expected state (or die trying).
*/
- wait_for_sync_status_change(rstate->relid, rstate->state);
+ if (!started_tx)
+ {
+ StartTransactionCommand();
+ started_tx = true;
+ }
+ wait_for_relation_state_change(rstate->relid,
+ SUBREL_STATE_SYNCDONE);
}
/*
* If there is no sync worker registered for the table and there
- * is some free sync worker slot, start new sync worker for the
+ * is some free sync worker slot, start a new sync worker for the
* table.
*/
else if (!syncworker && nsyncworkers < max_sync_workers_per_subscription)
@@ -514,7 +552,7 @@ copy_read_data(void *outbuf, int minread, int maxread)
int bytesread = 0;
int avail;
- /* If there are some leftover data from previous read, use them. */
+ /* If there is some leftover data from the previous read, use it. */
avail = copybuf->len - copybuf->cursor;
if (avail)
{
@@ -526,7 +564,7 @@ copy_read_data(void *outbuf, int minread, int maxread)
bytesread += avail;
}
- while (!got_SIGTERM && maxread > 0 && bytesread < minread)
+ while (maxread > 0 && bytesread < minread)
{
pgsocket fd = PGINVALID_SOCKET;
int rc;
@@ -568,7 +606,7 @@ copy_read_data(void *outbuf, int minread, int maxread)
/*
* Wait for more data or latch.
*/
- rc = WaitLatchOrSocket(&MyProc->procLatch,
+ rc = WaitLatchOrSocket(MyLatch,
WL_SOCKET_READABLE | WL_LATCH_SET |
WL_TIMEOUT | WL_POSTMASTER_DEATH,
fd, 1000L, WAIT_EVENT_LOGICAL_SYNC_DATA);
@@ -577,13 +615,9 @@ copy_read_data(void *outbuf, int minread, int maxread)
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
- ResetLatch(&MyProc->procLatch);
+ ResetLatch(MyLatch);
}
- /* Check for exit condition. */
- if (got_SIGTERM)
- proc_exit(0);
-
return bytesread;
}
@@ -661,7 +695,7 @@ fetch_remote_table_info(char *nspname, char *relname,
(errmsg("could not fetch table info for table \"%s.%s\": %s",
nspname, relname, res->err)));
- /* We don't know number of rows coming, so allocate enough space. */
+ /* We don't know the number of rows coming, so allocate enough space. */
lrel->attnames = palloc0(MaxTupleAttributeNumber * sizeof(char *));
lrel->atttyps = palloc0(MaxTupleAttributeNumber * sizeof(Oid));
lrel->attkeys = NULL;
@@ -763,7 +797,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
StartTransactionCommand();
relstate = GetSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
- &relstate_lsn, false);
+ &relstate_lsn, true);
CommitTransactionCommand();
SpinLockAcquire(&MyLogicalRepWorker->relmutex);
@@ -785,6 +819,11 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
MySubscription->oid,
MyLogicalRepWorker->relid);
+ /*
+ * Here we use the slot name instead of the subscription name as the
+ * application_name, so that it is different from the main apply worker,
+ * so that synchronous replication can distinguish them.
+ */
wrconn = walrcv_connect(MySubscription->conninfo, true, slotname, &err);
if (wrconn == NULL)
ereport(ERROR,
@@ -808,28 +847,29 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
SetSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate,
- MyLogicalRepWorker->relstate_lsn);
+ MyLogicalRepWorker->relstate_lsn,
+ true);
CommitTransactionCommand();
pgstat_report_stat(false);
/*
- * We want to do the table data sync in single transaction.
+ * We want to do the table data sync in a single transaction.
*/
StartTransactionCommand();
/*
- * Use standard write lock here. It might be better to
- * disallow access to table while it's being synchronized. But
- * we don't want to block the main apply process from working
- * and it has to open relation in RowExclusiveLock when
- * remapping remote relation id to local one.
+ * Use a standard write lock here. It might be better to
+ * disallow access to the table while it's being synchronized.
+ * But we don't want to block the main apply process from
+ * working and it has to open the relation in RowExclusiveLock
+ * when remapping remote relation id to local one.
*/
rel = heap_open(MyLogicalRepWorker->relid, RowExclusiveLock);
/*
- * Create temporary slot for the sync process. We do this
- * inside transaction so that we can use the snapshot made by
- * the slot to get existing data.
+ * Create a temporary slot for the sync process. We do this
+ * inside the transaction so that we can use the snapshot made
+ * by the slot to get existing data.
*/
res = walrcv_exec(wrconn,
"BEGIN READ ONLY ISOLATION LEVEL "
@@ -844,7 +884,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
* Create new temporary logical decoding slot.
*
* We'll use slot for data copy so make sure the snapshot is
- * used for the transaction, that way the COPY will get data
+ * used for the transaction; that way the COPY will get data
* that is consistent with the lsn used by the slot to start
* decoding.
*/
@@ -874,26 +914,43 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
MyLogicalRepWorker->relstate_lsn = *origin_startpos;
SpinLockRelease(&MyLogicalRepWorker->relmutex);
- /*
- * Wait for main apply worker to either tell us to catchup or
- * that we are done.
+ /* Wait for the main apply worker to tell us to catch up. */
+ wait_for_worker_state_change(SUBREL_STATE_CATCHUP);
+
+ /*----------
+ * There are now two possible states here:
+ * a) Sync is behind the apply. If that's the case we need to
+ * catch up with it by consuming the logical replication
+ * stream up to the relstate_lsn. For that, we exit this
+ * function and continue in ApplyWorkerMain().
+ * b) Sync is caught up with the apply. So it can just set
+ * the state to SYNCDONE and finish.
+ *----------
*/
- wait_for_sync_status_change(MyLogicalRepWorker->relid,
- MyLogicalRepWorker->relstate);
- if (MyLogicalRepWorker->relstate != SUBREL_STATE_CATCHUP)
+ if (*origin_startpos >= MyLogicalRepWorker->relstate_lsn)
{
- /* Update the new state. */
+ /*
+ * Update the new state in catalog. No need to bother
+ * with the shmem state as we are exiting for good.
+ */
SetSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
- MyLogicalRepWorker->relstate,
- MyLogicalRepWorker->relstate_lsn);
+ SUBREL_STATE_SYNCDONE,
+ *origin_startpos,
+ true);
finish_sync_worker();
}
break;
}
case SUBREL_STATE_SYNCDONE:
case SUBREL_STATE_READY:
- /* Nothing to do here but finish. */
+ case SUBREL_STATE_UNKNOWN:
+
+ /*
+ * Nothing to do here but finish. (UNKNOWN means the relation was
+ * removed from pg_subscription_rel before the sync worker could
+ * start.)
+ */
finish_sync_worker();
break;
default:
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index c67720bd2f..97d2dff0dd 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -72,6 +72,8 @@
#include "storage/proc.h"
#include "storage/procarray.h"
+#include "tcop/tcopprot.h"
+
#include "utils/builtins.h"
#include "utils/catcache.h"
#include "utils/datum.h"
@@ -116,7 +118,10 @@ static void send_feedback(XLogRecPtr recvpos, bool force, bool requestReply);
static void store_flush_position(XLogRecPtr remote_lsn);
-static void reread_subscription(void);
+static void maybe_reread_subscription(void);
+
+/* Flags set by signal handlers */
+static volatile sig_atomic_t got_SIGHUP = false;
/*
* Should this worker apply changes for given relation.
@@ -160,8 +165,7 @@ ensure_transaction(void)
StartTransactionCommand();
- if (!MySubscriptionValid)
- reread_subscription();
+ maybe_reread_subscription();
MemoryContextSwitchTo(ApplyMessageContext);
return true;
@@ -458,6 +462,12 @@ apply_handle_commit(StringInfo s)
store_flush_position(commit_data.end_lsn);
}
+ else
+ {
+ /* Process any invalidation messages that might have accumulated. */
+ AcceptInvalidationMessages();
+ maybe_reread_subscription();
+ }
in_remote_transaction = false;
@@ -1005,7 +1015,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
/* mark as idle, before starting to loop */
pgstat_report_activity(STATE_IDLE, NULL);
- while (!got_SIGTERM)
+ for (;;)
{
pgsocket fd = PGINVALID_SOCKET;
int rc;
@@ -1015,6 +1025,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
TimestampTz last_recv_timestamp = GetCurrentTimestamp();
bool ping_sent = false;
+ CHECK_FOR_INTERRUPTS();
+
MemoryContextSwitchTo(ApplyMessageContext);
len = walrcv_receive(wrconn, &buf, &fd);
@@ -1112,8 +1124,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
* now.
*/
AcceptInvalidationMessages();
- if (!MySubscriptionValid)
- reread_subscription();
+ maybe_reread_subscription();
/* Process any table synchronization changes. */
process_syncing_tables(last_received);
@@ -1135,7 +1146,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
/*
* Wait for more data or latch.
*/
- rc = WaitLatchOrSocket(&MyProc->procLatch,
+ rc = WaitLatchOrSocket(MyLatch,
WL_SOCKET_READABLE | WL_LATCH_SET |
WL_TIMEOUT | WL_POSTMASTER_DEATH,
fd, NAPTIME_PER_CYCLE,
@@ -1145,6 +1156,12 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
+
if (got_SIGHUP)
{
got_SIGHUP = false;
@@ -1198,8 +1215,6 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
send_feedback(last_received, requestReply, requestReply);
}
-
- ResetLatch(&MyProc->procLatch);
}
}
@@ -1295,17 +1310,20 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
last_flushpos = flushpos;
}
-
/*
- * Reread subscription info and exit on change.
+ * Reread subscription info if needed. Most changes will cause the worker to exit.
*/
static void
-reread_subscription(void)
+maybe_reread_subscription(void)
{
MemoryContext oldctx;
Subscription *newsub;
bool started_tx = false;
+ /* When the cache state is valid, there is nothing to do here. */
+ if (MySubscriptionValid)
+ return;
+
/* This function might be called inside or outside of transaction. */
if (!IsTransactionState())
{
@@ -1325,11 +1343,10 @@ reread_subscription(void)
if (!newsub)
{
ereport(LOG,
- (errmsg("logical replication apply worker for subscription \"%s\" will "
- "stop because the subscription was removed",
- MySubscription->name)));
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "stop because the subscription was removed",
+ MySubscription->name)));
- walrcv_disconnect(wrconn);
proc_exit(0);
}
@@ -1340,11 +1357,10 @@ reread_subscription(void)
if (!newsub->enabled)
{
ereport(LOG,
- (errmsg("logical replication apply worker for subscription \"%s\" will "
- "stop because the subscription was disabled",
- MySubscription->name)));
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "stop because the subscription was disabled",
+ MySubscription->name)));
- walrcv_disconnect(wrconn);
proc_exit(0);
}
@@ -1355,11 +1371,10 @@ reread_subscription(void)
if (strcmp(newsub->conninfo, MySubscription->conninfo) != 0)
{
ereport(LOG,
- (errmsg("logical replication apply worker for subscription \"%s\" will "
- "restart because the connection information was changed",
- MySubscription->name)));
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "restart because the connection information was changed",
+ MySubscription->name)));
- walrcv_disconnect(wrconn);
proc_exit(0);
}
@@ -1370,11 +1385,10 @@ reread_subscription(void)
if (strcmp(newsub->name, MySubscription->name) != 0)
{
ereport(LOG,
- (errmsg("logical replication apply worker for subscription \"%s\" will "
- "restart because subscription was renamed",
- MySubscription->name)));
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "restart because subscription was renamed",
+ MySubscription->name)));
- walrcv_disconnect(wrconn);
proc_exit(0);
}
@@ -1388,11 +1402,10 @@ reread_subscription(void)
if (strcmp(newsub->slotname, MySubscription->slotname) != 0)
{
ereport(LOG,
- (errmsg("logical replication apply worker for subscription \"%s\" will "
- "restart because the replication slot name was changed",
- MySubscription->name)));
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "restart because the replication slot name was changed",
+ MySubscription->name)));
- walrcv_disconnect(wrconn);
proc_exit(0);
}
@@ -1403,11 +1416,10 @@ reread_subscription(void)
if (!equal(newsub->publications, MySubscription->publications))
{
ereport(LOG,
- (errmsg("logical replication apply worker for subscription \"%s\" will "
- "restart because subscription's publications were changed",
- MySubscription->name)));
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "restart because subscription's publications were changed",
+ MySubscription->name)));
- walrcv_disconnect(wrconn);
proc_exit(0);
}
@@ -1443,6 +1455,19 @@ subscription_change_cb(Datum arg, int cacheid, uint32 hashvalue)
MySubscriptionValid = false;
}
+/* SIGHUP: set flag to reload configuration at next convenient time */
+static void
+logicalrep_worker_sighup(SIGNAL_ARGS)
+{
+ int save_errno = errno;
+
+ got_SIGHUP = true;
+
+ /* Waken anything waiting on the process latch */
+ SetLatch(MyLatch);
+
+ errno = save_errno;
+}
/* Logical Replication Apply worker entry point */
void
@@ -1460,17 +1485,13 @@ ApplyWorkerMain(Datum main_arg)
/* Setup signal handling */
pqsignal(SIGHUP, logicalrep_worker_sighup);
- pqsignal(SIGTERM, logicalrep_worker_sigterm);
+ pqsignal(SIGTERM, die);
BackgroundWorkerUnblockSignals();
/* Initialise stats to a sanish value */
MyLogicalRepWorker->last_send_time = MyLogicalRepWorker->last_recv_time =
MyLogicalRepWorker->reply_time = GetCurrentTimestamp();
- /* Make it easy to identify our processes. */
- SetConfigOption("application_name", MyBgworkerEntry->bgw_name,
- PGC_USERSET, PGC_S_SESSION);
-
/* Load the libpq-specific functions */
load_file("libpqwalreceiver", false);
@@ -1503,9 +1524,9 @@ ApplyWorkerMain(Datum main_arg)
if (!MySubscription->enabled)
{
ereport(LOG,
- (errmsg("logical replication apply worker for subscription \"%s\" will not "
+ (errmsg("logical replication apply worker for subscription \"%s\" will not "
"start because the subscription was disabled during startup",
- MySubscription->name)));
+ MySubscription->name)));
proc_exit(0);
}
@@ -1518,7 +1539,7 @@ ApplyWorkerMain(Datum main_arg)
if (am_tablesync_worker())
ereport(LOG,
(errmsg("logical replication table synchronization worker for subscription \"%s\", table \"%s\" has started",
- MySubscription->name, get_rel_name(MyLogicalRepWorker->relid))));
+ MySubscription->name, get_rel_name(MyLogicalRepWorker->relid))));
else
ereport(LOG,
(errmsg("logical replication apply worker for subscription \"%s\" has started",
@@ -1556,8 +1577,8 @@ ApplyWorkerMain(Datum main_arg)
/*
* This shouldn't happen if the subscription is enabled, but guard
- * against DDL bugs or manual catalog changes. (libpqwalreceiver
- * will crash if slot is NULL.
+ * against DDL bugs or manual catalog changes. (libpqwalreceiver will
+ * crash if slot is NULL.)
*/
if (!myslotname)
ereport(ERROR,
@@ -1574,7 +1595,7 @@ ApplyWorkerMain(Datum main_arg)
origin_startpos = replorigin_session_get_progress(false);
CommitTransactionCommand();
- wrconn = walrcv_connect(MySubscription->conninfo, true, myslotname,
+ wrconn = walrcv_connect(MySubscription->conninfo, true, MySubscription->name,
&err);
if (wrconn == NULL)
ereport(ERROR,
@@ -1610,8 +1631,14 @@ ApplyWorkerMain(Datum main_arg)
/* Run the main loop. */
LogicalRepApplyLoop(origin_startpos);
- walrcv_disconnect(wrconn);
-
- /* We should only get here if we received SIGTERM */
proc_exit(0);
}
+
+/*
+ * Is the current process a logical replication worker?
+ */
+bool
+IsLogicalWorker(void)
+{
+ return MyLogicalRepWorker != NULL;
+}
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 5386e86aa6..c0f7fbb2b2 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -331,8 +331,6 @@ ReplicationSlotAcquire(const char *name)
Assert(MyReplicationSlot == NULL);
- ReplicationSlotValidateName(name, ERROR);
-
/* Search for the named slot and mark it active if we find it. */
LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
for (i = 0; i < max_replication_slots; i++)
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 49cce38880..976a42f86d 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -24,12 +24,15 @@
* are treated as not a crash but approximately normal termination;
* the walsender will exit quickly without sending any more XLOG records.
*
- * If the server is shut down, postmaster sends us SIGUSR2 after all regular
- * backends have exited. This causes the walsender to switch to the "stopping"
- * state. In this state, the walsender will reject any replication command
- * that may generate WAL activity. The checkpointer begins the shutdown
+ * If the server is shut down, checkpointer sends us
+ * PROCSIG_WALSND_INIT_STOPPING after all regular backends have exited. If
+ * the backend is idle or running an SQL query, this causes the backend to
+ * shut down; if logical replication is in progress, all existing WAL records
+ * are processed, followed by a shutdown. Otherwise this causes the walsender
+ * to switch to the "stopping" state. In this state, the walsender will reject
+ * any further replication commands. The checkpointer begins the shutdown
* checkpoint once all walsenders are confirmed as stopping. When the shutdown
- * checkpoint finishes, the postmaster sends us SIGINT. This instructs
+ * checkpoint finishes, the postmaster sends us SIGUSR2. This instructs
* walsender to send any outstanding WAL, including the shutdown checkpoint
* record, wait for it to be replicated to the standby, and then exit.
*
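Reduced to its essentials, the new shutdown choreography looks like this (a sketch; the checkpointer-side call site is not part of this excerpt, though both functions it calls are added at the end of this file's diff):

	/* Checkpointer, when a shutdown checkpoint is requested: */
	WalSndInitStopping();		/* PROCSIG_WALSND_INIT_STOPPING to each walsender */
	WalSndWaitStopping();		/* wait until all have quit or are STOPPING */
	/* ... only now is it safe to write the shutdown checkpoint ... */

	/*
	 * Postmaster, once the checkpoint is done: SignalChildren(SIGUSR2),
	 * telling every remaining walsender to flush outstanding WAL and exit.
	 */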
@@ -179,15 +182,14 @@ static bool streamingDoneReceiving;
static bool WalSndCaughtUp = false;
/* Flags set by signal handlers for later service in main loop */
-static volatile sig_atomic_t got_SIGHUP = false;
-static volatile sig_atomic_t got_SIGINT = false;
static volatile sig_atomic_t got_SIGUSR2 = false;
+static volatile sig_atomic_t got_STOPPING = false;
/*
- * This is set while we are streaming. When not set, SIGINT signal will be
- * handled like SIGTERM. When set, the main loop is responsible for checking
- * got_SIGINT and terminating when it's set (after streaming any remaining
- * WAL).
+ * This is set while we are streaming. When not set, the
+ * PROCSIG_WALSND_INIT_STOPPING signal will be handled like SIGTERM. When set,
+ * the main loop is responsible for checking got_STOPPING and terminating when
+ * it's set (after streaming any remaining WAL).
*/
static volatile sig_atomic_t replication_active = false;
@@ -215,9 +217,6 @@ static struct
} LagTracker;
/* Signal handlers */
-static void WalSndSigHupHandler(SIGNAL_ARGS);
-static void WalSndXLogSendHandler(SIGNAL_ARGS);
-static void WalSndSwitchStopping(SIGNAL_ARGS);
static void WalSndLastCycleHandler(SIGNAL_ARGS);
/* Prototypes for private functions */
@@ -306,14 +305,12 @@ WalSndErrorCleanup(void)
ReplicationSlotCleanup();
replication_active = false;
- if (got_SIGINT)
+
+ if (got_STOPPING || got_SIGUSR2)
proc_exit(0);
/* Revert back to startup state */
WalSndSetState(WALSNDSTATE_STARTUP);
-
- if (got_SIGUSR2)
- WalSndSetState(WALSNDSTATE_STOPPING);
}
/*
@@ -686,7 +683,7 @@ StartReplication(StartReplicationCmd *cmd)
WalSndLoop(XLogSendPhysical);
replication_active = false;
- if (got_SIGINT)
+ if (got_STOPPING)
proc_exit(0);
WalSndSetState(WALSNDSTATE_STARTUP);
@@ -1064,7 +1061,7 @@ StartLogicalReplication(StartReplicationCmd *cmd)
{
ereport(LOG,
(errmsg("terminating walsender process after promotion")));
- got_SIGINT = true;
+ got_STOPPING = true;
}
WalSndSetState(WALSNDSTATE_CATCHUP);
@@ -1115,7 +1112,7 @@ StartLogicalReplication(StartReplicationCmd *cmd)
ReplicationSlotRelease();
replication_active = false;
- if (got_SIGINT)
+ if (got_STOPPING)
proc_exit(0);
WalSndSetState(WALSNDSTATE_STARTUP);
@@ -1202,9 +1199,9 @@ WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
CHECK_FOR_INTERRUPTS();
/* Process any requests or signals received recently */
- if (got_SIGHUP)
+ if (ConfigReloadPending)
{
- got_SIGHUP = false;
+ ConfigReloadPending = false;
ProcessConfigFile(PGC_SIGHUP);
SyncRepInitConfig();
}
@@ -1310,9 +1307,9 @@ WalSndWaitForWal(XLogRecPtr loc)
CHECK_FOR_INTERRUPTS();
/* Process any requests or signals received recently */
- if (got_SIGHUP)
+ if (ConfigReloadPending)
{
- got_SIGHUP = false;
+ ConfigReloadPending = false;
ProcessConfigFile(PGC_SIGHUP);
SyncRepInitConfig();
}
@@ -1320,6 +1317,14 @@ WalSndWaitForWal(XLogRecPtr loc)
/* Check for input from the client */
ProcessRepliesIfAny();
+ /*
+ * If we're shutting down, trigger pending WAL to be written out,
+ * otherwise we'd possibly end up waiting for WAL that never gets
+ * written, because walwriter has shut down already.
+ */
+ if (got_STOPPING)
+ XLogBackgroundFlush();
+
/* Update our idea of the currently flushed position. */
if (!RecoveryInProgress())
RecentFlushPtr = GetFlushRecPtr();
@@ -1327,14 +1332,6 @@ WalSndWaitForWal(XLogRecPtr loc)
RecentFlushPtr = GetXLogReplayRecPtr(NULL);
/*
- * If postmaster asked us to switch to the stopping state, do so.
- * Shutdown is in progress and this will allow the checkpointer to
- * move on with the shutdown checkpoint.
- */
- if (got_SIGUSR2)
- WalSndSetState(WALSNDSTATE_STOPPING);
-
- /*
* If postmaster asked us to stop, don't wait here anymore. This will
* cause the xlogreader to return without reading a full record, which
* is the fastest way to reach the mainloop which then can quit.
@@ -1343,7 +1340,7 @@ WalSndWaitForWal(XLogRecPtr loc)
* RecentFlushPtr, so we can send all remaining data before shutting
* down.
*/
- if (got_SIGINT)
+ if (got_STOPPING)
break;
/*
@@ -1421,7 +1418,7 @@ exec_replication_command(const char *cmd_string)
* If WAL sender has been told that shutdown is getting close, switch its
* status accordingly to handle the next replication commands correctly.
*/
- if (got_SIGUSR2)
+ if (got_STOPPING)
WalSndSetState(WALSNDSTATE_STOPPING);
/*
@@ -2102,9 +2099,9 @@ WalSndLoop(WalSndSendDataCallback send_data)
CHECK_FOR_INTERRUPTS();
/* Process any requests or signals received recently */
- if (got_SIGHUP)
+ if (ConfigReloadPending)
{
- got_SIGHUP = false;
+ ConfigReloadPending = false;
ProcessConfigFile(PGC_SIGHUP);
SyncRepInitConfig();
}
@@ -2155,20 +2152,13 @@ WalSndLoop(WalSndSendDataCallback send_data)
}
/*
- * At the reception of SIGUSR2, switch the WAL sender to the
- * stopping state.
- */
- if (got_SIGUSR2)
- WalSndSetState(WALSNDSTATE_STOPPING);
-
- /*
- * When SIGINT arrives, we send any outstanding logs up to the
+ * When SIGUSR2 arrives, we send any outstanding logs up to the
* shutdown checkpoint record (i.e., the latest record), wait for
* them to be replicated to the standby, and exit. This may be a
* normal termination at shutdown, or a promotion, the walsender
* is not sure which.
*/
- if (got_SIGINT)
+ if (got_SIGUSR2)
WalSndDone(send_data);
}
@@ -2483,6 +2473,10 @@ XLogSendPhysical(void)
XLogRecPtr endptr;
Size nbytes;
+ /* If requested, switch the WAL sender to the stopping state. */
+ if (got_STOPPING)
+ WalSndSetState(WALSNDSTATE_STOPPING);
+
if (streamingDoneSending)
{
WalSndCaughtUp = true;
@@ -2773,7 +2767,16 @@ XLogSendLogical(void)
* point, then we're caught up.
*/
if (logical_decoding_ctx->reader->EndRecPtr >= GetFlushRecPtr())
+ {
WalSndCaughtUp = true;
+
+ /*
+ * Have WalSndLoop() terminate the connection in an orderly
+ * manner, after writing out all the pending data.
+ */
+ if (got_STOPPING)
+ got_SIGUSR2 = true;
+ }
}
/* Update shared memory status */
@@ -2883,51 +2886,13 @@ WalSndRqstFileReload(void)
}
}
-/* SIGHUP: set flag to re-read config file at next convenient time */
-static void
-WalSndSigHupHandler(SIGNAL_ARGS)
-{
- int save_errno = errno;
-
- got_SIGHUP = true;
-
- SetLatch(MyLatch);
-
- errno = save_errno;
-}
-
-/* SIGUSR1: set flag to send WAL records */
-static void
-WalSndXLogSendHandler(SIGNAL_ARGS)
-{
- int save_errno = errno;
-
- latch_sigusr1_handler();
-
- errno = save_errno;
-}
-
-/* SIGUSR2: set flag to switch to stopping state */
-static void
-WalSndSwitchStopping(SIGNAL_ARGS)
-{
- int save_errno = errno;
-
- got_SIGUSR2 = true;
- SetLatch(MyLatch);
-
- errno = save_errno;
-}
-
/*
- * SIGINT: set flag to do a last cycle and shut down afterwards. The WAL
- * sender should already have been switched to WALSNDSTATE_STOPPING at
- * this point.
+ * Handle PROCSIG_WALSND_INIT_STOPPING signal.
*/
-static void
-WalSndLastCycleHandler(SIGNAL_ARGS)
+void
+HandleWalSndInitStopping(void)
{
- int save_errno = errno;
+ Assert(am_walsender);
/*
* If replication has not yet started, die like with SIGTERM. If
@@ -2937,8 +2902,21 @@ WalSndLastCycleHandler(SIGNAL_ARGS)
*/
if (!replication_active)
kill(MyProcPid, SIGTERM);
+ else
+ got_STOPPING = true;
+}
+
+/*
+ * SIGUSR2: set flag to do a last cycle and shut down afterwards. The WAL
+ * sender should already have been switched to WALSNDSTATE_STOPPING at
+ * this point.
+ */
+static void
+WalSndLastCycleHandler(SIGNAL_ARGS)
+{
+ int save_errno = errno;
- got_SIGINT = true;
+ got_SIGUSR2 = true;
SetLatch(MyLatch);
errno = save_errno;
@@ -2949,16 +2927,16 @@ void
WalSndSignals(void)
{
/* Set up signal handlers */
- pqsignal(SIGHUP, WalSndSigHupHandler); /* set flag to read config
+ pqsignal(SIGHUP, PostgresSigHupHandler); /* set flag to read config
* file */
- pqsignal(SIGINT, WalSndLastCycleHandler); /* request a last cycle and
- * shutdown */
+ pqsignal(SIGINT, StatementCancelHandler); /* query cancel */
pqsignal(SIGTERM, die); /* request shutdown */
pqsignal(SIGQUIT, quickdie); /* hard crash time */
InitializeTimeouts(); /* establishes SIGALRM handler */
pqsignal(SIGPIPE, SIG_IGN);
- pqsignal(SIGUSR1, WalSndXLogSendHandler); /* request WAL sending */
- pqsignal(SIGUSR2, WalSndSwitchStopping); /* switch to stopping state */
+ pqsignal(SIGUSR1, procsignal_sigusr1_handler);
+ pqsignal(SIGUSR2, WalSndLastCycleHandler); /* request a last cycle and
+ * shutdown */
/* Reset some signals that are accepted by postmaster but not here */
pqsignal(SIGCHLD, SIG_DFL);
@@ -3037,9 +3015,36 @@ WalSndWakeup(void)
}
/*
- * Wait that all the WAL senders have reached the stopping state. This is
- * used by the checkpointer to control when shutdown checkpoints can
- * safely begin.
+ * Signal all walsenders to move to stopping state.
+ *
+ * This will trigger walsenders to move to a state where no further WAL can be
+ * generated. See this file's header for details.
+ */
+void
+WalSndInitStopping(void)
+{
+ int i;
+
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
+ pid_t pid;
+
+ SpinLockAcquire(&walsnd->mutex);
+ pid = walsnd->pid;
+ SpinLockRelease(&walsnd->mutex);
+
+ if (pid == 0)
+ continue;
+
+ SendProcSignal(pid, PROCSIG_WALSND_INIT_STOPPING, InvalidBackendId);
+ }
+}
+
+/*
+ * Wait until all the WAL senders have quit or reached the stopping state. This
+ * is used by the checkpointer to control when the shutdown checkpoint can
+ * safely be performed.
*/
void
WalSndWaitStopping(void)
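
Taken together, the walsender changes above form a two-phase shutdown
handshake. A minimal sketch of the checkpointer-side call sequence, using
only functions from this patch (the enclosing function and its placement
are illustrative, not the actual xlog.c code):

    /* Sketch: shutdown-side use of the new walsender handshake. */
    static void
    StopWalSendersBeforeShutdownCheckpoint(void)
    {
        /*
         * Phase 1: ask every active walsender to stop generating WAL.
         * Each one receives PROCSIG_WALSND_INIT_STOPPING via SIGUSR1 and
         * sets got_STOPPING in HandleWalSndInitStopping().
         */
        WalSndInitStopping();

        /*
         * Phase 2: wait until all walsenders have quit or reached
         * WALSNDSTATE_STOPPING, so the shutdown checkpoint record is
         * guaranteed to be the last WAL they need to stream.
         */
        WalSndWaitStopping();

        /* ... now it is safe to write the shutdown checkpoint ... */
    }
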
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 510f49fcc0..c5f6a93e80 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -1987,7 +1987,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
/* Only normal relations can have RLS policies */
if (rte->rtekind != RTE_RELATION ||
- rte->relkind != RELKIND_RELATION)
+ (rte->relkind != RELKIND_RELATION &&
+ rte->relkind != RELKIND_PARTITIONED_TABLE))
continue;
rel = heap_open(rte->relid, NoLock);
@@ -2605,7 +2606,8 @@ relation_is_updatable(Oid reloid,
return 0;
/* If the relation is a table, it is always updatable */
- if (rel->rd_rel->relkind == RELKIND_RELATION)
+ if (rel->rd_rel->relkind == RELKIND_RELATION ||
+ rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
relation_close(rel, AccessShareLock);
return ALL_EVENTS;
@@ -2719,7 +2721,8 @@ relation_is_updatable(Oid reloid,
base_rte = rt_fetch(rtr->rtindex, viewquery->rtable);
Assert(base_rte->rtekind == RTE_RELATION);
- if (base_rte->relkind != RELKIND_RELATION)
+ if (base_rte->relkind != RELKIND_RELATION &&
+ base_rte->relkind != RELKIND_PARTITIONED_TABLE)
{
baseoid = base_rte->relid;
include_cols = adjust_view_column_set(updatable_cols,
diff --git a/src/backend/snowball/Makefile b/src/backend/snowball/Makefile
index 518178ff39..50cbace41d 100644
--- a/src/backend/snowball/Makefile
+++ b/src/backend/snowball/Makefile
@@ -14,8 +14,7 @@ top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
override CPPFLAGS := -I$(top_srcdir)/src/include/snowball \
- -I$(top_srcdir)/src/include/snowball/libstemmer $(CPPFLAGS) \
- $(ICU_CFLAGS)
+ -I$(top_srcdir)/src/include/snowball/libstemmer $(CPPFLAGS)
OBJS= $(WIN32RES) dict_snowball.o api.o utilities.o \
stem_ISO_8859_1_danish.o \
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index 53e6bf2477..55959de91f 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -370,7 +370,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET,
(Latch *) latch, NULL);
- if (wakeEvents & WL_POSTMASTER_DEATH)
+ if (wakeEvents & WL_POSTMASTER_DEATH && IsUnderPostmaster)
AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET,
NULL, NULL);
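
The added IsUnderPostmaster test is what makes this path safe outside
regular backends; roughly (a descriptive note, not patch code):

    /* In a standalone backend (postgres --single) there is no postmaster
     * and no death-watch pipe, so a WL_POSTMASTER_DEATH wait event would
     * be meaningless; the guard simply skips registering it while the
     * latch and socket events continue to work. */
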
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 1c01dd973f..0e0bbf71f0 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1990,14 +1990,15 @@ GetSnapshotData(Snapshot snapshot, bool latest)
* Returns TRUE if successful, FALSE if source xact is no longer running.
*/
bool
-ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
+ProcArrayInstallImportedXmin(TransactionId xmin,
+ VirtualTransactionId *sourcevxid)
{
bool result = false;
ProcArrayStruct *arrayP = procArray;
int index;
Assert(TransactionIdIsNormal(xmin));
- if (!TransactionIdIsNormal(sourcexid))
+ if (!sourcevxid)
return false;
/* Get lock so source xact can't end while we're doing this */
@@ -2014,8 +2015,10 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
if (pgxact->vacuumFlags & PROC_IN_VACUUM)
continue;
- xid = pgxact->xid; /* fetch just once */
- if (xid != sourcexid)
+ /* We are only interested in the specific virtual transaction. */
+ if (proc->backendId != sourcevxid->backendId)
+ continue;
+ if (proc->lxid != sourcevxid->localTransactionId)
continue;
/*
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index f4d4f25e68..55e94249db 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -21,6 +21,7 @@
#include "access/parallel.h"
#include "commands/async.h"
#include "miscadmin.h"
+#include "replication/walsender.h"
#include "storage/latch.h"
#include "storage/ipc.h"
#include "storage/proc.h"
@@ -280,6 +281,9 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
if (CheckProcSignal(PROCSIG_PARALLEL_MESSAGE))
HandleParallelMessageInterrupt();
+ if (CheckProcSignal(PROCSIG_WALSND_INIT_STOPPING))
+ HandleWalSndInitStopping();
+
if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_DATABASE))
RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_DATABASE);
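
The new reason rides on the existing multiplexed-SIGUSR1 scheme; for
reference, the sending side (as used by WalSndInitStopping() above) looks
roughly like this sketch:

    /* Deliver the new procsignal reason to one backend.  SendProcSignal()
     * returns 0 on success and -1 on failure; passing InvalidBackendId
     * means the sender does not know the target's slot, so the procsignal
     * array is searched by pid. */
    if (SendProcSignal(pid, PROCSIG_WALSND_INIT_STOPPING,
                       InvalidBackendId) < 0)
        elog(DEBUG1, "could not signal walsender with pid %d", (int) pid);
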
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index f5bf807cd6..fcd6cc7a8c 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -769,7 +769,7 @@ shm_mq_wait_for_attach(shm_mq_handle *mqh)
*
* The purpose of this function is to make sure that the process
* with which we're communicating doesn't block forever waiting for us to
- * fill or drain the queue once we've lost interest. Whem the sender
+ * fill or drain the queue once we've lost interest. When the sender
* detaches, the receiver can read any messages remaining in the queue;
* further reads will return SHM_MQ_DETACHED. If the receiver detaches,
* further attempts to send messages will likewise return SHM_MQ_DETACHED.
@@ -1167,7 +1167,7 @@ shm_mq_inc_bytes_written(volatile shm_mq *mq, Size n)
}
/*
- * Set sender's latch, unless queue is detached.
+ * Set receiver's latch, unless queue is detached.
*/
static shm_mq_result
shm_mq_notify_receiver(volatile shm_mq *mq)
diff --git a/src/backend/storage/ipc/shm_toc.c b/src/backend/storage/ipc/shm_toc.c
index 9110ffa4a0..50334cd797 100644
--- a/src/backend/storage/ipc/shm_toc.c
+++ b/src/backend/storage/ipc/shm_toc.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * src/include/storage/shm_toc.c
+ * src/backend/storage/ipc/shm_toc.c
*
*-------------------------------------------------------------------------
*/
@@ -20,16 +20,16 @@
typedef struct shm_toc_entry
{
uint64 key; /* Arbitrary identifier */
- uint64 offset; /* Bytes offset */
+ Size offset; /* Offset, in bytes, from TOC start */
} shm_toc_entry;
struct shm_toc
{
- uint64 toc_magic; /* Magic number for this TOC */
+ uint64 toc_magic; /* Magic number identifying this TOC */
slock_t toc_mutex; /* Spinlock for mutual exclusion */
Size toc_total_bytes; /* Bytes managed by this TOC */
Size toc_allocated_bytes; /* Bytes allocated of those managed */
- Size toc_nentry; /* Number of entries in TOC */
+ uint32 toc_nentry; /* Number of entries in TOC */
shm_toc_entry toc_entry[FLEXIBLE_ARRAY_MEMBER];
};
@@ -53,7 +53,7 @@ shm_toc_create(uint64 magic, void *address, Size nbytes)
/*
* Attach to an existing table of contents. If the magic number found at
- * the target address doesn't match our expectations, returns NULL.
+ * the target address doesn't match our expectations, return NULL.
*/
extern shm_toc *
shm_toc_attach(uint64 magic, void *address)
@@ -64,7 +64,7 @@ shm_toc_attach(uint64 magic, void *address)
return NULL;
Assert(toc->toc_total_bytes >= toc->toc_allocated_bytes);
- Assert(toc->toc_total_bytes >= offsetof(shm_toc, toc_entry));
+ Assert(toc->toc_total_bytes > offsetof(shm_toc, toc_entry));
return toc;
}
@@ -76,7 +76,7 @@ shm_toc_attach(uint64 magic, void *address)
* just a way of dividing a single physical shared memory segment into logical
* chunks that may be used for different purposes.
*
- * We allocated backwards from the end of the segment, so that the TOC entries
+ * We allocate backwards from the end of the segment, so that the TOC entries
* can grow forward from the start of the segment.
*/
extern void *
@@ -140,7 +140,7 @@ shm_toc_freespace(shm_toc *toc)
/*
* Insert a TOC entry.
*
- * The idea here is that process setting up the shared memory segment will
+ * The idea here is that the process setting up the shared memory segment will
* register the addresses of data structures within the segment using this
* function. Each data structure will be identified using a 64-bit key, which
* is assumed to be a well-known or discoverable integer. Other processes
@@ -155,17 +155,17 @@ shm_toc_freespace(shm_toc *toc)
* data structure here. But the real idea here is just to give someone mapping
* a dynamic shared memory segment the ability to find the bare minimum number of
* pointers that they need to bootstrap. If you're storing a lot of stuff in
- * here, you're doing it wrong.
+ * the TOC, you're doing it wrong.
*/
void
shm_toc_insert(shm_toc *toc, uint64 key, void *address)
{
volatile shm_toc *vtoc = toc;
- uint64 total_bytes;
- uint64 allocated_bytes;
- uint64 nentry;
- uint64 toc_bytes;
- uint64 offset;
+ Size total_bytes;
+ Size allocated_bytes;
+ Size nentry;
+ Size toc_bytes;
+ Size offset;
/* Relativize pointer. */
Assert(address > (void *) toc);
@@ -181,7 +181,8 @@ shm_toc_insert(shm_toc *toc, uint64 key, void *address)
/* Check for memory exhaustion and overflow. */
if (toc_bytes + sizeof(shm_toc_entry) > total_bytes ||
- toc_bytes + sizeof(shm_toc_entry) < toc_bytes)
+ toc_bytes + sizeof(shm_toc_entry) < toc_bytes ||
+ nentry >= PG_UINT32_MAX)
{
SpinLockRelease(&toc->toc_mutex);
ereport(ERROR,
@@ -208,6 +209,9 @@ shm_toc_insert(shm_toc *toc, uint64 key, void *address)
/*
* Look up a TOC entry.
*
+ * If the key is not found, returns NULL if noError is true, otherwise
+ * throws elog(ERROR).
+ *
* Unlike the other functions in this file, this operation acquires no lock;
* it uses only barriers. It probably wouldn't hurt concurrency very much even
* if it did get a lock, but since it's reasonably likely that a group of
@@ -215,21 +219,29 @@ shm_toc_insert(shm_toc *toc, uint64 key, void *address)
* right around the same time, there seems to be some value in avoiding it.
*/
void *
-shm_toc_lookup(shm_toc *toc, uint64 key)
+shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
{
- uint64 nentry;
- uint64 i;
+ uint32 nentry;
+ uint32 i;
- /* Read the number of entries before we examine any entry. */
+ /*
+ * Read the number of entries before we examine any entry. We assume that
+ * reading a uint32 is atomic.
+ */
nentry = toc->toc_nentry;
pg_read_barrier();
/* Now search for a matching entry. */
for (i = 0; i < nentry; ++i)
+ {
if (toc->toc_entry[i].key == key)
return ((char *) toc) + toc->toc_entry[i].offset;
+ }
/* No matching entry was found. */
+ if (!noError)
+ elog(ERROR, "could not find key " UINT64_FORMAT " in shm TOC at %p",
+ key, toc);
return NULL;
}
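
Putting the revised API together, a leader/worker round trip now looks
like this sketch (seg_addr, seg_size, state_size, and both key constants
are hypothetical):

    #define MY_MAGIC  UINT64CONST(0x1234567812345678)
    #define KEY_STATE UINT64CONST(1)

    /* Leader: lay out the segment and register one chunk. */
    shm_toc *toc = shm_toc_create(MY_MAGIC, seg_addr, seg_size);
    void    *state = shm_toc_allocate(toc, state_size);
    shm_toc_insert(toc, KEY_STATE, state);

    /* Worker: attach and look the chunk up.  With noError = false, a
     * missing key is now an elog(ERROR) instead of a silent NULL. */
    shm_toc *wtoc = shm_toc_attach(MY_MAGIC, seg_addr);
    void    *wstate = shm_toc_lookup(wtoc, KEY_STATE, false);
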
diff --git a/src/backend/storage/lmgr/condition_variable.c b/src/backend/storage/lmgr/condition_variable.c
index 5afb21121b..b4b7d28dd5 100644
--- a/src/backend/storage/lmgr/condition_variable.c
+++ b/src/backend/storage/lmgr/condition_variable.c
@@ -68,14 +68,14 @@ ConditionVariablePrepareToSleep(ConditionVariable *cv)
{
cv_wait_event_set = CreateWaitEventSet(TopMemoryContext, 1);
AddWaitEventToSet(cv_wait_event_set, WL_LATCH_SET, PGINVALID_SOCKET,
- &MyProc->procLatch, NULL);
+ MyLatch, NULL);
}
/*
* Reset my latch before adding myself to the queue and before entering
* the caller's predicate loop.
*/
- ResetLatch(&MyProc->procLatch);
+ ResetLatch(MyLatch);
/* Add myself to the wait queue. */
SpinLockAcquire(&cv->mutex);
@@ -135,7 +135,7 @@ ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
WaitEventSetWait(cv_wait_event_set, -1, &event, 1, wait_event_info);
/* Reset latch before testing whether we can return. */
- ResetLatch(&MyProc->procLatch);
+ ResetLatch(MyLatch);
/*
* If this process has been taken out of the wait list, then we know
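
The switch to MyLatch matters because these functions back the standard
wait pattern below, and MyLatch is the latch a process actually waits on
(done_condition and WAIT_EVENT_SOMETHING are placeholders):

    ConditionVariablePrepareToSleep(cv);
    while (!done_condition())
        ConditionVariableSleep(cv, WAIT_EVENT_SOMETHING);
    ConditionVariableCancelSleep();
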
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 30aea14385..38c2e493a5 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -148,7 +148,7 @@
* predicate lock maintenance
* GetSerializableTransactionSnapshot(Snapshot snapshot)
* SetSerializableTransactionSnapshot(Snapshot snapshot,
- * TransactionId sourcexid)
+ * VirtualTransactionId *sourcevxid)
* RegisterPredicateLockingXid(void)
* PredicateLockRelation(Relation relation, Snapshot snapshot)
* PredicateLockPage(Relation relation, BlockNumber blkno,
@@ -434,7 +434,8 @@ static uint32 predicatelock_hash(const void *key, Size keysize);
static void SummarizeOldestCommittedSxact(void);
static Snapshot GetSafeSnapshot(Snapshot snapshot);
static Snapshot GetSerializableTransactionSnapshotInt(Snapshot snapshot,
- TransactionId sourcexid);
+ VirtualTransactionId *sourcevxid,
+ int sourcepid);
static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
static bool GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
PREDICATELOCKTARGETTAG *parent);
@@ -1510,7 +1511,7 @@ GetSafeSnapshot(Snapshot origSnapshot)
* one passed to it, but we avoid assuming that here.
*/
snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
- InvalidTransactionId);
+ NULL, InvalidPid);
if (MySerializableXact == InvalidSerializableXact)
return snapshot; /* no concurrent r/w xacts; it's safe */
@@ -1643,7 +1644,7 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)
return GetSafeSnapshot(snapshot);
return GetSerializableTransactionSnapshotInt(snapshot,
- InvalidTransactionId);
+ NULL, InvalidPid);
}
/*
@@ -1658,7 +1659,8 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)
*/
void
SetSerializableTransactionSnapshot(Snapshot snapshot,
- TransactionId sourcexid)
+ VirtualTransactionId *sourcevxid,
+ int sourcepid)
{
Assert(IsolationIsSerializable());
@@ -1673,7 +1675,8 @@ SetSerializableTransactionSnapshot(Snapshot snapshot,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("a snapshot-importing transaction must not be READ ONLY DEFERRABLE")));
- (void) GetSerializableTransactionSnapshotInt(snapshot, sourcexid);
+ (void) GetSerializableTransactionSnapshotInt(snapshot, sourcevxid,
+ sourcepid);
}
/*
@@ -1687,7 +1690,8 @@ SetSerializableTransactionSnapshot(Snapshot snapshot,
*/
static Snapshot
GetSerializableTransactionSnapshotInt(Snapshot snapshot,
- TransactionId sourcexid)
+ VirtualTransactionId *sourcevxid,
+ int sourcepid)
{
PGPROC *proc;
VirtualTransactionId vxid;
@@ -1741,17 +1745,17 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
} while (!sxact);
/* Get the snapshot, or check that it's safe to use */
- if (!TransactionIdIsValid(sourcexid))
+ if (!sourcevxid)
snapshot = GetSnapshotData(snapshot, false);
- else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcexid))
+ else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcevxid))
{
ReleasePredXact(sxact);
LWLockRelease(SerializableXactHashLock);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not import the requested snapshot"),
- errdetail("The source transaction %u is not running anymore.",
- sourcexid)));
+ errdetail("The source process with pid %d is not running anymore.",
+ sourcepid)));
}
/*
@@ -2841,7 +2845,7 @@ exit:
/* We shouldn't run out of memory if we're moving locks */
Assert(!outOfShmem);
- /* Put the scrach entry back */
+ /* Put the scratch entry back */
RestoreScratchTarget(false);
}
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index fdf045a45b..1b53d651cd 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -902,8 +902,8 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
offset != MAXALIGN(offset))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item pointer: offset = %u, size = %u",
- offset, (unsigned int) size)));
+ errmsg("corrupted item pointer: offset = %u, length = %u",
+ offset, (unsigned int) size)));
if (nextitm < nitems && offnum == itemnos[nextitm])
{
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 3cc070a34b..8c7beaf201 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -55,6 +55,8 @@
#include "pg_getopt.h"
#include "postmaster/autovacuum.h"
#include "postmaster/postmaster.h"
+#include "replication/logicallauncher.h"
+#include "replication/logicalworker.h"
#include "replication/slot.h"
#include "replication/walsender.h"
#include "rewrite/rewriteHandler.h"
@@ -141,13 +143,6 @@ char *register_stack_base_ptr = NULL;
#endif
/*
- * Flag to mark SIGHUP. Whenever the main loop comes around it
- * will reread the configuration file. (Better than doing the
- * reading in the signal handler, ey?)
- */
-static volatile sig_atomic_t got_SIGHUP = false;
-
-/*
* Flag to keep track of whether we have started a transaction.
* For extended query protocol this has to be remembered across messages.
*/
@@ -205,7 +200,6 @@ static bool IsTransactionExitStmt(Node *parsetree);
static bool IsTransactionExitStmtList(List *pstmts);
static bool IsTransactionStmtList(List *pstmts);
static void drop_unnamed_stmt(void);
-static void SigHupHandler(SIGNAL_ARGS);
static void log_disconnections(int code, Datum arg);
@@ -3098,13 +3092,19 @@ FloatExceptionHandler(SIGNAL_ARGS)
"invalid operation, such as division by zero.")));
}
-/* SIGHUP: set flag to re-read config file at next convenient time */
-static void
-SigHupHandler(SIGNAL_ARGS)
+/*
+ * SIGHUP: set flag to re-read config file at next convenient time.
+ *
+ * Sets the ConfigReloadPending flag, which should be checked at convenient
+ * places inside main loops. (Better than doing the reading in the signal
+ * handler, ey?)
+ */
+void
+PostgresSigHupHandler(SIGNAL_ARGS)
{
int save_errno = errno;
- got_SIGHUP = true;
+ ConfigReloadPending = true;
SetLatch(MyLatch);
errno = save_errno;
@@ -3260,6 +3260,18 @@ ProcessInterrupts(void)
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
errmsg("terminating autovacuum process due to administrator command")));
+ else if (IsLogicalWorker())
+ ereport(FATAL,
+ (errcode(ERRCODE_ADMIN_SHUTDOWN),
+ errmsg("terminating logical replication worker due to administrator command")));
+ else if (IsLogicalLauncher())
+ {
+ ereport(DEBUG1,
+ (errmsg("logical replication launcher shutting down")));
+
+ /* The logical replication launcher can be stopped at any time. */
+ proc_exit(0);
+ }
else if (RecoveryConflictPending && RecoveryConflictRetryable)
{
pgstat_report_recovery_conflict(RecoveryConflictReason);
@@ -4114,8 +4126,8 @@ PostgresMain(int argc, char *argv[],
WalSndSignals();
else
{
- pqsignal(SIGHUP, SigHupHandler); /* set flag to read config
- * file */
+ pqsignal(SIGHUP, PostgresSigHupHandler); /* set flag to read
+ * config file */
pqsignal(SIGINT, StatementCancelHandler); /* cancel current query */
pqsignal(SIGTERM, die); /* cancel current query and exit */
@@ -4620,9 +4632,9 @@ PostgresMain(int argc, char *argv[],
* (6) check for any other interesting events that happened while we
* slept.
*/
- if (got_SIGHUP)
+ if (ConfigReloadPending)
{
- got_SIGHUP = false;
+ ConfigReloadPending = false;
ProcessConfigFile(PGC_SIGHUP);
}
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 632d51f3ac..d3eba7bcdf 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -2019,6 +2019,7 @@ ProcessUtilitySlow(ParseState *pstate,
InvalidOid, /* no predefined OID */
false, /* is_alter_table */
true, /* check_rights */
+ true, /* check_not_in_use */
false, /* skip_build */
false); /* quiet */
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 47371ab7cb..0f99b613f5 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -2008,7 +2008,7 @@ json_object_agg_transfn(PG_FUNCTION_ARGS)
if (arg_type == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("could not determine data type for argument 1")));
+ errmsg("could not determine data type for argument %d", 1)));
json_categorize_type(arg_type, &state->key_category,
&state->key_output_func);
@@ -2018,7 +2018,7 @@ json_object_agg_transfn(PG_FUNCTION_ARGS)
if (arg_type == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("could not determine data type for argument 2")));
+ errmsg("could not determine data type for argument %d", 2)));
json_categorize_type(arg_type, &state->val_category,
&state->val_output_func);
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 1dabfa9fc1..c588ce00af 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -1212,7 +1212,7 @@ jsonb_build_object(PG_FUNCTION_ARGS)
if (val_type == InvalidOid || val_type == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument %d: could not determine data type", i + 1)));
+ errmsg("could not determine data type for argument %d", i + 1)));
add_jsonb(arg, false, &result, val_type, true);
@@ -1235,7 +1235,7 @@ jsonb_build_object(PG_FUNCTION_ARGS)
if (val_type == InvalidOid || val_type == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument %d: could not determine data type", i + 2)));
+ errmsg("could not determine data type for argument %d", i + 2)));
add_jsonb(arg, PG_ARGISNULL(i + 1), &result, val_type, false);
}
@@ -1295,7 +1295,7 @@ jsonb_build_array(PG_FUNCTION_ARGS)
if (val_type == InvalidOid || val_type == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument %d: could not determine data type", i + 1)));
+ errmsg("could not determine data type for argument %d", i + 1)));
add_jsonb(arg, PG_ARGISNULL(i), &result, val_type, false);
}
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 2820dbe465..f5631e512e 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1879,7 +1879,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
heap_close(relation, AccessShareLock);
return NULL;
}
- elog(ERROR, "cache lookup failed for constraint %u", constraintId);
+ elog(ERROR, "could not find tuple for constraint %u", constraintId);
}
conForm = (Form_pg_constraint) GETSTRUCT(tup);
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 94b2de5e2d..2967f179ad 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -154,12 +154,13 @@
get_relation_stats_hook_type get_relation_stats_hook = NULL;
get_index_stats_hook_type get_index_stats_hook = NULL;
+static double eqsel_internal(PG_FUNCTION_ARGS, bool negate);
static double var_eq_const(VariableStatData *vardata, Oid operator,
Datum constval, bool constisnull,
- bool varonleft);
+ bool varonleft, bool negate);
static double var_eq_non_const(VariableStatData *vardata, Oid operator,
Node *other,
- bool varonleft);
+ bool varonleft, bool negate);
static double ineq_histogram_selectivity(PlannerInfo *root,
VariableStatData *vardata,
FmgrInfo *opproc, bool isgt,
@@ -227,6 +228,15 @@ static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
Datum
eqsel(PG_FUNCTION_ARGS)
{
+ PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, false));
+}
+
+/*
+ * Common code for eqsel() and neqsel()
+ */
+static double
+eqsel_internal(PG_FUNCTION_ARGS, bool negate)
+{
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Oid operator = PG_GETARG_OID(1);
List *args = (List *) PG_GETARG_POINTER(2);
@@ -237,12 +247,26 @@ eqsel(PG_FUNCTION_ARGS)
double selec;
/*
+ * When asked about <>, we do the estimation using the corresponding =
+ * operator, then convert to <> via "1.0 - eq_selectivity - nullfrac".
+ */
+ if (negate)
+ {
+ operator = get_negator(operator);
+ if (!OidIsValid(operator))
+ {
+ /* Use default selectivity (should we raise an error instead?) */
+ return 1.0 - DEFAULT_EQ_SEL;
+ }
+ }
+
+ /*
* If expression is not variable = something or something = variable, then
* punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
- PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
+ return negate ? (1.0 - DEFAULT_EQ_SEL) : DEFAULT_EQ_SEL;
/*
* We can do a lot better if the something is a constant. (Note: the
@@ -253,14 +277,14 @@ eqsel(PG_FUNCTION_ARGS)
selec = var_eq_const(&vardata, operator,
((Const *) other)->constvalue,
((Const *) other)->constisnull,
- varonleft);
+ varonleft, negate);
else
selec = var_eq_non_const(&vardata, operator, other,
- varonleft);
+ varonleft, negate);
ReleaseVariableStats(vardata);
- PG_RETURN_FLOAT8((float8) selec);
+ return selec;
}
/*
@@ -271,20 +295,33 @@ eqsel(PG_FUNCTION_ARGS)
static double
var_eq_const(VariableStatData *vardata, Oid operator,
Datum constval, bool constisnull,
- bool varonleft)
+ bool varonleft, bool negate)
{
double selec;
+ double nullfrac = 0.0;
bool isdefault;
Oid opfuncoid;
/*
* If the constant is NULL, assume operator is strict and return zero, ie,
- * operator will never return TRUE.
+ * operator will never return TRUE. (It's zero even for a negator op.)
*/
if (constisnull)
return 0.0;
/*
+ * Grab the nullfrac for use below. Note we allow use of nullfrac
+ * regardless of security check.
+ */
+ if (HeapTupleIsValid(vardata->statsTuple))
+ {
+ Form_pg_statistic stats;
+
+ stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
+ nullfrac = stats->stanullfrac;
+ }
+
+ /*
* If we matched the var to a unique index or DISTINCT clause, assume
* there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
@@ -292,19 +329,17 @@ var_eq_const(VariableStatData *vardata, Oid operator,
* ignoring the information.)
*/
if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
- return 1.0 / vardata->rel->tuples;
-
- if (HeapTupleIsValid(vardata->statsTuple) &&
- statistic_proc_security_check(vardata,
- (opfuncoid = get_opcode(operator))))
{
- Form_pg_statistic stats;
+ selec = 1.0 / vardata->rel->tuples;
+ }
+ else if (HeapTupleIsValid(vardata->statsTuple) &&
+ statistic_proc_security_check(vardata,
+ (opfuncoid = get_opcode(operator))))
+ {
AttStatsSlot sslot;
bool match = false;
int i;
- stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
-
/*
* Is the constant "=" to any of the column's most common values?
* (Although the given operator may not really be "=", we will assume
@@ -363,7 +398,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
for (i = 0; i < sslot.nnumbers; i++)
sumcommon += sslot.numbers[i];
- selec = 1.0 - sumcommon - stats->stanullfrac;
+ selec = 1.0 - sumcommon - nullfrac;
CLAMP_PROBABILITY(selec);
/*
@@ -396,6 +431,10 @@ var_eq_const(VariableStatData *vardata, Oid operator,
selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
}
+ /* now adjust if we wanted <> rather than = */
+ if (negate)
+ selec = 1.0 - selec - nullfrac;
+
/* result should be in range, but make sure... */
CLAMP_PROBABILITY(selec);
@@ -408,12 +447,24 @@ var_eq_const(VariableStatData *vardata, Oid operator,
static double
var_eq_non_const(VariableStatData *vardata, Oid operator,
Node *other,
- bool varonleft)
+ bool varonleft, bool negate)
{
double selec;
+ double nullfrac = 0.0;
bool isdefault;
/*
+ * Grab the nullfrac for use below.
+ */
+ if (HeapTupleIsValid(vardata->statsTuple))
+ {
+ Form_pg_statistic stats;
+
+ stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
+ nullfrac = stats->stanullfrac;
+ }
+
+ /*
* If we matched the var to a unique index or DISTINCT clause, assume
* there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
@@ -421,16 +472,14 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* ignoring the information.)
*/
if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
- return 1.0 / vardata->rel->tuples;
-
- if (HeapTupleIsValid(vardata->statsTuple))
{
- Form_pg_statistic stats;
+ selec = 1.0 / vardata->rel->tuples;
+ }
+ else if (HeapTupleIsValid(vardata->statsTuple))
+ {
double ndistinct;
AttStatsSlot sslot;
- stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
-
/*
* Search is for a value that we do not know a priori, but we will
* assume it is not NULL. Estimate the selectivity as non-null
@@ -441,7 +490,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* values, regardless of their frequency in the table. Is that a good
* idea?)
*/
- selec = 1.0 - stats->stanullfrac;
+ selec = 1.0 - nullfrac;
ndistinct = get_variable_numdistinct(vardata, &isdefault);
if (ndistinct > 1)
selec /= ndistinct;
@@ -469,6 +518,10 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
}
+ /* now adjust if we wanted <> rather than = */
+ if (negate)
+ selec = 1.0 - selec - nullfrac;
+
/* result should be in range, but make sure... */
CLAMP_PROBABILITY(selec);
@@ -485,33 +538,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
Datum
neqsel(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Oid operator = PG_GETARG_OID(1);
- List *args = (List *) PG_GETARG_POINTER(2);
- int varRelid = PG_GETARG_INT32(3);
- Oid eqop;
- float8 result;
-
- /*
- * We want 1 - eqsel() where the equality operator is the one associated
- * with this != operator, that is, its negator.
- */
- eqop = get_negator(operator);
- if (eqop)
- {
- result = DatumGetFloat8(DirectFunctionCall4(eqsel,
- PointerGetDatum(root),
- ObjectIdGetDatum(eqop),
- PointerGetDatum(args),
- Int32GetDatum(varRelid)));
- }
- else
- {
- /* Use default selectivity (should we raise an error instead?) */
- result = DEFAULT_EQ_SEL;
- }
- result = 1.0 - result;
- PG_RETURN_FLOAT8(result);
+ PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, true));
}
/*
@@ -1114,6 +1141,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
Const *patt;
Const *prefix = NULL;
Selectivity rest_selec = 0;
+ double nullfrac = 0.0;
double result;
/*
@@ -1203,6 +1231,17 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
}
/*
+ * Grab the nullfrac for use below.
+ */
+ if (HeapTupleIsValid(vardata.statsTuple))
+ {
+ Form_pg_statistic stats;
+
+ stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
+ nullfrac = stats->stanullfrac;
+ }
+
+ /*
* Pull out any fixed prefix implied by the pattern, and estimate the
* fractional selectivity of the remainder of the pattern. Unlike many of
* the other functions in this file, we use the pattern operator's actual
@@ -1252,7 +1291,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
if (eqopr == InvalidOid)
elog(ERROR, "no = operator for opfamily %u", opfamily);
result = var_eq_const(&vardata, eqopr, prefix->constvalue,
- false, true);
+ false, true, false);
}
else
{
@@ -1275,8 +1314,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
Selectivity selec;
int hist_size;
FmgrInfo opproc;
- double nullfrac,
- mcv_selec,
+ double mcv_selec,
sumcommon;
/* Try to use the histogram entries to get selectivity */
@@ -1328,11 +1366,6 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
&sumcommon);
- if (HeapTupleIsValid(vardata.statsTuple))
- nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
- else
- nullfrac = 0.0;
-
/*
* Now merge the results from the MCV and histogram calculations,
* realizing that the histogram covers only the non-null values that
@@ -1340,12 +1373,16 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
*/
selec *= 1.0 - nullfrac - sumcommon;
selec += mcv_selec;
-
- /* result should be in range, but make sure... */
- CLAMP_PROBABILITY(selec);
result = selec;
}
+ /* now adjust if we wanted not-match rather than match */
+ if (negate)
+ result = 1.0 - result - nullfrac;
+
+ /* result should be in range, but make sure... */
+ CLAMP_PROBABILITY(result);
+
if (prefix)
{
pfree(DatumGetPointer(prefix->constvalue));
@@ -1354,7 +1391,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
ReleaseVariableStats(vardata);
- return negate ? (1.0 - result) : result;
+ return result;
}
/*
@@ -1451,7 +1488,7 @@ boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
* compute the selectivity as if that is what we have.
*/
selec = var_eq_const(&vardata, BooleanEqualOperator,
- BoolGetDatum(true), false, true);
+ BoolGetDatum(true), false, true, false);
}
else if (is_funcclause(arg))
{
@@ -5793,7 +5830,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
if (cmpopr == InvalidOid)
elog(ERROR, "no = operator for opfamily %u", opfamily);
eq_sel = var_eq_const(vardata, cmpopr, prefixcon->constvalue,
- false, true);
+ false, true, false);
prefixsel = Max(prefixsel, eq_sel);
@@ -6639,7 +6676,7 @@ add_predicate_to_quals(IndexOptInfo *index, List *indexQuals)
Node *predQual = (Node *) lfirst(lc);
List *oneQual = list_make1(predQual);
- if (!predicate_implied_by(oneQual, indexQuals))
+ if (!predicate_implied_by(oneQual, indexQuals, false))
predExtraQuals = list_concat(predExtraQuals, oneQual);
}
/* list_concat avoids modifying the passed-in indexQuals list */
@@ -7524,7 +7561,7 @@ gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Node *predQual = (Node *) lfirst(l);
List *oneQual = list_make1(predQual);
- if (!predicate_implied_by(oneQual, indexQuals))
+ if (!predicate_implied_by(oneQual, indexQuals, false))
predExtraQuals = list_concat(predExtraQuals, oneQual);
}
/* list_concat avoids modifying the passed-in indexQuals list */
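
The refactoring above centralizes one identity: with null fraction
nullfrac, rows partition into NULL, equal, and not-equal groups, so
selec(<>) = 1.0 - selec(=) - nullfrac. A worked example with assumed
statistics:

    /* Assume stanullfrac = 0.10 and the MCV list says 25% of all rows
     * equal the constant.  NULL <> c evaluates to NULL, not true, so:
     *
     *   selec(=)  = 0.25
     *   selec(<>) = 1.0 - 0.25 - 0.10 = 0.65
     *
     * The old neqsel() returned 1.0 - 0.25 = 0.75, silently counting
     * the NULL rows as matches. */
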
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index f7f85b53db..4b30e6bc62 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -71,7 +71,7 @@ InvalidateAttoptCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
/*
* InitializeAttoptCache
- * Initialize the tablespace cache.
+ * Initialize the attribute options cache.
*/
static void
InitializeAttoptCache(void)
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index 54ddc55f76..6faf4ae354 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -68,7 +68,7 @@ EventCacheLookup(EventTriggerEvent event)
if (EventTriggerCacheState != ETCS_VALID)
BuildEventTriggerCache();
entry = hash_search(EventTriggerCache, &event, HASH_FIND, NULL);
- return entry != NULL ? entry->triggerlist : NULL;
+ return entry != NULL ? entry->triggerlist : NIL;
}
/*
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index f18dbb31b0..e244faac0e 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -680,30 +680,30 @@ static const struct cachedesc cacheinfo[] = {
},
128
},
- {RangeRelationId, /* RANGETYPE */
- RangeTypidIndexId,
+ {PublicationRelationId, /* PUBLICATIONNAME */
+ PublicationNameIndexId,
1,
{
- Anum_pg_range_rngtypid,
+ Anum_pg_publication_pubname,
0,
0,
0
},
- 4
+ 8
},
- {RelationRelationId, /* RELNAMENSP */
- ClassNameNspIndexId,
- 2,
+ {PublicationRelationId, /* PUBLICATIONOID */
+ PublicationObjectIndexId,
+ 1,
{
- Anum_pg_class_relname,
- Anum_pg_class_relnamespace,
+ ObjectIdAttributeNumber,
+ 0,
0,
0
},
- 128
+ 8
},
- {RelationRelationId, /* RELOID */
- ClassOidIndexId,
+ {PublicationRelRelationId, /* PUBLICATIONREL */
+ PublicationRelObjectIndexId,
1,
{
ObjectIdAttributeNumber,
@@ -711,73 +711,73 @@ static const struct cachedesc cacheinfo[] = {
0,
0
},
- 128
+ 64
},
- {ReplicationOriginRelationId, /* REPLORIGIDENT */
- ReplicationOriginIdentIndex,
- 1,
+ {PublicationRelRelationId, /* PUBLICATIONRELMAP */
+ PublicationRelPrrelidPrpubidIndexId,
+ 2,
{
- Anum_pg_replication_origin_roident,
- 0,
+ Anum_pg_publication_rel_prrelid,
+ Anum_pg_publication_rel_prpubid,
0,
0
},
- 16
+ 64
},
- {ReplicationOriginRelationId, /* REPLORIGNAME */
- ReplicationOriginNameIndex,
+ {RangeRelationId, /* RANGETYPE */
+ RangeTypidIndexId,
1,
{
- Anum_pg_replication_origin_roname,
+ Anum_pg_range_rngtypid,
0,
0,
0
},
- 16
+ 4
},
- {PublicationRelationId, /* PUBLICATIONOID */
- PublicationObjectIndexId,
- 1,
+ {RelationRelationId, /* RELNAMENSP */
+ ClassNameNspIndexId,
+ 2,
{
- ObjectIdAttributeNumber,
- 0,
+ Anum_pg_class_relname,
+ Anum_pg_class_relnamespace,
0,
0
},
- 8
+ 128
},
- {PublicationRelationId, /* PUBLICATIONNAME */
- PublicationNameIndexId,
+ {RelationRelationId, /* RELOID */
+ ClassOidIndexId,
1,
{
- Anum_pg_publication_pubname,
+ ObjectIdAttributeNumber,
0,
0,
0
},
- 8
+ 128
},
- {PublicationRelRelationId, /* PUBLICATIONREL */
- PublicationRelObjectIndexId,
+ {ReplicationOriginRelationId, /* REPLORIGIDENT */
+ ReplicationOriginIdentIndex,
1,
{
- ObjectIdAttributeNumber,
+ Anum_pg_replication_origin_roident,
0,
0,
0
},
- 64
+ 16
},
- {PublicationRelRelationId, /* PUBLICATIONRELMAP */
- PublicationRelPrrelidPrpubidIndexId,
- 2,
+ {ReplicationOriginRelationId, /* REPLORIGNAME */
+ ReplicationOriginNameIndex,
+ 1,
{
- Anum_pg_publication_rel_prrelid,
- Anum_pg_publication_rel_prpubid,
+ Anum_pg_replication_origin_roname,
+ 0,
0,
0
},
- 64
+ 16
},
{RewriteRelationId, /* RULERELNAME */
RewriteRelRulenameIndexId,
@@ -834,23 +834,23 @@ static const struct cachedesc cacheinfo[] = {
},
128
},
- {SubscriptionRelationId, /* SUBSCRIPTIONOID */
- SubscriptionObjectIndexId,
- 1,
+ {SubscriptionRelationId, /* SUBSCRIPTIONNAME */
+ SubscriptionNameIndexId,
+ 2,
{
- ObjectIdAttributeNumber,
- 0,
+ Anum_pg_subscription_subdbid,
+ Anum_pg_subscription_subname,
0,
0
},
4
},
- {SubscriptionRelationId, /* SUBSCRIPTIONNAME */
- SubscriptionNameIndexId,
- 2,
+ {SubscriptionRelationId, /* SUBSCRIPTIONOID */
+ SubscriptionObjectIndexId,
+ 1,
{
- Anum_pg_subscription_subdbid,
- Anum_pg_subscription_subname,
+ ObjectIdAttributeNumber,
+ 0,
0,
0
},
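
No cache definitions change in this hunk; the entries are only re-sorted.
The array is order-sensitive, though, which is why each block moves as a
unit:

    /* Invariant preserved here: cacheinfo[] must stay in the same order
     * as the SysCacheIdentifier enum in syscache.h, which is kept
     * alphabetical; each entry's index oid, key columns, and bucket
     * count travel together (e.g. PUBLICATIONNAME keeps its 8 buckets). */
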
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index b0ec4a2d27..a91faf4b71 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -32,6 +32,7 @@ volatile bool QueryCancelPending = false;
volatile bool ProcDiePending = false;
volatile bool ClientConnectionLost = false;
volatile bool IdleInTransactionSessionTimeoutPending = false;
+volatile sig_atomic_t ConfigReloadPending = false;
volatile uint32 InterruptHoldoffCount = 0;
volatile uint32 QueryCancelHoldoffCount = 0;
volatile uint32 CritSectionCount = 0;
diff --git a/src/backend/utils/misc/pg_rusage.c b/src/backend/utils/misc/pg_rusage.c
index e4dccc383a..98fa7ea9a8 100644
--- a/src/backend/utils/misc/pg_rusage.c
+++ b/src/backend/utils/misc/pg_rusage.c
@@ -61,7 +61,7 @@ pg_rusage_show(const PGRUsage *ru0)
}
snprintf(result, sizeof(result),
- "CPU: user: %d.%02d s, system: %d.%02d s, elapsed: %d.%02d s",
+ _("CPU: user: %d.%02d s, system: %d.%02d s, elapsed: %d.%02d s"),
(int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec),
(int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
(int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec),
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index cd07343fd4..ee88e94a93 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -84,7 +84,7 @@
#ssl_key_file = 'server.key'
#ssl_ca_file = ''
#ssl_crl_file = ''
-#password_encryption = md5 # md5, scram-sha-256 or plain
+#password_encryption = md5 # md5, scram-sha-256, or plain
#db_user_namespace = off
#row_security = on
@@ -162,7 +162,7 @@
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
-#max_parallel_workers = 8 # maximum number of max_worker_processes that
+#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel queries
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
@@ -249,7 +249,7 @@
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
- # method to choose sync standbys, number of sync standbys
+ # method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index f89d635162..687222fc54 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -59,6 +59,7 @@
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/sinval.h"
+#include "storage/sinvaladt.h"
#include "storage/spin.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
@@ -214,11 +215,15 @@ static Snapshot FirstXactSnapshot = NULL;
/* Define pathname of exported-snapshot files */
#define SNAPSHOT_EXPORT_DIR "pg_snapshots"
-#define XactExportFilePath(path, xid, num, suffix) \
- snprintf(path, sizeof(path), SNAPSHOT_EXPORT_DIR "/%08X-%d%s", \
- xid, num, suffix)
-/* Current xact's exported snapshots (a list of Snapshot structs) */
+/* Structure holding info about exported snapshot. */
+typedef struct ExportedSnapshot
+{
+ char *snapfile;
+ Snapshot snapshot;
+} ExportedSnapshot;
+
+/* Current xact's exported snapshots (a list of ExportedSnapshot structs) */
static List *exportedSnapshots = NIL;
/* Prototypes for local functions */
@@ -587,8 +592,8 @@ SnapshotSetCommandId(CommandId curcid)
* in GetTransactionSnapshot.
*/
static void
-SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid,
- PGPROC *sourceproc)
+SetTransactionSnapshot(Snapshot sourcesnap, VirtualTransactionId *sourcevxid,
+ int sourcepid, PGPROC *sourceproc)
{
/* Caller should have checked this already */
Assert(!FirstSnapshotSet);
@@ -646,12 +651,12 @@ SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid,
errmsg("could not import the requested snapshot"),
errdetail("The source transaction is not running anymore.")));
}
- else if (!ProcArrayInstallImportedXmin(CurrentSnapshot->xmin, sourcexid))
+ else if (!ProcArrayInstallImportedXmin(CurrentSnapshot->xmin, sourcevxid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not import the requested snapshot"),
- errdetail("The source transaction %u is not running anymore.",
- sourcexid)));
+ errdetail("The source process with pid %d is not running anymore.",
+ sourcepid)));
/*
* In transaction-snapshot mode, the first snapshot must live until end of
@@ -661,7 +666,8 @@ SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid,
if (IsolationUsesXactSnapshot())
{
if (IsolationIsSerializable())
- SetSerializableTransactionSnapshot(CurrentSnapshot, sourcexid);
+ SetSerializableTransactionSnapshot(CurrentSnapshot, sourcevxid,
+ sourcepid);
/* Make a saved copy */
CurrentSnapshot = CopySnapshot(CurrentSnapshot);
FirstXactSnapshot = CurrentSnapshot;
@@ -1121,33 +1127,29 @@ AtEOXact_Snapshot(bool isCommit, bool resetXmin)
*/
if (exportedSnapshots != NIL)
{
- TransactionId myxid = GetTopTransactionId();
- int i;
- char buf[MAXPGPATH];
ListCell *lc;
/*
* Get rid of the files. Unlink failure is only a WARNING because (1)
* it's too late to abort the transaction, and (2) leaving a leaked
* file around has little real consequence anyway.
- */
- for (i = 1; i <= list_length(exportedSnapshots); i++)
- {
- XactExportFilePath(buf, myxid, i, "");
- if (unlink(buf))
- elog(WARNING, "could not unlink file \"%s\": %m", buf);
- }
-
- /*
- * As with the FirstXactSnapshot, we needn't spend any effort on
- * cleaning up the per-snapshot data structures, but we do need to
- * remove them from RegisteredSnapshots to prevent a warning below.
+ *
+ * We also need to remove the snapshots from RegisteredSnapshots
+ * to prevent a warning below.
+ *
+ * As with the FirstXactSnapshot, we don't need to free resources of
+ * the snapshot itself as it will go away with the memory context.
*/
foreach(lc, exportedSnapshots)
{
- Snapshot snap = (Snapshot) lfirst(lc);
+ ExportedSnapshot *esnap = (ExportedSnapshot *) lfirst(lc);
- pairingheap_remove(&RegisteredSnapshots, &snap->ph_node);
+ if (unlink(esnap->snapfile))
+ elog(WARNING, "could not unlink file \"%s\": %m",
+ esnap->snapfile);
+
+ pairingheap_remove(&RegisteredSnapshots,
+ &esnap->snapshot->ph_node);
}
exportedSnapshots = NIL;
@@ -1205,6 +1207,7 @@ ExportSnapshot(Snapshot snapshot)
{
TransactionId topXid;
TransactionId *children;
+ ExportedSnapshot *esnap;
int nchildren;
int addTopXid;
StringInfoData buf;
@@ -1229,9 +1232,9 @@ ExportSnapshot(Snapshot snapshot)
*/
/*
- * This will assign a transaction ID if we do not yet have one.
+ * Get our transaction ID if there is one, to include in the snapshot.
*/
- topXid = GetTopTransactionId();
+ topXid = GetTopTransactionIdIfAny();
/*
* We cannot export a snapshot from a subtransaction because there's no
@@ -1251,6 +1254,13 @@ ExportSnapshot(Snapshot snapshot)
nchildren = xactGetCommittedChildren(&children);
/*
+ * Generate a file path for the snapshot. Snapshots exported within a
+ * transaction are numbered starting at 1.
+ */
+ snprintf(path, sizeof(path), SNAPSHOT_EXPORT_DIR "/%08X-%08X-%d",
+ MyProc->backendId, MyProc->lxid, list_length(exportedSnapshots) + 1);
+
+ /*
* Copy the snapshot into TopTransactionContext, add it to the
* exportedSnapshots list, and mark it pseudo-registered. We do this to
* ensure that the snapshot's xmin is honored for the rest of the
@@ -1259,7 +1269,10 @@ ExportSnapshot(Snapshot snapshot)
snapshot = CopySnapshot(snapshot);
oldcxt = MemoryContextSwitchTo(TopTransactionContext);
- exportedSnapshots = lappend(exportedSnapshots, snapshot);
+ esnap = (ExportedSnapshot *) palloc(sizeof(ExportedSnapshot));
+ esnap->snapfile = pstrdup(path);
+ esnap->snapshot = snapshot;
+ exportedSnapshots = lappend(exportedSnapshots, esnap);
MemoryContextSwitchTo(oldcxt);
snapshot->regd_count++;
@@ -1272,7 +1285,8 @@ ExportSnapshot(Snapshot snapshot)
*/
initStringInfo(&buf);
- appendStringInfo(&buf, "xid:%u\n", topXid);
+ appendStringInfo(&buf, "vxid:%d/%u\n", MyProc->backendId, MyProc->lxid);
+ appendStringInfo(&buf, "pid:%d\n", MyProcPid);
appendStringInfo(&buf, "dbid:%u\n", MyDatabaseId);
appendStringInfo(&buf, "iso:%d\n", XactIsoLevel);
appendStringInfo(&buf, "ro:%d\n", XactReadOnly);
@@ -1291,7 +1305,8 @@ ExportSnapshot(Snapshot snapshot)
* xmax. (We need not make the same check for subxip[] members, see
* snapshot.h.)
*/
- addTopXid = TransactionIdPrecedes(topXid, snapshot->xmax) ? 1 : 0;
+ addTopXid = (TransactionIdIsValid(topXid) &&
+ TransactionIdPrecedes(topXid, snapshot->xmax)) ? 1 : 0;
appendStringInfo(&buf, "xcnt:%d\n", snapshot->xcnt + addTopXid);
for (i = 0; i < snapshot->xcnt; i++)
appendStringInfo(&buf, "xip:%u\n", snapshot->xip[i]);
@@ -1322,7 +1337,7 @@ ExportSnapshot(Snapshot snapshot)
* ensures that no other backend can read an incomplete file
* (ImportSnapshot won't allow it because of its valid-characters check).
*/
- XactExportFilePath(pathtmp, topXid, list_length(exportedSnapshots), ".tmp");
+ snprintf(pathtmp, sizeof(pathtmp), "%s.tmp", path);
if (!(f = AllocateFile(pathtmp, PG_BINARY_W)))
ereport(ERROR,
(errcode_for_file_access(),
@@ -1344,8 +1359,6 @@ ExportSnapshot(Snapshot snapshot)
* Now that we have written everything into a .tmp file, rename the file
* to remove the .tmp suffix.
*/
- XactExportFilePath(path, topXid, list_length(exportedSnapshots), "");
-
if (rename(pathtmp, path) < 0)
ereport(ERROR,
(errcode_for_file_access(),
@@ -1430,6 +1443,30 @@ parseXidFromText(const char *prefix, char **s, const char *filename)
return val;
}
+static void
+parseVxidFromText(const char *prefix, char **s, const char *filename,
+ VirtualTransactionId *vxid)
+{
+ char *ptr = *s;
+ int prefixlen = strlen(prefix);
+
+ if (strncmp(ptr, prefix, prefixlen) != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("invalid snapshot data in file \"%s\"", filename)));
+ ptr += prefixlen;
+ if (sscanf(ptr, "%d/%u", &vxid->backendId, &vxid->localTransactionId) != 2)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("invalid snapshot data in file \"%s\"", filename)));
+ ptr = strchr(ptr, '\n');
+ if (!ptr)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("invalid snapshot data in file \"%s\"", filename)));
+ *s = ptr + 1;
+}
+
/*
* ImportSnapshot
* Import a previously exported snapshot. The argument should be a
@@ -1445,7 +1482,8 @@ ImportSnapshot(const char *idstr)
char *filebuf;
int xcnt;
int i;
- TransactionId src_xid;
+ VirtualTransactionId src_vxid;
+ int src_pid;
Oid src_dbid;
int src_isolevel;
bool src_readonly;
@@ -1509,7 +1547,8 @@ ImportSnapshot(const char *idstr)
*/
memset(&snapshot, 0, sizeof(snapshot));
- src_xid = parseXidFromText("xid:", &filebuf, path);
+ parseVxidFromText("vxid:", &filebuf, path, &src_vxid);
+ src_pid = parseIntFromText("pid:", &filebuf, path);
/* we abuse parseXidFromText a bit here ... */
src_dbid = parseXidFromText("dbid:", &filebuf, path);
src_isolevel = parseIntFromText("iso:", &filebuf, path);
@@ -1559,7 +1598,7 @@ ImportSnapshot(const char *idstr)
* don't trouble to check the array elements, just the most critical
* fields.
*/
- if (!TransactionIdIsNormal(src_xid) ||
+ if (!VirtualTransactionIdIsValid(src_vxid) ||
!OidIsValid(src_dbid) ||
!TransactionIdIsNormal(snapshot.xmin) ||
!TransactionIdIsNormal(snapshot.xmax))
@@ -1600,7 +1639,7 @@ ImportSnapshot(const char *idstr)
errmsg("cannot import a snapshot from a different database")));
/* OK, install the snapshot */
- SetTransactionSnapshot(&snapshot, src_xid, NULL);
+ SetTransactionSnapshot(&snapshot, &src_vxid, src_pid, NULL);
}
/*
@@ -2187,5 +2226,5 @@ RestoreSnapshot(char *start_address)
void
RestoreTransactionSnapshot(Snapshot snapshot, void *master_pgproc)
{
- SetTransactionSnapshot(snapshot, InvalidTransactionId, master_pgproc);
+ SetTransactionSnapshot(snapshot, NULL, InvalidPid, master_pgproc);
}
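
Because the exported file is now keyed by virtual transaction id,
ExportSnapshot() can use GetTopTransactionIdIfAny() and no longer forces
an xid assignment, so read-only transactions can export snapshots too.
A sketch of the resulting on-disk artifact (values illustrative):

    /* pg_snapshots/<backendId>-<lxid>-<n>, the first two fields printed
     * with %08X, now begins with a vxid and the exporting backend's pid
     * instead of "xid:%u":
     *
     *   vxid:3/12345
     *   pid:27740
     *   dbid:16384
     *   iso:3
     *   ro:0
     *   ...
     */
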
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 432c282b52..54d27dc658 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -493,7 +493,7 @@ LogStreamerMain(logstreamer_param *param)
stream.replication_slot = replication_slot;
stream.temp_slot = param->temp_slot;
if (stream.temp_slot && !stream.replication_slot)
- stream.replication_slot = psprintf("pg_basebackup_%d", (int) getpid());
+ stream.replication_slot = psprintf("pg_basebackup_%d", (int) PQbackendPID(param->bgconn));
if (format == 'p')
stream.walmethod = CreateWalDirectoryMethod(param->xlog, 0, do_sync);
@@ -2183,7 +2183,7 @@ main(int argc, char **argv)
else
{
fprintf(stderr,
- _("%s: invalid wal-method option \"%s\", must be \"fetch\", \"stream\" or \"none\"\n"),
+ _("%s: invalid wal-method option \"%s\", must be \"fetch\", \"stream\", or \"none\"\n"),
progname, optarg);
exit(1);
}
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index 6b081bd737..5f7412e9d5 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -81,12 +81,12 @@ usage(void)
printf(_(" --drop-slot drop the replication slot (for the slot's name see --slot)\n"));
printf(_(" --start start streaming in a replication slot (for the slot's name see --slot)\n"));
printf(_("\nOptions:\n"));
+ printf(_(" -E, --endpos=LSN exit after receiving the specified LSN\n"));
printf(_(" -f, --file=FILE receive log into this file, - for stdout\n"));
printf(_(" -F --fsync-interval=SECS\n"
" time between fsyncs to the output file (default: %d)\n"), (fsync_interval / 1000));
printf(_(" --if-not-exists do not error if slot already exists when creating a slot\n"));
printf(_(" -I, --startpos=LSN where in an existing slot should the streaming start\n"));
- printf(_(" -E, --endpos=LSN exit after receiving the specified LSN\n"));
printf(_(" -n, --no-loop do not loop on connection lost\n"));
printf(_(" -o, --option=NAME[=VALUE]\n"
" pass option NAME with optional value VALUE to the\n"
@@ -725,7 +725,7 @@ main(int argc, char **argv)
}
}
- while ((c = getopt_long(argc, argv, "f:F:nvd:h:p:U:wWI:E:o:P:s:S:",
+ while ((c = getopt_long(argc, argv, "E:f:F:nvd:h:p:U:wWI:o:P:s:S:",
long_options, &option_index)) != -1)
{
switch (c)
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index cf730da283..52aa274bb8 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -629,7 +629,7 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream)
* server had sent us half of a WAL record, when it was promoted.
* The new timeline will begin at the end of the last complete
* record in that case, overlapping the partial WAL record on the
- * the old timeline.
+ * old timeline.
*/
uint32 newtimeline;
bool parsed;
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index a95a2f5fb3..8829ceacc8 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -8042,7 +8042,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
tbinfo->attstattarget = (int *) pg_malloc(ntups * sizeof(int));
tbinfo->attstorage = (char *) pg_malloc(ntups * sizeof(char));
tbinfo->typstorage = (char *) pg_malloc(ntups * sizeof(char));
- tbinfo->attidentity = (char *) pg_malloc(ntups * sizeof(bool));
+ tbinfo->attidentity = (char *) pg_malloc(ntups * sizeof(char));
tbinfo->attisdropped = (bool *) pg_malloc(ntups * sizeof(bool));
tbinfo->attlen = (int *) pg_malloc(ntups * sizeof(int));
tbinfo->attalign = (char *) pg_malloc(ntups * sizeof(char));
@@ -13162,6 +13162,9 @@ dumpCollation(Archive *fout, CollInfo *collinfo)
appendPQExpBufferStr(q, "libc");
else if (collprovider[0] == 'i')
appendPQExpBufferStr(q, "icu");
+ else if (collprovider[0] == 'd')
+ /* to allow dumping pg_catalog; not accepted on input */
+ appendPQExpBufferStr(q, "default");
else
exit_horribly(NULL,
"unrecognized collation provider: %s\n",
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 9534134e61..fbd6342de5 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -140,11 +140,11 @@ main(int argc, char *argv[])
{"role", required_argument, NULL, 3},
{"use-set-session-authorization", no_argument, &use_setsessauth, 1},
{"no-publications", no_argument, &no_publications, 1},
+ {"no-role-passwords", no_argument, &no_role_passwords, 1},
{"no-security-labels", no_argument, &no_security_labels, 1},
{"no-subscriptions", no_argument, &no_subscriptions, 1},
{"no-sync", no_argument, NULL, 4},
{"no-unlogged-table-data", no_argument, &no_unlogged_table_data, 1},
- {"no-role-passwords", no_argument, &no_role_passwords, 1},
#ifdef PGXC
{"dump-nodes", no_argument, &dump_nodes, 1},
{"include-nodes", no_argument, &include_nodes, 1},
@@ -625,12 +625,12 @@ help(void)
printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
printf(_(" --no-publications do not dump publications\n"));
+ printf(_(" --no-role-passwords do not dump passwords for roles\n"));
printf(_(" --no-security-labels do not dump security label assignments\n"));
printf(_(" --no-subscriptions do not dump subscriptions\n"));
printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
- printf(_(" --no-role-passwords do not dump passwords for roles\n"));
printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
diff --git a/src/bin/pg_upgrade/.gitignore b/src/bin/pg_upgrade/.gitignore
index d24ec60184..6fb644de7a 100644
--- a/src/bin/pg_upgrade/.gitignore
+++ b/src/bin/pg_upgrade/.gitignore
@@ -4,5 +4,7 @@
/delete_old_cluster.sh
/analyze_new_cluster.bat
/delete_old_cluster.bat
+/reindex_hash.sql
+/loadable_libraries.txt
/log/
/tmp_check/
diff --git a/src/bin/pg_upgrade/Makefile b/src/bin/pg_upgrade/Makefile
index 8823288708..d252e08d37 100644
--- a/src/bin/pg_upgrade/Makefile
+++ b/src/bin/pg_upgrade/Makefile
@@ -32,12 +32,12 @@ uninstall:
clean distclean maintainer-clean:
rm -f pg_upgrade$(X) $(OBJS)
rm -rf analyze_new_cluster.sh delete_old_cluster.sh log/ tmp_check/ \
+ loadable_libraries.txt reindex_hash.sql \
pg_upgrade_dump_globals.sql \
pg_upgrade_dump_*.custom pg_upgrade_*.log
check: test.sh all
MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) EXTRA_REGRESS_OPTS="$(EXTRA_REGRESS_OPTS)" $(SHELL) $< --install
-# disabled because it upsets the build farm
-#installcheck: test.sh
-# MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) $(SHELL) $<
+# installcheck is not supported because there's no meaningful way to test
+# pg_upgrade against a single already-running server
diff --git a/src/bin/pg_upgrade/TESTING b/src/bin/pg_upgrade/TESTING
index 4ecfc5798e..6831f679f6 100644
--- a/src/bin/pg_upgrade/TESTING
+++ b/src/bin/pg_upgrade/TESTING
@@ -1,3 +1,30 @@
+THE SHORT VERSION
+-----------------
+
+On non-Windows machines, you can execute the testing process
+described below by running
+ make check
+in this directory. This will run the shell script test.sh, performing
+an upgrade from the version in this source tree to a new instance of
+the same version.
+
+To test an upgrade from a different version, you must have a built
+source tree for the old version as well as this version, and you
+must have done "make install" for both versions. Then do:
+
+export oldsrc=...somewhere/postgresql (old version's source tree)
+export oldbindir=...otherversion/bin (old version's installed bin dir)
+export bindir=...thisversion/bin (this version's installed bin dir)
+export libdir=...thisversion/lib (this version's installed lib dir)
+sh test.sh
+
+In this case, you will have to manually eyeball the resulting dump
+diff for version-specific differences, as explained below.
+
+
+DETAILS
+-------
+
The most effective way to test pg_upgrade, aside from testing on user
data, is by upgrading the PostgreSQL regression database.
@@ -7,7 +34,7 @@ specific to each major version of Postgres.
Here are the steps needed to create a regression database dump file:
-1) Create and populate the regression database in the old cluster
+1) Create and populate the regression database in the old cluster.
This database can be created by running 'make installcheck' from
src/test/regression.
@@ -60,22 +87,3 @@ steps:
7) Diff the regression database dump file with the regression dump
file loaded into the old server.
-
-The shell script test.sh in this directory performs more or less this
-procedure. You can invoke it by running
-
- make check
-
-or by running
-
- make installcheck
-
-if "make install" (or "make install-world") were done beforehand.
-When invoked without arguments, it will run an upgrade from the
-version in this source tree to a new instance of the same version. To
-test an upgrade from a different version, invoke it like this:
-
- make installcheck oldbindir=...otherversion/bin oldsrc=...somewhere/postgresql
-
-In this case, you will have to manually eyeball the resulting dump
-diff for version-specific differences, as explained above.
diff --git a/src/bin/pg_upgrade/test.sh b/src/bin/pg_upgrade/test.sh
index 841da034b0..f4556341f3 100644
--- a/src/bin/pg_upgrade/test.sh
+++ b/src/bin/pg_upgrade/test.sh
@@ -170,18 +170,32 @@ createdb "$dbname2" || createdb_status=$?
createdb "$dbname3" || createdb_status=$?
if "$MAKE" -C "$oldsrc" installcheck; then
- pg_dumpall --no-sync -f "$temp_root"/dump1.sql || pg_dumpall1_status=$?
+ oldpgversion=`psql -X -A -t -d regression -c "SHOW server_version_num"`
+
+ # before dumping, get rid of objects not existing in later versions
if [ "$newsrc" != "$oldsrc" ]; then
- oldpgversion=`psql -X -A -t -d regression -c "SHOW server_version_num"`
fix_sql=""
case $oldpgversion in
804??)
- fix_sql="UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%'; DROP FUNCTION public.myfunc(integer);"
+ fix_sql="DROP FUNCTION public.myfunc(integer); DROP FUNCTION public.oldstyle_length(integer, text);"
;;
- 900??)
- fix_sql="SET bytea_output TO escape; UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%';"
+ *)
+ fix_sql="DROP FUNCTION public.oldstyle_length(integer, text);"
+ ;;
+ esac
+ psql -X -d regression -c "$fix_sql;" || psql_fix_sql_status=$?
+ fi
+
+ pg_dumpall --no-sync -f "$temp_root"/dump1.sql || pg_dumpall1_status=$?
+
+ if [ "$newsrc" != "$oldsrc" ]; then
+ # update references to old source tree's regress.so etc
+ fix_sql=""
+ case $oldpgversion in
+ 804??)
+ fix_sql="UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%';"
;;
- 901??)
+ *)
fix_sql="UPDATE pg_proc SET probin = replace(probin, '$oldsrc', '$newsrc') WHERE probin LIKE '$oldsrc%';"
;;
esac
diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index 6bc27f4be3..2578d4b692 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -363,23 +363,13 @@ XLogDumpReadPage(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
}
/*
- * Store per-rmgr and per-record statistics for a given record.
+ * Calculate the size of a record, split into !FPI and FPI parts.
*/
static void
-XLogDumpCountRecord(XLogDumpConfig *config, XLogDumpStats *stats,
- XLogReaderState *record)
+XLogDumpRecordLen(XLogReaderState *record, uint32 *rec_len, uint32 *fpi_len)
{
- RmgrId rmid;
- uint8 recid;
- uint32 rec_len;
- uint32 fpi_len;
int block_id;
- stats->count++;
-
- rmid = XLogRecGetRmid(record);
- rec_len = XLogRecGetDataLen(record) + SizeOfXLogRecord;
-
/*
* Calculate the amount of FPI data in the record.
*
@@ -387,13 +377,38 @@ XLogDumpCountRecord(XLogDumpConfig *config, XLogDumpStats *stats,
* bimg_len indicating the length of FPI data. It doesn't seem worth it to
* add an accessor macro for this.
*/
- fpi_len = 0;
+ *fpi_len = 0;
for (block_id = 0; block_id <= record->max_block_id; block_id++)
{
if (XLogRecHasBlockImage(record, block_id))
- fpi_len += record->blocks[block_id].bimg_len;
+ *fpi_len += record->blocks[block_id].bimg_len;
}
+ /*
+ * Calculate the length of the record as the total length - the length of
+ * all the block images.
+ */
+ *rec_len = XLogRecGetTotalLen(record) - *fpi_len;
+}
+
+/*
+ * Store per-rmgr and per-record statistics for a given record.
+ */
+static void
+XLogDumpCountRecord(XLogDumpConfig *config, XLogDumpStats *stats,
+ XLogReaderState *record)
+{
+ RmgrId rmid;
+ uint8 recid;
+ uint32 rec_len;
+ uint32 fpi_len;
+
+ stats->count++;
+
+ rmid = XLogRecGetRmid(record);
+
+ XLogDumpRecordLen(record, &rec_len, &fpi_len);
+
/* Update per-rmgr statistics */
stats->rmgr_stats[rmid].count++;
@@ -422,6 +437,8 @@ XLogDumpDisplayRecord(XLogDumpConfig *config, XLogReaderState *record)
{
const char *id;
const RmgrDescData *desc = &RmgrDescTable[XLogRecGetRmid(record)];
+ uint32 rec_len;
+ uint32 fpi_len;
RelFileNode rnode;
ForkNumber forknum;
BlockNumber blk;
@@ -429,13 +446,15 @@ XLogDumpDisplayRecord(XLogDumpConfig *config, XLogReaderState *record)
uint8 info = XLogRecGetInfo(record);
XLogRecPtr xl_prev = XLogRecGetPrev(record);
+ XLogDumpRecordLen(record, &rec_len, &fpi_len);
+
id = desc->rm_identify(info);
if (id == NULL)
id = psprintf("UNKNOWN (%x)", info & ~XLR_INFO_MASK);
printf("rmgr: %-11s len (rec/tot): %6u/%6u, tx: %10u, lsn: %X/%08X, prev %X/%08X, ",
desc->rm_name,
- XLogRecGetDataLen(record), XLogRecGetTotalLen(record),
+ rec_len, XLogRecGetTotalLen(record),
XLogRecGetXid(record),
(uint32) (record->ReadRecPtr >> 32), (uint32) record->ReadRecPtr,
(uint32) (xl_prev >> 32), (uint32) xl_prev);
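
The pg_waldump.c refactoring above centralizes the length split in
XLogDumpRecordLen(): fpi_len is the sum of the block-image lengths, and
rec_len is the record's total length minus fpi_len. A standalone sketch of
that arithmetic with made-up lengths (not real WAL data):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t total_len = 8192;          /* total record length */
        uint32_t bimg_len[] = {6000, 1500}; /* per-block image sizes */
        uint32_t fpi_len = 0;
        uint32_t rec_len;
        int      block_id;

        /* fpi_len accumulates every block image in the record */
        for (block_id = 0; block_id < 2; block_id++)
            fpi_len += bimg_len[block_id];

        /* whatever remains of the total is the !FPI part */
        rec_len = total_len - fpi_len;
        printf("len (rec/fpi): %u/%u\n", rec_len, fpi_len); /* 692/7500 */
        return 0;
    }
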
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index b3b89b43d2..fe5df0fa22 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -522,10 +522,10 @@ usage(void)
" -T, --time=NUM duration of benchmark test in seconds\n"
" -v, --vacuum-all vacuum all four standard tables before tests\n"
" --aggregate-interval=NUM aggregate data over NUM seconds\n"
- " --progress-timestamp use Unix epoch timestamps for progress\n"
- " --sampling-rate=NUM fraction of transactions to log (e.g., 0.01 for 1%%)\n"
" --log-prefix=PREFIX prefix for transaction time log file\n"
" (default: \"pgbench_log\")\n"
+ " --progress-timestamp use Unix epoch timestamps for progress\n"
+ " --sampling-rate=NUM fraction of transactions to log (e.g., 0.01 for 1%%)\n"
"\nCommon options:\n"
" -d, --debug print debugging output\n"
" -h, --host=HOSTNAME database server host or socket directory\n"
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 61c3c63c31..69fc7e6ba0 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -1732,7 +1732,7 @@ describeOneTableDetails(const char *schemaname,
headers[cols++] = gettext_noop("Definition");
if (tableinfo.relkind == RELKIND_FOREIGN_TABLE && pset.sversion >= 90200)
- headers[cols++] = gettext_noop("FDW Options");
+ headers[cols++] = gettext_noop("FDW options");
if (verbose)
{
@@ -2543,10 +2543,9 @@ describeOneTableDetails(const char *schemaname,
{
printfPQExpBuffer(&buf,
"SELECT pub.pubname\n"
- " FROM pg_catalog.pg_publication pub\n"
- " LEFT JOIN pg_catalog.pg_publication_rel pr\n"
- " ON (pr.prpubid = pub.oid)\n"
- "WHERE pr.prrelid = '%s' OR pub.puballtables\n"
+ " FROM pg_catalog.pg_publication pub,\n"
+ " pg_catalog.pg_get_publication_tables(pub.pubname)\n"
+ "WHERE relid = '%s'\n"
"ORDER BY 1;",
oid);
@@ -2793,7 +2792,7 @@ describeOneTableDetails(const char *schemaname,
ftoptions = PQgetvalue(result, 0, 1);
if (ftoptions && ftoptions[0] != '\0')
{
- printfPQExpBuffer(&buf, _("FDW Options: (%s)"), ftoptions);
+ printfPQExpBuffer(&buf, _("FDW options: (%s)"), ftoptions);
printTableAddFooter(&cont, buf.data);
}
PQclear(result);
@@ -3531,12 +3530,12 @@ listLanguages(const char *pattern, bool verbose, bool showSystem)
",\n NOT l.lanispl AS \"%s\",\n"
" l.lanplcallfoid::regprocedure AS \"%s\",\n"
" l.lanvalidator::regprocedure AS \"%s\",\n ",
- gettext_noop("Internal Language"),
- gettext_noop("Call Handler"),
+ gettext_noop("Internal language"),
+ gettext_noop("Call handler"),
gettext_noop("Validator"));
if (pset.sversion >= 90000)
appendPQExpBuffer(&buf, "l.laninline::regprocedure AS \"%s\",\n ",
- gettext_noop("Inline Handler"));
+ gettext_noop("Inline handler"));
printACLColumn(&buf, "l.lanacl");
}
@@ -4670,7 +4669,7 @@ listForeignDataWrappers(const char *pattern, bool verbose)
" quote_literal(option_value) FROM "
" pg_options_to_table(fdwoptions)), ', ') || ')' "
" END AS \"%s\"",
- gettext_noop("FDW Options"));
+ gettext_noop("FDW options"));
if (pset.sversion >= 90100)
appendPQExpBuffer(&buf,
@@ -4754,7 +4753,7 @@ listForeignServers(const char *pattern, bool verbose)
" d.description AS \"%s\"",
gettext_noop("Type"),
gettext_noop("Version"),
- gettext_noop("FDW Options"),
+ gettext_noop("FDW options"),
gettext_noop("Description"));
}
@@ -4825,7 +4824,7 @@ listUserMappings(const char *pattern, bool verbose)
" quote_literal(option_value) FROM "
" pg_options_to_table(umoptions)), ', ') || ')' "
" END AS \"%s\"",
- gettext_noop("FDW Options"));
+ gettext_noop("FDW options"));
appendPQExpBufferStr(&buf, "\nFROM pg_catalog.pg_user_mappings um\n");
@@ -4889,7 +4888,7 @@ listForeignTables(const char *pattern, bool verbose)
" pg_options_to_table(ftoptions)), ', ') || ')' "
" END AS \"%s\",\n"
" d.description AS \"%s\"",
- gettext_noop("FDW Options"),
+ gettext_noop("FDW options"),
gettext_noop("Description"));
appendPQExpBufferStr(&buf,
@@ -5075,7 +5074,7 @@ listOneExtensionContents(const char *extname, const char *oid)
"FROM pg_catalog.pg_depend\n"
"WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND refobjid = '%s' AND deptype = 'e'\n"
"ORDER BY 1;",
- gettext_noop("Object Description"),
+ gettext_noop("Object description"),
oid);
res = PSQLexec(buf.data);
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index ac435220e6..f097b06594 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -171,13 +171,13 @@ slashUsage(unsigned short int pager)
fprintf(output, _("General\n"));
fprintf(output, _(" \\copyright show PostgreSQL usage and distribution terms\n"));
+ fprintf(output, _(" \\crosstabview [COLUMNS] execute query and display results in crosstab\n"));
fprintf(output, _(" \\errverbose show most recent error message at maximum verbosity\n"));
fprintf(output, _(" \\g [FILE] or ; execute query (and send results to file or |pipe)\n"));
- fprintf(output, _(" \\gx [FILE] as \\g, but forces expanded output mode\n"));
fprintf(output, _(" \\gexec execute query, then execute each value in its result\n"));
fprintf(output, _(" \\gset [PREFIX] execute query and store results in psql variables\n"));
+ fprintf(output, _(" \\gx [FILE] as \\g, but forces expanded output mode\n"));
fprintf(output, _(" \\q quit psql\n"));
- fprintf(output, _(" \\crosstabview [COLUMNS] execute query and display results in crosstab\n"));
fprintf(output, _(" \\watch [SEC] execute query every SEC seconds\n"));
fprintf(output, "\n");
@@ -227,8 +227,9 @@ slashUsage(unsigned short int pager)
fprintf(output, _(" \\dc[S+] [PATTERN] list conversions\n"));
fprintf(output, _(" \\dC[+] [PATTERN] list casts\n"));
fprintf(output, _(" \\dd[S] [PATTERN] show object descriptions not displayed elsewhere\n"));
- fprintf(output, _(" \\ddp [PATTERN] list default privileges\n"));
fprintf(output, _(" \\dD[S+] [PATTERN] list domains\n"));
+ fprintf(output, _(" \\ddp [PATTERN] list default privileges\n"));
+ fprintf(output, _(" \\dE[S+] [PATTERN] list foreign tables\n"));
fprintf(output, _(" \\det[+] [PATTERN] list foreign tables\n"));
fprintf(output, _(" \\des[+] [PATTERN] list foreign servers\n"));
fprintf(output, _(" \\deu[+] [PATTERN] list user mappings\n"));
@@ -255,7 +256,6 @@ slashUsage(unsigned short int pager)
fprintf(output, _(" \\dT[S+] [PATTERN] list data types\n"));
fprintf(output, _(" \\du[S+] [PATTERN] list roles\n"));
fprintf(output, _(" \\dv[S+] [PATTERN] list views\n"));
- fprintf(output, _(" \\dE[S+] [PATTERN] list foreign tables\n"));
fprintf(output, _(" \\dx[+] [PATTERN] list extensions\n"));
fprintf(output, _(" \\dy [PATTERN] list event triggers\n"));
fprintf(output, _(" \\l[+] [PATTERN] list databases\n"));
@@ -289,9 +289,9 @@ slashUsage(unsigned short int pager)
else
fprintf(output, _(" \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n"
" connect to new database (currently no connection)\n"));
+ fprintf(output, _(" \\conninfo display information about current connection\n"));
fprintf(output, _(" \\encoding [ENCODING] show or set client encoding\n"));
fprintf(output, _(" \\password [USERNAME] securely change the password for a user\n"));
- fprintf(output, _(" \\conninfo display information about current connection\n"));
fprintf(output, "\n");
fprintf(output, _("Operating System\n"));
@@ -413,10 +413,10 @@ helpVariables(unsigned short int pager)
fprintf(output, _(" PGAPPNAME same as the application_name connection parameter\n"));
fprintf(output, _(" PGDATABASE same as the dbname connection parameter\n"));
fprintf(output, _(" PGHOST same as the host connection parameter\n"));
- fprintf(output, _(" PGPORT same as the port connection parameter\n"));
- fprintf(output, _(" PGUSER same as the user connection parameter\n"));
fprintf(output, _(" PGPASSWORD connection password (not recommended)\n"));
fprintf(output, _(" PGPASSFILE password file name\n"));
+ fprintf(output, _(" PGPORT same as the port connection parameter\n"));
+ fprintf(output, _(" PGUSER same as the user connection parameter\n"));
fprintf(output, _(" PSQL_EDITOR, EDITOR, VISUAL\n"
" editor used by the \\e, \\ef, and \\ev commands\n"));
fprintf(output, _(" PSQL_EDITOR_LINENUMBER_ARG\n"
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 04e6a21bb3..e1f33175e2 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -1436,19 +1436,33 @@ psql_completion(const char *text, int start, int end)
/* psql's backslash commands. */
static const char *const backslash_commands[] = {
- "\\a", "\\connect", "\\conninfo", "\\C", "\\cd", "\\copy",
+ "\\a",
+ "\\connect", "\\conninfo", "\\C", "\\cd", "\\copy",
"\\copyright", "\\crosstabview",
"\\d", "\\da", "\\dA", "\\db", "\\dc", "\\dC", "\\dd", "\\ddp", "\\dD",
"\\des", "\\det", "\\deu", "\\dew", "\\dE", "\\df",
"\\dF", "\\dFd", "\\dFp", "\\dFt", "\\dg", "\\di", "\\dl", "\\dL",
- "\\dm", "\\dn", "\\do", "\\dO", "\\dp", "\\drds", "\\ds", "\\dS",
+ "\\dm", "\\dn", "\\do", "\\dO", "\\dp",
+ "\\drds", "\\dRs", "\\dRp", "\\ds", "\\dS",
"\\dt", "\\dT", "\\dv", "\\du", "\\dx", "\\dy",
- "\\e", "\\echo", "\\ef", "\\encoding", "\\errverbose", "\\ev",
- "\\f", "\\g", "\\gexec", "\\gset", "\\gx", "\\h", "\\help", "\\H",
- "\\i", "\\ir", "\\l", "\\lo_import", "\\lo_export", "\\lo_list",
- "\\lo_unlink", "\\o", "\\p", "\\password", "\\prompt", "\\pset", "\\q",
- "\\qecho", "\\r", "\\s", "\\set", "\\setenv", "\\sf", "\\sv", "\\t",
- "\\T", "\\timing", "\\unset", "\\x", "\\w", "\\watch", "\\z", "\\!",
+ "\\e", "\\echo", "\\ef", "\\elif", "\\else", "\\encoding",
+ "\\endif", "\\errverbose", "\\ev",
+ "\\f",
+ "\\g", "\\gexec", "\\gset", "\\gx",
+ "\\h", "\\help", "\\H",
+ "\\i", "\\if", "\\ir",
+ "\\l", "\\lo_import", "\\lo_export", "\\lo_list", "\\lo_unlink",
+ "\\o",
+ "\\p", "\\password", "\\prompt", "\\pset",
+ "\\q", "\\qecho",
+ "\\r",
+ "\\s", "\\set", "\\setenv", "\\sf", "\\sv",
+ "\\t", "\\T", "\\timing",
+ "\\unset",
+ "\\x",
+ "\\w", "\\watch",
+ "\\z",
+ "\\!", "\\?",
NULL
};
@@ -1592,6 +1606,18 @@ psql_completion(const char *text, int start, int end)
{
/* complete with nothing here as this refers to remote publications */
}
+ /* ALTER SUBSCRIPTION <name> SET PUBLICATION <name> */
+ else if (HeadMatches3("ALTER", "SUBSCRIPTION", MatchAny) &&
+ TailMatches3("SET", "PUBLICATION", MatchAny))
+ {
+ COMPLETE_WITH_CONST("WITH (");
+ }
+ /* ALTER SUBSCRIPTION <name> SET PUBLICATION <name> WITH ( */
+ else if (HeadMatches3("ALTER", "SUBSCRIPTION", MatchAny) &&
+ TailMatches5("SET", "PUBLICATION", MatchAny, "WITH", "("))
+ {
+ COMPLETE_WITH_LIST2("copy_data", "refresh");
+ }
/* ALTER SCHEMA <name> */
else if (Matches3("ALTER", "SCHEMA", MatchAny))
COMPLETE_WITH_LIST2("OWNER TO", "RENAME TO");
diff --git a/src/include/Makefile b/src/include/Makefile
index 6afa3cfe25..ea3b9245c0 100644
--- a/src/include/Makefile
+++ b/src/include/Makefile
@@ -20,7 +20,7 @@ all: pg_config.h pg_config_ext.h pg_config_os.h
SUBDIRS = access bootstrap catalog commands common datatype \
executor fe_utils foreign \
lib libpq mb nodes optimizer parser pgxc postmaster regex replication \
- rewrite storage tcop snowball snowball/libstemmer tsearch \
+ rewrite statistics storage tcop snowball snowball/libstemmer tsearch \
tsearch/dicts utils port port/atomics port/win32 port/win32_msvc \
port/win32_msvc/sys port/win32/arpa port/win32/netinet \
port/win32/sys portability gtm
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 06fff799af..8b2ec21509 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -53,6 +53,6 @@
*/
/* yyyymmddN */
-#define CATALOG_VERSION_NO 201705141
+#define CATALOG_VERSION_NO 201706141
#endif
diff --git a/src/include/catalog/pg_collation.h b/src/include/catalog/pg_collation.h
index 1e44ce0949..901c0b5115 100644
--- a/src/include/catalog/pg_collation.h
+++ b/src/include/catalog/pg_collation.h
@@ -70,13 +70,13 @@ typedef FormData_pg_collation *Form_pg_collation;
* ----------------
*/
-DATA(insert OID = 100 ( default PGNSP PGUID d -1 "" "" 0 ));
+DATA(insert OID = 100 ( default PGNSP PGUID d -1 "" "" _null_ ));
DESCR("database's default collation");
#define DEFAULT_COLLATION_OID 100
-DATA(insert OID = 950 ( C PGNSP PGUID c -1 "C" "C" 0 ));
+DATA(insert OID = 950 ( C PGNSP PGUID c -1 "C" "C" _null_ ));
DESCR("standard C collation");
#define C_COLLATION_OID 950
-DATA(insert OID = 951 ( POSIX PGNSP PGUID c -1 "POSIX" "POSIX" 0 ));
+DATA(insert OID = 951 ( POSIX PGNSP PGUID c -1 "POSIX" "POSIX" _null_ ));
DESCR("standard POSIX collation");
#define POSIX_COLLATION_OID 951
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index e06ed6cc77..cafdb8990a 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -4872,9 +4872,9 @@ DATA(insert OID = 4209 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f s s
DESCR("transform jsonb to tsvector");
DATA(insert OID = 4210 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f s s 1 0 3614 "114" _null_ _null_ _null_ _null_ _null_ json_to_tsvector _null_ _null_ _null_ ));
DESCR("transform json to tsvector");
-DATA(insert OID = 4211 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f s s 2 0 3614 "3734 3802" _null_ _null_ _null_ _null_ _null_ jsonb_to_tsvector_byid _null_ _null_ _null_ ));
+DATA(insert OID = 4211 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f i s 2 0 3614 "3734 3802" _null_ _null_ _null_ _null_ _null_ jsonb_to_tsvector_byid _null_ _null_ _null_ ));
DESCR("transform jsonb to tsvector");
-DATA(insert OID = 4212 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f s s 2 0 3614 "3734 114" _null_ _null_ _null_ _null_ _null_ json_to_tsvector_byid _null_ _null_ _null_ ));
+DATA(insert OID = 4212 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f i s 2 0 3614 "3734 114" _null_ _null_ _null_ _null_ _null_ json_to_tsvector_byid _null_ _null_ _null_ ));
DESCR("transform json to tsvector");
DATA(insert OID = 3752 ( tsvector_update_trigger PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ tsvector_update_trigger_byid _null_ _null_ _null_ ));
diff --git a/src/include/catalog/pg_subscription_rel.h b/src/include/catalog/pg_subscription_rel.h
index 391f96b76e..f5f6191676 100644
--- a/src/include/catalog/pg_subscription_rel.h
+++ b/src/include/catalog/pg_subscription_rel.h
@@ -71,7 +71,7 @@ typedef struct SubscriptionRelState
} SubscriptionRelState;
extern Oid SetSubscriptionRelState(Oid subid, Oid relid, char state,
- XLogRecPtr sublsn);
+ XLogRecPtr sublsn, bool update_only);
extern char GetSubscriptionRelState(Oid subid, Oid relid,
XLogRecPtr *sublsn, bool missing_ok);
extern void RemoveSubscriptionRel(Oid subid, Oid relid);
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index 79f3be36e4..5dd14b43d3 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -27,6 +27,7 @@ extern ObjectAddress DefineIndex(Oid relationId,
Oid indexRelationId,
bool is_alter_table,
bool check_rights,
+ bool check_not_in_use,
bool skip_build,
bool quiet);
extern Oid ReindexIndex(RangeVar *indexRelation, int options);
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index a35addf636..47dd0bc4ee 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -214,12 +214,12 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
/* supporting zero sized hashes would complicate matters */
size = Max(newsize, 2);
- /* round up size to the next power of 2, that's the bucketing works */
+ /* round up size to the next power of 2, that's how bucketing works */
size = sh_pow2(size);
Assert(size <= SH_MAX_SIZE);
/*
- * Verify allocation of ->data is possible on platform, without
+ * Verify that allocation of ->data is possible on this platform, without
* overflowing Size.
*/
if ((((uint64) sizeof(SH_ELEMENT_TYPE)) * size) >= MaxAllocHugeSize)
@@ -234,8 +234,8 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
tb->sizemask = tb->size - 1;
/*
- * Compute growth threshold here and after growing the table, to make
- * computations during insert cheaper.
+ * Compute the next threshold at which we need to grow the hash table
+ * again.
*/
if (tb->size == SH_MAX_SIZE)
tb->grow_threshold = ((double) tb->size) * SH_MAX_FILLFACTOR;
@@ -696,7 +696,7 @@ SH_DELETE(SH_TYPE * tb, SH_KEY_TYPE key)
* or an element at its optimal position is encountered.
*
* While that sounds expensive, the average chain length is short,
- * and deletions would otherwise require toombstones.
+ * and deletions would otherwise require tombstones.
*/
while (true)
{
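
The simplehash.h comments above rely on the table size being rounded up to a
power of 2, so that sizemask = size - 1 reduces a hash to a bucket index with
a single mask. A standalone sketch, where next_pow2() is an illustrative
stand-in for the real sh_pow2():

    #include <stdint.h>
    #include <stdio.h>

    /* Round up to the next power of 2 (stand-in for sh_pow2). */
    static uint64_t
    next_pow2(uint64_t num)
    {
        uint64_t result = 1;

        while (result < num)
            result <<= 1;
        return result;
    }

    int
    main(void)
    {
        uint64_t size = next_pow2(1000);    /* 1024 */
        uint64_t sizemask = size - 1;       /* low bits all set */
        uint32_t hash = 0xdeadbeef;

        /*
         * With a power-of-2 size, reducing a hash to a bucket is one
         * mask instead of a modulo, which is how the bucketing in the
         * comment fixed above works.
         */
        printf("bucket = %llu\n", (unsigned long long) (hash & sizemask));
        return 0;
    }
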
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 343cbd9692..8ac73dd403 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -24,6 +24,8 @@
#ifndef MISCADMIN_H
#define MISCADMIN_H
+#include <signal.h>
+
#include "pgtime.h" /* for pg_time_t */
@@ -82,6 +84,7 @@ extern PGDLLIMPORT volatile bool InterruptPending;
extern PGDLLIMPORT volatile bool QueryCancelPending;
extern PGDLLIMPORT volatile bool ProcDiePending;
extern PGDLLIMPORT volatile bool IdleInTransactionSessionTimeoutPending;
+extern PGDLLIMPORT volatile sig_atomic_t ConfigReloadPending;
extern volatile bool ClientConnectionLost;
@@ -278,6 +281,8 @@ extern void restore_stack_base(pg_stack_base_t base);
extern void check_stack_depth(void);
extern bool stack_is_too_deep(void);
+extern void PostgresSigHupHandler(SIGNAL_ARGS);
+
/* in tcop/utility.c */
extern void PreventCommandIfReadOnly(const char *cmdname);
extern void PreventCommandIfParallelMode(const char *cmdname);
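
The miscadmin.h hunk above exposes ConfigReloadPending as a volatile
sig_atomic_t, the one type a signal handler can portably write. A standalone
POSIX sketch of that flag pattern (a generic analogue; the handler and flag
names here are hypothetical, not the backend's):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* The handler only sets the flag; the main loop does the work. */
    static volatile sig_atomic_t reload_pending = 0;

    static void
    sighup_handler(int signo)
    {
        (void) signo;
        reload_pending = 1;
    }

    int
    main(void)
    {
        signal(SIGHUP, sighup_handler);

        for (;;)
        {
            if (reload_pending)
            {
                reload_pending = 0;
                /* safe here: we are not inside the signal handler */
                printf("reloading configuration\n");
            }
            pause();            /* wait for the next signal */
        }
    }
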
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 8d4e58ca89..fb3684eb5c 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -957,6 +957,11 @@ typedef struct RangeTblEntry
/*
* Fields valid for a plain relation RTE (else zero):
+ *
+ * As a special case, RTE_NAMEDTUPLESTORE can also set relid to indicate
+ * that the tuple format of the tuplestore is the same as the referenced
+ * relation. This allows plans referencing AFTER trigger transition
+ * tables to be invalidated if the underlying table is altered.
*/
Oid relid; /* OID of the relation */
char relkind; /* relation kind (see pg_class.relkind) */
@@ -1017,16 +1022,23 @@ typedef struct RangeTblEntry
bool self_reference; /* is this a recursive self-reference? */
/*
- * Fields valid for values and CTE RTEs (else NIL):
+ * Fields valid for table functions, values, CTE and ENR RTEs (else NIL):
*
* We need these for CTE RTEs so that the types of self-referential
* columns are well-defined. For VALUES RTEs, storing these explicitly
* saves having to re-determine the info by scanning the values_lists.
+ * For ENRs, we store the types explicitly here (we could get the
+ * information from the catalogs if 'relid' was supplied, but we'd still
+ * need these for TupleDesc-based ENRs, so we might as well always store
+ * the type info here).
*/
List *coltypes; /* OID list of column type OIDs */
List *coltypmods; /* integer list of column typmods */
List *colcollations; /* OID list of column collation OIDs */
+ /*
+ * Fields valid for ENR RTEs (else NULL/zero):
+ */
char *enrname; /* name of ephemeral named relation */
double enrtuples; /* estimated or actual from caller */
@@ -3520,7 +3532,6 @@ typedef enum AlterSubscriptionType
ALTER_SUBSCRIPTION_OPTIONS,
ALTER_SUBSCRIPTION_CONNECTION,
ALTER_SUBSCRIPTION_PUBLICATION,
- ALTER_SUBSCRIPTION_PUBLICATION_REFRESH,
ALTER_SUBSCRIPTION_REFRESH,
ALTER_SUBSCRIPTION_ENABLED
} AlterSubscriptionType;
diff --git a/src/include/optimizer/geqo.h b/src/include/optimizer/geqo.h
index 6b09c4e195..be65c054e1 100644
--- a/src/include/optimizer/geqo.h
+++ b/src/include/optimizer/geqo.h
@@ -31,7 +31,7 @@
#define GEQO_DEBUG
*/
-/* recombination mechanism */
+/* choose one recombination mechanism here */
/*
#define ERX
#define PMX
diff --git a/src/include/optimizer/predtest.h b/src/include/optimizer/predtest.h
index 658a86cc15..748cd35611 100644
--- a/src/include/optimizer/predtest.h
+++ b/src/include/optimizer/predtest.h
@@ -17,9 +17,9 @@
#include "nodes/primnodes.h"
-extern bool predicate_implied_by(List *predicate_list,
- List *restrictinfo_list);
-extern bool predicate_refuted_by(List *predicate_list,
- List *restrictinfo_list);
+extern bool predicate_implied_by(List *predicate_list, List *clause_list,
+ bool clause_is_check);
+extern bool predicate_refuted_by(List *predicate_list, List *clause_list,
+ bool clause_is_check);
#endif /* PREDTEST_H */
diff --git a/src/include/parser/parse_func.h b/src/include/parser/parse_func.h
index e0dad6ac10..c684217e33 100644
--- a/src/include/parser/parse_func.h
+++ b/src/include/parser/parse_func.h
@@ -31,7 +31,7 @@ typedef enum
extern Node *ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
- FuncCall *fn, int location);
+ Node *last_srf, FuncCall *fn, int location);
extern FuncDetailCode func_get_detail(List *funcname,
List *fargs, List *fargnames,
@@ -67,7 +67,8 @@ extern Oid LookupFuncWithArgs(ObjectWithArgs *func,
extern Oid LookupAggWithArgs(ObjectWithArgs *agg,
bool noError);
-extern void check_srf_call_placement(ParseState *pstate, int location);
+extern void check_srf_call_placement(ParseState *pstate, Node *last_srf,
+ int location);
extern void check_pg_get_expr_args(ParseState *pstate, Oid fnoid, List *args);
#endif /* PARSE_FUNC_H */
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 0b54840e29..6a3507f3b1 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -157,6 +157,9 @@ typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param,
* p_hasAggs, p_hasWindowFuncs, etc: true if we've found any of the indicated
* constructs in the query.
*
+ * p_last_srf: the set-returning FuncExpr or OpExpr most recently found in
+ * the query, or NULL if none.
+ *
* p_pre_columnref_hook, etc: optional parser hook functions for modifying the
* interpretation of ColumnRefs and ParamRefs.
*
@@ -199,6 +202,8 @@ struct ParseState
bool p_hasSubLinks;
bool p_hasModifyingCTE;
+ Node *p_last_srf; /* most recent set-returning func/op found */
+
/*
* Optional hook functions for parser callbacks. These are null unless
* set up by the caller of make_parsestate.
diff --git a/src/include/parser/parse_oper.h b/src/include/parser/parse_oper.h
index d783b37f0f..ab3c4aa62f 100644
--- a/src/include/parser/parse_oper.h
+++ b/src/include/parser/parse_oper.h
@@ -59,7 +59,7 @@ extern Oid oprfuncid(Operator op);
/* Build expression tree for an operator invocation */
extern Expr *make_op(ParseState *pstate, List *opname,
- Node *ltree, Node *rtree, int location);
+ Node *ltree, Node *rtree, Node *last_srf, int location);
extern Expr *make_scalar_array_op(ParseState *pstate, List *opname,
bool useOr,
Node *ltree, Node *rtree, int location);
diff --git a/src/include/postgres.h b/src/include/postgres.h
index 87df7844f4..7426a253d2 100644
--- a/src/include/postgres.h
+++ b/src/include/postgres.h
@@ -689,7 +689,7 @@ DatumGetFloat4(Datum X)
float4 retval;
} myunion;
- myunion.value = GET_4_BYTES(X);
+ myunion.value = DatumGetInt32(X);
return myunion.retval;
}
#else
@@ -714,7 +714,7 @@ Float4GetDatum(float4 X)
} myunion;
myunion.value = X;
- return SET_4_BYTES(myunion.retval);
+ return Int32GetDatum(myunion.retval);
}
#else
extern Datum Float4GetDatum(float4 X);
@@ -737,7 +737,7 @@ DatumGetFloat8(Datum X)
float8 retval;
} myunion;
- myunion.value = GET_8_BYTES(X);
+ myunion.value = DatumGetInt64(X);
return myunion.retval;
}
#else
@@ -763,7 +763,7 @@ Float8GetDatum(float8 X)
} myunion;
myunion.value = X;
- return SET_8_BYTES(myunion.retval);
+ return Int64GetDatum(myunion.retval);
}
#else
extern Datum Float8GetDatum(float8 X);
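
The postgres.h changes above only swap the byte-copy macros for the
equivalent Datum conversion functions; the underlying trick is unchanged,
namely moving a float's bit pattern through an integer of the same width via
a union. A standalone sketch of that trick (illustrative, not the backend's
definitions):

    #include <stdint.h>
    #include <stdio.h>

    typedef union
    {
        int32_t value;
        float   retval;
    } float4_union;

    int
    main(void)
    {
        float4_union u;

        u.retval = 3.25f;               /* the Float4GetDatum direction */
        int32_t as_datum = u.value;     /* bits stored in an integer */

        u.value = as_datum;             /* the DatumGetFloat4 direction */
        printf("%f\n", (double) u.retval);  /* prints 3.250000 */
        return 0;
    }
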
diff --git a/src/include/replication/logicallauncher.h b/src/include/replication/logicallauncher.h
index d202a237e7..4f3e89e061 100644
--- a/src/include/replication/logicallauncher.h
+++ b/src/include/replication/logicallauncher.h
@@ -24,4 +24,6 @@ extern void ApplyLauncherShmemInit(void);
extern void ApplyLauncherWakeupAtCommit(void);
extern void AtEOXact_ApplyLauncher(bool isCommit);
+extern bool IsLogicalLauncher(void);
+
#endif /* LOGICALLAUNCHER_H */
diff --git a/src/include/replication/logicalworker.h b/src/include/replication/logicalworker.h
index 3e0affa190..5877a930f6 100644
--- a/src/include/replication/logicalworker.h
+++ b/src/include/replication/logicalworker.h
@@ -14,4 +14,6 @@
extern void ApplyWorkerMain(Datum main_arg);
+extern bool IsLogicalWorker(void);
+
#endif /* LOGICALWORKER_H */
diff --git a/src/include/replication/walsender.h b/src/include/replication/walsender.h
index 99f12377e0..c50e450ec2 100644
--- a/src/include/replication/walsender.h
+++ b/src/include/replication/walsender.h
@@ -44,7 +44,9 @@ extern void WalSndSignals(void);
extern Size WalSndShmemSize(void);
extern void WalSndShmemInit(void);
extern void WalSndWakeup(void);
+extern void WalSndInitStopping(void);
extern void WalSndWaitStopping(void);
+extern void HandleWalSndInitStopping(void);
extern void WalSndRqstFileReload(void);
/*
diff --git a/src/include/replication/worker_internal.h b/src/include/replication/worker_internal.h
index 0654461305..2bfff5c120 100644
--- a/src/include/replication/worker_internal.h
+++ b/src/include/replication/worker_internal.h
@@ -67,8 +67,6 @@ extern Subscription *MySubscription;
extern LogicalRepWorker *MyLogicalRepWorker;
extern bool in_remote_transaction;
-extern volatile sig_atomic_t got_SIGHUP;
-extern volatile sig_atomic_t got_SIGTERM;
extern void logicalrep_worker_attach(int slot);
extern LogicalRepWorker *logicalrep_worker_find(Oid subid, Oid relid,
@@ -81,8 +79,6 @@ extern void logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker);
extern int logicalrep_sync_worker_count(Oid subid);
-extern void logicalrep_worker_sighup(SIGNAL_ARGS);
-extern void logicalrep_worker_sigterm(SIGNAL_ARGS);
extern char *LogicalRepSyncTableStart(XLogRecPtr *origin_startpos);
void process_syncing_tables(XLogRecPtr current_lsn);
void invalidate_syncing_table_states(Datum arg, int cacheid,
diff --git a/src/include/storage/predicate.h b/src/include/storage/predicate.h
index 8f9ea29917..941ba7119e 100644
--- a/src/include/storage/predicate.h
+++ b/src/include/storage/predicate.h
@@ -14,6 +14,7 @@
#ifndef PREDICATE_H
#define PREDICATE_H
+#include "storage/lock.h"
#include "utils/relcache.h"
#include "utils/snapshot.h"
@@ -46,7 +47,8 @@ extern bool PageIsPredicateLocked(Relation relation, BlockNumber blkno);
/* predicate lock maintenance */
extern Snapshot GetSerializableTransactionSnapshot(Snapshot snapshot);
extern void SetSerializableTransactionSnapshot(Snapshot snapshot,
- TransactionId sourcexid);
+ VirtualTransactionId *sourcevxid,
+ int sourcepid);
extern void RegisterPredicateLockingXid(TransactionId xid);
extern void PredicateLockRelation(Relation relation, Snapshot snapshot);
extern void PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot);
diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h
index bc46229b42..7dfe37f881 100644
--- a/src/include/storage/procarray.h
+++ b/src/include/storage/procarray.h
@@ -108,7 +108,7 @@ extern int GetMaxSnapshotSubxidCount(void);
extern Snapshot GetSnapshotData(Snapshot snapshot, bool latest);
extern bool ProcArrayInstallImportedXmin(TransactionId xmin,
- TransactionId sourcexid);
+ VirtualTransactionId *sourcevxid);
extern bool ProcArrayInstallRestoredXmin(TransactionId xmin, PGPROC *proc);
extern void ProcArrayCheckXminConsistency(TransactionId global_xmin);
extern void SetLatestCompletedXid(TransactionId latestCompletedXid);
diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h
index 67cb913829..d58c1bede9 100644
--- a/src/include/storage/procsignal.h
+++ b/src/include/storage/procsignal.h
@@ -46,6 +46,8 @@ typedef enum
PROCSIG_PGXCPOOL_REFRESH, /* refresh local view of connection handles */
#endif
PROCSIG_PARALLEL_MESSAGE, /* message from cooperating parallel backend */
+ PROCSIG_WALSND_INIT_STOPPING, /* ask walsenders to prepare for
+ * shutdown */
/* Recovery conflict reasons */
PROCSIG_RECOVERY_CONFLICT_DATABASE,
diff --git a/src/include/storage/shm_toc.h b/src/include/storage/shm_toc.h
index ae0a3878fe..9175a472d8 100644
--- a/src/include/storage/shm_toc.h
+++ b/src/include/storage/shm_toc.h
@@ -22,9 +22,9 @@
#ifndef SHM_TOC_H
#define SHM_TOC_H
-#include "storage/shmem.h"
+#include "storage/shmem.h" /* for add_size() */
-struct shm_toc;
+/* shm_toc is an opaque type known only within shm_toc.c */
typedef struct shm_toc shm_toc;
extern shm_toc *shm_toc_create(uint64 magic, void *address, Size nbytes);
@@ -32,11 +32,13 @@ extern shm_toc *shm_toc_attach(uint64 magic, void *address);
extern void *shm_toc_allocate(shm_toc *toc, Size nbytes);
extern Size shm_toc_freespace(shm_toc *toc);
extern void shm_toc_insert(shm_toc *toc, uint64 key, void *address);
-extern void *shm_toc_lookup(shm_toc *toc, uint64 key);
+extern void *shm_toc_lookup(shm_toc *toc, uint64 key, bool noError);
/*
* Tools for estimating how large a chunk of shared memory will be needed
- * to store a TOC and its dependent objects.
+ * to store a TOC and its dependent objects. Note: we don't really support
+ * large numbers of keys, but it's convenient to declare number_of_keys
+ * as a Size anyway.
*/
typedef struct
{
@@ -47,11 +49,10 @@ typedef struct
#define shm_toc_initialize_estimator(e) \
((e)->space_for_chunks = 0, (e)->number_of_keys = 0)
#define shm_toc_estimate_chunk(e, sz) \
- ((e)->space_for_chunks = add_size((e)->space_for_chunks, \
- BUFFERALIGN((sz))))
+ ((e)->space_for_chunks = add_size((e)->space_for_chunks, BUFFERALIGN(sz)))
#define shm_toc_estimate_keys(e, cnt) \
- ((e)->number_of_keys = add_size((e)->number_of_keys, (cnt)))
+ ((e)->number_of_keys = add_size((e)->number_of_keys, cnt))
-extern Size shm_toc_estimate(shm_toc_estimator *);
+extern Size shm_toc_estimate(shm_toc_estimator *e);
#endif /* SHM_TOC_H */
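
The shm_toc.h estimator above accumulates buffer-aligned chunk sizes plus a
key count so callers can size a shared-memory segment before creating it. A
standalone analogue, assuming a 128-byte alignment purely for illustration
(PostgreSQL's actual BUFFERALIGN boundary is platform-dependent):

    #include <stddef.h>
    #include <stdio.h>

    /* Round len up to the assumed 128-byte alignment boundary. */
    #define MY_BUFFERALIGN(len) (((size_t) (len) + 127) & ~((size_t) 127))

    typedef struct
    {
        size_t space_for_chunks;
        size_t number_of_keys;
    } my_estimator;

    int
    main(void)
    {
        my_estimator e = {0, 0};
        size_t sizes[] = {100, 250, 4096};

        /* mirror shm_toc_estimate_chunk/shm_toc_estimate_keys */
        for (int i = 0; i < 3; i++)
        {
            e.space_for_chunks += MY_BUFFERALIGN(sizes[i]);
            e.number_of_keys++;
        }
        /* 128 + 256 + 4096 = 4480 bytes of chunk space, 3 keys */
        printf("%zu bytes, %zu keys\n", e.space_for_chunks, e.number_of_keys);
        return 0;
    }
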
diff --git a/src/include/utils/syscache.h b/src/include/utils/syscache.h
index 2bbc93a975..67cc6f4c23 100644
--- a/src/include/utils/syscache.h
+++ b/src/include/utils/syscache.h
@@ -84,22 +84,22 @@ enum SysCacheIdentifier
PARTRELID,
PROCNAMEARGSNSP,
PROCOID,
+ PUBLICATIONNAME,
+ PUBLICATIONOID,
+ PUBLICATIONREL,
+ PUBLICATIONRELMAP,
RANGETYPE,
RELNAMENSP,
RELOID,
REPLORIGIDENT,
REPLORIGNAME,
- PUBLICATIONOID,
- PUBLICATIONNAME,
- PUBLICATIONREL,
- PUBLICATIONRELMAP,
RULERELNAME,
SEQRELID,
STATEXTNAMENSP,
STATEXTOID,
STATRELATTINH,
- SUBSCRIPTIONOID,
SUBSCRIPTIONNAME,
+ SUBSCRIPTIONOID,
SUBSCRIPTIONRELMAP,
TABLESPACEOID,
TRFOID,
diff --git a/src/interfaces/ecpg/ecpglib/pg_type.h b/src/interfaces/ecpg/ecpglib/pg_type.h
index a2f44324ba..48ae480129 100644
--- a/src/interfaces/ecpg/ecpglib/pg_type.h
+++ b/src/interfaces/ecpg/ecpglib/pg_type.h
@@ -57,23 +57,23 @@
#define ZPBITOID 1560
#define VARBITOID 1562
#define NUMERICOID 1700
-#define REFCURSOROID 1790
+#define REFCURSOROID 1790
#define REGPROCEDUREOID 2202
-#define REGOPEROID 2203
-#define REGOPERATOROID 2204
-#define REGCLASSOID 2205
-#define REGTYPEOID 2206
-#define REGROLEOID 4096
-#define REGNAMESPACEOID 4089
+#define REGOPEROID 2203
+#define REGOPERATOROID 2204
+#define REGCLASSOID 2205
+#define REGTYPEOID 2206
+#define REGROLEOID 4096
+#define REGNAMESPACEOID 4089
#define REGTYPEARRAYOID 2211
#define UUIDOID 2950
-#define LSNOID 3220
-#define TSVECTOROID 3614
-#define GTSVECTOROID 3642
-#define TSQUERYOID 3615
-#define REGCONFIGOID 3734
-#define REGDICTIONARYOID 3769
+#define LSNOID 3220
+#define TSVECTOROID 3614
+#define GTSVECTOROID 3642
+#define TSQUERYOID 3615
+#define REGCONFIGOID 3734
+#define REGDICTIONARYOID 3769
#define JSONBOID 3802
-#define INT4RANGEOID 3904
+#define INT4RANGEOID 3904
#endif /* PG_TYPE_H */
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index 16956dc3f7..74086545bf 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -136,6 +136,11 @@ pg_GSS_continue(PGconn *conn, int payloadlen)
return STATUS_ERROR;
}
}
+ else
+ {
+ ginbuf.length = 0;
+ ginbuf.value = NULL;
+ }
maj_stat = gss_init_sec_context(&min_stat,
GSS_C_NO_CREDENTIAL,
@@ -145,13 +150,13 @@ pg_GSS_continue(PGconn *conn, int payloadlen)
GSS_C_MUTUAL_FLAG,
0,
GSS_C_NO_CHANNEL_BINDINGS,
- (conn->gctx == GSS_C_NO_CONTEXT) ? GSS_C_NO_BUFFER : &ginbuf,
+ (ginbuf.value == NULL) ? GSS_C_NO_BUFFER : &ginbuf,
NULL,
&goutbuf,
NULL,
NULL);
- if (conn->gctx != GSS_C_NO_CONTEXT)
+ if (ginbuf.value)
free(ginbuf.value);
if (goutbuf.length != 0)
@@ -414,7 +419,12 @@ pg_SSPI_startup(PGconn *conn, int use_negotiate, int payloadlen)
TimeStamp expire;
char *host = PQhost(conn);
- conn->sspictx = NULL;
+ if (conn->sspictx)
+ {
+ printfPQExpBuffer(&conn->errorMessage,
+ libpq_gettext("duplicate SSPI authentication request\n"));
+ return STATUS_ERROR;
+ }
/*
* Retrieve credentials handle
@@ -1211,7 +1221,8 @@ PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user,
else
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("unknown password encryption algorithm\n"));
+ libpq_gettext("unrecognized password encryption algorithm \"%s\"\n"),
+ algorithm);
return NULL;
}
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index f2c9bf7a88..02ec8f0cea 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -406,15 +406,59 @@ pqDropConnection(PGconn *conn, bool flushInput)
{
/* Drop any SSL state */
pqsecure_close(conn);
+
/* Close the socket itself */
if (conn->sock != PGINVALID_SOCKET)
closesocket(conn->sock);
conn->sock = PGINVALID_SOCKET;
+
/* Optionally discard any unread data */
if (flushInput)
conn->inStart = conn->inCursor = conn->inEnd = 0;
+
/* Always discard any unsent data */
conn->outCount = 0;
+
+ /* Free authentication state */
+#ifdef ENABLE_GSS
+ {
+ OM_uint32 min_s;
+
+ if (conn->gctx)
+ gss_delete_sec_context(&min_s, &conn->gctx, GSS_C_NO_BUFFER);
+ if (conn->gtarg_nam)
+ gss_release_name(&min_s, &conn->gtarg_nam);
+ }
+#endif
+#ifdef ENABLE_SSPI
+ if (conn->sspitarget)
+ {
+ free(conn->sspitarget);
+ conn->sspitarget = NULL;
+ }
+ if (conn->sspicred)
+ {
+ FreeCredentialsHandle(conn->sspicred);
+ free(conn->sspicred);
+ conn->sspicred = NULL;
+ }
+ if (conn->sspictx)
+ {
+ DeleteSecurityContext(conn->sspictx);
+ free(conn->sspictx);
+ conn->sspictx = NULL;
+ }
+ conn->usesspi = 0;
+#endif
+ if (conn->sasl_state)
+ {
+ /*
+ * XXX: if support for more authentication mechanisms is added, this
+ * needs to call the right 'free' function.
+ */
+ pg_fe_scram_free(conn->sasl_state);
+ conn->sasl_state = NULL;
+ }
}
@@ -1598,7 +1642,6 @@ connectDBStart(PGconn *conn)
for (i = 0; i < conn->nconnhost; ++i)
{
pg_conn_host *ch = &conn->connhost[i];
- char *node = ch->host;
struct addrinfo hint;
int thisport;
@@ -1624,17 +1667,29 @@ connectDBStart(PGconn *conn)
}
snprintf(portstr, sizeof(portstr), "%d", thisport);
- /* Set up for name resolution. */
+ /* Use pg_getaddrinfo_all() to resolve the address */
+ ret = 1;
switch (ch->type)
{
case CHT_HOST_NAME:
+ ret = pg_getaddrinfo_all(ch->host, portstr, &hint, &ch->addrlist);
+ if (ret || !ch->addrlist)
+ appendPQExpBuffer(&conn->errorMessage,
+ libpq_gettext("could not translate host name \"%s\" to address: %s\n"),
+ ch->host, gai_strerror(ret));
break;
+
case CHT_HOST_ADDRESS:
hint.ai_flags = AI_NUMERICHOST;
+ ret = pg_getaddrinfo_all(ch->host, portstr, &hint, &ch->addrlist);
+ if (ret || !ch->addrlist)
+ appendPQExpBuffer(&conn->errorMessage,
+ libpq_gettext("could not parse network address \"%s\": %s\n"),
+ ch->host, gai_strerror(ret));
break;
+
case CHT_UNIX_SOCKET:
#ifdef HAVE_UNIX_SOCKETS
- node = NULL;
hint.ai_family = AF_UNIX;
UNIXSOCK_PATH(portstr, thisport, ch->host);
if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN)
@@ -1646,24 +1701,25 @@ connectDBStart(PGconn *conn)
conn->options_valid = false;
goto connect_errReturn;
}
+
+ /*
+ * NULL hostname tells pg_getaddrinfo_all to parse the service
+ * name as a Unix-domain socket path.
+ */
+ ret = pg_getaddrinfo_all(NULL, portstr, &hint, &ch->addrlist);
+ if (ret || !ch->addrlist)
+ appendPQExpBuffer(&conn->errorMessage,
+ libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"),
+ portstr, gai_strerror(ret));
+ break;
#else
Assert(false);
+ conn->options_valid = false;
+ goto connect_errReturn;
#endif
- break;
}
-
- /* Use pg_getaddrinfo_all() to resolve the address */
- ret = pg_getaddrinfo_all(node, portstr, &hint, &ch->addrlist);
if (ret || !ch->addrlist)
{
- if (node)
- appendPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not translate host name \"%s\" to address: %s\n"),
- node, gai_strerror(ret));
- else
- appendPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"),
- portstr, gai_strerror(ret));
if (ch->addrlist)
{
pg_freeaddrinfo_all(hint.ai_family, ch->addrlist);
@@ -1786,16 +1842,23 @@ connectDBComplete(PGconn *conn)
return 0;
}
- if (ret == 1) /* connect_timeout elapsed */
+ if (ret == 1) /* connect_timeout elapsed */
{
- /* If there are no more hosts, return (the error message is already set) */
+ /*
+ * If there are no more hosts, return (the error message is
+ * already set)
+ */
if (++conn->whichhost >= conn->nconnhost)
{
conn->whichhost = 0;
conn->status = CONNECTION_BAD;
return 0;
}
- /* Attempt connection to the next host, starting the connect_timeout timer */
+
+ /*
+ * Attempt connection to the next host, starting the
+ * connect_timeout timer
+ */
pqDropConnection(conn, true);
conn->addr_cur = conn->connhost[conn->whichhost].addrlist;
conn->status = CONNECTION_NEEDED;
@@ -3043,7 +3106,7 @@ keep_going: /* We will come back to here until there is
restoreErrorMessage(conn, &savedMessage);
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("test \"SHOW transaction_read_only\" failed "
- " on \"%s:%s\"\n"),
+ "on server \"%s:%s\"\n"),
conn->connhost[conn->whichhost].host,
conn->connhost[conn->whichhost].port);
conn->status = CONNECTION_OK;
@@ -3475,42 +3538,6 @@ closePGconn(PGconn *conn)
if (conn->lobjfuncs)
free(conn->lobjfuncs);
conn->lobjfuncs = NULL;
-#ifdef ENABLE_GSS
- {
- OM_uint32 min_s;
-
- if (conn->gctx)
- gss_delete_sec_context(&min_s, &conn->gctx, GSS_C_NO_BUFFER);
- if (conn->gtarg_nam)
- gss_release_name(&min_s, &conn->gtarg_nam);
- }
-#endif
-#ifdef ENABLE_SSPI
- if (conn->sspitarget)
- free(conn->sspitarget);
- conn->sspitarget = NULL;
- if (conn->sspicred)
- {
- FreeCredentialsHandle(conn->sspicred);
- free(conn->sspicred);
- conn->sspicred = NULL;
- }
- if (conn->sspictx)
- {
- DeleteSecurityContext(conn->sspictx);
- free(conn->sspictx);
- conn->sspictx = NULL;
- }
-#endif
- if (conn->sasl_state)
- {
- /*
- * XXX: if support for more authentication mechanisms is added, this
- * needs to call the right 'free' function.
- */
- pg_fe_scram_free(conn->sasl_state);
- conn->sasl_state = NULL;
- }
}
/*
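
The fe-connect.c change above moves the GSS, SSPI, and SASL cleanup from
closePGconn() into pqDropConnection(), and each branch frees its state and
then resets the pointer, so the function stays safe to call repeatedly across
reconnect attempts. A standalone sketch of that free-and-NULL idiom
(fake_conn and drop_auth_state are hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct
    {
        char *sasl_state;   /* stand-in for the auth state freed above */
    } fake_conn;

    /* Free the state if present, then NULL it so a second call is a no-op. */
    static void
    drop_auth_state(fake_conn *conn)
    {
        if (conn->sasl_state)
        {
            free(conn->sasl_state);
            conn->sasl_state = NULL;
        }
    }

    int
    main(void)
    {
        fake_conn conn = {strdup("scram state")};

        drop_auth_state(&conn);     /* frees the state */
        drop_auth_state(&conn);     /* harmless no-op */
        printf("ok\n");
        return 0;
    }
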
diff --git a/src/interfaces/libpq/test/README b/src/interfaces/libpq/test/README
index 001ecc378d..a05eb6bb3b 100644
--- a/src/interfaces/libpq/test/README
+++ b/src/interfaces/libpq/test/README
@@ -1,7 +1,7 @@
This is a testsuite for testing libpq URI connection string syntax.
To run the suite, use 'make installcheck' command. It works by
-running 'regress.sh' from this directory with appropriate environment
+running 'regress.pl' from this directory with appropriate environment
set up, which in turn feeds up lines from 'regress.in' to
'uri-regress' test program and compares the output against the correct
one in 'expected.out' file.
diff --git a/src/makefiles/Makefile.linux b/src/makefiles/Makefile.linux
index 52bf0b1e2b..f4f091caef 100644
--- a/src/makefiles/Makefile.linux
+++ b/src/makefiles/Makefile.linux
@@ -1,15 +1,14 @@
AROPT = crs
+
export_dynamic = -Wl,-E
# Use --enable-new-dtags to generate DT_RUNPATH instead of DT_RPATH.
# This allows LD_LIBRARY_PATH to still work when needed.
rpath = -Wl,-rpath,'$(rpathdir)',--enable-new-dtags
+
DLSUFFIX = .so
-ifeq "$(findstring sparc,$(host_cpu))" "sparc"
CFLAGS_SL = -fPIC
-else
-CFLAGS_SL = -fpic
-endif
+
# Rule for building a shared library from a single .o file
%.so: %.o
diff --git a/src/makefiles/Makefile.netbsd b/src/makefiles/Makefile.netbsd
index 31a52601af..43841c1597 100644
--- a/src/makefiles/Makefile.netbsd
+++ b/src/makefiles/Makefile.netbsd
@@ -9,11 +9,7 @@ endif
DLSUFFIX = .so
-ifeq ($(findstring sparc,$(host_cpu)), sparc)
CFLAGS_SL = -fPIC -DPIC
-else
-CFLAGS_SL = -fpic -DPIC
-endif
# Rule for building a shared library from a single .o file
diff --git a/src/makefiles/Makefile.openbsd b/src/makefiles/Makefile.openbsd
index 7bf5493309..d8fde49d5c 100644
--- a/src/makefiles/Makefile.openbsd
+++ b/src/makefiles/Makefile.openbsd
@@ -7,11 +7,7 @@ endif
DLSUFFIX = .so
-ifeq ($(findstring sparc,$(host_cpu)), sparc)
CFLAGS_SL = -fPIC -DPIC
-else
-CFLAGS_SL = -fpic -DPIC
-endif
# Rule for building a shared library from a single .o file
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index a6375511f6..cc093556e5 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -1761,7 +1761,8 @@ plpgsql_parse_cwordtype(List *idents)
classStruct->relkind != RELKIND_VIEW &&
classStruct->relkind != RELKIND_MATVIEW &&
classStruct->relkind != RELKIND_COMPOSITE_TYPE &&
- classStruct->relkind != RELKIND_FOREIGN_TABLE)
+ classStruct->relkind != RELKIND_FOREIGN_TABLE &&
+ classStruct->relkind != RELKIND_PARTITIONED_TABLE)
goto done;
/*
@@ -1987,7 +1988,8 @@ build_row_from_class(Oid classOid)
classStruct->relkind != RELKIND_VIEW &&
classStruct->relkind != RELKIND_MATVIEW &&
classStruct->relkind != RELKIND_COMPOSITE_TYPE &&
- classStruct->relkind != RELKIND_FOREIGN_TABLE)
+ classStruct->relkind != RELKIND_FOREIGN_TABLE &&
+ classStruct->relkind != RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("relation \"%s\" is not a table", relname)));
diff --git a/src/test/isolation/expected/sequence-ddl.out b/src/test/isolation/expected/sequence-ddl.out
index 6b7119738f..6766c0aff6 100644
--- a/src/test/isolation/expected/sequence-ddl.out
+++ b/src/test/isolation/expected/sequence-ddl.out
@@ -13,15 +13,13 @@ step s1commit: COMMIT;
step s2nv: <... completed>
error in steps s1commit s2nv: ERROR: nextval: reached maximum value of sequence "seq1" (10)
-starting permutation: s2begin s2nv s1alter2 s2commit s1commit
-step s2begin: BEGIN;
-step s2nv: SELECT nextval('seq1') FROM generate_series(1, 15);
+starting permutation: s1restart s2nv s1commit
+step s1restart: ALTER SEQUENCE seq1 RESTART WITH 5;
+step s2nv: SELECT nextval('seq1') FROM generate_series(1, 15); <waiting ...>
+step s1commit: COMMIT;
+step s2nv: <... completed>
nextval
-1
-2
-3
-4
5
6
7
@@ -33,14 +31,16 @@ nextval
13
14
15
-step s1alter2: ALTER SEQUENCE seq1 MAXVALUE 20; <waiting ...>
-step s2commit: COMMIT;
-step s1alter2: <... completed>
-step s1commit: COMMIT;
+16
+17
+18
+19
starting permutation: s1restart s2nv s1commit
step s1restart: ALTER SEQUENCE seq1 RESTART WITH 5;
-step s2nv: SELECT nextval('seq1') FROM generate_series(1, 15);
+step s2nv: SELECT nextval('seq1') FROM generate_series(1, 15); <waiting ...>
+step s1commit: COMMIT;
+step s2nv: <... completed>
nextval
5
@@ -58,9 +58,8 @@ nextval
17
18
19
-step s1commit: COMMIT;
-starting permutation: s2begin s2nv s1restart s2commit s1commit
+starting permutation: s2begin s2nv s1alter2 s2commit s1commit
step s2begin: BEGIN;
step s2nv: SELECT nextval('seq1') FROM generate_series(1, 15);
nextval
@@ -80,6 +79,7 @@ nextval
13
14
15
-step s1restart: ALTER SEQUENCE seq1 RESTART WITH 5;
+step s1alter2: ALTER SEQUENCE seq1 MAXVALUE 20; <waiting ...>
step s2commit: COMMIT;
+step s1alter2: <... completed>
step s1commit: COMMIT;
diff --git a/src/test/isolation/specs/sequence-ddl.spec b/src/test/isolation/specs/sequence-ddl.spec
index 42ee3b0615..5c51fcdae6 100644
--- a/src/test/isolation/specs/sequence-ddl.spec
+++ b/src/test/isolation/specs/sequence-ddl.spec
@@ -15,6 +15,7 @@ setup { BEGIN; }
step "s1alter" { ALTER SEQUENCE seq1 MAXVALUE 10; }
step "s1alter2" { ALTER SEQUENCE seq1 MAXVALUE 20; }
step "s1restart" { ALTER SEQUENCE seq1 RESTART WITH 5; }
+step "s1setval" { SELECT setval('seq1', 5); }
step "s1commit" { COMMIT; }
session "s2"
@@ -24,16 +25,18 @@ step "s2commit" { COMMIT; }
permutation "s1alter" "s1commit" "s2nv"
-# Prior to PG10, the s2nv would see the uncommitted s1alter change,
-# but now it waits.
+# Prior to PG10, the s2nv step would see the uncommitted s1alter
+# change, but now it waits.
permutation "s1alter" "s2nv" "s1commit"
-# nextval doesn't release lock until transaction end, so s1alter2 has
-# to wait for s2commit.
-permutation "s2begin" "s2nv" "s1alter2" "s2commit" "s1commit"
+# Prior to PG10, the s2nv step would see the uncommitted s1restart
+# change, but now it waits.
+permutation "s1restart" "s2nv" "s1commit"
-# RESTART is nontransactional, so s2nv sees it right away
+# In contrast to ALTER, setval() is non-transactional, so it doesn't
+# have to wait.
permutation "s1restart" "s2nv" "s1commit"
-# RESTART does not wait
-permutation "s2begin" "s2nv" "s1restart" "s2commit" "s1commit"
+# nextval doesn't release lock until transaction end, so s1alter2 has
+# to wait for s2commit.
+permutation "s2begin" "s2nv" "s1alter2" "s2commit" "s1commit"
diff --git a/src/test/modules/test_extensions/expected/test_extensions.out b/src/test/modules/test_extensions/expected/test_extensions.out
index ba8b90e742..28d86c4b87 100644
--- a/src/test/modules/test_extensions/expected/test_extensions.out
+++ b/src/test/modules/test_extensions/expected/test_extensions.out
@@ -44,7 +44,7 @@ create table old_table1 (col1 serial primary key);
create extension test_ext7;
\dx+ test_ext7
Objects in extension "test_ext7"
- Object Description
+ Object description
-------------------------------
sequence ext7_table1_col1_seq
sequence ext7_table2_col2_seq
@@ -57,7 +57,7 @@ Objects in extension "test_ext7"
alter extension test_ext7 update to '2.0';
\dx+ test_ext7
Objects in extension "test_ext7"
- Object Description
+ Object description
-------------------------------
sequence ext7_table2_col2_seq
table ext7_table2
@@ -67,12 +67,12 @@ Objects in extension "test_ext7"
create extension test_ext8;
-- \dx+ would expose a variable pg_temp_nn schema name, so we can't use it here
select regexp_replace(pg_describe_object(classid, objid, objsubid),
- 'pg_temp_\d+', 'pg_temp', 'g') as "Object Description"
+ 'pg_temp_\d+', 'pg_temp', 'g') as "Object description"
from pg_depend
where refclassid = 'pg_extension'::regclass and deptype = 'e' and
refobjid = (select oid from pg_extension where extname = 'test_ext8')
order by 1;
- Object Description
+ Object description
-----------------------------------------
function ext8_even(posint)
function pg_temp.ext8_temp_even(posint)
@@ -85,12 +85,12 @@ order by 1;
drop extension test_ext8;
create extension test_ext8;
select regexp_replace(pg_describe_object(classid, objid, objsubid),
- 'pg_temp_\d+', 'pg_temp', 'g') as "Object Description"
+ 'pg_temp_\d+', 'pg_temp', 'g') as "Object description"
from pg_depend
where refclassid = 'pg_extension'::regclass and deptype = 'e' and
refobjid = (select oid from pg_extension where extname = 'test_ext8')
order by 1;
- Object Description
+ Object description
-----------------------------------------
function ext8_even(posint)
function pg_temp.ext8_temp_even(posint)
@@ -112,7 +112,7 @@ end';
-- extension should now contain no temp objects
\dx+ test_ext8
Objects in extension "test_ext8"
- Object Description
+ Object description
----------------------------
function ext8_even(posint)
table ext8_table1
diff --git a/src/test/modules/test_extensions/sql/test_extensions.sql b/src/test/modules/test_extensions/sql/test_extensions.sql
index 0bfc559295..9e64503eb5 100644
--- a/src/test/modules/test_extensions/sql/test_extensions.sql
+++ b/src/test/modules/test_extensions/sql/test_extensions.sql
@@ -31,7 +31,7 @@ create extension test_ext8;
-- \dx+ would expose a variable pg_temp_nn schema name, so we can't use it here
select regexp_replace(pg_describe_object(classid, objid, objsubid),
- 'pg_temp_\d+', 'pg_temp', 'g') as "Object Description"
+ 'pg_temp_\d+', 'pg_temp', 'g') as "Object description"
from pg_depend
where refclassid = 'pg_extension'::regclass and deptype = 'e' and
refobjid = (select oid from pg_extension where extname = 'test_ext8')
@@ -42,7 +42,7 @@ drop extension test_ext8;
create extension test_ext8;
select regexp_replace(pg_describe_object(classid, objid, objsubid),
- 'pg_temp_\d+', 'pg_temp', 'g') as "Object Description"
+ 'pg_temp_\d+', 'pg_temp', 'g') as "Object description"
from pg_depend
where refclassid = 'pg_extension'::regclass and deptype = 'e' and
refobjid = (select oid from pg_extension where extname = 'test_ext8')
diff --git a/src/test/modules/test_shm_mq/worker.c b/src/test/modules/test_shm_mq/worker.c
index 3e45c75dc0..f8aef263f7 100644
--- a/src/test/modules/test_shm_mq/worker.c
+++ b/src/test/modules/test_shm_mq/worker.c
@@ -95,7 +95,7 @@ test_shm_mq_main(Datum main_arg)
* find it. Our worker number gives our identity: there may be just one
* worker involved in this parallel operation, or there may be many.
*/
- hdr = shm_toc_lookup(toc, 0);
+ hdr = shm_toc_lookup(toc, 0, false);
SpinLockAcquire(&hdr->mutex);
myworkernumber = ++hdr->workers_attached;
SpinLockRelease(&hdr->mutex);
@@ -158,10 +158,10 @@ attach_to_queues(dsm_segment *seg, shm_toc *toc, int myworkernumber,
shm_mq *inq;
shm_mq *outq;
- inq = shm_toc_lookup(toc, myworkernumber);
+ inq = shm_toc_lookup(toc, myworkernumber, false);
shm_mq_set_receiver(inq, MyProc);
*inqhp = shm_mq_attach(inq, seg, NULL);
- outq = shm_toc_lookup(toc, myworkernumber + 1);
+ outq = shm_toc_lookup(toc, myworkernumber + 1, false);
shm_mq_set_sender(outq, MyProc);
*outqhp = shm_mq_attach(outq, seg, NULL);
}
diff --git a/src/test/modules/worker_spi/worker_spi.c b/src/test/modules/worker_spi/worker_spi.c
index 9abfc714a9..553baf0045 100644
--- a/src/test/modules/worker_spi/worker_spi.c
+++ b/src/test/modules/worker_spi/worker_spi.c
@@ -235,6 +235,8 @@ worker_spi_main(Datum main_arg)
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
+ CHECK_FOR_INTERRUPTS();
+
/*
* In case of a SIGHUP, just reload the configuration.
*/
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index 0a1068146a..f2b303a4c6 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -3322,6 +3322,12 @@ ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
ERROR: partition constraint is violated by some row
-- delete the faulting row and also add a constraint to skip the scan
DELETE FROM part_5_a WHERE a NOT IN (3);
+ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 5);
+ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
+INFO: partition constraint for table "part_5" is implied by existing constraints
+ALTER TABLE list_parted2 DETACH PARTITION part_5;
+ALTER TABLE part_5 DROP CONSTRAINT check_a;
+-- scan should again be skipped, even though NOT NULL is now a column property
ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)), ALTER a SET NOT NULL;
ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
INFO: partition constraint for table "part_5" is implied by existing constraints
@@ -3369,6 +3375,19 @@ SELECT coninhcount, conislocal FROM pg_constraint WHERE conrelid = 'part_3_4'::r
(1 row)
DROP TABLE part_3_4;
+-- check that a detached partition is not dropped when the partitioned table is dropped
+CREATE TABLE range_parted2 (
+ a int
+) PARTITION BY RANGE(a);
+CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) TO (100);
+ALTER TABLE range_parted2 DETACH PARTITION part_rp;
+DROP TABLE range_parted2;
+SELECT * FROM part_rp;
+ a
+---
+(0 rows)
+
+DROP TABLE part_rp;
-- Check ALTER TABLE commands for partitioned tables and partitions
-- cannot add/drop column to/from *only* the parent
ALTER TABLE ONLY list_parted2 ADD COLUMN c int;
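
The alter_table changes above exercise the constraint-implication check during ATTACH PARTITION: when the attached table's existing CHECK constraints already imply the partition constraint, the validation scan is skipped and an INFO message says so. A minimal standalone sketch, with hypothetical names:

    CREATE TABLE sketch_parent (a int) PARTITION BY LIST (a);
    CREATE TABLE sketch_part (a int);
    -- a constraint at least as strict as the partition constraint
    ALTER TABLE sketch_part
        ADD CONSTRAINT sketch_chk CHECK (a IS NOT NULL AND a = 1);
    ALTER TABLE sketch_parent ATTACH PARTITION sketch_part FOR VALUES IN (1);
    -- INFO: partition constraint for table "sketch_part" is implied by existing constraints
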
diff --git a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out
index f3b97ece59..3e677b0d55 100644
--- a/src/test/regress/expected/create_table.out
+++ b/src/test/regress/expected/create_table.out
@@ -315,7 +315,7 @@ CREATE FUNCTION retset (a int) RETURNS SETOF int AS $$ SELECT 1; $$ LANGUAGE SQL
CREATE TABLE partitioned (
a int
) PARTITION BY RANGE (retset(a));
-ERROR: set-returning functions are not allowed in partition key expression
+ERROR: set-returning functions are not allowed in partition key expressions
DROP FUNCTION retset(int);
CREATE TABLE partitioned (
a int
diff --git a/src/test/regress/expected/foreign_data.out b/src/test/regress/expected/foreign_data.out
index de22510740..cdafb90762 100644
--- a/src/test/regress/expected/foreign_data.out
+++ b/src/test/regress/expected/foreign_data.out
@@ -254,13 +254,13 @@ ERROR: foreign-data wrapper "foo" does not exist
\des+
List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
+ Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
------+-------+----------------------+-------------------+------+---------+-------------+-------------
(0 rows)
\deu+
List of user mappings
- Server | User name | FDW Options
+ Server | User name | FDW options
--------+-----------+-------------
(0 rows)
@@ -306,7 +306,6 @@ DETAIL: The feature is not currently supported
Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
------+-------+----------------------+-------------------+------+---------+-------------+-------------
(0 rows)
-
SET ROLE regress_test_role;
CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -- ERROR: no usage on FDW
ERROR: Postgres-XL does not support SERVER yet
@@ -1071,7 +1070,7 @@ GRANT USAGE ON FOREIGN SERVER s10 TO regress_unprivileged_role;
-- owner of server can see option fields
\deu+
List of user mappings
- Server | User name | FDW Options
+ Server | User name | FDW options
--------+---------------------------+-------------------
s10 | public | ("user" 'secret')
s4 | regress_foreign_data_user |
@@ -1087,7 +1086,7 @@ RESET ROLE;
-- superuser can see option fields
\deu+
List of user mappings
- Server | User name | FDW Options
+ Server | User name | FDW options
--------+---------------------------+---------------------
s10 | public | ("user" 'secret')
s4 | regress_foreign_data_user |
@@ -1103,7 +1102,7 @@ RESET ROLE;
SET ROLE regress_unprivileged_role;
\deu+
List of user mappings
- Server | User name | FDW Options
+ Server | User name | FDW options
--------+---------------------------+-------------
s10 | public |
s4 | regress_foreign_data_user |
@@ -1229,13 +1228,13 @@ Location Nodes: ALL DATANODES
\d+ ft2
Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW Options | Storage | Stats target | Description
+ Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
--------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
c1 | integer | | not null | | | plain | |
c2 | text | | | | | extended | |
c3 | date | | | | | plain | |
Server: s0
-FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
+FDW options: (delimiter ',', quote '"', "be quoted" 'value')
Inherits: pt1
CREATE TABLE ct3() INHERITS(ft2);
@@ -1512,7 +1511,7 @@ Partitions: pt2_1 FOR VALUES IN (1)
\d+ pt2_1
Foreign table "public.pt2_1"
- Column | Type | Collation | Nullable | Default | FDW Options | Storage | Stats target | Description
+ Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
--------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
c1 | integer | | not null | | | plain | |
c2 | text | | | | | extended | |
@@ -1520,7 +1519,7 @@ Partitions: pt2_1 FOR VALUES IN (1)
Partition of: pt2 FOR VALUES IN (1)
Partition constraint: ((c1 IS NOT NULL) AND (c1 = ANY (ARRAY[1])))
Server: s0
-FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
+FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-- partition cannot have additional columns
DROP FOREIGN TABLE pt2_1;
@@ -1532,14 +1531,14 @@ CREATE FOREIGN TABLE pt2_1 (
) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
\d+ pt2_1
Foreign table "public.pt2_1"
- Column | Type | Collation | Nullable | Default | FDW Options | Storage | Stats target | Description
+ Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
--------+--------------+-----------+----------+---------+-------------+----------+--------------+-------------
c1 | integer | | not null | | | plain | |
c2 | text | | | | | extended | |
c3 | date | | | | | plain | |
c4 | character(1) | | | | | extended | |
Server: s0
-FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
+FDW options: (delimiter ',', quote '"', "be quoted" 'value')
ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR
ERROR: table "pt2_1" contains column "c4" not found in parent "pt2"
@@ -1561,13 +1560,13 @@ CREATE FOREIGN TABLE pt2_1 (
) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
\d+ pt2_1
Foreign table "public.pt2_1"
- Column | Type | Collation | Nullable | Default | FDW Options | Storage | Stats target | Description
+ Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
--------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
c1 | integer | | not null | | | plain | |
c2 | text | | | | | extended | |
c3 | date | | | | | plain | |
Server: s0
-FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
+FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-- no attach partition validation occurs for foreign tables
ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1);
@@ -1583,7 +1582,7 @@ Partitions: pt2_1 FOR VALUES IN (1)
\d+ pt2_1
Foreign table "public.pt2_1"
- Column | Type | Collation | Nullable | Default | FDW Options | Storage | Stats target | Description
+ Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
--------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
c1 | integer | | not null | | | plain | |
c2 | text | | | | | extended | |
@@ -1591,7 +1590,7 @@ Partitions: pt2_1 FOR VALUES IN (1)
Partition of: pt2 FOR VALUES IN (1)
Partition constraint: ((c1 IS NOT NULL) AND (c1 = ANY (ARRAY[1])))
Server: s0
-FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
+FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-- cannot add column to a partition
ALTER TABLE pt2_1 ADD c4 char;
@@ -1611,7 +1610,7 @@ Partitions: pt2_1 FOR VALUES IN (1)
\d+ pt2_1
Foreign table "public.pt2_1"
- Column | Type | Collation | Nullable | Default | FDW Options | Storage | Stats target | Description
+ Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
--------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
c1 | integer | | not null | | | plain | |
c2 | text | | | | | extended | |
@@ -1621,7 +1620,7 @@ Partition constraint: ((c1 IS NOT NULL) AND (c1 = ANY (ARRAY[1])))
Check constraints:
"p21chk" CHECK (c2 <> ''::text)
Server: s0
-FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
+FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-- cannot drop inherited NOT NULL constraint from a partition
ALTER TABLE pt2_1 ALTER c1 DROP NOT NULL;
@@ -1640,7 +1639,7 @@ Partition key: LIST (c1)
\d+ pt2_1
Foreign table "public.pt2_1"
- Column | Type | Collation | Nullable | Default | FDW Options | Storage | Stats target | Description
+ Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
--------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
c1 | integer | | not null | | | plain | |
c2 | text | | | | | extended | |
@@ -1648,7 +1647,7 @@ Partition key: LIST (c1)
Check constraints:
"p21chk" CHECK (c2 <> ''::text)
Server: s0
-FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
+FDW options: (delimiter ',', quote '"', "be quoted" 'value')
ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR
ERROR: column "c2" in child table must be marked NOT NULL
@@ -1669,7 +1668,7 @@ Check constraints:
\d+ pt2_1
Foreign table "public.pt2_1"
- Column | Type | Collation | Nullable | Default | FDW Options | Storage | Stats target | Description
+ Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
--------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
c1 | integer | | not null | | | plain | |
c2 | text | | not null | | | extended | |
@@ -1677,7 +1676,7 @@ Check constraints:
Check constraints:
"p21chk" CHECK (c2 <> ''::text)
Server: s0
-FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
+FDW options: (delimiter ',', quote '"', "be quoted" 'value')
ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR
ERROR: child table is missing constraint "pt2chk1"
diff --git a/src/test/regress/expected/insert.out b/src/test/regress/expected/insert.out
index dcb2ed2c87..58b345f50a 100644
--- a/src/test/regress/expected/insert.out
+++ b/src/test/regress/expected/insert.out
@@ -381,7 +381,7 @@ drop function mlparted11_trig_fn();
-- checking its partition constraint before inserting into the leaf partition
-- selected by tuple-routing
insert into mlparted1 (a, b) values (2, 3);
-ERROR: new row for relation "mlparted11" violates partition constraint
+ERROR: new row for relation "mlparted1" violates partition constraint
DETAIL: Failing row contains (3, 2).
-- check routing error through a list partitioned table when the key is null
create table lparted_nonullpart (a int, b char) partition by list (b);
@@ -497,3 +497,16 @@ select tableoid::regclass::text, * from mcrparted order by 1;
-- cleanup
drop table mcrparted;
+-- check that a BR trigger can't make a partition contain violating rows
+create table brtrigpartcon (a int, b text) partition by list (a);
+create table brtrigpartcon1 partition of brtrigpartcon for values in (1);
+create or replace function brtrigpartcon1trigf() returns trigger as $$begin new.a := 2; return new; end$$ language plpgsql;
+create trigger brtrigpartcon1trig before insert on brtrigpartcon1 for each row execute procedure brtrigpartcon1trigf();
+insert into brtrigpartcon values (1, 'hi there');
+ERROR: new row for relation "brtrigpartcon1" violates partition constraint
+DETAIL: Failing row contains (2, hi there).
+insert into brtrigpartcon1 values (1, 'hi there');
+ERROR: new row for relation "brtrigpartcon1" violates partition constraint
+DETAIL: Failing row contains (2, hi there).
+drop table brtrigpartcon;
+drop function brtrigpartcon1trigf();
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 65a5356412..10a9db40e4 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -5800,48 +5800,44 @@ select * from j1 natural join j2;
explain (verbose, costs off)
select * from j1
inner join (select distinct id from j3) j3 on j1.id = j3.id;
- QUERY PLAN
------------------------------------------------
+ QUERY PLAN
+-----------------------------------------
Nested Loop
Output: j1.id, j3.id
Inner Unique: true
Join Filter: (j1.id = j3.id)
- -> Seq Scan on public.j1
- Output: j1.id
- -> Materialize
+ -> Unique
Output: j3.id
- -> Unique
+ -> Sort
Output: j3.id
- -> Sort
+ Sort Key: j3.id
+ -> Seq Scan on public.j3
Output: j3.id
- Sort Key: j3.id
- -> Seq Scan on public.j3
- Output: j3.id
-(15 rows)
+ -> Seq Scan on public.j1
+ Output: j1.id
+(13 rows)
-- ensure group by clause allows the inner to become unique
explain (verbose, costs off)
select * from j1
inner join (select id from j3 group by id) j3 on j1.id = j3.id;
- QUERY PLAN
------------------------------------------------
+ QUERY PLAN
+-----------------------------------------
Nested Loop
Output: j1.id, j3.id
Inner Unique: true
Join Filter: (j1.id = j3.id)
- -> Seq Scan on public.j1
- Output: j1.id
- -> Materialize
+ -> Group
Output: j3.id
- -> Group
+ Group Key: j3.id
+ -> Sort
Output: j3.id
- Group Key: j3.id
- -> Sort
+ Sort Key: j3.id
+ -> Seq Scan on public.j3
Output: j3.id
- Sort Key: j3.id
- -> Seq Scan on public.j3
- Output: j3.id
-(16 rows)
+ -> Seq Scan on public.j1
+ Output: j1.id
+(14 rows)
drop table j1;
drop table j2;
@@ -5882,13 +5878,11 @@ inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2;
Output: j1.id1, j1.id2, j2.id1, j2.id2
Inner Unique: true
Join Filter: ((j1.id1 = j2.id1) AND (j1.id2 = j2.id2))
+ -> Seq Scan on public.j2
+ Output: j2.id1, j2.id2
-> Seq Scan on public.j1
Output: j1.id1, j1.id2
- -> Materialize
- Output: j2.id1, j2.id2
- -> Seq Scan on public.j2
- Output: j2.id1, j2.id2
-(10 rows)
+(8 rows)
-- ensure we don't detect the join to be unique when quals are not part of the
-- join condition
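
The join.out changes track the unique-join work flagged by the "Inner Unique: true" line: once one side is proven unique on the join key, the planner costs the nested loop accordingly, here flipping the input sides and dropping the Materialize node it used to add. A minimal sketch of a query that earns the marking, with hypothetical tables:

    CREATE TABLE uj_a (id int);
    CREATE TABLE uj_b (id int);
    EXPLAIN (VERBOSE, COSTS OFF)
    SELECT *
    FROM uj_a
    INNER JOIN (SELECT DISTINCT id FROM uj_b) b ON uj_a.id = b.id;
    -- the DISTINCT proves b unique on id, so the join is marked Inner Unique
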
diff --git a/src/test/regress/expected/plpgsql.out b/src/test/regress/expected/plpgsql.out
index cab92b1ac0..70278fd9b6 100644
--- a/src/test/regress/expected/plpgsql.out
+++ b/src/test/regress/expected/plpgsql.out
@@ -5980,3 +5980,48 @@ LINE 1: SELECT (SELECT string_agg(id || '=' || name, ',') FROM d)
^
QUERY: SELECT (SELECT string_agg(id || '=' || name, ',') FROM d)
CONTEXT: PL/pgSQL function alter_table_under_transition_tables_upd_func() line 3 at RAISE
+--
+-- Check type parsing and record fetching from partitioned tables
+--
+CREATE TABLE partitioned_table (a int, b text) PARTITION BY LIST (a);
+CREATE TABLE pt_part1 PARTITION OF partitioned_table FOR VALUES IN (1);
+CREATE TABLE pt_part2 PARTITION OF partitioned_table FOR VALUES IN (2);
+INSERT INTO partitioned_table VALUES (1, 'Row 1');
+INSERT INTO partitioned_table VALUES (2, 'Row 2');
+CREATE OR REPLACE FUNCTION get_from_partitioned_table(partitioned_table.a%TYPE)
+RETURNS partitioned_table AS $$
+DECLARE
+ a_val partitioned_table.a%TYPE;
+ result partitioned_table%ROWTYPE;
+BEGIN
+ a_val := $1;
+ SELECT * INTO result FROM partitioned_table WHERE a = a_val;
+ RETURN result;
+END; $$ LANGUAGE plpgsql;
+NOTICE: type reference partitioned_table.a%TYPE converted to integer
+SELECT * FROM get_from_partitioned_table(1) AS t;
+ a | b
+---+-------
+ 1 | Row 1
+(1 row)
+
+CREATE OR REPLACE FUNCTION list_partitioned_table()
+RETURNS SETOF partitioned_table.a%TYPE AS $$
+DECLARE
+ row partitioned_table%ROWTYPE;
+ a_val partitioned_table.a%TYPE;
+BEGIN
+ FOR row IN SELECT * FROM partitioned_table ORDER BY a LOOP
+ a_val := row.a;
+ RETURN NEXT a_val;
+ END LOOP;
+ RETURN;
+END; $$ LANGUAGE plpgsql;
+NOTICE: type reference partitioned_table.a%TYPE converted to integer
+SELECT * FROM list_partitioned_table() AS t;
+ t
+---
+ 1
+ 2
+(2 rows)
+
diff --git a/src/test/regress/expected/rangefuncs.out b/src/test/regress/expected/rangefuncs.out
index e18814425c..0f7ce901cd 100644
--- a/src/test/regress/expected/rangefuncs.out
+++ b/src/test/regress/expected/rangefuncs.out
@@ -1976,18 +1976,6 @@ select * from foobar(); -- fail
ERROR: function return row and query-specified return row do not match
DETAIL: Returned row contains 3 attributes, but query expects 2.
drop function foobar();
--- check behavior when a function's input sometimes returns a set (bug #8228)
-SELECT *,
- lower(CASE WHEN id = 2 THEN (regexp_matches(str, '^0*([1-9]\d+)$'))[1]
- ELSE str
- END)
-FROM
- (VALUES (1,''), (2,'0000000049404'), (3,'FROM 10000000876')) v(id, str);
- id | str | lower
-----+---------------+-------
- 2 | 0000000049404 | 49404
-(1 row)
-
-- check whole-row-Var handling in nested lateral functions (bug #11703)
create function extractq2(t int8_tbl) returns int8 as $$
select t.q2
diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
index 67fd53a2a0..68c4e740fe 100644
--- a/src/test/regress/expected/rowsecurity.out
+++ b/src/test/regress/expected/rowsecurity.out
@@ -798,6 +798,442 @@ EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b);
Filter: f_leak(b)
(10 rows)
+--
+-- Partitioned Tables
+--
+SET SESSION AUTHORIZATION regress_rls_alice;
+CREATE TABLE part_document (
+ did int,
+ cid int,
+ dlevel int not null,
+ dauthor name,
+ dtitle text
+) PARTITION BY RANGE (cid);
+GRANT ALL ON part_document TO public;
+-- Create partitions for document categories
+CREATE TABLE part_document_fiction PARTITION OF part_document FOR VALUES FROM (11) TO (12);
+CREATE TABLE part_document_satire PARTITION OF part_document FOR VALUES FROM (55) TO (56);
+CREATE TABLE part_document_nonfiction PARTITION OF part_document FOR VALUES FROM (99) TO (100);
+GRANT ALL ON part_document_fiction TO public;
+GRANT ALL ON part_document_satire TO public;
+GRANT ALL ON part_document_nonfiction TO public;
+INSERT INTO part_document VALUES
+ ( 1, 11, 1, 'regress_rls_bob', 'my first novel'),
+ ( 2, 11, 2, 'regress_rls_bob', 'my second novel'),
+ ( 3, 99, 2, 'regress_rls_bob', 'my science textbook'),
+ ( 4, 55, 1, 'regress_rls_bob', 'my first satire'),
+ ( 5, 99, 2, 'regress_rls_bob', 'my history book'),
+ ( 6, 11, 1, 'regress_rls_carol', 'great science fiction'),
+ ( 7, 99, 2, 'regress_rls_carol', 'great technology book'),
+ ( 8, 55, 2, 'regress_rls_carol', 'great satire'),
+ ( 9, 11, 1, 'regress_rls_dave', 'awesome science fiction'),
+ (10, 99, 2, 'regress_rls_dave', 'awesome technology book');
+ALTER TABLE part_document ENABLE ROW LEVEL SECURITY;
+-- Create policy on parent
+-- user's security level must be higher than or equal to document's
+CREATE POLICY pp1 ON part_document AS PERMISSIVE
+ USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user));
+-- Dave is only allowed to see cid < 55
+CREATE POLICY pp1r ON part_document AS RESTRICTIVE TO regress_rls_dave
+ USING (cid < 55);
+\d+ part_document
+ Table "regress_rls_schema.part_document"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+---------+---------+-----------+----------+---------+----------+--------------+-------------
+ did | integer | | | | plain | |
+ cid | integer | | | | plain | |
+ dlevel | integer | | not null | | plain | |
+ dauthor | name | | | | plain | |
+ dtitle | text | | | | extended | |
+Partition key: RANGE (cid)
+Policies:
+ POLICY "pp1"
+ USING ((dlevel <= ( SELECT uaccount.seclv
+ FROM uaccount
+ WHERE (uaccount.pguser = CURRENT_USER))))
+ POLICY "pp1r" AS RESTRICTIVE
+ TO regress_rls_dave
+ USING ((cid < 55))
+Partitions: part_document_fiction FOR VALUES FROM (11) TO (12),
+ part_document_nonfiction FOR VALUES FROM (99) TO (100),
+ part_document_satire FOR VALUES FROM (55) TO (56)
+
+SELECT * FROM pg_policies WHERE schemaname = 'regress_rls_schema' AND tablename like '%part_document%' ORDER BY policyname;
+ schemaname | tablename | policyname | permissive | roles | cmd | qual | with_check
+--------------------+---------------+------------+-------------+--------------------+-----+--------------------------------------------+------------
+ regress_rls_schema | part_document | pp1 | PERMISSIVE | {public} | ALL | (dlevel <= ( SELECT uaccount.seclv +|
+ | | | | | | FROM uaccount +|
+ | | | | | | WHERE (uaccount.pguser = CURRENT_USER))) |
+ regress_rls_schema | part_document | pp1r | RESTRICTIVE | {regress_rls_dave} | ALL | (cid < 55) |
+(2 rows)
+
+-- viewpoint from regress_rls_bob
+SET SESSION AUTHORIZATION regress_rls_bob;
+SET row_security TO ON;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => my first novel
+NOTICE: f_leak => great science fiction
+NOTICE: f_leak => awesome science fiction
+NOTICE: f_leak => my first satire
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+(4 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+ QUERY PLAN
+-----------------------------------------------------
+ Append
+ InitPlan 1 (returns $0)
+ -> Index Scan using uaccount_pkey on uaccount
+ Index Cond: (pguser = CURRENT_USER)
+ -> Seq Scan on part_document_fiction
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+ -> Seq Scan on part_document_satire
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+ -> Seq Scan on part_document_nonfiction
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+(10 rows)
+
+-- viewpoint from regress_rls_carol
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => my first novel
+NOTICE: f_leak => my second novel
+NOTICE: f_leak => great science fiction
+NOTICE: f_leak => awesome science fiction
+NOTICE: f_leak => my first satire
+NOTICE: f_leak => great satire
+NOTICE: f_leak => my science textbook
+NOTICE: f_leak => my history book
+NOTICE: f_leak => great technology book
+NOTICE: f_leak => awesome technology book
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 3 | 99 | 2 | regress_rls_bob | my science textbook
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 5 | 99 | 2 | regress_rls_bob | my history book
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 7 | 99 | 2 | regress_rls_carol | great technology book
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+ 10 | 99 | 2 | regress_rls_dave | awesome technology book
+(10 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+ QUERY PLAN
+-----------------------------------------------------
+ Append
+ InitPlan 1 (returns $0)
+ -> Index Scan using uaccount_pkey on uaccount
+ Index Cond: (pguser = CURRENT_USER)
+ -> Seq Scan on part_document_fiction
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+ -> Seq Scan on part_document_satire
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+ -> Seq Scan on part_document_nonfiction
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+(10 rows)
+
+-- viewpoint from regress_rls_dave
+SET SESSION AUTHORIZATION regress_rls_dave;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => my first novel
+NOTICE: f_leak => my second novel
+NOTICE: f_leak => great science fiction
+NOTICE: f_leak => awesome science fiction
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+(4 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+ QUERY PLAN
+--------------------------------------------------------------------
+ Append
+ InitPlan 1 (returns $0)
+ -> Index Scan using uaccount_pkey on uaccount
+ Index Cond: (pguser = CURRENT_USER)
+ -> Seq Scan on part_document_fiction
+ Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle))
+(6 rows)
+
+-- pp1 ERROR
+INSERT INTO part_document VALUES (100, 11, 5, 'regress_rls_dave', 'testing pp1'); -- fail
+ERROR: new row violates row-level security policy for table "part_document"
+-- pp1r ERROR
+INSERT INTO part_document VALUES (100, 99, 1, 'regress_rls_dave', 'testing pp1r'); -- fail
+ERROR: new row violates row-level security policy "pp1r" for table "part_document"
+-- Show that the RLS policy does not apply to direct inserts into children
+-- This should fail with RLS POLICY pp1r violation.
+INSERT INTO part_document VALUES (100, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- fail
+ERROR: new row violates row-level security policy "pp1r" for table "part_document"
+-- But this should succeed.
+INSERT INTO part_document_satire VALUES (100, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- success
+-- We still cannot see the row using the parent
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => my first novel
+NOTICE: f_leak => my second novel
+NOTICE: f_leak => great science fiction
+NOTICE: f_leak => awesome science fiction
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+(4 rows)
+
+-- But we can if we look directly
+SELECT * FROM part_document_satire WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => my first satire
+NOTICE: f_leak => great satire
+NOTICE: f_leak => testing RLS with partitions
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------------
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions
+(3 rows)
+
+-- Turn on RLS and create policy on child to show RLS is checked before constraints
+SET SESSION AUTHORIZATION regress_rls_alice;
+ALTER TABLE part_document_satire ENABLE ROW LEVEL SECURITY;
+CREATE POLICY pp3 ON part_document_satire AS RESTRICTIVE
+ USING (cid < 55);
+-- This should fail with RLS violation now.
+SET SESSION AUTHORIZATION regress_rls_dave;
+INSERT INTO part_document_satire VALUES (101, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- fail
+ERROR: new row violates row-level security policy for table "part_document_satire"
+-- And now we cannot see directly into the partition either, due to RLS
+SELECT * FROM part_document_satire WHERE f_leak(dtitle) ORDER BY did;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+---------+--------
+(0 rows)
+
+-- The parent looks the same as before
+-- viewpoint from regress_rls_dave
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => my first novel
+NOTICE: f_leak => my second novel
+NOTICE: f_leak => great science fiction
+NOTICE: f_leak => awesome science fiction
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+(4 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+ QUERY PLAN
+--------------------------------------------------------------------
+ Append
+ InitPlan 1 (returns $0)
+ -> Index Scan using uaccount_pkey on uaccount
+ Index Cond: (pguser = CURRENT_USER)
+ -> Seq Scan on part_document_fiction
+ Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle))
+(6 rows)
+
+-- viewpoint from regress_rls_carol
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => my first novel
+NOTICE: f_leak => my second novel
+NOTICE: f_leak => great science fiction
+NOTICE: f_leak => awesome science fiction
+NOTICE: f_leak => my first satire
+NOTICE: f_leak => great satire
+NOTICE: f_leak => testing RLS with partitions
+NOTICE: f_leak => my science textbook
+NOTICE: f_leak => my history book
+NOTICE: f_leak => great technology book
+NOTICE: f_leak => awesome technology book
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 3 | 99 | 2 | regress_rls_bob | my science textbook
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 5 | 99 | 2 | regress_rls_bob | my history book
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 7 | 99 | 2 | regress_rls_carol | great technology book
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+ 10 | 99 | 2 | regress_rls_dave | awesome technology book
+ 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions
+(11 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+ QUERY PLAN
+-----------------------------------------------------
+ Append
+ InitPlan 1 (returns $0)
+ -> Index Scan using uaccount_pkey on uaccount
+ Index Cond: (pguser = CURRENT_USER)
+ -> Seq Scan on part_document_fiction
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+ -> Seq Scan on part_document_satire
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+ -> Seq Scan on part_document_nonfiction
+ Filter: ((dlevel <= $0) AND f_leak(dtitle))
+(10 rows)
+
+-- only owner can change policies
+ALTER POLICY pp1 ON part_document USING (true); -- fail
+ERROR: must be owner of relation part_document
+DROP POLICY pp1 ON part_document; -- fail
+ERROR: must be owner of relation part_document
+SET SESSION AUTHORIZATION regress_rls_alice;
+ALTER POLICY pp1 ON part_document USING (dauthor = current_user);
+-- viewpoint from regress_rls_bob again
+SET SESSION AUTHORIZATION regress_rls_bob;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => my first novel
+NOTICE: f_leak => my second novel
+NOTICE: f_leak => my first satire
+NOTICE: f_leak => my science textbook
+NOTICE: f_leak => my history book
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-----------------+---------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 3 | 99 | 2 | regress_rls_bob | my science textbook
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 5 | 99 | 2 | regress_rls_bob | my history book
+(5 rows)
+
+-- viewpoint from regress_rls_carol again
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+NOTICE: f_leak => great science fiction
+NOTICE: f_leak => great satire
+NOTICE: f_leak => great technology book
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 7 | 99 | 2 | regress_rls_carol | great technology book
+ 8 | 55 | 2 | regress_rls_carol | great satire
+(3 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+ QUERY PLAN
+---------------------------------------------------------------
+ Append
+ -> Seq Scan on part_document_fiction
+ Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle))
+ -> Seq Scan on part_document_satire
+ Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle))
+ -> Seq Scan on part_document_nonfiction
+ Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle))
+(7 rows)
+
+-- database superuser does bypass RLS policy when enabled
+RESET SESSION AUTHORIZATION;
+SET row_security TO ON;
+SELECT * FROM part_document ORDER BY did;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 3 | 99 | 2 | regress_rls_bob | my science textbook
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 5 | 99 | 2 | regress_rls_bob | my history book
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 7 | 99 | 2 | regress_rls_carol | great technology book
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+ 10 | 99 | 2 | regress_rls_dave | awesome technology book
+ 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions
+(11 rows)
+
+SELECT * FROM part_document_satire ORDER BY did;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------------
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions
+(3 rows)
+
+-- database non-superuser with bypass privilege can bypass RLS policy when disabled
+SET SESSION AUTHORIZATION regress_rls_exempt_user;
+SET row_security TO OFF;
+SELECT * FROM part_document ORDER BY did;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 3 | 99 | 2 | regress_rls_bob | my science textbook
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 5 | 99 | 2 | regress_rls_bob | my history book
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 7 | 99 | 2 | regress_rls_carol | great technology book
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+ 10 | 99 | 2 | regress_rls_dave | awesome technology book
+ 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions
+(11 rows)
+
+SELECT * FROM part_document_satire ORDER BY did;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------------
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions
+(3 rows)
+
+-- RLS policy does not apply to table owner when RLS enabled.
+SET SESSION AUTHORIZATION regress_rls_alice;
+SET row_security TO ON;
+SELECT * FROM part_document ORDER BY did;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 3 | 99 | 2 | regress_rls_bob | my science textbook
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 5 | 99 | 2 | regress_rls_bob | my history book
+ 6 | 11 | 1 | regress_rls_carol | great science fiction
+ 7 | 99 | 2 | regress_rls_carol | great technology book
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 9 | 11 | 1 | regress_rls_dave | awesome science fiction
+ 10 | 99 | 2 | regress_rls_dave | awesome technology book
+ 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions
+(11 rows)
+
+SELECT * FROM part_document_satire ORDER BY did;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------------
+ 4 | 55 | 1 | regress_rls_bob | my first satire
+ 8 | 55 | 2 | regress_rls_carol | great satire
+ 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions
+(3 rows)
+
+-- When RLS disabled, other users get ERROR.
+SET SESSION AUTHORIZATION regress_rls_dave;
+SET row_security TO OFF;
+SELECT * FROM part_document ORDER BY did;
+ERROR: query would be affected by row-level security policy for table "part_document"
+SELECT * FROM part_document_satire ORDER BY did;
+ERROR: query would be affected by row-level security policy for table "part_document_satire"
+-- Check behavior with a policy that uses a SubPlan, not an InitPlan.
+SET SESSION AUTHORIZATION regress_rls_alice;
+SET row_security TO ON;
+CREATE POLICY pp3 ON part_document AS RESTRICTIVE
+ USING ((SELECT dlevel <= seclv FROM uaccount WHERE pguser = current_user));
+SET SESSION AUTHORIZATION regress_rls_carol;
+INSERT INTO part_document VALUES (100, 11, 5, 'regress_rls_carol', 'testing pp3'); -- fail
+ERROR: new row violates row-level security policy "pp3" for table "part_document"
----- Dependencies -----
SET SESSION AUTHORIZATION regress_rls_alice;
SET row_security TO ON;
@@ -3230,6 +3666,7 @@ RESET SESSION AUTHORIZATION;
CREATE ROLE regress_rls_dob_role1;
CREATE ROLE regress_rls_dob_role2;
CREATE TABLE dob_t1 (c1 int);
+CREATE TABLE dob_t2 (c1 int) PARTITION BY RANGE (c1);
CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1 USING (true);
DROP OWNED BY regress_rls_dob_role1;
DROP POLICY p1 ON dob_t1; -- should fail, already gone
@@ -3237,6 +3674,9 @@ ERROR: policy "p1" for table "dob_t1" does not exist
CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true);
DROP OWNED BY regress_rls_dob_role1;
DROP POLICY p1 ON dob_t1; -- should succeed
+CREATE POLICY p1 ON dob_t2 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true);
+DROP OWNED BY regress_rls_dob_role1;
+DROP POLICY p1 ON dob_t2; -- should succeed
DROP USER regress_rls_dob_role1;
DROP USER regress_rls_dob_role2;
--
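
Taken together, the new rowsecurity.out cases pin down how RLS composes with partitioning: policies on the partitioned parent gate every access that goes through the parent, including INSERTs routed to a leaf, while direct access to a partition is gated only once that partition enables RLS and gets policies of its own. A minimal sketch for non-owner roles, with hypothetical names:

    CREATE TABLE rls_parent (k int) PARTITION BY LIST (k);
    CREATE TABLE rls_child PARTITION OF rls_parent FOR VALUES IN (1);
    ALTER TABLE rls_parent ENABLE ROW LEVEL SECURITY;
    CREATE POLICY p ON rls_parent USING (k > 0);
    -- INSERT INTO rls_parent ... is checked against p via tuple routing;
    -- INSERT INTO rls_child ... bypasses p until the child opts in:
    ALTER TABLE rls_child ENABLE ROW LEVEL SECURITY;
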
diff --git a/src/test/regress/expected/subscription.out b/src/test/regress/expected/subscription.out
index 91ba8ab95a..4fcbf7efe9 100644
--- a/src/test/regress/expected/subscription.out
+++ b/src/test/regress/expected/subscription.out
@@ -82,7 +82,7 @@ ERROR: invalid connection string syntax: missing "=" after "foobar" in connecti
testsub | regress_subscription_user | f | {testpub} | off | dbname=doesnotexist
(1 row)
-ALTER SUBSCRIPTION testsub SET PUBLICATION testpub2, testpub3 SKIP REFRESH;
+ALTER SUBSCRIPTION testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false);
ALTER SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist2';
ALTER SUBSCRIPTION testsub SET (slot_name = 'newname');
-- fail
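
The dedicated SKIP REFRESH keyword is gone in favor of the generic options list, so suppressing the table sync now reads like any other subscription option. A one-line sketch with a hypothetical subscription and publication:

    ALTER SUBSCRIPTION mysub SET PUBLICATION mypub WITH (refresh = false);
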
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index 083e7da055..025e56663f 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -1507,6 +1507,35 @@ drop table self_ref_trigger;
drop function self_ref_trigger_ins_func();
drop function self_ref_trigger_del_func();
--
+-- Check that index creation (or DDL in general) is prohibited in a trigger
+--
+create table trigger_ddl_table (
+ col1 integer,
+ col2 integer
+);
+create function trigger_ddl_func() returns trigger as $$
+begin
+ alter table trigger_ddl_table add primary key (col1);
+ return new;
+end$$ language plpgsql;
+create trigger trigger_ddl_func before insert on trigger_ddl_table for each row
+ execute procedure trigger_ddl_func();
+insert into trigger_ddl_table values (1, 42); -- fail
+ERROR: cannot ALTER TABLE "trigger_ddl_table" because it is being used by active queries in this session
+CONTEXT: SQL statement "alter table trigger_ddl_table add primary key (col1)"
+PL/pgSQL function trigger_ddl_func() line 3 at SQL statement
+create or replace function trigger_ddl_func() returns trigger as $$
+begin
+ create index on trigger_ddl_table (col2);
+ return new;
+end$$ language plpgsql;
+insert into trigger_ddl_table values (1, 42); -- fail
+ERROR: cannot CREATE INDEX "trigger_ddl_table" because it is being used by active queries in this session
+CONTEXT: SQL statement "create index on trigger_ddl_table (col2)"
+PL/pgSQL function trigger_ddl_func() line 3 at SQL statement
+drop table trigger_ddl_table;
+drop function trigger_ddl_func();
+--
-- Verify behavior of before and after triggers with INSERT...ON CONFLICT
-- DO UPDATE
--
diff --git a/src/test/regress/expected/tsrf.out b/src/test/regress/expected/tsrf.out
index c8ae361e75..b691abe714 100644
--- a/src/test/regress/expected/tsrf.out
+++ b/src/test/regress/expected/tsrf.out
@@ -41,6 +41,11 @@ SELECT generate_series(1, generate_series(1, 3));
3
(6 rows)
+-- but we've traditionally rejected the same in FROM
+SELECT * FROM generate_series(1, generate_series(1, 3));
+ERROR: set-returning functions must appear at top level of FROM
+LINE 1: SELECT * FROM generate_series(1, generate_series(1, 3));
+ ^
-- srf, with two SRF arguments
SELECT generate_series(generate_series(1,3), generate_series(2, 4));
generate_series
@@ -190,16 +195,29 @@ SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa, unnest
a | 4
(2 rows)
+-- SRFs are not allowed if they'd need to be conditionally executed
+SELECT q1, case when q1 > 0 then generate_series(1,3) else 0 end FROM int8_tbl;
+ERROR: set-returning functions are not allowed in CASE
+LINE 1: SELECT q1, case when q1 > 0 then generate_series(1,3) else 0...
+ ^
+HINT: You might be able to move the set-returning function into a LATERAL FROM item.
+SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl;
+ERROR: set-returning functions are not allowed in COALESCE
+LINE 1: SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl;
+ ^
+HINT: You might be able to move the set-returning function into a LATERAL FROM item.
-- SRFs are not allowed in aggregate arguments
SELECT min(generate_series(1, 3)) FROM few;
-ERROR: set-valued function called in context that cannot accept a set
+ERROR: aggregate function calls cannot contain set-returning function calls
LINE 1: SELECT min(generate_series(1, 3)) FROM few;
^
+HINT: You might be able to move the set-returning function into a LATERAL FROM item.
-- SRFs are not allowed in window function arguments, either
SELECT min(generate_series(1, 3)) OVER() FROM few;
-ERROR: set-valued function called in context that cannot accept a set
+ERROR: window function calls cannot contain set-returning function calls
LINE 1: SELECT min(generate_series(1, 3)) OVER() FROM few;
^
+HINT: You might be able to move the set-returning function into a LATERAL FROM item.
-- SRFs are normally computed after window functions
SELECT id,lag(id) OVER(), count(*) OVER(), generate_series(1,3) FROM few;
id | lag | count | generate_series
@@ -425,9 +443,17 @@ SELECT int4mul(generate_series(1,2), 10);
20
(2 rows)
+SELECT generate_series(1,3) IS DISTINCT FROM 2;
+ ?column?
+----------
+ t
+ f
+ t
+(3 rows)
+
-- but SRFs in function RTEs must be at top level (annoying restriction)
SELECT * FROM int4mul(generate_series(1,2), 10);
-ERROR: set-valued function called in context that cannot accept a set
+ERROR: set-returning functions must appear at top level of FROM
LINE 1: SELECT * FROM int4mul(generate_series(1,2), 10);
^
-- DISTINCT ON is evaluated before tSRF evaluation if SRF is not
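
Every one of the new tsrf errors carries the same HINT, and the LATERAL rewrite it suggests is mechanical: move the set-returning function into the FROM list and express the condition as a join qual. A minimal sketch of the conditional-execution case, reusing the test's few table:

    SELECT few.dataa, g.x
    FROM few
    LEFT JOIN LATERAL generate_series(1, 3) AS g(x) ON few.dataa = 'a';
    -- rows with dataa <> 'a' come out once with x = NULL rather than
    -- expanding the SRF
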
diff --git a/src/test/regress/expected/updatable_views.out b/src/test/regress/expected/updatable_views.out
index f77e246f17..97aeee83e3 100644
--- a/src/test/regress/expected/updatable_views.out
+++ b/src/test/regress/expected/updatable_views.out
@@ -2426,6 +2426,42 @@ alter table pt11 add a int not null;
alter table pt1 attach partition pt11 for values from (2) to (5);
alter table pt attach partition pt1 for values from (1, 2) to (1, 10);
create view ptv as select * from pt;
+select events & 4 != 0 AS upd,
+ events & 8 != 0 AS ins,
+ events & 16 != 0 AS del
+ from pg_catalog.pg_relation_is_updatable('pt'::regclass, false) t(events);
+ upd | ins | del
+-----+-----+-----
+ t | t | t
+(1 row)
+
+select pg_catalog.pg_column_is_updatable('pt'::regclass, 1::smallint, false);
+ pg_column_is_updatable
+------------------------
+ t
+(1 row)
+
+select pg_catalog.pg_column_is_updatable('pt'::regclass, 2::smallint, false);
+ pg_column_is_updatable
+------------------------
+ t
+(1 row)
+
+select table_name, is_updatable, is_insertable_into
+ from information_schema.views where table_name = 'ptv';
+ table_name | is_updatable | is_insertable_into
+------------+--------------+--------------------
+ ptv | YES | YES
+(1 row)
+
+select table_name, column_name, is_updatable
+ from information_schema.columns where table_name = 'ptv' order by column_name;
+ table_name | column_name | is_updatable
+------------+-------------+--------------
+ ptv | a | YES
+ ptv | b | YES
+(2 rows)
+
insert into ptv values (1, 2);
select tableoid::regclass, * from pt;
tableoid | a | b
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
index be155dacf4..3d2bccddf8 100644
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -2169,9 +2169,14 @@ ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
-- delete the faulting row and also add a constraint to skip the scan
DELETE FROM part_5_a WHERE a NOT IN (3);
-ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)), ALTER a SET NOT NULL;
+ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 5);
ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
+ALTER TABLE list_parted2 DETACH PARTITION part_5;
+ALTER TABLE part_5 DROP CONSTRAINT check_a;
+-- scan should again be skipped, even though NOT NULL is now a column property
+ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)), ALTER a SET NOT NULL;
+ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
-- check that the table being attached is not already a partition
ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
@@ -2204,6 +2209,16 @@ SELECT attinhcount, attislocal FROM pg_attribute WHERE attrelid = 'part_3_4'::re
SELECT coninhcount, conislocal FROM pg_constraint WHERE conrelid = 'part_3_4'::regclass AND conname = 'check_a';
DROP TABLE part_3_4;
+-- check that a detached partition is not dropped when the partitioned table is dropped
+CREATE TABLE range_parted2 (
+ a int
+) PARTITION BY RANGE(a);
+CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) TO (100);
+ALTER TABLE range_parted2 DETACH PARTITION part_rp;
+DROP TABLE range_parted2;
+SELECT * FROM part_rp;
+DROP TABLE part_rp;
+
-- Check ALTER TABLE commands for partitioned tables and partitions
-- cannot add/drop column to/from *only* the parent
diff --git a/src/test/regress/sql/insert.sql b/src/test/regress/sql/insert.sql
index ffcf5bf821..80e1da4be7 100644
--- a/src/test/regress/sql/insert.sql
+++ b/src/test/regress/sql/insert.sql
@@ -332,3 +332,13 @@ select tableoid::regclass::text, * from mcrparted order by 1;
-- cleanup
drop table mcrparted;
+
+-- check that a BR trigger can't make a partition contain violating rows
+create table brtrigpartcon (a int, b text) partition by list (a);
+create table brtrigpartcon1 partition of brtrigpartcon for values in (1);
+create or replace function brtrigpartcon1trigf() returns trigger as $$begin new.a := 2; return new; end$$ language plpgsql;
+create trigger brtrigpartcon1trig before insert on brtrigpartcon1 for each row execute procedure brtrigpartcon1trigf();
+insert into brtrigpartcon values (1, 'hi there');
+insert into brtrigpartcon1 values (1, 'hi there');
+drop table brtrigpartcon;
+drop function brtrigpartcon1trigf();
diff --git a/src/test/regress/sql/plpgsql.sql b/src/test/regress/sql/plpgsql.sql
index 7bc74fbebf..f5cc3265ae 100644
--- a/src/test/regress/sql/plpgsql.sql
+++ b/src/test/regress/sql/plpgsql.sql
@@ -4875,3 +4875,42 @@ ALTER TABLE alter_table_under_transition_tables
DROP column name;
UPDATE alter_table_under_transition_tables
SET id = id;
+
+--
+-- Check type parsing and record fetching from partitioned tables
+--
+
+CREATE TABLE partitioned_table (a int, b text) PARTITION BY LIST (a);
+CREATE TABLE pt_part1 PARTITION OF partitioned_table FOR VALUES IN (1);
+CREATE TABLE pt_part2 PARTITION OF partitioned_table FOR VALUES IN (2);
+
+INSERT INTO partitioned_table VALUES (1, 'Row 1');
+INSERT INTO partitioned_table VALUES (2, 'Row 2');
+
+CREATE OR REPLACE FUNCTION get_from_partitioned_table(partitioned_table.a%TYPE)
+RETURNS partitioned_table AS $$
+DECLARE
+ a_val partitioned_table.a%TYPE;
+ result partitioned_table%ROWTYPE;
+BEGIN
+ a_val := $1;
+ SELECT * INTO result FROM partitioned_table WHERE a = a_val;
+ RETURN result;
+END; $$ LANGUAGE plpgsql;
+
+SELECT * FROM get_from_partitioned_table(1) AS t;
+
+CREATE OR REPLACE FUNCTION list_partitioned_table()
+RETURNS SETOF partitioned_table.a%TYPE AS $$
+DECLARE
+ row partitioned_table%ROWTYPE;
+ a_val partitioned_table.a%TYPE;
+BEGIN
+ FOR row IN SELECT * FROM partitioned_table ORDER BY a LOOP
+ a_val := row.a;
+ RETURN NEXT a_val;
+ END LOOP;
+ RETURN;
+END; $$ LANGUAGE plpgsql;
+
+SELECT * FROM list_partitioned_table() AS t;
diff --git a/src/test/regress/sql/rangefuncs.sql b/src/test/regress/sql/rangefuncs.sql
index ece8609b4f..7f0cf9eb91 100644
--- a/src/test/regress/sql/rangefuncs.sql
+++ b/src/test/regress/sql/rangefuncs.sql
@@ -600,15 +600,6 @@ select * from foobar(); -- fail
drop function foobar();
--- check behavior when a function's input sometimes returns a set (bug #8228)
-
-SELECT *,
- lower(CASE WHEN id = 2 THEN (regexp_matches(str, '^0*([1-9]\d+)$'))[1]
- ELSE str
- END)
-FROM
- (VALUES (1,''), (2,'0000000049404'), (3,'FROM 10000000876')) v(id, str);
-
-- check whole-row-Var handling in nested lateral functions (bug #11703)
create function extractq2(t int8_tbl) returns int8 as $$
diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql
index 46a54797a4..2d23d64ed8 100644
--- a/src/test/regress/sql/rowsecurity.sql
+++ b/src/test/regress/sql/rowsecurity.sql
@@ -312,6 +312,157 @@ SET row_security TO OFF;
SELECT * FROM t1 WHERE f_leak(b);
EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b);
+--
+-- Partitioned Tables
+--
+
+SET SESSION AUTHORIZATION regress_rls_alice;
+
+CREATE TABLE part_document (
+ did int,
+ cid int,
+ dlevel int not null,
+ dauthor name,
+ dtitle text
+) PARTITION BY RANGE (cid);
+GRANT ALL ON part_document TO public;
+
+-- Create partitions for document categories
+CREATE TABLE part_document_fiction PARTITION OF part_document FOR VALUES FROM (11) TO (12);
+CREATE TABLE part_document_satire PARTITION OF part_document FOR VALUES FROM (55) TO (56);
+CREATE TABLE part_document_nonfiction PARTITION OF part_document FOR VALUES FROM (99) TO (100);
+
+GRANT ALL ON part_document_fiction TO public;
+GRANT ALL ON part_document_satire TO public;
+GRANT ALL ON part_document_nonfiction TO public;
+
+INSERT INTO part_document VALUES
+ ( 1, 11, 1, 'regress_rls_bob', 'my first novel'),
+ ( 2, 11, 2, 'regress_rls_bob', 'my second novel'),
+ ( 3, 99, 2, 'regress_rls_bob', 'my science textbook'),
+ ( 4, 55, 1, 'regress_rls_bob', 'my first satire'),
+ ( 5, 99, 2, 'regress_rls_bob', 'my history book'),
+ ( 6, 11, 1, 'regress_rls_carol', 'great science fiction'),
+ ( 7, 99, 2, 'regress_rls_carol', 'great technology book'),
+ ( 8, 55, 2, 'regress_rls_carol', 'great satire'),
+ ( 9, 11, 1, 'regress_rls_dave', 'awesome science fiction'),
+ (10, 99, 2, 'regress_rls_dave', 'awesome technology book');
+
+ALTER TABLE part_document ENABLE ROW LEVEL SECURITY;
+
+-- Create policy on parent
+-- user's security level must be higher than or equal to document's
+CREATE POLICY pp1 ON part_document AS PERMISSIVE
+ USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user));
+
+-- Dave is only allowed to see cid < 55
+CREATE POLICY pp1r ON part_document AS RESTRICTIVE TO regress_rls_dave
+ USING (cid < 55);
+
+\d+ part_document
+SELECT * FROM pg_policies WHERE schemaname = 'regress_rls_schema' AND tablename like '%part_document%' ORDER BY policyname;
+
+-- viewpoint from regress_rls_bob
+SET SESSION AUTHORIZATION regress_rls_bob;
+SET row_security TO ON;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+
+-- viewpoint from regress_rls_carol
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+
+-- viewpoint from regress_rls_dave
+SET SESSION AUTHORIZATION regress_rls_dave;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+
+-- pp1 ERROR
+INSERT INTO part_document VALUES (100, 11, 5, 'regress_rls_dave', 'testing pp1'); -- fail
+-- pp1r ERROR
+INSERT INTO part_document VALUES (100, 99, 1, 'regress_rls_dave', 'testing pp1r'); -- fail
+
+-- Show that the RLS policy does not apply to direct inserts into children
+-- This should fail with RLS POLICY pp1r violation.
+INSERT INTO part_document VALUES (100, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- fail
+-- But this should succeed.
+INSERT INTO part_document_satire VALUES (100, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- success
+-- We still cannot see the row using the parent
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+-- But we can if we look directly
+SELECT * FROM part_document_satire WHERE f_leak(dtitle) ORDER BY did;
+
+-- Turn on RLS and create policy on child to show RLS is checked before constraints
+SET SESSION AUTHORIZATION regress_rls_alice;
+ALTER TABLE part_document_satire ENABLE ROW LEVEL SECURITY;
+CREATE POLICY pp3 ON part_document_satire AS RESTRICTIVE
+ USING (cid < 55);
+-- This should fail with RLS violation now.
+SET SESSION AUTHORIZATION regress_rls_dave;
+INSERT INTO part_document_satire VALUES (101, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- fail
+-- And now we cannot see directly into the partition either, due to RLS
+SELECT * FROM part_document_satire WHERE f_leak(dtitle) ORDER BY did;
+-- The parent looks the same as before
+-- viewpoint from regress_rls_dave
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+
+-- viewpoint from regress_rls_carol
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+
+-- only owner can change policies
+ALTER POLICY pp1 ON part_document USING (true); -- fail
+DROP POLICY pp1 ON part_document; -- fail
+
+SET SESSION AUTHORIZATION regress_rls_alice;
+ALTER POLICY pp1 ON part_document USING (dauthor = current_user);
+
+-- viewpoint from regress_rls_bob again
+SET SESSION AUTHORIZATION regress_rls_bob;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+
+-- viewpoint from regress_rls_carol again
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did;
+
+EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle);
+
+-- database superuser does bypass RLS policy when enabled
+RESET SESSION AUTHORIZATION;
+SET row_security TO ON;
+SELECT * FROM part_document ORDER BY did;
+SELECT * FROM part_document_satire ORDER BY did;
+
+-- database non-superuser with bypass privilege can bypass RLS policy when disabled
+SET SESSION AUTHORIZATION regress_rls_exempt_user;
+SET row_security TO OFF;
+SELECT * FROM part_document ORDER BY did;
+SELECT * FROM part_document_satire ORDER BY did;
+
+-- RLS policies do not apply to the table owner, even with row_security enabled.
+SET SESSION AUTHORIZATION regress_rls_alice;
+SET row_security TO ON;
+SELECT * FROM part_document ORDER BY did;
+SELECT * FROM part_document_satire ORDER BY did;
+
+-- With row_security disabled, other users get an ERROR.
+SET SESSION AUTHORIZATION regress_rls_dave;
+SET row_security TO OFF;
+SELECT * FROM part_document ORDER BY did;
+SELECT * FROM part_document_satire ORDER BY did;
+
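Note on the preceding blocks: SET row_security TO OFF does not silently
disable the policies.  A superuser, a table owner, or a role with BYPASSRLS
is simply exempt, while for any other role a query that would be filtered by
a policy is expected to fail outright rather than return unfiltered rows
(sketch, hypothetical table):

    SET row_security TO OFF;
    SELECT * FROM protected_t;  -- as an ordinary role: ERROR, not all rows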
+-- Check behavior with a policy that uses a SubPlan not an InitPlan.
+SET SESSION AUTHORIZATION regress_rls_alice;
+SET row_security TO ON;
+CREATE POLICY pp3 ON part_document AS RESTRICTIVE
+ USING ((SELECT dlevel <= seclv FROM uaccount WHERE pguser = current_user));
+
+SET SESSION AUTHORIZATION regress_rls_carol;
+INSERT INTO part_document VALUES (100, 11, 5, 'regress_rls_carol', 'testing pp3'); -- fail
+
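Note: the pp3 qual above is correlated (the subquery references the outer
column dlevel), so the planner must re-evaluate it for every row as a
SubPlan.  An uncorrelated form would instead be planned once as an InitPlan,
e.g. (sketch; pp3_init is a hypothetical name):

    CREATE POLICY pp3_init ON part_document AS RESTRICTIVE
      USING ((SELECT seclv FROM uaccount WHERE pguser = current_user) >= dlevel);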
----- Dependencies -----
SET SESSION AUTHORIZATION regress_rls_alice;
SET row_security TO ON;
@@ -1593,6 +1744,7 @@ CREATE ROLE regress_rls_dob_role1;
CREATE ROLE regress_rls_dob_role2;
CREATE TABLE dob_t1 (c1 int);
+CREATE TABLE dob_t2 (c1 int) PARTITION BY RANGE (c1);
CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1 USING (true);
DROP OWNED BY regress_rls_dob_role1;
@@ -1602,6 +1754,10 @@ CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role2 USING
DROP OWNED BY regress_rls_dob_role1;
DROP POLICY p1 ON dob_t1; -- should succeed
+CREATE POLICY p1 ON dob_t2 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true);
+DROP OWNED BY regress_rls_dob_role1;
+DROP POLICY p1 ON dob_t2; -- should succeed
+
DROP USER regress_rls_dob_role1;
DROP USER regress_rls_dob_role2;
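Note: the dob_t2 case added above checks that DROP OWNED BY behaves the same
for a partitioned table as for a plain one: it removes the role from the
policy's role list, and the policy itself survives as long as another role
remains, which is why the subsequent DROP POLICY still succeeds.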
diff --git a/src/test/regress/sql/subscription.sql b/src/test/regress/sql/subscription.sql
index 4b694a357e..36fa1bbac8 100644
--- a/src/test/regress/sql/subscription.sql
+++ b/src/test/regress/sql/subscription.sql
@@ -61,7 +61,7 @@ ALTER SUBSCRIPTION testsub CONNECTION 'foobar';
\dRs+
-ALTER SUBSCRIPTION testsub SET PUBLICATION testpub2, testpub3 SKIP REFRESH;
+ALTER SUBSCRIPTION testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false);
ALTER SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist2';
ALTER SUBSCRIPTION testsub SET (slot_name = 'newname');
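Note: this tracks the PostgreSQL 10 change that folded the one-off SKIP
REFRESH / REFRESH WITH (...) clauses into the generic options list.  Side by
side (hypothetical subscription and publication names):

    ALTER SUBSCRIPTION mysub SET PUBLICATION pub_a, pub_b SKIP REFRESH;            -- old
    ALTER SUBSCRIPTION mysub SET PUBLICATION pub_a, pub_b WITH (refresh = false);  -- new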
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index a3942a247a..5d95794ebf 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -1184,6 +1184,37 @@ drop function self_ref_trigger_ins_func();
drop function self_ref_trigger_del_func();
--
+-- Check that index creation (or DDL in general) is prohibited in a trigger
+--
+
+create table trigger_ddl_table (
+ col1 integer,
+ col2 integer
+);
+
+create function trigger_ddl_func() returns trigger as $$
+begin
+ alter table trigger_ddl_table add primary key (col1);
+ return new;
+end$$ language plpgsql;
+
+create trigger trigger_ddl_func before insert on trigger_ddl_table for each row
+ execute procedure trigger_ddl_func();
+
+insert into trigger_ddl_table values (1, 42); -- fail
+
+create or replace function trigger_ddl_func() returns trigger as $$
+begin
+ create index on trigger_ddl_table (col2);
+ return new;
+end$$ language plpgsql;
+
+insert into trigger_ddl_table values (1, 42); -- fail
+
+drop table trigger_ddl_table;
+drop function trigger_ddl_func();
+
+--
-- Verify behavior of before and after triggers with INSERT...ON CONFLICT
-- DO UPDATE
--
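Note: both inserts above are expected to fail because the trigger function
attempts DDL (ALTER TABLE ... ADD PRIMARY KEY, then CREATE INDEX) on the very
table the triggering INSERT currently has open; PostgreSQL rejects schema
changes on a relation that is in use by an active query in the same session.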
diff --git a/src/test/regress/sql/tsrf.sql b/src/test/regress/sql/tsrf.sql
index 417e78c53d..0be7530beb 100644
--- a/src/test/regress/sql/tsrf.sql
+++ b/src/test/regress/sql/tsrf.sql
@@ -14,6 +14,9 @@ SELECT generate_series(1, 2), generate_series(1,4);
-- srf, with SRF argument
SELECT generate_series(1, generate_series(1, 3));
+-- but we've traditionally rejected the same in FROM
+SELECT * FROM generate_series(1, generate_series(1, 3));
+
-- srf, with two SRF arguments
SELECT generate_series(generate_series(1,3), generate_series(2, 4));
@@ -51,6 +54,10 @@ SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1, 2 HAVING count
SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa ORDER BY 2;
SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa, unnest('{1,1,3}'::int[]) ORDER BY 2;
+-- SRFs are not allowed if they'd need to be conditionally executed
+SELECT q1, case when q1 > 0 then generate_series(1,3) else 0 end FROM int8_tbl;
+SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl;
+
-- SRFs are not allowed in aggregate arguments
SELECT min(generate_series(1, 3)) FROM few;
@@ -91,6 +98,7 @@ VALUES(1, generate_series(1,2));
-- We allow tSRFs that are not at top level
SELECT int4mul(generate_series(1,2), 10);
+SELECT generate_series(1,3) IS DISTINCT FROM 2;
-- but SRFs in function RTEs must be at top level (annoying restriction)
SELECT * FROM int4mul(generate_series(1,2), 10);
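Note: while a nested SRF in FROM is rejected above, the supported spelling is
LATERAL, which lets the inner SRF reference the outer one (sketch):

    SELECT g2.x
    FROM generate_series(1, 3) AS g1(n),
         LATERAL generate_series(1, g1.n) AS g2(x);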
diff --git a/src/test/regress/sql/updatable_views.sql b/src/test/regress/sql/updatable_views.sql
index f5620bf327..a1d0ccfb99 100644
--- a/src/test/regress/sql/updatable_views.sql
+++ b/src/test/regress/sql/updatable_views.sql
@@ -1125,6 +1125,16 @@ alter table pt1 attach partition pt11 for values from (2) to (5);
alter table pt attach partition pt1 for values from (1, 2) to (1, 10);
create view ptv as select * from pt;
+select events & 4 != 0 AS upd,
+ events & 8 != 0 AS ins,
+ events & 16 != 0 AS del
+ from pg_catalog.pg_relation_is_updatable('pt'::regclass, false) t(events);
+select pg_catalog.pg_column_is_updatable('pt'::regclass, 1::smallint, false);
+select pg_catalog.pg_column_is_updatable('pt'::regclass, 2::smallint, false);
+select table_name, is_updatable, is_insertable_into
+ from information_schema.views where table_name = 'ptv';
+select table_name, column_name, is_updatable
+ from information_schema.columns where table_name = 'ptv' order by column_name;
insert into ptv values (1, 2);
select tableoid::regclass, * from pt;
create view ptv_wco as select * from pt where a = 0 with check option;
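Note: the bit values decoded above are pg_relation_is_updatable()'s event
mask: 4 = UPDATE, 8 = INSERT, 16 = DELETE.  To test all three at once one
could write (sketch):

    SELECT pg_catalog.pg_relation_is_updatable('pt'::regclass, false) & 28 = 28;  -- 28 = 4|8|16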
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index e5638d3322..f9cf5e4392 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -143,7 +143,7 @@ $oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
);
$node_subscriber->safe_psql('postgres',
-"ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)"
+"ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only WITH (copy_data = false)"
);
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index 70cd23b888..fc71ebe7ad 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -220,6 +220,10 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c
{
print $o "#define ENABLE_GSS 1\n";
}
+ if ($self->{options}->{icu})
+ {
+ print $o "#define USE_ICU 1\n";
+ }
if (my $port = $self->{options}->{"--with-pgport"})
{
print $o "#undef DEF_PGPORT\n";
@@ -523,10 +527,20 @@ sub AddProject
if ($self->{options}->{openssl})
{
$proj->AddIncludeDir($self->{options}->{openssl} . '\include');
- $proj->AddLibrary(
- $self->{options}->{openssl} . '\lib\VC\ssleay32.lib', 1);
- $proj->AddLibrary(
- $self->{options}->{openssl} . '\lib\VC\libeay32.lib', 1);
+ if (-e "$self->{options}->{openssl}/lib/VC/ssleay32MD.lib")
+ {
+ $proj->AddLibrary(
+ $self->{options}->{openssl} . '\lib\VC\ssleay32.lib', 1);
+ $proj->AddLibrary(
+ $self->{options}->{openssl} . '\lib\VC\libeay32.lib', 1);
+ }
+ else
+ {
+ $proj->AddLibrary(
+ $self->{options}->{openssl} . '\lib\ssleay32.lib', 1);
+ $proj->AddLibrary(
+ $self->{options}->{openssl} . '\lib\libeay32.lib', 1);
+ }
}
if ($self->{options}->{nls})
{
@@ -545,6 +559,22 @@ sub AddProject
$proj->AddIncludeDir($self->{options}->{iconv} . '\include');
$proj->AddLibrary($self->{options}->{iconv} . '\lib\iconv.lib');
}
+ if ($self->{options}->{icu})
+ {
+ $proj->AddIncludeDir($self->{options}->{icu} . '\include');
+ if ($self->{platform} eq 'Win32')
+ {
+ $proj->AddLibrary($self->{options}->{icu} . '\lib\icuin.lib');
+ $proj->AddLibrary($self->{options}->{icu} . '\lib\icuuc.lib');
+ $proj->AddLibrary($self->{options}->{icu} . '\lib\icudt.lib');
+ }
+ else
+ {
+ $proj->AddLibrary($self->{options}->{icu} . '\lib64\icuin.lib');
+ $proj->AddLibrary($self->{options}->{icu} . '\lib64\icuuc.lib');
+ $proj->AddLibrary($self->{options}->{icu} . '\lib64\icudt.lib');
+ }
+ }
if ($self->{options}->{xml})
{
$proj->AddIncludeDir($self->{options}->{xml} . '\include');
@@ -667,6 +697,7 @@ sub GetFakeConfigure
$cfg .= ' --with-libxml' if ($self->{options}->{xml});
$cfg .= ' --with-libxslt' if ($self->{options}->{xslt});
$cfg .= ' --with-gssapi' if ($self->{options}->{gss});
+ $cfg .= ' --with-icu' if ($self->{options}->{icu});
$cfg .= ' --with-tcl' if ($self->{options}->{tcl});
$cfg .= ' --with-perl' if ($self->{options}->{perl});
$cfg .= ' --with-python' if ($self->{options}->{python});
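Note: the new icu option is the MSVC counterpart of configure's --with-icu,
pointing at an ICU installation laid out with include\, lib\ (Win32) or
lib64\ (x64).  Once PostgreSQL 10 is built this way, ICU collations become
available, e.g. (sketch; locale name illustrative):

    CREATE COLLATION german_phonebook (provider = icu, locale = 'de-u-co-phonebk');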
diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl
index 93f7887075..4d69dc2a2e 100644
--- a/src/tools/msvc/config_default.pl
+++ b/src/tools/msvc/config_default.pl
@@ -15,6 +15,7 @@ our $config = {
ldap => 1, # --with-ldap
extraver => undef, # --with-extra-version=<string>
gss => undef, # --with-gssapi=<path>
+ icu => undef, # --with-icu=<path>
nls => undef, # --enable-nls=<path>
tap_tests => undef, # --enable-tap-tests
tcl => undef, # --with-tcl=<path>
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index 468a62d8aa..eeba30ec8b 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -178,12 +178,18 @@ sub tap_check
die "Tap tests not enabled in configuration"
unless $config->{tap_tests};
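+ # Pull a PROVE_FLAGS=... argument out of @_ before the directory
+ # argument is consumed below.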
+ my @flags;
+ foreach my $arg (0 .. $#_)
+ {
+ next unless $_[$arg] =~ /^PROVE_FLAGS=(.*)/;
+ @flags = split(/\s+/, $1);
+ splice(@_, $arg, 1);
+ last;
+ }
+
my $dir = shift;
chdir $dir;
- my @flags;
- @flags = split(/\s+/, $ENV{PROVE_FLAGS}) if exists $ENV{PROVE_FLAGS};
-
my @args = ("prove", @flags, "t/*.pl");
# adjust the environment for just this test
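Note: with this change vcregress.pl takes PROVE_FLAGS from its own command
line instead of the environment, splicing the argument out before the test
directory is consumed; presumably invoked along the lines of
"vcregress taptest PROVE_FLAGS=--timer <test-dir>" (invocation sketch, not
taken from the patch).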