author      Bernd Helmle    2011-04-21 14:56:30 +0000
committer   Bernd Helmle    2011-04-21 14:56:30 +0000
commit      b112eb8096d2f96cf74de1ad1bd3b5d637fd38f8 (patch)
tree        9c4e8a8862bd095decec2fec99dd7d55952490a8
parent      640c5d6ad1583f06b14d108e7e162c5c70275867 (diff)
parent      a0e8df527ec24e8dba98f295c0e2ab6ccf3e5d2c (diff)
Merge branch 'master' of ../bernd_pg into notnull_constraint
Conflicts: src/backend/commands/tablecmds.c
-rw-r--r--  .gitignore | 6
-rw-r--r--  contrib/pg_test_fsync/pg_test_fsync.c | 2
-rw-r--r--  contrib/pg_upgrade/check.c | 48
-rw-r--r--  doc/src/sgml/install-windows.sgml | 25
-rw-r--r--  doc/src/sgml/plpgsql.sgml | 32
-rw-r--r--  doc/src/sgml/ref/alter_table.sgml | 26
-rw-r--r--  doc/src/sgml/ref/alter_type.sgml | 2
-rw-r--r--  doc/src/sgml/ref/create_type.sgml | 9
-rw-r--r--  src/backend/access/index/indexam.c | 12
-rw-r--r--  src/backend/access/transam/xlog.c | 6
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 2
-rw-r--r--  src/backend/catalog/aclchk.c | 2
-rw-r--r--  src/backend/catalog/dependency.c | 13
-rw-r--r--  src/backend/catalog/heap.c | 2
-rw-r--r--  src/backend/catalog/index.c | 153
-rw-r--r--  src/backend/commands/cluster.c | 12
-rw-r--r--  src/backend/commands/indexcmds.c | 6
-rw-r--r--  src/backend/commands/tablecmds.c | 304
-rw-r--r--  src/backend/commands/variable.c | 30
-rw-r--r--  src/backend/nodes/copyfuncs.c | 1
-rw-r--r--  src/backend/nodes/equalfuncs.c | 1
-rw-r--r--  src/backend/nodes/outfuncs.c | 1
-rw-r--r--  src/backend/nodes/readfuncs.c | 1
-rw-r--r--  src/backend/optimizer/path/costsize.c | 6
-rw-r--r--  src/backend/optimizer/plan/planner.c | 29
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 1
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 2
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 131
-rw-r--r--  src/backend/optimizer/util/tlist.c | 34
-rw-r--r--  src/backend/parser/analyze.c | 125
-rw-r--r--  src/backend/parser/gram.y | 17
-rw-r--r--  src/backend/parser/parse_relation.c | 24
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 7
-rw-r--r--  src/backend/storage/lmgr/lock.c | 2
-rw-r--r--  src/backend/utils/mb/mbutils.c | 12
-rw-r--r--  src/bin/initdb/initdb.c | 111
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 68
-rw-r--r--  src/bin/psql/describe.c | 34
-rw-r--r--  src/include/catalog/catversion.h | 2
-rw-r--r--  src/include/catalog/index.h | 12
-rw-r--r--  src/include/commands/tablecmds.h | 2
-rw-r--r--  src/include/nodes/parsenodes.h | 5
-rw-r--r--  src/include/optimizer/tlist.h | 1
-rw-r--r--  src/include/parser/parse_relation.h | 1
-rw-r--r--  src/interfaces/ecpg/compatlib/.gitignore | 1
-rw-r--r--  src/interfaces/ecpg/ecpglib/.gitignore | 1
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/.gitignore | 1
-rw-r--r--  src/interfaces/libpq/.gitignore | 2
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 2
-rw-r--r--  src/pl/plpgsql/src/gram.y | 47
-rw-r--r--  src/pl/plpgsql/src/pl_scanner.c | 1
-rw-r--r--  src/pl/plpython/expected/README | 3
-rw-r--r--  src/pl/plpython/expected/plpython_unicode.out | 1
-rw-r--r--  src/pl/plpython/expected/plpython_unicode_0.out | 50
-rw-r--r--  src/pl/plpython/expected/plpython_unicode_3.out | 1
-rw-r--r--  src/pl/plpython/plpython.c | 24
-rw-r--r--  src/pl/plpython/sql/plpython_unicode.sql | 2
-rw-r--r--  src/test/regress/expected/alter_table.out | 149
-rw-r--r--  src/test/regress/expected/collate.linux.utf8.out | 46
-rw-r--r--  src/test/regress/expected/collate.out | 8
-rw-r--r--  src/test/regress/expected/typed_table.out | 2
-rw-r--r--  src/test/regress/pg_regress.c | 8
-rw-r--r--  src/test/regress/sql/alter_table.sql | 34
-rw-r--r--  src/test/regress/sql/collate.linux.utf8.sql | 28
-rw-r--r--  src/test/regress/sql/collate.sql | 3
-rw-r--r--  src/test/regress/sql/typed_table.sql | 2
-rw-r--r--  src/tools/msvc/config_default.pl | 2
-rw-r--r--  src/tools/msvc/vcregress.pl | 11
68 files changed, 1368 insertions, 383 deletions
diff --git a/.gitignore b/.gitignore
index 3f11f2e8aa..81c4d5e862 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,8 +17,14 @@ objfiles.txt
*.gcov
*.gcov.out
lcov.info
+*.vcproj
+win32ver.rc
# Local excludes in root directory
/GNUmakefile
/config.log
/config.status
+/pgsql.sln
+/pgsql.sln.cache
+/Debug/
+/Release/
diff --git a/contrib/pg_test_fsync/pg_test_fsync.c b/contrib/pg_test_fsync/pg_test_fsync.c
index 305b3d0723..2b2e292022 100644
--- a/contrib/pg_test_fsync/pg_test_fsync.c
+++ b/contrib/pg_test_fsync/pg_test_fsync.c
@@ -359,9 +359,11 @@ test_open_syncs(void)
static void
test_open_sync(const char *msg, int writes_size)
{
+#ifdef OPEN_SYNC_FLAG
int tmpfile,
ops,
writes;
+#endif
printf(LABEL_FORMAT, msg);
fflush(stdout);
diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c
index 747244072d..d1dc5dbeaa 100644
--- a/contrib/pg_upgrade/check.c
+++ b/contrib/pg_upgrade/check.c
@@ -11,7 +11,8 @@
static void set_locale_and_encoding(ClusterInfo *cluster);
-static void check_new_db_is_empty(void);
+static void check_new_cluster_is_empty(void);
+static void check_old_cluster_has_new_cluster_dbs(void);
static void check_locale_and_encoding(ControlData *oldctrl,
ControlData *newctrl);
static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster);
@@ -112,7 +113,10 @@ check_new_cluster(void)
{
set_locale_and_encoding(&new_cluster);
- check_new_db_is_empty();
+ get_db_and_rel_infos(&new_cluster);
+
+ check_new_cluster_is_empty();
+ check_old_cluster_has_new_cluster_dbs();
check_loadable_libraries();
@@ -341,12 +345,9 @@ check_locale_and_encoding(ControlData *oldctrl,
static void
-check_new_db_is_empty(void)
+check_new_cluster_is_empty(void)
{
int dbnum;
- bool found = false;
-
- get_db_and_rel_infos(&new_cluster);
for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
{
@@ -358,15 +359,38 @@ check_new_db_is_empty(void)
{
/* pg_largeobject and its index should be skipped */
if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
- {
- found = true;
- break;
- }
+ pg_log(PG_FATAL, "New cluster database \"%s\" is not empty\n",
+ new_cluster.dbarr.dbs[dbnum].db_name);
}
}
- if (found)
- pg_log(PG_FATAL, "New cluster is not empty; exiting\n");
+}
+
+
+/*
+ * If someone removes the 'postgres' database from the old cluster and
+ * the new cluster has a 'postgres' database, the number of databases
+ * will not match. We actually could upgrade such a setup, but it would
+ * violate the 1-to-1 mapping of database counts, so we throw an error
+ * instead. We would detect this as a database count mismatch during
+ * upgrade, but we want to detect it during the check phase and report
+ * the database name.
+ */
+static void
+check_old_cluster_has_new_cluster_dbs(void)
+{
+ int old_dbnum, new_dbnum;
+
+ for (new_dbnum = 0; new_dbnum < new_cluster.dbarr.ndbs; new_dbnum++)
+ {
+ for (old_dbnum = 0; old_dbnum < old_cluster.dbarr.ndbs; old_dbnum++)
+ if (strcmp(old_cluster.dbarr.dbs[old_dbnum].db_name,
+ new_cluster.dbarr.dbs[new_dbnum].db_name) == 0)
+ break;
+ if (old_dbnum == old_cluster.dbarr.ndbs)
+ pg_log(PG_FATAL, "New cluster database \"%s\" does not exist in the old cluster\n",
+ new_cluster.dbarr.dbs[new_dbnum].db_name);
+ }
}
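
As a rough illustration of what the new check compares (this query is only a manual approximation, not part of the patch), one can list the database names in each cluster and verify that every new-cluster database also exists in the old cluster:

-- Manual approximation of check_old_cluster_has_new_cluster_dbs():
-- run against both the old and the new cluster and compare the output.
SELECT datname FROM pg_database ORDER BY datname;
-- A database present only in the new cluster makes the check phase fail with
-- "New cluster database ... does not exist in the old cluster".
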
diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml
index f6d38c1a67..3c9d90ef33 100644
--- a/doc/src/sgml/install-windows.sgml
+++ b/doc/src/sgml/install-windows.sgml
@@ -19,7 +19,7 @@
<para>
There are several different ways of building PostgreSQL on
<productname>Windows</productname>. The simplest way to build with
- Microsoft tools is to install a modern version of the
+ Microsoft tools is to install a supported version of the
<productname>Microsoft Platform SDK</productname> and use the included
compiler. It is also possible to build with the full
<productname>Microsoft Visual C++ 2005 or 2008</productname>. In some cases
@@ -74,7 +74,7 @@
<para>
PostgreSQL can be built using the Visual C++ compiler suite from Microsoft.
These compilers can be either from <productname>Visual Studio</productname>,
- <productname>Visual Studio Express</productname> or recent versions of the
+ <productname>Visual Studio Express</productname> or some versions of the
<productname>Platform SDK</productname>. If you do not already have a
<productname>Visual Studio</productname> environment set up, the easiest
way is to use the compilers in the <productname>Platform SDK</productname>,
@@ -87,6 +87,14 @@
<productname>Visual Studio 2008</productname>. When using the Platform SDK
only, or when building for 64-bit Windows, only
<productname>Visual Studio 2008</productname> is supported.
+ <productname>Visual Studio 2010</productname> is not yet supported.
+ </para>
+
+ <para>
+ When building using the <productname>Platform SDK</productname>, versions
+ 6.0 to 7.0 of the SDK are supported. Older or newer versions will not work.
+ In particular, versions from 7.0a and later will not work, since
+ they include compilers from <productname>Visual Studio 2010</productname>.
</para>
<para>
@@ -193,7 +201,18 @@ $ENV{PATH}=$ENV{PATH} . ';c:\some\where\bison\bin';
Bison can be downloaded from <ulink url="http://gnuwin32.sourceforge.net"></>.
Flex can be downloaded from
<ulink url="http://www.postgresql.org/ftp/misc/winflex/"></>.
- </para></listitem>
+ </para>
+
+ <note>
+ <para>
+ The Bison distribution from GnuWin32 appears to have a bug that
+ causes Bison to malfunction when installed in a directory with
+ spaces in the name, such as the default location on English
+ installations <filename>C:\Program Files\GnuWin32</filename>.
+ Consider installing into <filename>C:\GnuWin32</filename> instead.
+ </para>
+ </note>
+ </listitem>
</varlistentry>
<varlistentry>
diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml
index a04ab13912..1866e43e0e 100644
--- a/doc/src/sgml/plpgsql.sgml
+++ b/doc/src/sgml/plpgsql.sgml
@@ -328,15 +328,17 @@ arow RECORD;
<para>
The general syntax of a variable declaration is:
<synopsis>
-<replaceable>name</replaceable> <optional> CONSTANT </optional> <replaceable>type</replaceable> <optional> NOT NULL </optional> <optional> { DEFAULT | := } <replaceable>expression</replaceable> </optional>;
+<replaceable>name</replaceable> <optional> CONSTANT </optional> <replaceable>type</replaceable> <optional> COLLATE <replaceable>collation_name</replaceable> </optional> <optional> NOT NULL </optional> <optional> { DEFAULT | := } <replaceable>expression</replaceable> </optional>;
</synopsis>
The <literal>DEFAULT</> clause, if given, specifies the initial value assigned
to the variable when the block is entered. If the <literal>DEFAULT</> clause
is not given then the variable is initialized to the
<acronym>SQL</acronym> null value.
The <literal>CONSTANT</> option prevents the variable from being
- assigned to, so that its value will remain constant for the duration of
- the block.
+ assigned to after initialization, so that its value will remain constant
+ for the duration of the block.
+ The <literal>COLLATE</> option specifies a collation to use for the
+ variable (see <xref linkend="plpgsql-declaration-collation">).
If <literal>NOT NULL</>
is specified, an assignment of a null value results in a run-time
error. All variables declared as <literal>NOT NULL</>
@@ -768,9 +770,23 @@ $$ LANGUAGE plpgsql;
</para>
<para>
- Explicit <literal>COLLATE</> clauses can be written inside a function
- if it is desired to force a particular collation to be used regardless
- of what the function is called with. For example,
+ A local variable of a collatable data type can have a different collation
+ associated with it by including the <literal>COLLATE</> option in its
+ declaration, for example
+
+<programlisting>
+DECLARE
+ local_a text COLLATE "en_US";
+</programlisting>
+
+ This option overrides the collation that would otherwise be
+ given to the variable according to the rules above.
+ </para>
+
+ <para>
+ Also, of course explicit <literal>COLLATE</> clauses can be written inside
+ a function if it is desired to force a particular collation to be used in
+ a particular operation. For example,
<programlisting>
CREATE FUNCTION less_than_c(a text, b text) RETURNS boolean AS $$
@@ -779,6 +795,10 @@ BEGIN
END;
$$ LANGUAGE plpgsql;
</programlisting>
+
+ This overrides the collations associated with the table columns,
+ parameters, or local variables used in the expression, just as would
+ happen in a plain SQL command.
</para>
</sect2>
</sect1>
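
As a minimal sketch of the documented syntax (function and collation names are invented for illustration, and the "en_US" collation must exist in the database):

-- Combines the new COLLATE declaration option with an explicit COLLATE clause.
CREATE FUNCTION collate_demo(a text, b text) RETURNS boolean AS $$
DECLARE
    local_a text COLLATE "en_US";     -- variable carries its own collation
BEGIN
    local_a := a;
    RETURN local_a < b COLLATE "C";   -- explicit clause overrides for this comparison
END;
$$ LANGUAGE plpgsql;
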
diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index c1948624d7..4e02438483 100644
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -63,6 +63,8 @@ ALTER TABLE <replaceable class="PARAMETER">name</replaceable>
RESET ( <replaceable class="PARAMETER">storage_parameter</replaceable> [, ... ] )
INHERIT <replaceable class="PARAMETER">parent_table</replaceable>
NO INHERIT <replaceable class="PARAMETER">parent_table</replaceable>
+ OF <replaceable class="PARAMETER">type_name</replaceable>
+ NOT OF
OWNER TO <replaceable class="PARAMETER">new_owner</replaceable>
SET TABLESPACE <replaceable class="PARAMETER">new_tablespace</replaceable>
@@ -491,6 +493,30 @@ ALTER TABLE <replaceable class="PARAMETER">name</replaceable>
</varlistentry>
<varlistentry>
+ <term><literal>OF <replaceable class="PARAMETER">type_name</replaceable></literal></term>
+ <listitem>
+ <para>
+ This form links the table to a composite type as though <command>CREATE
+ TABLE OF</> had formed it. The table's list of column names and types
+ must precisely match that of the composite type; the presence of
+ an <literal>oid</> system column is permitted to differ. The table must
+ not inherit from any other table. These restrictions ensure
+ that <command>CREATE TABLE OF</> would permit an equivalent table
+ definition.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>NOT OF</literal></term>
+ <listitem>
+ <para>
+ This form dissociates a typed table from its type.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><literal>OWNER</literal></term>
<listitem>
<para>
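
A minimal sketch of the two new forms described above (object names are invented for illustration):

-- Link a table to a composite type, then dissociate it again.
CREATE TYPE person_type AS (name text, age integer);
CREATE TABLE persons (name text, age integer);   -- columns must match the type exactly
ALTER TABLE persons OF person_type;              -- OF: the table becomes a typed table
ALTER TABLE persons NOT OF;                      -- NOT OF: back to an ordinary table
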
diff --git a/doc/src/sgml/ref/alter_type.sgml b/doc/src/sgml/ref/alter_type.sgml
index e889ffbc35..a417c0d01b 100644
--- a/doc/src/sgml/ref/alter_type.sgml
+++ b/doc/src/sgml/ref/alter_type.sgml
@@ -122,7 +122,7 @@ ALTER TYPE <replaceable class="PARAMETER">name</replaceable> ADD VALUE <replacea
<listitem>
<para>
Automatically propagate the operation to typed tables of the
- type being altered.
+ type being altered, and their descendants.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/create_type.sgml b/doc/src/sgml/ref/create_type.sgml
index f5527d8981..98e1764b1e 100644
--- a/doc/src/sgml/ref/create_type.sgml
+++ b/doc/src/sgml/ref/create_type.sgml
@@ -22,7 +22,7 @@ PostgreSQL documentation
<refsynopsisdiv>
<synopsis>
CREATE TYPE <replaceable class="parameter">name</replaceable> AS
- ( [ <replaceable class="PARAMETER">attribute_name</replaceable> <replaceable class="PARAMETER">data_type</replaceable> [, ... ] ] )
+ ( [ <replaceable class="PARAMETER">attribute_name</replaceable> <replaceable class="PARAMETER">data_type</replaceable> [ COLLATE <replaceable>collation</replaceable> ] [, ... ] ] )
CREATE TYPE <replaceable class="parameter">name</replaceable> AS ENUM
( [ '<replaceable class="parameter">label</replaceable>' [, ... ] ] )
@@ -77,11 +77,12 @@ CREATE TYPE <replaceable class="parameter">name</replaceable>
The first form of <command>CREATE TYPE</command>
creates a composite type.
The composite type is specified by a list of attribute names and data types.
- This is essentially the same as the row type
+ An attribute's collation can be specified too, if its data type is
+ collatable. A composite type is essentially the same as the row type
of a table, but using <command>CREATE TYPE</command> avoids the need to
create an actual table when all that is wanted is to define a type.
- A stand-alone composite type is useful as the argument or return type of a
- function.
+ A stand-alone composite type is useful, for example, as the argument or
+ return type of a function.
</para>
</refsect2>
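
A small sketch of the extended syntax (the type name is invented and the collation is assumed to exist on the system):

-- Composite type with a per-attribute collation.
CREATE TYPE localized_item AS (
    label    text COLLATE "de_DE",
    quantity integer
);
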
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 6e0db79517..08de8b4f88 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -65,6 +65,7 @@
#include "access/relscan.h"
#include "access/transam.h"
#include "access/xact.h"
+#include "catalog/index.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
@@ -76,12 +77,21 @@
/* ----------------------------------------------------------------
* macros used in index_ routines
+ *
+ * Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
+ * to check that we don't try to scan or do retail insertions into an index
+ * that is currently being rebuilt or pending rebuild. This helps to catch
+ * things that don't work when reindexing system catalogs. The assertion
+ * doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
+ * when calling the index AM's ambuild routine, and there is no reason for
+ * ambuild to call its subsidiary routines through this file.
* ----------------------------------------------------------------
*/
#define RELATION_CHECKS \
( \
AssertMacro(RelationIsValid(indexRelation)), \
- AssertMacro(PointerIsValid(indexRelation->rd_am)) \
+ AssertMacro(PointerIsValid(indexRelation->rd_am)), \
+ AssertMacro(!ReindexIsProcessingIndex(RelationGetRelid(indexRelation))) \
)
#define SCAN_CHECKS \
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 742d8551f6..b0e4c41d6f 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -5583,21 +5583,21 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
if (record->xl_rmid != RM_XACT_ID && record->xl_rmid != RM_XLOG_ID)
return false;
record_info = record->xl_info & ~XLR_INFO_MASK;
- if (record_info == XLOG_XACT_COMMIT)
+ if (record->xl_rmid == RM_XACT_ID && record_info == XLOG_XACT_COMMIT)
{
xl_xact_commit *recordXactCommitData;
recordXactCommitData = (xl_xact_commit *) XLogRecGetData(record);
recordXtime = recordXactCommitData->xact_time;
}
- else if (record_info == XLOG_XACT_ABORT)
+ else if (record->xl_rmid == RM_XACT_ID && record_info == XLOG_XACT_ABORT)
{
xl_xact_abort *recordXactAbortData;
recordXactAbortData = (xl_xact_abort *) XLogRecGetData(record);
recordXtime = recordXactAbortData->xact_time;
}
- else if (record_info == XLOG_RESTORE_POINT)
+ else if (record->xl_rmid == RM_XLOG_ID && record_info == XLOG_RESTORE_POINT)
{
xl_restore_point *recordRestorePointData;
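
For context, the records being disambiguated here are those written by named restore points; a hedged usage sketch (the restore point name is invented):

-- Create a named restore point; recovery can later stop at it by setting
-- recovery_target_name = 'before_schema_change' in recovery.conf.
SELECT pg_create_restore_point('before_schema_change');
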
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 528ea23d4c..fc093ccc4b 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -1134,7 +1134,7 @@ build_indices(void)
heap = heap_open(ILHead->il_heap, NoLock);
ind = index_open(ILHead->il_ind, NoLock);
- index_build(heap, ind, ILHead->il_info, false);
+ index_build(heap, ind, ILHead->il_info, false, false);
index_close(ind, NoLock);
heap_close(heap, NoLock);
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 693b634398..db58ec29f6 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -1299,7 +1299,7 @@ RemoveRoleFromObjectACL(Oid roleid, Oid classid, Oid objid)
case DEFACLOBJ_RELATION:
iacls.objtype = ACL_OBJECT_RELATION;
break;
- case ACL_OBJECT_SEQUENCE:
+ case DEFACLOBJ_SEQUENCE:
iacls.objtype = ACL_OBJECT_SEQUENCE;
break;
case DEFACLOBJ_FUNCTION:
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index ec9bb48c63..b3ed946530 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -1671,10 +1671,15 @@ find_expr_references_walker(Node *node,
/*
* Add whole-relation refs for each plain relation mentioned in the
* subquery's rtable, as well as refs for any datatypes and collations
- * used in a RECORD function's output. (Note: query_tree_walker takes
- * care of recursing into RTE_FUNCTION RTEs, subqueries, etc, so no
- * need to do that here. But keep it from looking at join alias
- * lists.)
+ * used in a RECORD function's output.
+ *
+ * Note: query_tree_walker takes care of recursing into RTE_FUNCTION
+ * RTEs, subqueries, etc, so no need to do that here. But keep it
+ * from looking at join alias lists.
+ *
+ * Note: we don't need to worry about collations mentioned in
+ * RTE_VALUES or RTE_CTE RTEs, because those must just duplicate
+ * collations referenced in other parts of the Query.
*/
foreach(lc, query->rtable)
{
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 1f074d6853..a63e0aa655 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -2613,7 +2613,7 @@ RelationTruncateIndexes(Relation heapRelation)
/* Initialize the index and rebuild */
/* Note: we do not need to re-establish pkey setting */
- index_build(heapRelation, currentIndex, indexInfo, false);
+ index_build(heapRelation, currentIndex, indexInfo, false, true);
/* We're done with this index */
index_close(currentIndex, NoLock);
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 1bf74b3d4f..c79402c72a 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1061,7 +1061,7 @@ index_create(Relation heapRelation,
}
else
{
- index_build(heapRelation, indexRelation, indexInfo, isprimary);
+ index_build(heapRelation, indexRelation, indexInfo, isprimary, false);
}
/*
@@ -1680,8 +1680,11 @@ index_update_stats(Relation rel,
* entries of the index and heap relation as needed, using statistics
* returned by ambuild as well as data passed by the caller.
*
- * Note: when reindexing an existing index, isprimary can be false;
- * the index is already properly marked and need not be re-marked.
+ * isprimary tells whether to mark the index as a primary-key index.
+ * isreindex indicates we are recreating a previously-existing index.
+ *
+ * Note: when reindexing an existing index, isprimary can be false even if
+ * the index is a PK; it's already properly marked and need not be re-marked.
*
* Note: before Postgres 8.2, the passed-in heap and index Relations
* were automatically closed by this routine. This is no longer the case.
@@ -1691,7 +1694,8 @@ void
index_build(Relation heapRelation,
Relation indexRelation,
IndexInfo *indexInfo,
- bool isprimary)
+ bool isprimary,
+ bool isreindex)
{
RegProcedure procedure;
IndexBuildResult *stats;
@@ -1762,8 +1766,15 @@ index_build(Relation heapRelation,
* If we found any potentially broken HOT chains, mark the index as not
* being usable until the current transaction is below the event horizon.
* See src/backend/access/heap/README.HOT for discussion.
- */
- if (indexInfo->ii_BrokenHotChain)
+ *
+ * However, when reindexing an existing index, we should do nothing here.
+ * Any HOT chains that are broken with respect to the index must predate
+ * the index's original creation, so there is no need to change the
+ * index's usability horizon. Moreover, we *must not* try to change
+ * the index's pg_index entry while reindexing pg_index itself, and this
+ * optimization nicely prevents that.
+ */
+ if (indexInfo->ii_BrokenHotChain && !isreindex)
{
Oid indexId = RelationGetRelid(indexRelation);
Relation pg_index;
@@ -1778,6 +1789,9 @@ index_build(Relation heapRelation,
elog(ERROR, "cache lookup failed for index %u", indexId);
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
+ /* If it's a new index, indcheckxmin shouldn't be set ... */
+ Assert(!indexForm->indcheckxmin);
+
indexForm->indcheckxmin = true;
simple_heap_update(pg_index, &indexTuple->t_self, indexTuple);
CatalogUpdateIndexes(pg_index, indexTuple);
@@ -1826,8 +1840,9 @@ index_build(Relation heapRelation,
*
* A side effect is to set indexInfo->ii_BrokenHotChain to true if we detect
* any potentially broken HOT chains. Currently, we set this if there are
- * any RECENTLY_DEAD entries in a HOT chain, without trying very hard to
- * detect whether they're really incompatible with the chain tip.
+ * any RECENTLY_DEAD or DELETE_IN_PROGRESS entries in a HOT chain, without
+ * trying very hard to detect whether they're really incompatible with the
+ * chain tip.
*/
double
IndexBuildHeapScan(Relation heapRelation,
@@ -1939,8 +1954,14 @@ IndexBuildHeapScan(Relation heapRelation,
* buffer continuously while visiting the page, so no pruning
* operation can occur either.
*
+ * Also, although our opinions about tuple liveness could change while
+ * we scan the page (due to concurrent transaction commits/aborts),
+ * the chain root locations won't, so this info doesn't need to be
+ * rebuilt after waiting for another transaction.
+ *
* Note the implied assumption that there is no more than one live
- * tuple per HOT-chain ...
+ * tuple per HOT-chain --- else we could create more than one index
+ * entry pointing to the same root tuple.
*/
if (scan->rs_cblock != root_blkno)
{
@@ -1994,11 +2015,6 @@ IndexBuildHeapScan(Relation heapRelation,
* the live tuple at the end of the HOT-chain. Since this
* breaks semantics for pre-existing snapshots, mark the
* index as unusable for them.
- *
- * If we've already decided that the index will be unsafe
- * for old snapshots, we may as well stop indexing
- * recently-dead tuples, since there's no longer any
- * point.
*/
if (HeapTupleIsHotUpdated(heapTuple))
{
@@ -2006,8 +2022,6 @@ IndexBuildHeapScan(Relation heapRelation,
/* mark the index as unsafe for old snapshots */
indexInfo->ii_BrokenHotChain = true;
}
- else if (indexInfo->ii_BrokenHotChain)
- indexIt = false;
else
indexIt = true;
/* In any case, exclude the tuple from unique-checking */
@@ -2057,7 +2071,8 @@ IndexBuildHeapScan(Relation heapRelation,
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
- * Similar situation to INSERT_IN_PROGRESS case.
+ * As with INSERT_IN_PROGRESS case, this is unexpected
+ * unless it's our own deletion or a system catalog.
*/
Assert(!(heapTuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI));
xwait = HeapTupleHeaderGetXmax(heapTuple->t_data);
@@ -2072,8 +2087,17 @@ IndexBuildHeapScan(Relation heapRelation,
* the tuple is dead could lead to missing a
* uniqueness violation. In that case we wait for the
* deleting transaction to finish and check again.
+ *
+ * Also, if it's a HOT-updated tuple, we should not
+ * index it but rather the live tuple at the end of
+ * the HOT-chain. However, the deleting transaction
+ * could abort, possibly leaving this tuple as live
+ * after all, in which case it has to be indexed. The
+ * only way to know what to do is to wait for the
+ * deleting transaction to finish and check again.
*/
- if (checking_uniqueness)
+ if (checking_uniqueness ||
+ HeapTupleIsHotUpdated(heapTuple))
{
/*
* Must drop the lock on the buffer before we wait
@@ -2082,22 +2106,34 @@ IndexBuildHeapScan(Relation heapRelation,
XactLockTableWait(xwait);
goto recheck;
}
- }
- /*
- * Otherwise, we have to treat these tuples just like
- * RECENTLY_DELETED ones.
- */
- if (HeapTupleIsHotUpdated(heapTuple))
+ /*
+ * Otherwise index it but don't check for uniqueness,
+ * the same as a RECENTLY_DEAD tuple.
+ */
+ indexIt = true;
+ }
+ else if (HeapTupleIsHotUpdated(heapTuple))
{
+ /*
+ * It's a HOT-updated tuple deleted by our own xact.
+ * We can assume the deletion will commit (else the
+ * index contents don't matter), so treat the same
+ * as RECENTLY_DEAD HOT-updated tuples.
+ */
indexIt = false;
/* mark the index as unsafe for old snapshots */
indexInfo->ii_BrokenHotChain = true;
}
- else if (indexInfo->ii_BrokenHotChain)
- indexIt = false;
else
+ {
+ /*
+ * It's a regular tuple deleted by our own xact.
+ * Index it but don't check for uniqueness, the same
+ * as a RECENTLY_DEAD tuple.
+ */
indexIt = true;
+ }
/* In any case, exclude the tuple from unique-checking */
tupleIsAlive = false;
break;
@@ -2767,7 +2803,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
/* Initialize the index and rebuild */
/* Note: we do not need to re-establish pkey setting */
- index_build(heapRelation, iRel, indexInfo, false);
+ index_build(heapRelation, iRel, indexInfo, false, true);
}
PG_CATCH();
{
@@ -2786,7 +2822,22 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
*
* We can also reset indcheckxmin, because we have now done a
* non-concurrent index build, *except* in the case where index_build
- * found some still-broken HOT chains.
+ * found some still-broken HOT chains. If it did, we normally leave
+ * indcheckxmin alone (note that index_build won't have changed it,
+ * because this is a reindex). But if the index was invalid or not ready
+ * and there were broken HOT chains, it seems best to force indcheckxmin
+ * true, because the normal argument that the HOT chains couldn't conflict
+ * with the index is suspect for an invalid index.
+ *
+ * Note that it is important to not update the pg_index entry if we don't
+ * have to, because updating it will move the index's usability horizon
+ * (recorded as the tuple's xmin value) if indcheckxmin is true. We don't
+ * really want REINDEX to move the usability horizon forward ever, but we
+ * have no choice if we are to fix indisvalid or indisready. Of course,
+ * clearing indcheckxmin eliminates the issue, so we're happy to do that
+ * if we can. Another reason for caution here is that while reindexing
+ * pg_index itself, we must not try to update it. We assume that
+ * pg_index's indexes will always have these flags in their clean state.
*/
if (!skipped_constraint)
{
@@ -2801,10 +2852,12 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
if (!indexForm->indisvalid || !indexForm->indisready ||
(indexForm->indcheckxmin && !indexInfo->ii_BrokenHotChain))
{
- indexForm->indisvalid = true;
- indexForm->indisready = true;
if (!indexInfo->ii_BrokenHotChain)
indexForm->indcheckxmin = false;
+ else if (!indexForm->indisvalid || !indexForm->indisready)
+ indexForm->indcheckxmin = true;
+ indexForm->indisvalid = true;
+ indexForm->indisready = true;
simple_heap_update(pg_index, &indexTuple->t_self, indexTuple);
CatalogUpdateIndexes(pg_index, indexTuple);
}
@@ -2821,29 +2874,33 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
* reindex_relation - This routine is used to recreate all indexes
* of a relation (and optionally its toast relation too, if any).
*
- * "flags" can include REINDEX_SUPPRESS_INDEX_USE and REINDEX_CHECK_CONSTRAINTS.
+ * "flags" is a bitmask that can include any combination of these bits:
+ *
+ * REINDEX_REL_PROCESS_TOAST: if true, process the toast table too (if any).
*
- * If flags has REINDEX_SUPPRESS_INDEX_USE, the relation was just completely
+ * REINDEX_REL_SUPPRESS_INDEX_USE: if true, the relation was just completely
* rebuilt by an operation such as VACUUM FULL or CLUSTER, and therefore its
* indexes are inconsistent with it. This makes things tricky if the relation
* is a system catalog that we might consult during the reindexing. To deal
* with that case, we mark all of the indexes as pending rebuild so that they
* won't be trusted until rebuilt. The caller is required to call us *without*
- * having made the rebuilt versions visible by doing CommandCounterIncrement;
+ * having made the rebuilt table visible by doing CommandCounterIncrement;
* we'll do CCI after having collected the index list. (This way we can still
* use catalog indexes while collecting the list.)
*
- * To avoid deadlocks, VACUUM FULL or CLUSTER on a system catalog must omit the
- * REINDEX_CHECK_CONSTRAINTS flag. REINDEX should be used to rebuild an index
- * if constraint inconsistency is suspected. For optimal performance, other
- * callers should include the flag only after transforming the data in a manner
- * that risks a change in constraint validity.
+ * REINDEX_REL_CHECK_CONSTRAINTS: if true, recheck unique and exclusion
+ * constraint conditions, else don't. To avoid deadlocks, VACUUM FULL or
+ * CLUSTER on a system catalog must omit this flag. REINDEX should be used to
+ * rebuild an index if constraint inconsistency is suspected. For optimal
+ * performance, other callers should include the flag only after transforming
+ * the data in a manner that risks a change in constraint validity.
*
- * Returns true if any indexes were rebuilt. Note that a
- * CommandCounterIncrement will occur after each index rebuild.
+ * Returns true if any indexes were rebuilt (including toast table's index
+ * when relevant). Note that a CommandCounterIncrement will occur after each
+ * index rebuild.
*/
bool
-reindex_relation(Oid relid, bool toast_too, int flags)
+reindex_relation(Oid relid, int flags)
{
Relation rel;
Oid toast_relid;
@@ -2899,7 +2956,7 @@ reindex_relation(Oid relid, bool toast_too, int flags)
List *doneIndexes;
ListCell *indexId;
- if (flags & REINDEX_SUPPRESS_INDEX_USE)
+ if (flags & REINDEX_REL_SUPPRESS_INDEX_USE)
{
/* Suppress use of all the indexes until they are rebuilt */
SetReindexPending(indexIds);
@@ -2920,11 +2977,11 @@ reindex_relation(Oid relid, bool toast_too, int flags)
if (is_pg_class)
RelationSetIndexList(rel, doneIndexes, InvalidOid);
- reindex_index(indexOid, !(flags & REINDEX_CHECK_CONSTRAINTS));
+ reindex_index(indexOid, !(flags & REINDEX_REL_CHECK_CONSTRAINTS));
CommandCounterIncrement();
- if (flags & REINDEX_SUPPRESS_INDEX_USE)
+ if (flags & REINDEX_REL_SUPPRESS_INDEX_USE)
RemoveReindexPending(indexOid);
if (is_pg_class)
@@ -2952,12 +3009,10 @@ reindex_relation(Oid relid, bool toast_too, int flags)
/*
* If the relation has a secondary toast rel, reindex that too while we
- * still hold the lock on the master table. There's never a reason to
- * reindex the toast table right after rebuilding the heap.
+ * still hold the lock on the master table.
*/
- Assert(!(toast_too && (flags & REINDEX_SUPPRESS_INDEX_USE)));
- if (toast_too && OidIsValid(toast_relid))
- result |= reindex_relation(toast_relid, false, flags);
+ if ((flags & REINDEX_REL_PROCESS_TOAST) && OidIsValid(toast_relid))
+ result |= reindex_relation(toast_relid, flags);
return result;
}
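
A quick way to observe the catalog flags this code adjusts (the table name is hypothetical; the query only inspects state and is not part of the patch):

-- Inspect the pg_index flags touched by reindex_index().
SELECT indexrelid::regclass, indisvalid, indisready, indcheckxmin
FROM pg_index
WHERE indrelid = 'my_table'::regclass;

REINDEX TABLE my_table;   -- goes through reindex_relation(..., REINDEX_REL_PROCESS_TOAST)
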
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 2cc2aaa8f6..ff228b7d53 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -1398,11 +1398,17 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
* advantage to the other order anyway because this is all transactional,
* so no chance to reclaim disk space before commit. We do not need a
* final CommandCounterIncrement() because reindex_relation does it.
+ *
+ * Note: because index_build is called via reindex_relation, it will never
+ * set indcheckxmin true for the indexes. This is OK even though in some
+ * sense we are building new indexes rather than rebuilding existing ones,
+ * because the new heap won't contain any HOT chains at all, let alone
+ * broken ones, so it can't be necessary to set indcheckxmin.
*/
- reindex_flags = REINDEX_SUPPRESS_INDEX_USE;
+ reindex_flags = REINDEX_REL_SUPPRESS_INDEX_USE;
if (check_constraints)
- reindex_flags |= REINDEX_CHECK_CONSTRAINTS;
- reindex_relation(OIDOldHeap, false, reindex_flags);
+ reindex_flags |= REINDEX_REL_CHECK_CONSTRAINTS;
+ reindex_relation(OIDOldHeap, reindex_flags);
/* Destroy new heap with old filenode */
object.classId = RelationRelationId;
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 05e8234a0f..53a6aafbbf 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -505,7 +505,7 @@ DefineIndex(RangeVar *heapRelation,
indexInfo->ii_BrokenHotChain = false;
/* Now build the index */
- index_build(rel, indexRelation, indexInfo, primary);
+ index_build(rel, indexRelation, indexInfo, primary, false);
/* Close both the relations, but keep the locks */
heap_close(rel, NoLock);
@@ -1566,7 +1566,7 @@ ReindexTable(RangeVar *relation)
ReleaseSysCache(tuple);
- if (!reindex_relation(heapOid, true, 0))
+ if (!reindex_relation(heapOid, REINDEX_REL_PROCESS_TOAST))
ereport(NOTICE,
(errmsg("table \"%s\" has no indexes",
relation->relname)));
@@ -1679,7 +1679,7 @@ ReindexDatabase(const char *databaseName, bool do_system, bool do_user)
StartTransactionCommand();
/* functions in indexes may want a snapshot set */
PushActiveSnapshot(GetTransactionSnapshot());
- if (reindex_relation(relid, true, 0))
+ if (reindex_relation(relid, REINDEX_REL_PROCESS_TOAST))
ereport(NOTICE,
(errmsg("table \"%s.%s\" was reindexed",
get_namespace_name(get_rel_namespace(relid)),
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index e122a10618..29b539e185 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -81,6 +81,7 @@
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
+#include "utils/typcache.h"
/*
@@ -390,6 +391,9 @@ static void ATExecEnableDisableRule(Relation rel, char *rulename,
static void ATPrepAddInherit(Relation child_rel);
static void ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode);
static void ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode);
+static void drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid);
+static void ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode);
+static void ATExecDropOf(Relation rel, LOCKMODE lockmode);
static void ATExecGenericOptions(Relation rel, List *options);
static void copy_relation_data(SMgrRelation rel, SMgrRelation dst,
@@ -1184,7 +1188,7 @@ ExecuteTruncate(TruncateStmt *stmt)
/*
* Reconstruct the indexes to match, and we're done.
*/
- reindex_relation(heap_relid, true, 0);
+ reindex_relation(heap_relid, REINDEX_REL_PROCESS_TOAST);
}
}
@@ -1548,7 +1552,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
/*
* Yes, try to merge the two column definitions. They must
- * have the same type and typmod.
+ * have the same type, typmod, and collation.
*/
ereport(NOTICE,
(errmsg("merging multiple inherited definitions of column \"%s\"",
@@ -1789,7 +1793,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
/*
* Yes, try to merge the two column definitions. They must
- * have the same type and typmod.
+ * have the same type, typmod, and collation.
*/
ereport(NOTICE,
(errmsg("merging column \"%s\" with inherited definition",
@@ -2992,6 +2996,16 @@ AlterTableGetLockLevel(List *cmds)
break;
/*
+ * These subcommands affect implicit row type conversion. They
+ * have effects similar to CREATE/DROP CAST on queries. We
+ * don't provide for invalidating parse trees as a result of
+ * such changes. Do avoid concurrent pg_class updates, though.
+ */
+ case AT_AddOf:
+ case AT_DropOf:
+ cmd_lockmode = ShareUpdateExclusiveLock;
+
+ /*
* These subcommands affect general strategies for performance
* and maintenance, though don't change the semantic results
* from normal data reads and writes. Delaying an ALTER TABLE
@@ -3258,13 +3272,11 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
case AT_EnableAlwaysRule:
case AT_EnableReplicaRule:
case AT_DisableRule:
- ATSimplePermissions(rel, ATT_TABLE);
- /* These commands never recurse */
- /* No command-specific prep needed */
- pass = AT_PASS_MISC;
- break;
case AT_DropInherit: /* NO INHERIT */
+ case AT_AddOf: /* OF */
+ case AT_DropOf: /* NOT OF */
ATSimplePermissions(rel, ATT_TABLE);
+ /* These commands never recurse */
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
@@ -3535,6 +3547,12 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
case AT_DropInherit:
ATExecDropInherit(rel, (RangeVar *) cmd->def, lockmode);
break;
+ case AT_AddOf:
+ ATExecAddOf(rel, (TypeName *) cmd->def, lockmode);
+ break;
+ case AT_DropOf:
+ ATExecDropOf(rel, lockmode);
+ break;
case AT_GenericOptions:
ATExecGenericOptions(rel, (List *) cmd->def);
break;
@@ -4183,7 +4201,8 @@ ATSimpleRecursion(List **wqueue, Relation rel,
* ATTypedTableRecursion
*
* Propagate ALTER TYPE operations to the typed tables of that type.
- * Also check the RESTRICT/CASCADE behavior.
+ * Also check the RESTRICT/CASCADE behavior. Given CASCADE, also permit
+ * recursion to inheritance children of the typed tables.
*/
static void
ATTypedTableRecursion(List **wqueue, Relation rel, AlterTableCmd *cmd,
@@ -4205,7 +4224,7 @@ ATTypedTableRecursion(List **wqueue, Relation rel, AlterTableCmd *cmd,
childrel = relation_open(childrelid, lockmode);
CheckTableNotInUse(childrel, "ALTER TABLE");
- ATPrepCmd(wqueue, childrel, cmd, false, true, lockmode);
+ ATPrepCmd(wqueue, childrel, cmd, true, true, lockmode);
relation_close(childrel, NoLock);
}
}
@@ -4371,6 +4390,42 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
/*
+ * check_of_type
+ *
+ * Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it
+ * isn't suitable, throw an error. Currently, we require that the type
+ * originated with CREATE TYPE AS. We could support any row type, but doing so
+ * would require handling a number of extra corner cases in the DDL commands.
+ */
+void
+check_of_type(HeapTuple typetuple)
+{
+ Form_pg_type typ = (Form_pg_type) GETSTRUCT(typetuple);
+ bool typeOk = false;
+
+ if (typ->typtype == TYPTYPE_COMPOSITE)
+ {
+ Relation typeRelation;
+
+ Assert(OidIsValid(typ->typrelid));
+ typeRelation = relation_open(typ->typrelid, AccessShareLock);
+ typeOk = (typeRelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE);
+ /*
+ * Close the parent rel, but keep our AccessShareLock on it until xact
+ * commit. That will prevent someone else from deleting or ALTERing
+ * the type before the typed table creation/conversion commits.
+ */
+ relation_close(typeRelation, NoLock);
+ }
+ if (!typeOk)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("type %s is not a composite type",
+ format_type_be(HeapTupleGetOid(typetuple)))));
+}
+
+
+/*
* ALTER TABLE ADD COLUMN
*
* Adds an additional attribute to a relation making the assumption that
@@ -4449,7 +4504,7 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
bool new_constraint;
Oid ccollid;
- /* Child column must match by type */
+ /* Child column must match on type, typmod, and collation */
typenameTypeIdAndMod(NULL, colDef->typeName, &ctypeId, &ctypmod);
if (ctypeId != childatt->atttypid ||
ctypmod != childatt->atttypmod)
@@ -9175,7 +9230,7 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel,
attributeName);
if (HeapTupleIsValid(tuple))
{
- /* Check they are same type and typmod */
+ /* Check they are same type, typmod, and collation */
Form_pg_attribute childatt = (Form_pg_attribute) GETSTRUCT(tuple);
if (attribute->atttypid != childatt->atttypid ||
@@ -9186,6 +9241,17 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel,
RelationGetRelationName(child_rel),
attributeName)));
+ if (attribute->attcollation != childatt->attcollation)
+ ereport(ERROR,
+ (errcode(ERRCODE_COLLATION_MISMATCH),
+ errmsg("child table \"%s\" has different collation for column \"%s\"",
+ RelationGetRelationName(child_rel),
+ attributeName)));
+
+ /*
+ * Check child doesn't discard NOT NULL property. (Other
+ * constraints are checked elsewhere.)
+ */
if (attribute->attnotnull && !childatt->attnotnull)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
@@ -9376,9 +9442,9 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
ScanKeyData key[3];
HeapTuple inheritsTuple,
attributeTuple,
- constraintTuple,
- depTuple;
+ constraintTuple;
List *constraints;
+ List *connames;
bool found = false;
/*
@@ -9595,11 +9661,29 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
systable_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
- /*
- * Drop the dependency
- *
- * There's no convenient way to do this, so go trawling through pg_depend
- */
+ drop_parent_dependency(RelationGetRelid(rel),
+ RelationRelationId,
+ RelationGetRelid(parent_rel));
+
+ /* keep our lock on the parent relation until commit */
+ heap_close(parent_rel, NoLock);
+}
+
+/*
+ * Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
+ * INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
+ * heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
+ * be TypeRelationId). There's no convenient way to do this, so go trawling
+ * through pg_depend.
+ */
+static void
+drop_parent_dependency(Oid relid, Oid refclassid, Oid refobjid)
+{
+ Relation catalogRelation;
+ SysScanDesc scan;
+ ScanKeyData key[3];
+ HeapTuple depTuple;
+
catalogRelation = heap_open(DependRelationId, RowExclusiveLock);
ScanKeyInit(&key[0],
@@ -9609,7 +9693,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
ScanKeyInit(&key[1],
Anum_pg_depend_objid,
BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(rel)));
+ ObjectIdGetDatum(relid));
ScanKeyInit(&key[2],
Anum_pg_depend_objsubid,
BTEqualStrategyNumber, F_INT4EQ,
@@ -9622,8 +9706,8 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
{
Form_pg_depend dep = (Form_pg_depend) GETSTRUCT(depTuple);
- if (dep->refclassid == RelationRelationId &&
- dep->refobjid == RelationGetRelid(parent_rel) &&
+ if (dep->refclassid == refclassid &&
+ dep->refobjid == refobjid &&
dep->refobjsubid == 0 &&
dep->deptype == DEPENDENCY_NORMAL)
simple_heap_delete(catalogRelation, &depTuple->t_self);
@@ -9631,9 +9715,181 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
systable_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
+}
- /* keep our lock on the parent relation until commit */
- heap_close(parent_rel, NoLock);
+/*
+ * ALTER TABLE OF
+ *
+ * Attach a table to a composite type, as though it had been created with CREATE
+ * TABLE OF. All attname, atttypid, atttypmod and attcollation must match. The
+ * subject table must not have inheritance parents. These restrictions ensure
+ * that you cannot create a configuration impossible with CREATE TABLE OF alone.
+ */
+static void
+ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
+{
+ Oid relid = RelationGetRelid(rel);
+ Type typetuple;
+ Form_pg_type typ;
+ Oid typeid;
+ Relation inheritsRelation,
+ relationRelation;
+ SysScanDesc scan;
+ ScanKeyData key;
+ AttrNumber table_attno,
+ type_attno;
+ TupleDesc typeTupleDesc,
+ tableTupleDesc;
+ ObjectAddress tableobj,
+ typeobj;
+ HeapTuple classtuple;
+
+ /* Validate the type. */
+ typetuple = typenameType(NULL, ofTypename, NULL);
+ check_of_type(typetuple);
+ typ = (Form_pg_type) GETSTRUCT(typetuple);
+ typeid = HeapTupleGetOid(typetuple);
+
+ /* Fail if the table has any inheritance parents. */
+ inheritsRelation = heap_open(InheritsRelationId, AccessShareLock);
+ ScanKeyInit(&key,
+ Anum_pg_inherits_inhrelid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(relid));
+ scan = systable_beginscan(inheritsRelation, InheritsRelidSeqnoIndexId,
+ true, SnapshotNow, 1, &key);
+ if (HeapTupleIsValid(systable_getnext(scan)))
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("typed tables cannot inherit")));
+ systable_endscan(scan);
+ heap_close(inheritsRelation, AccessShareLock);
+
+ /*
+ * Check the tuple descriptors for compatibility. Unlike inheritance, we
+ * require that the order also match. However, attnotnull need not match.
+ * Also unlike inheritance, we do not require matching relhasoids.
+ */
+ typeTupleDesc = lookup_rowtype_tupdesc(typeid, -1);
+ tableTupleDesc = RelationGetDescr(rel);
+ table_attno = 1;
+ for (type_attno = 1; type_attno <= typeTupleDesc->natts; type_attno++)
+ {
+ Form_pg_attribute type_attr,
+ table_attr;
+ const char *type_attname,
+ *table_attname;
+
+ /* Get the next non-dropped type attribute. */
+ type_attr = typeTupleDesc->attrs[type_attno - 1];
+ if (type_attr->attisdropped)
+ continue;
+ type_attname = NameStr(type_attr->attname);
+
+ /* Get the next non-dropped table attribute. */
+ do
+ {
+ if (table_attno > tableTupleDesc->natts)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("table is missing column \"%s\"",
+ type_attname)));
+ table_attr = tableTupleDesc->attrs[table_attno++ - 1];
+ } while (table_attr->attisdropped);
+ table_attname = NameStr(table_attr->attname);
+
+ /* Compare name. */
+ if (strncmp(table_attname, type_attname, NAMEDATALEN) != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("table has column \"%s\" where type requires \"%s\"",
+ table_attname, type_attname)));
+
+ /* Compare type. */
+ if (table_attr->atttypid != type_attr->atttypid ||
+ table_attr->atttypmod != type_attr->atttypmod ||
+ table_attr->attcollation != type_attr->attcollation)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("table \"%s\" has different type for column \"%s\"",
+ RelationGetRelationName(rel), type_attname)));
+ }
+ DecrTupleDescRefCount(typeTupleDesc);
+
+ /* Any remaining columns at the end of the table had better be dropped. */
+ for (; table_attno <= tableTupleDesc->natts; table_attno++)
+ {
+ Form_pg_attribute table_attr = tableTupleDesc->attrs[table_attno - 1];
+ if (!table_attr->attisdropped)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("table has extra column \"%s\"",
+ NameStr(table_attr->attname))));
+ }
+
+ /* If the table was already typed, drop the existing dependency. */
+ if (rel->rd_rel->reloftype)
+ drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);
+
+ /* Record a dependency on the new type. */
+ tableobj.classId = RelationRelationId;
+ tableobj.objectId = relid;
+ tableobj.objectSubId = 0;
+ typeobj.classId = TypeRelationId;
+ typeobj.objectId = typeid;
+ typeobj.objectSubId = 0;
+ recordDependencyOn(&tableobj, &typeobj, DEPENDENCY_NORMAL);
+
+ /* Update pg_class.reloftype */
+ relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
+ classtuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
+ if (!HeapTupleIsValid(classtuple))
+ elog(ERROR, "cache lookup failed for relation %u", relid);
+ ((Form_pg_class) GETSTRUCT(classtuple))->reloftype = typeid;
+ simple_heap_update(relationRelation, &classtuple->t_self, classtuple);
+ CatalogUpdateIndexes(relationRelation, classtuple);
+ heap_freetuple(classtuple);
+ heap_close(relationRelation, RowExclusiveLock);
+
+ ReleaseSysCache(typetuple);
+}
+
+/*
+ * ALTER TABLE NOT OF
+ *
+ * Detach a typed table from its originating type. Just clear reloftype and
+ * remove the dependency.
+ */
+static void
+ATExecDropOf(Relation rel, LOCKMODE lockmode)
+{
+ Oid relid = RelationGetRelid(rel);
+ Relation relationRelation;
+ HeapTuple tuple;
+
+ if (!OidIsValid(rel->rd_rel->reloftype))
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is not a typed table",
+ RelationGetRelationName(rel))));
+
+ /*
+ * We don't bother to check ownership of the type --- ownership of the table
+ * is presumed enough rights. No lock required on the type, either.
+ */
+
+ drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);
+
+ /* Clear pg_class.reloftype */
+ relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
+ tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "cache lookup failed for relation %u", relid);
+ ((Form_pg_class) GETSTRUCT(tuple))->reloftype = InvalidOid;
+ simple_heap_update(relationRelation, &tuple->t_self, tuple);
+ CatalogUpdateIndexes(relationRelation, tuple);
+ heap_freetuple(tuple);
+ heap_close(relationRelation, RowExclusiveLock);
}
/*
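
The checks in ATExecAddOf() compare column names, types, typmods, and collations in order; a sketch of one mismatch they reject (object names invented for illustration):

-- The table has a column the type lacks, so the conversion is refused.
CREATE TYPE pair_type AS (a integer, b integer);
CREATE TABLE not_a_pair (a integer, b integer, c integer);
ALTER TABLE not_a_pair OF pair_type;
-- ERROR:  table has extra column "c"
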
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 5d0fbdfb40..9efd20f2bc 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -759,12 +759,16 @@ bool
check_client_encoding(char **newval, void **extra, GucSource source)
{
int encoding;
+ const char *canonical_name;
/* Look up the encoding by name */
encoding = pg_valid_client_encoding(*newval);
if (encoding < 0)
return false;
+ /* Get the canonical name (no aliases, uniform case) */
+ canonical_name = pg_encoding_to_char(encoding);
+
/*
* If we are not within a transaction then PrepareClientEncoding will not
* be able to look up the necessary conversion procs. If we are still
@@ -786,7 +790,7 @@ check_client_encoding(char **newval, void **extra, GucSource source)
/* Must be a genuine no-such-conversion problem */
GUC_check_errcode(ERRCODE_FEATURE_NOT_SUPPORTED);
GUC_check_errdetail("Conversion between %s and %s is not supported.",
- pg_encoding_to_char(encoding),
+ canonical_name,
GetDatabaseEncodingName());
}
else
@@ -798,13 +802,27 @@ check_client_encoding(char **newval, void **extra, GucSource source)
}
/*
- * Return the encoding's canonical name, and save its ID in *extra.
+ * Replace the user-supplied string with the encoding's canonical name.
+ * This gets rid of aliases and case-folding variations.
+ *
+ * XXX Although canonicalizing seems like a good idea in the abstract, it
+ * breaks pre-9.1 JDBC drivers, which expect that if they send "UNICODE"
+ * as the client_encoding setting then it will read back the same way.
+ * As a workaround, don't replace the string if it's "UNICODE". Remove
+ * that hack when pre-9.1 JDBC drivers are no longer in use.
*/
- free(*newval);
- *newval = strdup(pg_encoding_to_char(encoding));
- if (!*newval)
- return false;
+ if (strcmp(*newval, canonical_name) != 0 &&
+ strcmp(*newval, "UNICODE") != 0)
+ {
+ free(*newval);
+ *newval = strdup(canonical_name);
+ if (!*newval)
+ return false;
+ }
+ /*
+ * Save the encoding's ID in *extra, for use by assign_client_encoding.
+ */
*extra = malloc(sizeof(int));
if (!*extra)
return false;
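
The canonicalization and the UNICODE workaround described in the comment above are visible from any client session (a sketch; assumes a database that accepts UTF8 clients):

SET client_encoding = 'utf8';      -- alias gets canonicalized
SHOW client_encoding;              -- reports UTF8
SET client_encoding = 'UNICODE';   -- special-cased for pre-9.1 JDBC drivers
SHOW client_encoding;              -- reads back as UNICODE, not UTF8
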
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index c0d2294317..c9133ddfa1 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -1951,6 +1951,7 @@ _copyRangeTblEntry(RangeTblEntry *from)
COPY_NODE_FIELD(funccoltypmods);
COPY_NODE_FIELD(funccolcollations);
COPY_NODE_FIELD(values_lists);
+ COPY_NODE_FIELD(values_collations);
COPY_STRING_FIELD(ctename);
COPY_SCALAR_FIELD(ctelevelsup);
COPY_SCALAR_FIELD(self_reference);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index c811077563..3a0267c419 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -2310,6 +2310,7 @@ _equalRangeTblEntry(RangeTblEntry *a, RangeTblEntry *b)
COMPARE_NODE_FIELD(funccoltypmods);
COMPARE_NODE_FIELD(funccolcollations);
COMPARE_NODE_FIELD(values_lists);
+ COMPARE_NODE_FIELD(values_collations);
COMPARE_STRING_FIELD(ctename);
COMPARE_SCALAR_FIELD(ctelevelsup);
COMPARE_SCALAR_FIELD(self_reference);
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 47f3523366..681f5f85bc 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -2324,6 +2324,7 @@ _outRangeTblEntry(StringInfo str, RangeTblEntry *node)
break;
case RTE_VALUES:
WRITE_NODE_FIELD(values_lists);
+ WRITE_NODE_FIELD(values_collations);
break;
case RTE_CTE:
WRITE_STRING_FIELD(ctename);
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 5f1fd32b9f..22885147cf 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -1203,6 +1203,7 @@ _readRangeTblEntry(void)
break;
case RTE_VALUES:
READ_NODE_FIELD(values_lists);
+ READ_NODE_FIELD(values_collations);
break;
case RTE_CTE:
READ_STRING_FIELD(ctename);
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index e200dcf472..d345522866 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1704,10 +1704,10 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
innerendsel;
Path sort_path; /* dummy for result of cost_sort */
- /* Protect some assumptions below that rowcounts aren't zero */
- if (outer_path_rows <= 0)
+ /* Protect some assumptions below that rowcounts aren't zero or NaN */
+ if (outer_path_rows <= 0 || isnan(outer_path_rows))
outer_path_rows = 1;
- if (inner_path_rows <= 0)
+ if (inner_path_rows <= 0 || isnan(inner_path_rows))
inner_path_rows = 1;
if (!enable_mergejoin)
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 58a5bf8ece..4b0b633c03 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -3093,15 +3093,6 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
/* Build RelOptInfo */
rel = build_simple_rel(root, 1, RELOPT_BASEREL);
- /*
- * Rather than doing all the pushups that would be needed to use
- * set_baserel_size_estimates, just do a quick hack for rows and width.
- */
- rel->rows = rel->tuples;
- rel->width = get_relation_data_width(tableOid, NULL);
-
- root->total_table_pages = rel->pages;
-
/* Locate IndexOptInfo for the target index */
indexInfo = NULL;
foreach(lc, rel->indexlist)
@@ -3110,9 +3101,25 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
if (indexInfo->indexoid == indexOid)
break;
}
+
+ /*
+ * It's possible that get_relation_info did not generate an IndexOptInfo
+ * for the desired index; this could happen if it's not yet reached its
+ * indcheckxmin usability horizon, or if it's a system index and we're
+ * ignoring system indexes. In such cases we should tell CLUSTER to not
+ * trust the index contents but use seqscan-and-sort.
+ */
if (lc == NULL) /* not in the list? */
- elog(ERROR, "index %u does not belong to table %u",
- indexOid, tableOid);
+ return true; /* use sort */
+
+ /*
+ * Rather than doing all the pushups that would be needed to use
+ * set_baserel_size_estimates, just do a quick hack for rows and width.
+ */
+ rel->rows = rel->tuples;
+ rel->width = get_relation_data_width(tableOid, NULL);
+
+ root->total_table_pages = rel->pages;
/*
* Determine eval cost of the index expressions, if any. We need to
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 432d6483be..60a1484c99 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -216,6 +216,7 @@ set_plan_references(PlannerGlobal *glob, Plan *plan,
newrte->funccoltypmods = NIL;
newrte->funccolcollations = NIL;
newrte->values_lists = NIL;
+ newrte->values_collations = NIL;
newrte->ctecoltypes = NIL;
newrte->ctecoltypmods = NIL;
newrte->ctecolcollations = NIL;
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index a40f116bf9..a70439cc67 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -1136,7 +1136,7 @@ is_simple_union_all_recurse(Node *setOp, Query *setOpQuery, List *colTypes)
Assert(subquery != NULL);
/* Leaf nodes are OK if they match the toplevel column types */
- /* We don't have to compare typmods here */
+ /* We don't have to compare typmods or collations here */
return tlist_same_datatypes(subquery->targetList, colTypes, true);
}
else if (IsA(setOp, SetOperationStmt))
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 0ed9535d94..76adb7cdae 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -54,7 +54,8 @@
static Plan *recurse_set_operations(Node *setOp, PlannerInfo *root,
double tuple_fraction,
- List *colTypes, bool junkOK,
+ List *colTypes, List *colCollations,
+ bool junkOK,
int flag, List *refnames_tlist,
List **sortClauses, double *pNumGroups);
static Plan *generate_recursion_plan(SetOperationStmt *setOp,
@@ -81,12 +82,14 @@ static bool choose_hashed_setop(PlannerInfo *root, List *groupClauses,
double dNumGroups, double dNumOutputRows,
double tuple_fraction,
const char *construct);
-static List *generate_setop_tlist(List *colTypes, int flag,
+static List *generate_setop_tlist(List *colTypes, List *colCollations,
+ int flag,
Index varno,
bool hack_constants,
List *input_tlist,
List *refnames_tlist);
-static List *generate_append_tlist(List *colTypes, List *colCollations, bool flag,
+static List *generate_append_tlist(List *colTypes, List *colCollations,
+ bool flag,
List *input_plans,
List *refnames_tlist);
static List *generate_setop_grouplist(SetOperationStmt *op, List *targetlist);
@@ -169,7 +172,8 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
* on upper-level nodes to deal with that).
*/
return recurse_set_operations((Node *) topop, root, tuple_fraction,
- topop->colTypes, true, -1,
+ topop->colTypes, topop->colCollations,
+ true, -1,
leftmostQuery->targetList,
sortClauses, NULL);
}
@@ -179,7 +183,8 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
* Recursively handle one step in a tree of set operations
*
* tuple_fraction: fraction of tuples we expect to retrieve from node
- * colTypes: list of type OIDs of expected output columns
+ * colTypes: OID list of set-op's result column datatypes
+ * colCollations: OID list of set-op's result column collations
* junkOK: if true, child resjunk columns may be left in the result
* flag: if >= 0, add a resjunk output column indicating value of flag
* refnames_tlist: targetlist to take column names from
@@ -196,7 +201,8 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
static Plan *
recurse_set_operations(Node *setOp, PlannerInfo *root,
double tuple_fraction,
- List *colTypes, bool junkOK,
+ List *colTypes, List *colCollations,
+ bool junkOK,
int flag, List *refnames_tlist,
List **sortClauses, double *pNumGroups)
{
@@ -239,7 +245,8 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
* Add a SubqueryScan with the caller-requested targetlist
*/
plan = (Plan *)
- make_subqueryscan(generate_setop_tlist(colTypes, flag,
+ make_subqueryscan(generate_setop_tlist(colTypes, colCollations,
+ flag,
rtr->rtindex,
true,
subplan->targetlist,
@@ -287,11 +294,13 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
* generate_setop_tlist() to use varno 0.
*/
if (flag >= 0 ||
- !tlist_same_datatypes(plan->targetlist, colTypes, junkOK))
+ !tlist_same_datatypes(plan->targetlist, colTypes, junkOK) ||
+ !tlist_same_collations(plan->targetlist, colCollations, junkOK))
{
plan = (Plan *)
make_result(root,
- generate_setop_tlist(colTypes, flag,
+ generate_setop_tlist(colTypes, colCollations,
+ flag,
0,
false,
plan->targetlist,
@@ -336,12 +345,14 @@ generate_recursion_plan(SetOperationStmt *setOp, PlannerInfo *root,
* separately without any intention of combining them into one Append.
*/
lplan = recurse_set_operations(setOp->larg, root, tuple_fraction,
- setOp->colTypes, false, -1,
+ setOp->colTypes, setOp->colCollations,
+ false, -1,
refnames_tlist, sortClauses, NULL);
/* The right plan will want to look at the left one ... */
root->non_recursive_plan = lplan;
rplan = recurse_set_operations(setOp->rarg, root, tuple_fraction,
- setOp->colTypes, false, -1,
+ setOp->colTypes, setOp->colCollations,
+ false, -1,
refnames_tlist, sortClauses, NULL);
root->non_recursive_plan = NULL;
@@ -499,12 +510,14 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
/* Recurse on children, ensuring their outputs are marked */
lplan = recurse_set_operations(op->larg, root,
0.0 /* all tuples needed */ ,
- op->colTypes, false, 0,
+ op->colTypes, op->colCollations,
+ false, 0,
refnames_tlist,
&child_sortclauses, &dLeftGroups);
rplan = recurse_set_operations(op->rarg, root,
0.0 /* all tuples needed */ ,
- op->colTypes, false, 1,
+ op->colTypes, op->colCollations,
+ false, 1,
refnames_tlist,
&child_sortclauses, &dRightGroups);
@@ -620,6 +633,13 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
*
* NOTE: we can also pull a UNION ALL up into a UNION, since the distinct
* output rows will be lost anyway.
+ *
+ * NOTE: currently, we ignore collations while determining if a child has
+ * the same properties. This is semantically sound only so long as all
+ * collations have the same notion of equality. It is valid from an
+ * implementation standpoint because we don't care about the ordering of
+ * a UNION child's result: UNION ALL results are always unordered, and
+ * generate_union_plan will force a fresh sort if the top level is a UNION.
*/
static List *
recurse_union_children(Node *setOp, PlannerInfo *root,
@@ -660,8 +680,10 @@ recurse_union_children(Node *setOp, PlannerInfo *root,
*/
return list_make1(recurse_set_operations(setOp, root,
tuple_fraction,
- top_union->colTypes, false,
- -1, refnames_tlist,
+ top_union->colTypes,
+ top_union->colCollations,
+ false, -1,
+ refnames_tlist,
&child_sortclauses, NULL));
}
@@ -830,7 +852,8 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
/*
* Generate targetlist for a set-operation plan node
*
- * colTypes: column datatypes for non-junk columns
+ * colTypes: OID list of set-op's result column datatypes
+ * colCollations: OID list of set-op's result column collations
* flag: -1 if no flag column needed, 0 or 1 to create a const flag column
* varno: varno to use in generated Vars
* hack_constants: true to copy up constants (see comments in code)
@@ -838,7 +861,8 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
* refnames_tlist: targetlist to take column names from
*/
static List *
-generate_setop_tlist(List *colTypes, int flag,
+generate_setop_tlist(List *colTypes, List *colCollations,
+ int flag,
Index varno,
bool hack_constants,
List *input_tlist,
@@ -846,19 +870,23 @@ generate_setop_tlist(List *colTypes, int flag,
{
List *tlist = NIL;
int resno = 1;
- ListCell *i,
- *j,
- *k;
+ ListCell *ctlc,
+ *cclc,
+ *itlc,
+ *rtlc;
TargetEntry *tle;
Node *expr;
- j = list_head(input_tlist);
- k = list_head(refnames_tlist);
- foreach(i, colTypes)
+ /* there's no forfour() so we must chase one list manually */
+ rtlc = list_head(refnames_tlist);
+ forthree(ctlc, colTypes, cclc, colCollations, itlc, input_tlist)
{
- Oid colType = lfirst_oid(i);
- TargetEntry *inputtle = (TargetEntry *) lfirst(j);
- TargetEntry *reftle = (TargetEntry *) lfirst(k);
+ Oid colType = lfirst_oid(ctlc);
+ Oid colColl = lfirst_oid(cclc);
+ TargetEntry *inputtle = (TargetEntry *) lfirst(itlc);
+ TargetEntry *reftle = (TargetEntry *) lfirst(rtlc);
+
+ rtlc = lnext(rtlc);
Assert(inputtle->resno == resno);
Assert(reftle->resno == resno);
@@ -887,21 +915,48 @@ generate_setop_tlist(List *colTypes, int flag,
exprTypmod((Node *) inputtle->expr),
exprCollation((Node *) inputtle->expr),
0);
+
if (exprType(expr) != colType)
{
+ /*
+ * Note: it's not really cool to be applying coerce_to_common_type
+ * here; one notable point is that assign_expr_collations never
+ * gets run on any generated nodes. For the moment that's not a
+ * problem because we force the correct exposed collation below.
+ * It would likely be best to make the parser generate the correct
+ * output tlist for every set-op to begin with, though.
+ */
expr = coerce_to_common_type(NULL, /* no UNKNOWNs here */
expr,
colType,
"UNION/INTERSECT/EXCEPT");
}
+
+ /*
+ * Ensure the tlist entry's exposed collation matches the set-op.
+ * This is necessary because plan_set_operations() reports the result
+ * ordering as a list of SortGroupClauses, which don't carry collation
+ * themselves but just refer to tlist entries. If we don't show the
+ * right collation then planner.c might do the wrong thing in
+ * higher-level queries.
+ *
+ * Note we use RelabelType, not CollateExpr, since this expression
+ * will reach the executor without any further processing.
+ */
+ if (exprCollation(expr) != colColl)
+ {
+ expr = (Node *) makeRelabelType((Expr *) expr,
+ exprType(expr),
+ exprTypmod(expr),
+ colColl,
+ COERCE_DONTCARE);
+ }
+
tle = makeTargetEntry((Expr *) expr,
(AttrNumber) resno++,
pstrdup(reftle->resname),
false);
tlist = lappend(tlist, tle);
-
- j = lnext(j);
- k = lnext(k);
}
if (flag >= 0)
@@ -928,17 +983,19 @@ generate_setop_tlist(List *colTypes, int flag,
/*
* Generate targetlist for a set-operation Append node
*
- * colTypes: column datatypes for non-junk columns
+ * colTypes: OID list of set-op's result column datatypes
+ * colCollations: OID list of set-op's result column collations
* flag: true to create a flag column copied up from subplans
* input_plans: list of sub-plans of the Append
* refnames_tlist: targetlist to take column names from
*
* The entries in the Append's targetlist should always be simple Vars;
- * we just have to make sure they have the right datatypes and typmods.
+ * we just have to make sure they have the right datatypes/typmods/collations.
* The Vars are always generated with varno 0.
*/
static List *
-generate_append_tlist(List *colTypes, List *colCollations, bool flag,
+generate_append_tlist(List *colTypes, List *colCollations,
+ bool flag,
List *input_plans,
List *refnames_tlist)
{
@@ -1000,7 +1057,8 @@ generate_append_tlist(List *colTypes, List *colCollations, bool flag,
* Now we can build the tlist for the Append.
*/
colindex = 0;
- forthree(curColType, colTypes, curColCollation, colCollations, ref_tl_item, refnames_tlist)
+ forthree(curColType, colTypes, curColCollation, colCollations,
+ ref_tl_item, refnames_tlist)
{
Oid colType = lfirst_oid(curColType);
int32 colTypmod = colTypmods[colindex++];
@@ -1331,7 +1389,7 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
* Build the list of translations from parent Vars to child Vars for
* an inheritance child.
*
- * For paranoia's sake, we match type as well as attribute name.
+ * For paranoia's sake, we match type/collation as well as attribute name.
*/
static void
make_inh_translation_list(Relation oldrelation, Relation newrelation,
@@ -1410,10 +1468,13 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation,
attname, RelationGetRelationName(newrelation));
}
- /* Found it, check type */
+ /* Found it, check type and collation match */
if (atttypid != att->atttypid || atttypmod != att->atttypmod)
elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type",
attname, RelationGetRelationName(newrelation));
+ if (attcollation != att->attcollation)
+ elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's collation",
+ attname, RelationGetRelationName(newrelation));
vars = lappend(vars, makeVar(newvarno,
(AttrNumber) (new_attno + 1),
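
The reworked loop in generate_setop_tlist() above leans on a pattern its comment only hints at: the list API of this era offers forboth() and forthree() but no forfour(), so a fourth parallel list has to be advanced by hand with lnext(). A hedged sketch of that idiom, with placeholder list names rather than the patch's variables:

    ListCell   *a,
               *b,
               *c,
               *d;

    /* no forfour(): walk three lists with forthree(), chase the fourth manually */
    d = list_head(list4);
    forthree(a, list1, b, list2, c, list3)
    {
        /* use lfirst(a), lfirst(b), lfirst(c), lfirst(d) here */
        d = lnext(d);
    }
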
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index d7e3a38e6f..d17424e40f 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -195,6 +195,40 @@ tlist_same_datatypes(List *tlist, List *colTypes, bool junkOK)
return true;
}
+/*
+ * Does tlist have same exposed collations as listed in colCollations?
+ *
+ * Identical logic to the above, but for collations.
+ */
+bool
+tlist_same_collations(List *tlist, List *colCollations, bool junkOK)
+{
+ ListCell *l;
+ ListCell *curColColl = list_head(colCollations);
+
+ foreach(l, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->resjunk)
+ {
+ if (!junkOK)
+ return false;
+ }
+ else
+ {
+ if (curColColl == NULL)
+ return false; /* tlist longer than colCollations */
+ if (exprCollation((Node *) tle->expr) != lfirst_oid(curColColl))
+ return false;
+ curColColl = lnext(curColColl);
+ }
+ }
+ if (curColColl != NULL)
+ return false; /* tlist shorter than colCollations */
+ return true;
+}
+
/*
* get_sortgroupref_tle
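
Callers are expected to pair the new tlist_same_collations() with the existing tlist_same_datatypes(), as recurse_set_operations() now does. A hedged usage sketch; the surrounding plan and projection details are elided:

    /*
     * Sketch only: a subplan targetlist can be reused as-is only if both
     * the datatypes and the exposed collations already match the set
     * operation's result columns; otherwise interpose a projection.
     */
    if (tlist_same_datatypes(plan->targetlist, colTypes, junkOK) &&
        tlist_same_collations(plan->targetlist, colCollations, junkOK))
    {
        /* no projection needed */
    }
    else
    {
        /* build a fresh targetlist, e.g. via generate_setop_tlist() */
    }
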
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index e4e83a6716..4947a7d837 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -536,7 +536,9 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* RTE.
*/
List *exprsLists = NIL;
+ List *collations = NIL;
int sublist_length = -1;
+ int i;
foreach(lc, selectStmt->valuesLists)
{
@@ -573,7 +575,13 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* We must assign collations now because assign_query_collations
* doesn't process rangetable entries. We just assign all the
* collations independently in each row, and don't worry about
- * whether they are consistent vertically either.
+ * whether they are consistent vertically. The outer INSERT query
+ * isn't going to care about the collations of the VALUES columns,
+ * so it's not worth the effort to identify a common collation for
+ * each one here. (But note this does have one user-visible
+ * consequence: INSERT ... VALUES won't complain about conflicting
+ * explicit COLLATEs in a column, whereas the same VALUES
+ * construct in another context would complain.)
*/
assign_list_collations(pstate, sublist);
@@ -581,6 +589,13 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
}
/*
+ * Although we don't really need collation info, let's just make sure
+ * we provide a correctly-sized list in the VALUES RTE.
+ */
+ for (i = 0; i < sublist_length; i++)
+ collations = lappend_oid(collations, InvalidOid);
+
+ /*
* There mustn't have been any table references in the expressions,
* else strange things would happen, like Cartesian products of those
* tables with the VALUES list ...
@@ -610,7 +625,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
/*
* Generate the VALUES RTE
*/
- rte = addRangeTableEntryForValues(pstate, exprsLists, NULL, true);
+ rte = addRangeTableEntryForValues(pstate, exprsLists, collations,
+ NULL, true);
rtr = makeNode(RangeTblRef);
/* assume new rte is at end */
rtr->rtindex = list_length(pstate->p_rtable);
@@ -989,11 +1005,10 @@ static Query *
transformValuesClause(ParseState *pstate, SelectStmt *stmt)
{
Query *qry = makeNode(Query);
- List *exprsLists = NIL;
+ List *exprsLists;
+ List *collations;
List **colexprs = NULL;
- Oid *coltypes = NULL;
int sublist_length = -1;
- List *newExprsLists;
RangeTblEntry *rte;
RangeTblRef *rtr;
ListCell *lc;
@@ -1021,9 +1036,13 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
}
/*
- * For each row of VALUES, transform the raw expressions and gather type
- * information. This is also a handy place to reject DEFAULT nodes, which
- * the grammar allows for simplicity.
+ * For each row of VALUES, transform the raw expressions. This is also a
+ * handy place to reject DEFAULT nodes, which the grammar allows for
+ * simplicity.
+ *
+ * Note that the intermediate representation we build is column-organized
+ * not row-organized. That simplifies the type and collation processing
+ * below.
*/
foreach(lc, stmt->valuesLists)
{
@@ -1041,9 +1060,8 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
{
/* Remember post-transformation length of first sublist */
sublist_length = list_length(sublist);
- /* and allocate arrays for per-column info */
+ /* and allocate array for per-column lists */
colexprs = (List **) palloc0(sublist_length * sizeof(List *));
- coltypes = (Oid *) palloc0(sublist_length * sizeof(Oid));
}
else if (sublist_length != list_length(sublist))
{
@@ -1054,8 +1072,6 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
exprLocation((Node *) sublist))));
}
- exprsLists = lappend(exprsLists, sublist);
-
/* Check for DEFAULT and build per-column expression lists */
i = 0;
foreach(lc2, sublist)
@@ -1070,48 +1086,77 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
colexprs[i] = lappend(colexprs[i], col);
i++;
}
+
+ /* Release sub-list's cells to save memory */
+ list_free(sublist);
}
/*
* Now resolve the common types of the columns, and coerce everything to
- * those types.
+ * those types. Then identify the common collation, if any, of each
+ * column.
+ *
+ * We must do collation processing now because (1) assign_query_collations
+ * doesn't process rangetable entries, and (2) we need to label the VALUES
+ * RTE with column collations for use in the outer query. We don't
+ * consider conflict of implicit collations to be an error here; instead
+ * the column will just show InvalidOid as its collation, and you'll get
+ * a failure later if that results in failure to resolve a collation.
+ *
+ * Note we modify the per-column expression lists in-place.
*/
+ collations = NIL;
for (i = 0; i < sublist_length; i++)
{
- coltypes[i] = select_common_type(pstate, colexprs[i], "VALUES", NULL);
- }
+ Oid coltype;
+ Oid colcoll;
- newExprsLists = NIL;
- foreach(lc, exprsLists)
- {
- List *sublist = (List *) lfirst(lc);
- List *newsublist = NIL;
+ coltype = select_common_type(pstate, colexprs[i], "VALUES", NULL);
- i = 0;
- foreach(lc2, sublist)
+ foreach(lc, colexprs[i])
{
- Node *col = (Node *) lfirst(lc2);
+ Node *col = (Node *) lfirst(lc);
- col = coerce_to_common_type(pstate, col, coltypes[i], "VALUES");
- newsublist = lappend(newsublist, col);
- i++;
+ col = coerce_to_common_type(pstate, col, coltype, "VALUES");
+ lfirst(lc) = (void *) col;
}
- /*
- * We must assign collations now because assign_query_collations
- * doesn't process rangetable entries. We just assign all the
- * collations independently in each row, and don't worry about whether
- * they are consistent vertically either.
- */
- assign_list_collations(pstate, newsublist);
+ colcoll = select_common_collation(pstate, colexprs[i], true);
+
+ collations = lappend_oid(collations, colcoll);
+ }
- newExprsLists = lappend(newExprsLists, newsublist);
+ /*
+ * Finally, rearrange the coerced expressions into row-organized lists.
+ */
+ exprsLists = NIL;
+ foreach(lc, colexprs[0])
+ {
+ Node *col = (Node *) lfirst(lc);
+ List *sublist;
+
+ sublist = list_make1(col);
+ exprsLists = lappend(exprsLists, sublist);
+ }
+ list_free(colexprs[0]);
+ for (i = 1; i < sublist_length; i++)
+ {
+ forboth(lc, colexprs[i], lc2, exprsLists)
+ {
+ Node *col = (Node *) lfirst(lc);
+ List *sublist = lfirst(lc2);
+
+ /* sublist pointer in exprsLists won't need adjustment */
+ (void) lappend(sublist, col);
+ }
+ list_free(colexprs[i]);
}
/*
* Generate the VALUES RTE
*/
- rte = addRangeTableEntryForValues(pstate, newExprsLists, NULL, true);
+ rte = addRangeTableEntryForValues(pstate, exprsLists, collations,
+ NULL, true);
rtr = makeNode(RangeTblRef);
/* assume new rte is at end */
rtr->rtindex = list_length(pstate->p_rtable);
@@ -1164,7 +1209,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("VALUES must not contain table references"),
parser_errposition(pstate,
- locate_var_of_level((Node *) newExprsLists, 0))));
+ locate_var_of_level((Node *) exprsLists, 0))));
/*
* Another thing we can't currently support is NEW/OLD references in rules
@@ -1173,13 +1218,13 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
* This is a shame. FIXME
*/
if (list_length(pstate->p_rtable) != 1 &&
- contain_vars_of_level((Node *) newExprsLists, 0))
+ contain_vars_of_level((Node *) exprsLists, 0))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("VALUES must not contain OLD or NEW references"),
errhint("Use SELECT ... UNION ALL ... instead."),
parser_errposition(pstate,
- locate_var_of_level((Node *) newExprsLists, 0))));
+ locate_var_of_level((Node *) exprsLists, 0))));
qry->rtable = pstate->p_rtable;
qry->jointree = makeFromExpr(pstate->p_joinlist, NULL);
@@ -1191,13 +1236,13 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("cannot use aggregate function in VALUES"),
parser_errposition(pstate,
- locate_agg_of_level((Node *) newExprsLists, 0))));
+ locate_agg_of_level((Node *) exprsLists, 0))));
if (pstate->p_hasWindowFuncs)
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
errmsg("cannot use window function in VALUES"),
parser_errposition(pstate,
- locate_windowfunc((Node *) newExprsLists))));
+ locate_windowfunc((Node *) exprsLists))));
assign_query_collations(pstate, qry);
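
The rewritten transformValuesClause() keeps its intermediate data column-organized and only transposes back to row lists at the end. That transpose step is compact but easy to misread, so here is the same idea as a standalone sketch under the backend's pg_list.h API; the function name and its two parameters are assumptions for illustration, not names from the patch.

    /*
     * Sketch only: transpose ncolumns equal-length column lists into a
     * list of row lists, the way transformValuesClause() now does.
     */
    static List *
    transpose_columns(List **columns, int ncolumns)
    {
        List       *rows = NIL;
        ListCell   *lc,
                   *lc2;
        int         i;

        /* seed one single-element row per entry of the first column */
        foreach(lc, columns[0])
            rows = lappend(rows, list_make1(lfirst(lc)));

        /* append each remaining column's cells to the matching rows */
        for (i = 1; i < ncolumns; i++)
        {
            forboth(lc, columns[i], lc2, rows)
            {
                /* lappend on a non-empty list never relocates its header */
                (void) lappend((List *) lfirst(lc2), lfirst(lc));
            }
        }
        return rows;
    }
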
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index a22ab66ae5..1e4f8f698b 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -1933,6 +1933,23 @@ alter_table_cmd:
n->def = (Node *) $3;
$$ = (Node *)n;
}
+ /* ALTER TABLE <name> OF <type_name> */
+ | OF any_name
+ {
+ AlterTableCmd *n = makeNode(AlterTableCmd);
+ TypeName *def = makeTypeNameFromNameList($2);
+ def->location = @2;
+ n->subtype = AT_AddOf;
+ n->def = (Node *) def;
+ $$ = (Node *)n;
+ }
+ /* ALTER TABLE <name> NOT OF */
+ | NOT OF
+ {
+ AlterTableCmd *n = makeNode(AlterTableCmd);
+ n->subtype = AT_DropOf;
+ $$ = (Node *)n;
+ }
/* ALTER TABLE <name> OWNER TO RoleId */
| OWNER TO RoleId
{
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 0dbf5cbf38..2a94f73a9a 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -1220,6 +1220,7 @@ addRangeTableEntryForFunction(ParseState *pstate,
RangeTblEntry *
addRangeTableEntryForValues(ParseState *pstate,
List *exprs,
+ List *collations,
Alias *alias,
bool inFromCl)
{
@@ -1233,6 +1234,7 @@ addRangeTableEntryForValues(ParseState *pstate,
rte->relid = InvalidOid;
rte->subquery = NULL;
rte->values_lists = exprs;
+ rte->values_collations = collations;
rte->alias = alias;
eref = alias ? copyObject(alias) : makeAlias(refname, NIL);
@@ -1657,7 +1659,9 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
ListCell *l3;
int attnum = 0;
- forthree(l1, rte->funccoltypes, l2, rte->funccoltypmods, l3, rte->funccolcollations)
+ forthree(l1, rte->funccoltypes,
+ l2, rte->funccoltypmods,
+ l3, rte->funccolcollations)
{
Oid attrtype = lfirst_oid(l1);
int32 attrtypmod = lfirst_int(l2);
@@ -1687,12 +1691,15 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
{
/* Values RTE */
ListCell *aliasp_item = list_head(rte->eref->colnames);
- ListCell *lc;
+ ListCell *lcv;
+ ListCell *lcc;
varattno = 0;
- foreach(lc, (List *) linitial(rte->values_lists))
+ forboth(lcv, (List *) linitial(rte->values_lists),
+ lcc, rte->values_collations)
{
- Node *col = (Node *) lfirst(lc);
+ Node *col = (Node *) lfirst(lcv);
+ Oid colcollation = lfirst_oid(lcc);
varattno++;
if (colnames)
@@ -1712,7 +1719,7 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
varnode = makeVar(rtindex, varattno,
exprType(col),
exprTypmod(col),
- exprCollation(col),
+ colcollation,
sublevels_up);
varnode->location = location;
*colvars = lappend(*colvars, varnode);
@@ -1789,7 +1796,9 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
ListCell *lcc;
varattno = 0;
- forthree(lct, rte->ctecoltypes, lcm, rte->ctecoltypmods, lcc, rte->ctecolcollations)
+ forthree(lct, rte->ctecoltypes,
+ lcm, rte->ctecoltypmods,
+ lcc, rte->ctecolcollations)
{
Oid coltype = lfirst_oid(lct);
int32 coltypmod = lfirst_int(lcm);
@@ -2116,6 +2125,7 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
case RTE_VALUES:
{
/* Values RTE --- get type info from first sublist */
+ /* collation is stored separately, though */
List *collist = (List *) linitial(rte->values_lists);
Node *col;
@@ -2125,7 +2135,7 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
col = (Node *) list_nth(collist, attnum - 1);
*vartype = exprType(col);
*vartypmod = exprTypmod(col);
- *varcollid = exprCollation(col);
+ *varcollid = list_nth_oid(rte->values_collations, attnum - 1);
}
break;
case RTE_JOIN:
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 02e93306e4..04289c799d 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -833,16 +833,11 @@ transformOfType(CreateStmtContext *cxt, TypeName *ofTypename)
AssertArg(ofTypename);
tuple = typenameType(NULL, ofTypename, NULL);
+ check_of_type(tuple);
typ = (Form_pg_type) GETSTRUCT(tuple);
ofTypeId = HeapTupleGetOid(tuple);
ofTypename->typeOid = ofTypeId; /* cached for later */
- if (typ->typtype != TYPTYPE_COMPOSITE)
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("type %s is not a composite type",
- format_type_be(ofTypeId))));
-
tupdesc = lookup_rowtype_tupdesc(ofTypeId, -1);
for (i = 0; i < tupdesc->natts; i++)
{
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 3fbe14a409..e3ad3199c4 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -346,7 +346,7 @@ InitLocks(void)
hash_flags = (HASH_ELEM | HASH_FUNCTION);
LockMethodLocalHash = hash_create("LOCALLOCK hash",
- 128,
+ 16,
&info,
hash_flags);
}
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 234bb0cf6e..3cb7ce3269 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -189,6 +189,8 @@ SetClientEncoding(int encoding)
int current_server_encoding;
bool found;
ListCell *lc;
+ ListCell *prev;
+ ListCell *next;
if (!PG_VALID_FE_ENCODING(encoding))
return -1;
@@ -222,10 +224,13 @@ SetClientEncoding(int encoding)
* leak memory.
*/
found = false;
- foreach(lc, ConvProcList)
+ prev = NULL;
+ for (lc = list_head(ConvProcList); lc; lc = next)
{
ConvProcInfo *convinfo = (ConvProcInfo *) lfirst(lc);
+ next = lnext(lc);
+
if (convinfo->s_encoding == current_server_encoding &&
convinfo->c_encoding == encoding)
{
@@ -240,10 +245,13 @@ SetClientEncoding(int encoding)
else
{
/* Duplicate entry, release it */
- ConvProcList = list_delete_ptr(ConvProcList, convinfo);
+ ConvProcList = list_delete_cell(ConvProcList, lc, prev);
pfree(convinfo);
+ continue; /* prev mustn't advance */
}
}
+
+ prev = lc;
}
if (found)
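
The SetClientEncoding() change above is the standard recipe for deleting cells while walking a List with this list API: capture next before the current cell can be freed, hand the remembered prev to list_delete_cell(), and skip the prev advance after a deletion. A generic hedged sketch, assuming a list of palloc'd entries and a hypothetical should_drop() predicate:

    /*
     * Sketch only: drop entries matching should_drop() from a List of
     * palloc'd structs, using the same prev/next bookkeeping as the
     * SetClientEncoding() hunk above.
     */
    static List *
    prune_list(List *items)
    {
        ListCell   *cell,
                   *prev,
                   *next;

        prev = NULL;
        for (cell = list_head(items); cell; cell = next)
        {
            void       *entry = lfirst(cell);

            next = lnext(cell);     /* fetch before the cell can be freed */

            if (should_drop(entry))
            {
                items = list_delete_cell(items, cell, prev);
                pfree(entry);
                continue;           /* prev must not advance past a freed cell */
            }
            prev = cell;
        }
        return items;
    }
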
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index fd1f20ee86..d26ff63e1d 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -185,6 +185,7 @@ static int locale_date_order(const char *locale);
static bool check_locale_name(const char *locale);
static bool check_locale_encoding(const char *locale, int encoding);
static void setlocales(void);
+static char *localemap(char *locale);
static void usage(const char *progname);
#ifdef WIN32
@@ -1164,9 +1165,9 @@ bootstrap_template1(void)
bki_lines = replace_token(bki_lines, "ENCODING", encodingid);
- bki_lines = replace_token(bki_lines, "LC_COLLATE", lc_collate);
+ bki_lines = replace_token(bki_lines, "LC_COLLATE", escape_quotes(lc_collate));
- bki_lines = replace_token(bki_lines, "LC_CTYPE", lc_ctype);
+ bki_lines = replace_token(bki_lines, "LC_CTYPE", escape_quotes(lc_ctype));
/*
* Pass correct LC_xxx environment to bootstrap.
@@ -2250,6 +2251,83 @@ check_locale_encoding(const char *locale, int user_enc)
return true;
}
+#ifdef WIN32
+
+/*
+ * Replace 'needle' with 'replacement' in 'str' . Note that the replacement
+ * is done in-place, so 'replacement' must be shorter than 'needle'.
+ */
+static void
+strreplace(char *str, char *needle, char *replacement)
+{
+ char *s;
+
+ s = strstr(str, needle);
+ if (s != NULL)
+ {
+ int replacementlen = strlen(replacement);
+ char *rest = s + strlen(needle);
+
+ memcpy(s, replacement, replacementlen);
+ memmove(s + replacementlen, rest, strlen(rest) + 1);
+ }
+}
+
+#endif /* WIN32 */
+
+/*
+ * Windows has a problem with locale names that have a dot in the country
+ * name. For example:
+ *
+ * "Chinese (Traditional)_Hong Kong S.A.R..950"
+ *
+ * For some reason, setlocale() doesn't accept that. Fortunately, Windows'
+ * setlocale() accepts various alternative names for such countries, so we
+ * map the full country names to accepted aliases.
+ *
+ * The returned string is always malloc'd - if no mapping is done it is
+ * just a malloc'd copy of the original.
+ */
+static char *
+localemap(char *locale)
+{
+ locale = xstrdup(locale);
+
+#ifdef WIN32
+ /*
+ * Map the full country name to an abbreviation that setlocale() accepts.
+ *
+ * "HKG" is listed here:
+ * http://msdn.microsoft.com/en-us/library/cdax410z%28v=vs.71%29.aspx
+ * (Country/Region Strings).
+ *
+ * "ARE" is the ISO-3166 three-letter code for U.A.E. It is not on the
+ * above list, but seems to work anyway.
+ */
+ strreplace(locale, "Hong Kong S.A.R.", "HKG");
+ strreplace(locale, "U.A.E.", "ARE");
+
+ /*
+ * The ISO-3166 country code for Macau S.A.R. is MAC, but Windows doesn't
+ * seem to recognize that. And Macau isn't listed in the table of
+ * accepted abbreviations linked above.
+ *
+ * Fortunately, "ZHM" seems to be accepted as an alias for
+	 * "Chinese (Traditional)_Macau S.A.R..950", so we use that. Note that
+	 * unlike HKG and ARE, ZHM is an alias for the whole locale name, not
+	 * just the country part. I'm not sure where that "ZHM" comes from; it
+	 * must be some legacy naming scheme. But hey, it works.
+ *
+ * Some versions of Windows spell it "Macau", others "Macao".
+ */
+ strreplace(locale, "Chinese (Traditional)_Macau S.A.R..950", "ZHM");
+ strreplace(locale, "Chinese_Macau S.A.R..950", "ZHM");
+ strreplace(locale, "Chinese (Traditional)_Macao S.A.R..950", "ZHM");
+ strreplace(locale, "Chinese_Macao S.A.R..950", "ZHM");
+#endif /* WIN32 */
+
+ return locale;
+}
/*
* set up the locale variables
@@ -2282,25 +2360,25 @@ setlocales(void)
*/
if (strlen(lc_ctype) == 0 || !check_locale_name(lc_ctype))
- lc_ctype = xstrdup(setlocale(LC_CTYPE, NULL));
+ lc_ctype = localemap(setlocale(LC_CTYPE, NULL));
if (strlen(lc_collate) == 0 || !check_locale_name(lc_collate))
- lc_collate = xstrdup(setlocale(LC_COLLATE, NULL));
+ lc_collate = localemap(setlocale(LC_COLLATE, NULL));
if (strlen(lc_numeric) == 0 || !check_locale_name(lc_numeric))
- lc_numeric = xstrdup(setlocale(LC_NUMERIC, NULL));
+ lc_numeric = localemap(setlocale(LC_NUMERIC, NULL));
if (strlen(lc_time) == 0 || !check_locale_name(lc_time))
- lc_time = xstrdup(setlocale(LC_TIME, NULL));
+ lc_time = localemap(setlocale(LC_TIME, NULL));
if (strlen(lc_monetary) == 0 || !check_locale_name(lc_monetary))
- lc_monetary = xstrdup(setlocale(LC_MONETARY, NULL));
+ lc_monetary = localemap(setlocale(LC_MONETARY, NULL));
if (strlen(lc_messages) == 0 || !check_locale_name(lc_messages))
#if defined(LC_MESSAGES) && !defined(WIN32)
{
/* when available get the current locale setting */
- lc_messages = xstrdup(setlocale(LC_MESSAGES, NULL));
+ lc_messages = localemap(setlocale(LC_MESSAGES, NULL));
}
#else
{
/* when not available, get the CTYPE setting */
- lc_messages = xstrdup(setlocale(LC_CTYPE, NULL));
+ lc_messages = localemap(setlocale(LC_CTYPE, NULL));
}
#endif
@@ -2906,7 +2984,19 @@ main(int argc, char *argv[])
}
else if (!pg_valid_server_encoding_id(ctype_enc))
{
- /* We recognized it, but it's not a legal server encoding */
+ /*
+ * We recognized it, but it's not a legal server encoding.
+ * On Windows, UTF-8 works with any locale, so we can fall back
+ * to UTF-8.
+ */
+#ifdef WIN32
+ printf(_("Encoding %s implied by locale is not allowed as a server-side encoding.\n"
+ "The default database encoding will be set to %s instead.\n"),
+ pg_encoding_to_char(ctype_enc),
+ pg_encoding_to_char(PG_UTF8));
+ ctype_enc = PG_UTF8;
+ encodingid = encodingid_to_string(ctype_enc);
+#else
fprintf(stderr,
_("%s: locale %s requires unsupported encoding %s\n"),
progname, lc_ctype, pg_encoding_to_char(ctype_enc));
@@ -2915,6 +3005,7 @@ main(int argc, char *argv[])
"Rerun %s with a different locale selection.\n"),
pg_encoding_to_char(ctype_enc), progname);
exit(1);
+#endif
}
else
{
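
The strreplace()/localemap() pair added above can edit in place only because every mapping is no longer than the text it replaces. A hedged usage sketch outside initdb; the demo function and the literal locale string are illustrative, and the helpers are assumed to be available as defined in the hunk above:

    #include <locale.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical caller of the localemap() shown above. */
    static void
    demo_localemap(void)
    {
        /* localemap() always returns freshly malloc'd storage */
        char   *loc = localemap("Chinese (Traditional)_Hong Kong S.A.R..950");

        /* on Windows the mapped name reads "Chinese (Traditional)_HKG.950" */
        if (setlocale(LC_COLLATE, loc) == NULL)
            fprintf(stderr, "setlocale rejected \"%s\"\n", loc);
        free(loc);
    }
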
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index f086a1e636..c2f6180e99 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -7944,6 +7944,7 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
int ntups;
int i_attname;
int i_atttypdefn;
+ int i_attcollation;
int i_typrelid;
int i;
@@ -7951,17 +7952,42 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
selectSourceSchema(tyinfo->dobj.namespace->dobj.name);
/* Fetch type specific details */
- /* We assume here that remoteVersion must be at least 70300 */
-
- appendPQExpBuffer(query, "SELECT a.attname, "
- "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
- "typrelid "
- "FROM pg_catalog.pg_type t, pg_catalog.pg_attribute a "
- "WHERE t.oid = '%u'::pg_catalog.oid "
- "AND a.attrelid = t.typrelid "
- "AND NOT a.attisdropped "
- "ORDER BY a.attnum ",
- tyinfo->dobj.catId.oid);
+ if (fout->remoteVersion >= 90100)
+ {
+ /*
+ * attcollation is new in 9.1. Since we only want to dump COLLATE
+ * clauses for attributes whose collation is different from their
+ * type's default, we use a CASE here to suppress uninteresting
+ * attcollations cheaply.
+ */
+ appendPQExpBuffer(query, "SELECT a.attname, "
+ "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
+ "CASE WHEN a.attcollation <> at.typcollation "
+ "THEN a.attcollation ELSE 0 END AS attcollation, "
+ "ct.typrelid "
+ "FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a, "
+ "pg_catalog.pg_type at "
+ "WHERE ct.oid = '%u'::pg_catalog.oid "
+ "AND a.attrelid = ct.typrelid "
+ "AND a.atttypid = at.oid "
+ "AND NOT a.attisdropped "
+ "ORDER BY a.attnum ",
+ tyinfo->dobj.catId.oid);
+ }
+ else
+ {
+ /* We assume here that remoteVersion must be at least 70300 */
+ appendPQExpBuffer(query, "SELECT a.attname, "
+ "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
+ "0 AS attcollation, "
+ "ct.typrelid "
+ "FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a "
+ "WHERE ct.oid = '%u'::pg_catalog.oid "
+ "AND a.attrelid = ct.typrelid "
+ "AND NOT a.attisdropped "
+ "ORDER BY a.attnum ",
+ tyinfo->dobj.catId.oid);
+ }
res = PQexec(g_conn, query->data);
check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK);
@@ -7970,6 +7996,7 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
i_attname = PQfnumber(res, "attname");
i_atttypdefn = PQfnumber(res, "atttypdefn");
+ i_attcollation = PQfnumber(res, "attcollation");
i_typrelid = PQfnumber(res, "typrelid");
if (binary_upgrade)
@@ -7987,11 +8014,30 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
{
char *attname;
char *atttypdefn;
+ Oid attcollation;
attname = PQgetvalue(res, i, i_attname);
atttypdefn = PQgetvalue(res, i, i_atttypdefn);
+ attcollation = atooid(PQgetvalue(res, i, i_attcollation));
appendPQExpBuffer(q, "\n\t%s %s", fmtId(attname), atttypdefn);
+
+ /* Add collation if not default for the column type */
+ if (OidIsValid(attcollation))
+ {
+ CollInfo *coll;
+
+ coll = findCollationByOid(attcollation);
+ if (coll)
+ {
+ /* always schema-qualify, don't try to be smart */
+ appendPQExpBuffer(q, " COLLATE %s.",
+ fmtId(coll->dobj.namespace->dobj.name));
+ appendPQExpBuffer(q, "%s",
+ fmtId(coll->dobj.name));
+ }
+ }
+
if (i < ntups - 1)
appendPQExpBuffer(q, ",");
}
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index bab67174a2..e01fb7bdeb 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -1287,11 +1287,12 @@ describeOneTableDetails(const char *schemaname,
"\n (SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) for 128)"
"\n FROM pg_catalog.pg_attrdef d"
"\n WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef),"
- "\n a.attnotnull, a.attnum");
+ "\n a.attnotnull, a.attnum,");
if (pset.sversion >= 90100)
- appendPQExpBuffer(&buf, ",\n (SELECT collname FROM pg_collation WHERE oid = a.attcollation AND collname <> 'default') AS attcollation");
+ appendPQExpBuffer(&buf, "\n (SELECT c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type t\n"
+ " WHERE c.oid = a.attcollation AND t.oid = a.atttypid AND a.attcollation <> t.typcollation) AS attcollation");
else
- appendPQExpBuffer(&buf, ",\n NULL AS attcollation");
+ appendPQExpBuffer(&buf, "\n NULL AS attcollation");
if (tableinfo.relkind == 'i')
appendPQExpBuffer(&buf, ",\n pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, TRUE) AS indexdef");
if (verbose)
@@ -1362,7 +1363,7 @@ describeOneTableDetails(const char *schemaname,
cols = 2;
if (tableinfo.relkind == 'r' || tableinfo.relkind == 'v' ||
- tableinfo.relkind == 'f')
+ tableinfo.relkind == 'f' || tableinfo.relkind == 'c')
{
show_modifiers = true;
headers[cols++] = gettext_noop("Modifiers");
@@ -2697,22 +2698,27 @@ listDomains(const char *pattern, bool showSystem)
printfPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
" t.typname as \"%s\",\n"
- " pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
- " CASE WHEN t.typnotnull AND t.typdefault IS NOT NULL THEN 'not null default '||t.typdefault\n"
- " WHEN t.typnotnull AND t.typdefault IS NULL THEN 'not null'\n"
- " WHEN NOT t.typnotnull AND t.typdefault IS NOT NULL THEN 'default '||t.typdefault\n"
- " ELSE ''\n"
- " END as \"%s\",\n"
+ " pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
+ " TRIM(LEADING\n",
+ gettext_noop("Schema"),
+ gettext_noop("Name"),
+ gettext_noop("Type"));
+ if (pset.sversion >= 90100)
+ appendPQExpBuffer(&buf,
+ " COALESCE((SELECT ' collate ' || c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt\n"
+ " WHERE c.oid = t.typcollation AND bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation), '') ||\n");
+ appendPQExpBuffer(&buf,
+ " CASE WHEN t.typnotnull THEN ' not null' ELSE '' END ||\n"
+ " CASE WHEN t.typdefault IS NOT NULL THEN ' default ' || t.typdefault ELSE '' END\n"
+ " ) as \"%s\",\n",
+ gettext_noop("Modifier"));
+ appendPQExpBuffer(&buf,
" pg_catalog.array_to_string(ARRAY(\n"
" SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid\n"
" ), ' ') as \"%s\"\n"
"FROM pg_catalog.pg_type t\n"
" LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n"
"WHERE t.typtype = 'd'\n",
- gettext_noop("Schema"),
- gettext_noop("Name"),
- gettext_noop("Type"),
- gettext_noop("Modifier"),
gettext_noop("Check"));
if (!showSystem && !pattern)
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index b74526b3b7..53c684aa4e 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -53,6 +53,6 @@
*/
/* yyyymmddN */
-#define CATALOG_VERSION_NO 201104051
+#define CATALOG_VERSION_NO 201104181
#endif
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index 2ce6806e50..071db7f401 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -75,7 +75,8 @@ extern void FormIndexDatum(IndexInfo *indexInfo,
extern void index_build(Relation heapRelation,
Relation indexRelation,
IndexInfo *indexInfo,
- bool isprimary);
+ bool isprimary,
+ bool isreindex);
extern double IndexBuildHeapScan(Relation heapRelation,
Relation indexRelation,
@@ -88,9 +89,12 @@ extern void validate_index(Oid heapId, Oid indexId, Snapshot snapshot);
extern void reindex_index(Oid indexId, bool skip_constraint_checks);
-#define REINDEX_CHECK_CONSTRAINTS 0x1
-#define REINDEX_SUPPRESS_INDEX_USE 0x2
-extern bool reindex_relation(Oid relid, bool toast_too, int flags);
+/* Flag bits for reindex_relation(): */
+#define REINDEX_REL_PROCESS_TOAST 0x01
+#define REINDEX_REL_SUPPRESS_INDEX_USE 0x02
+#define REINDEX_REL_CHECK_CONSTRAINTS 0x04
+
+extern bool reindex_relation(Oid relid, int flags);
extern bool ReindexIsProcessingHeap(Oid heapOid);
extern bool ReindexIsProcessingIndex(Oid indexOid);
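
With reindex_relation() now taking only a flags argument, the old boolean toast_too is expressed as REINDEX_REL_PROCESS_TOAST and combined with the other bits. A brief hedged sketch of a call-site migration; the "before" line illustrates the old signature rather than quoting a specific caller:

    /* before: reindex_relation(relid, true, REINDEX_CHECK_CONSTRAINTS); */
    reindex_relation(relid,
                     REINDEX_REL_PROCESS_TOAST | REINDEX_REL_CHECK_CONSTRAINTS);
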
diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h
index d4383525db..3f971eb218 100644
--- a/src/include/commands/tablecmds.h
+++ b/src/include/commands/tablecmds.h
@@ -56,6 +56,8 @@ extern void find_composite_type_dependencies(Oid typeOid,
Relation origRelation,
const char *origTypeName);
+extern void check_of_type(HeapTuple typetuple);
+
extern AttrNumber *varattnos_map(TupleDesc olddesc, TupleDesc newdesc);
extern AttrNumber *varattnos_map_schema(TupleDesc old, List *schema);
extern void change_varattnos_of_a_node(Node *node, const AttrNumber *newattno);
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 74c9985597..b46cccc04b 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -725,7 +725,7 @@ typedef struct RangeTblEntry
*
* If the function returns RECORD, funccoltypes lists the column types
* declared in the RTE's column type specification, funccoltypmods lists
- * their declared typmods, funccolcollations their collations. Otherwise,
+ * their declared typmods, funccolcollations their collations. Otherwise,
* those fields are NIL.
*/
Node *funcexpr; /* expression tree for func call */
@@ -737,6 +737,7 @@ typedef struct RangeTblEntry
* Fields valid for a values RTE (else NIL):
*/
List *values_lists; /* list of expression lists */
+ List *values_collations; /* OID list of column collation OIDs */
/*
* Fields valid for a CTE RTE (else NULL/zero):
@@ -1220,6 +1221,8 @@ typedef enum AlterTableType
AT_DisableRule, /* DISABLE RULE name */
AT_AddInherit, /* INHERIT parent */
AT_DropInherit, /* NO INHERIT parent */
+ AT_AddOf, /* OF <type_name> */
+ AT_DropOf, /* NOT OF */
AT_GenericOptions, /* OPTIONS (...) */
} AlterTableType;
diff --git a/src/include/optimizer/tlist.h b/src/include/optimizer/tlist.h
index f7606d79a3..7af59589dc 100644
--- a/src/include/optimizer/tlist.h
+++ b/src/include/optimizer/tlist.h
@@ -25,6 +25,7 @@ extern List *add_to_flat_tlist(List *tlist, List *exprs);
extern List *get_tlist_exprs(List *tlist, bool includeJunk);
extern bool tlist_same_datatypes(List *tlist, List *colTypes, bool junkOK);
+extern bool tlist_same_collations(List *tlist, List *colCollations, bool junkOK);
extern TargetEntry *get_sortgroupref_tle(Index sortref,
List *targetList);
diff --git a/src/include/parser/parse_relation.h b/src/include/parser/parse_relation.h
index 55066681f7..0158465c91 100644
--- a/src/include/parser/parse_relation.h
+++ b/src/include/parser/parse_relation.h
@@ -63,6 +63,7 @@ extern RangeTblEntry *addRangeTableEntryForFunction(ParseState *pstate,
bool inFromCl);
extern RangeTblEntry *addRangeTableEntryForValues(ParseState *pstate,
List *exprs,
+ List *collations,
Alias *alias,
bool inFromCl);
extern RangeTblEntry *addRangeTableEntryForJoin(ParseState *pstate,
diff --git a/src/interfaces/ecpg/compatlib/.gitignore b/src/interfaces/ecpg/compatlib/.gitignore
index e4ba84ae30..e1dfd687ea 100644
--- a/src/interfaces/ecpg/compatlib/.gitignore
+++ b/src/interfaces/ecpg/compatlib/.gitignore
@@ -1,3 +1,4 @@
+/compatlib.def
/libecpg_compatdll.def
/libecpg_compatddll.def
/blibecpg_compatdll.def
diff --git a/src/interfaces/ecpg/ecpglib/.gitignore b/src/interfaces/ecpg/ecpglib/.gitignore
index e6c60b16fc..f17eacde91 100644
--- a/src/interfaces/ecpg/ecpglib/.gitignore
+++ b/src/interfaces/ecpg/ecpglib/.gitignore
@@ -1,3 +1,4 @@
+/ecpglib.def
/libecpgdll.def
/libecpgddll.def
/blibecpgdll.def
diff --git a/src/interfaces/ecpg/pgtypeslib/.gitignore b/src/interfaces/ecpg/pgtypeslib/.gitignore
index aa5bdb837f..79b264428e 100644
--- a/src/interfaces/ecpg/pgtypeslib/.gitignore
+++ b/src/interfaces/ecpg/pgtypeslib/.gitignore
@@ -1,3 +1,4 @@
+/pgtypeslib.def
/libpgtypesdll.def
/libpgtypesddll.def
/blibpgtypesdll.def
diff --git a/src/interfaces/libpq/.gitignore b/src/interfaces/libpq/.gitignore
index 29024ae67c..e79f3872cb 100644
--- a/src/interfaces/libpq/.gitignore
+++ b/src/interfaces/libpq/.gitignore
@@ -17,3 +17,5 @@
/ip.c
/encnames.c
/wchar.c
+/libpqdll.def
+/libpq.rc
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 729730aa8c..1b409d1e44 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -1632,7 +1632,9 @@ keep_going: /* We will come back to here until there is
if (!IS_AF_UNIX(addr_cur->ai_family))
{
+#ifndef WIN32
int on = 1;
+#endif
int usekeepalives = useKeepalives(conn);
int err = 0;
diff --git a/src/pl/plpgsql/src/gram.y b/src/pl/plpgsql/src/gram.y
index fbd441a1bc..4e2b7058f0 100644
--- a/src/pl/plpgsql/src/gram.y
+++ b/src/pl/plpgsql/src/gram.y
@@ -21,6 +21,7 @@
#include "parser/parse_type.h"
#include "parser/scanner.h"
#include "parser/scansup.h"
+#include "utils/builtins.h"
/* Location tracking support --- simpler than bison's default */
@@ -122,6 +123,7 @@ static List *read_raise_options(void);
PLcword cword;
PLwdatum wdatum;
bool boolean;
+ Oid oid;
struct
{
char *name;
@@ -167,6 +169,7 @@ static List *read_raise_options(void);
%type <boolean> decl_const decl_notnull exit_type
%type <expr> decl_defval decl_cursor_query
%type <dtype> decl_datatype
+%type <oid> decl_collate
%type <datum> decl_cursor_args
%type <list> decl_cursor_arglist
%type <nsitem> decl_aliasitem
@@ -245,6 +248,7 @@ static List *read_raise_options(void);
%token <keyword> K_BY
%token <keyword> K_CASE
%token <keyword> K_CLOSE
+%token <keyword> K_COLLATE
%token <keyword> K_CONSTANT
%token <keyword> K_CONTINUE
%token <keyword> K_CURSOR
@@ -428,10 +432,27 @@ decl_stmt : decl_statement
}
;
-decl_statement : decl_varname decl_const decl_datatype decl_notnull decl_defval
+decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull decl_defval
{
PLpgSQL_variable *var;
+ /*
+ * If a collation is supplied, insert it into the
+ * datatype. We assume decl_datatype always returns
+ * a freshly built struct not shared with other
+ * variables.
+ */
+ if (OidIsValid($4))
+ {
+ if (!OidIsValid($3->collation))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("collations are not supported by type %s",
+ format_type_be($3->typoid)),
+ parser_errposition(@4)));
+ $3->collation = $4;
+ }
+
var = plpgsql_build_variable($1.name, $1.lineno,
$3, true);
if ($2)
@@ -444,10 +465,10 @@ decl_statement : decl_varname decl_const decl_datatype decl_notnull decl_defval
errmsg("row or record variable cannot be CONSTANT"),
parser_errposition(@2)));
}
- if ($4)
+ if ($5)
{
if (var->dtype == PLPGSQL_DTYPE_VAR)
- ((PLpgSQL_var *) var)->notnull = $4;
+ ((PLpgSQL_var *) var)->notnull = $5;
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -455,10 +476,10 @@ decl_statement : decl_varname decl_const decl_datatype decl_notnull decl_defval
parser_errposition(@4)));
}
- if ($5 != NULL)
+ if ($6 != NULL)
{
if (var->dtype == PLPGSQL_DTYPE_VAR)
- ((PLpgSQL_var *) var)->default_val = $5;
+ ((PLpgSQL_var *) var)->default_val = $6;
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -685,6 +706,19 @@ decl_datatype :
}
;
+decl_collate :
+ { $$ = InvalidOid; }
+ | K_COLLATE T_WORD
+ {
+ $$ = get_collation_oid(list_make1(makeString($2.ident)),
+ false);
+ }
+ | K_COLLATE T_CWORD
+ {
+ $$ = get_collation_oid($2.idents, false);
+ }
+ ;
+
decl_notnull :
{ $$ = false; }
| K_NOT K_NULL
@@ -2432,7 +2466,8 @@ read_datatype(int tok)
yyerror("incomplete data type declaration");
}
/* Possible followers for datatype in a declaration */
- if (tok == K_NOT || tok == '=' || tok == COLON_EQUALS || tok == K_DEFAULT)
+ if (tok == K_COLLATE || tok == K_NOT ||
+ tok == '=' || tok == COLON_EQUALS || tok == K_DEFAULT)
break;
/* Possible followers for datatype in a cursor_arg list */
if ((tok == ',' || tok == ')') && parenlevel == 0)
diff --git a/src/pl/plpgsql/src/pl_scanner.c b/src/pl/plpgsql/src/pl_scanner.c
index e8a2628f2f..e1c0b62595 100644
--- a/src/pl/plpgsql/src/pl_scanner.c
+++ b/src/pl/plpgsql/src/pl_scanner.c
@@ -64,6 +64,7 @@ static const ScanKeyword reserved_keywords[] = {
PG_KEYWORD("by", K_BY, RESERVED_KEYWORD)
PG_KEYWORD("case", K_CASE, RESERVED_KEYWORD)
PG_KEYWORD("close", K_CLOSE, RESERVED_KEYWORD)
+ PG_KEYWORD("collate", K_COLLATE, RESERVED_KEYWORD)
PG_KEYWORD("continue", K_CONTINUE, RESERVED_KEYWORD)
PG_KEYWORD("declare", K_DECLARE, RESERVED_KEYWORD)
PG_KEYWORD("default", K_DEFAULT, RESERVED_KEYWORD)
diff --git a/src/pl/plpython/expected/README b/src/pl/plpython/expected/README
index 031b77dd0a..11424877b6 100644
--- a/src/pl/plpython/expected/README
+++ b/src/pl/plpython/expected/README
@@ -2,8 +2,7 @@ Guide to alternative expected files:
plpython_error_0.out Python 2.4 and older
-plpython_unicode.out server encoding != SQL_ASCII and client encoding == UTF8; else ...
-plpython_unicode_0.out server encoding != SQL_ASCII and client encoding != UTF8; else ...
+plpython_unicode.out server encoding != SQL_ASCII
plpython_unicode_3.out server encoding == SQL_ASCII
plpython_subtransaction_0.out Python 2.4 and older (without with statement)
diff --git a/src/pl/plpython/expected/plpython_unicode.out b/src/pl/plpython/expected/plpython_unicode.out
index c4ab73fd24..859edbbbaa 100644
--- a/src/pl/plpython/expected/plpython_unicode.out
+++ b/src/pl/plpython/expected/plpython_unicode.out
@@ -1,6 +1,7 @@
--
-- Unicode handling
--
+SET client_encoding TO UTF8;
CREATE TABLE unicode_test (
testvalue text NOT NULL
);
diff --git a/src/pl/plpython/expected/plpython_unicode_0.out b/src/pl/plpython/expected/plpython_unicode_0.out
deleted file mode 100644
index b1c6e39849..0000000000
--- a/src/pl/plpython/expected/plpython_unicode_0.out
+++ /dev/null
@@ -1,50 +0,0 @@
---
--- Unicode handling
---
-CREATE TABLE unicode_test (
- testvalue text NOT NULL
-);
-CREATE FUNCTION unicode_return() RETURNS text AS E'
-return u"\\x80"
-' LANGUAGE plpythonu;
-CREATE FUNCTION unicode_trigger() RETURNS trigger AS E'
-TD["new"]["testvalue"] = u"\\x80"
-return "MODIFY"
-' LANGUAGE plpythonu;
-CREATE TRIGGER unicode_test_bi BEFORE INSERT ON unicode_test
- FOR EACH ROW EXECUTE PROCEDURE unicode_trigger();
-CREATE FUNCTION unicode_plan1() RETURNS text AS E'
-plan = plpy.prepare("SELECT $1 AS testvalue", ["text"])
-rv = plpy.execute(plan, [u"\\x80"], 1)
-return rv[0]["testvalue"]
-' LANGUAGE plpythonu;
-CREATE FUNCTION unicode_plan2() RETURNS text AS E'
-plan = plpy.prepare("SELECT $1 || $2 AS testvalue", ["text", u"text"])
-rv = plpy.execute(plan, ["foo", "bar"], 1)
-return rv[0]["testvalue"]
-' LANGUAGE plpythonu;
-SELECT unicode_return();
- unicode_return
-----------------
- �
-(1 row)
-
-INSERT INTO unicode_test (testvalue) VALUES ('test');
-SELECT * FROM unicode_test;
- testvalue
------------
- �
-(1 row)
-
-SELECT unicode_plan1();
- unicode_plan1
----------------
- �
-(1 row)
-
-SELECT unicode_plan2();
- unicode_plan2
----------------
- foobar
-(1 row)
-
diff --git a/src/pl/plpython/expected/plpython_unicode_3.out b/src/pl/plpython/expected/plpython_unicode_3.out
index d023bc9b73..52d0aa0b1e 100644
--- a/src/pl/plpython/expected/plpython_unicode_3.out
+++ b/src/pl/plpython/expected/plpython_unicode_3.out
@@ -1,6 +1,7 @@
--
-- Unicode handling
--
+SET client_encoding TO UTF8;
CREATE TABLE unicode_test (
testvalue text NOT NULL
);
diff --git a/src/pl/plpython/plpython.c b/src/pl/plpython/plpython.c
index 5e600daa96..8108cfce2c 100644
--- a/src/pl/plpython/plpython.c
+++ b/src/pl/plpython/plpython.c
@@ -4510,6 +4510,14 @@ get_source_line(const char *src, int lineno)
if (next == NULL)
return pstrdup(s);
+ /*
+	 * Sanity check: next < s if the line was all-whitespace, which should
+ * never happen if Python reported a frame created on that line, but
+ * check anyway.
+ */
+ if (next < s)
+ return NULL;
+
return pnstrdup(s, next - s);
}
@@ -4606,6 +4614,7 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
PyObject *volatile code = NULL;
PyObject *volatile name = NULL;
PyObject *volatile lineno = NULL;
+ PyObject *volatile filename = NULL;
PG_TRY();
{
@@ -4624,6 +4633,10 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
name = PyObject_GetAttrString(code, "co_name");
if (name == NULL)
elog(ERROR, "could not get function name from Python code object");
+
+ filename = PyObject_GetAttrString(code, "co_filename");
+ if (filename == NULL)
+ elog(ERROR, "could not get file name from Python code object");
}
PG_CATCH();
{
@@ -4631,6 +4644,7 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
Py_XDECREF(code);
Py_XDECREF(name);
Py_XDECREF(lineno);
+ Py_XDECREF(filename);
PG_RE_THROW();
}
PG_END_TRY();
@@ -4641,6 +4655,7 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
char *proname;
char *fname;
char *line;
+ char *plain_filename;
long plain_lineno;
/*
@@ -4653,6 +4668,7 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
fname = PyString_AsString(name);
proname = PLy_procedure_name(PLy_curr_procedure);
+ plain_filename = PyString_AsString(filename);
plain_lineno = PyInt_AsLong(lineno);
if (proname == NULL)
@@ -4664,7 +4680,9 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
&tbstr, "\n PL/Python function \"%s\", line %ld, in %s",
proname, plain_lineno - 1, fname);
- if (PLy_curr_procedure)
+ /* function code object was compiled with "<string>" as the filename */
+ if (PLy_curr_procedure && plain_filename != NULL &&
+ strcmp(plain_filename, "<string>") == 0)
{
/*
* If we know the current procedure, append the exact line
@@ -4672,7 +4690,8 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
* module behavior. We could store the already line-split
* source to avoid splitting it every time, but producing a
* traceback is not the most important scenario to optimize
- * for.
+ * for. But we do not go as far as traceback.py in reading
+ * the source of imported modules.
*/
line = get_source_line(PLy_curr_procedure->src, plain_lineno);
if (line)
@@ -4687,6 +4706,7 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
Py_DECREF(code);
Py_DECREF(name);
Py_DECREF(lineno);
+ Py_DECREF(filename);
/* Release the current frame and go to the next one. */
tb_prev = tb;
diff --git a/src/pl/plpython/sql/plpython_unicode.sql b/src/pl/plpython/sql/plpython_unicode.sql
index 6b9fac682a..bdd40c40a5 100644
--- a/src/pl/plpython/sql/plpython_unicode.sql
+++ b/src/pl/plpython/sql/plpython_unicode.sql
@@ -2,6 +2,8 @@
-- Unicode handling
--
+SET client_encoding TO UTF8;
+
CREATE TABLE unicode_test (
testvalue text NOT NULL
);
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index ebc1f0d821..1cb418164b 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -1779,44 +1779,44 @@ drop cascades to text search dictionary dict
CREATE TYPE test_type AS (a int);
\d test_type
Composite type "public.test_type"
- Column | Type
---------+---------
- a | integer
+ Column | Type | Modifiers
+--------+---------+-----------
+ a | integer |
ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails
ERROR: relation "nosuchtype" does not exist
ALTER TYPE test_type ADD ATTRIBUTE b text;
\d test_type
Composite type "public.test_type"
- Column | Type
---------+---------
- a | integer
- b | text
+ Column | Type | Modifiers
+--------+---------+-----------
+ a | integer |
+ b | text |
ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails
ERROR: column "b" of relation "test_type" already exists
ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar;
\d test_type
-Composite type "public.test_type"
- Column | Type
---------+-------------------
- a | integer
- b | character varying
+ Composite type "public.test_type"
+ Column | Type | Modifiers
+--------+-------------------+-----------
+ a | integer |
+ b | character varying |
ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer;
\d test_type
Composite type "public.test_type"
- Column | Type
---------+---------
- a | integer
- b | integer
+ Column | Type | Modifiers
+--------+---------+-----------
+ a | integer |
+ b | integer |
ALTER TYPE test_type DROP ATTRIBUTE b;
\d test_type
Composite type "public.test_type"
- Column | Type
---------+---------
- a | integer
+ Column | Type | Modifiers
+--------+---------+-----------
+ a | integer |
ALTER TYPE test_type DROP ATTRIBUTE c; -- fails
ERROR: column "c" of relation "test_type" does not exist
@@ -1825,18 +1825,18 @@ NOTICE: column "c" of relation "test_type" does not exist, skipping
ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean;
\d test_type
Composite type "public.test_type"
- Column | Type
---------+---------
- d | boolean
+ Column | Type | Modifiers
+--------+---------+-----------
+ d | boolean |
ALTER TYPE test_type RENAME ATTRIBUTE a TO aa;
ERROR: column "a" does not exist
ALTER TYPE test_type RENAME ATTRIBUTE d TO dd;
\d test_type
Composite type "public.test_type"
- Column | Type
---------+---------
- dd | boolean
+ Column | Type | Modifiers
+--------+---------+-----------
+ dd | boolean |
DROP TYPE test_type;
CREATE TYPE test_type1 AS (a int, b text);
@@ -1845,12 +1845,13 @@ ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails
ERROR: cannot alter type "test_type1" because column "test_tbl1"."y" uses it
CREATE TYPE test_type2 AS (a int, b text);
CREATE TABLE test_tbl2 OF test_type2;
+CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2);
\d test_type2
Composite type "public.test_type2"
- Column | Type
---------+---------
- a | integer
- b | text
+ Column | Type | Modifiers
+--------+---------+-----------
+ a | integer |
+ b | text |
\d test_tbl2
Table "public.test_tbl2"
@@ -1858,6 +1859,7 @@ Composite type "public.test_type2"
--------+---------+-----------
a | integer |
b | text |
+Number of child tables: 1 (Use \d+ to list them.)
Typed table of type: test_type2
ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails
@@ -1866,11 +1868,11 @@ HINT: Use ALTER ... CASCADE to alter the typed tables too.
ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE;
\d test_type2
Composite type "public.test_type2"
- Column | Type
---------+---------
- a | integer
- b | text
- c | text
+ Column | Type | Modifiers
+--------+---------+-----------
+ a | integer |
+ b | text |
+ c | text |
\d test_tbl2
Table "public.test_tbl2"
@@ -1879,6 +1881,7 @@ Composite type "public.test_type2"
a | integer |
b | text |
c | text |
+Number of child tables: 1 (Use \d+ to list them.)
Typed table of type: test_type2
ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails
@@ -1886,12 +1889,12 @@ ERROR: cannot alter type "test_type2" because it is the type of a typed table
HINT: Use ALTER ... CASCADE to alter the typed tables too.
ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE;
\d test_type2
-Composite type "public.test_type2"
- Column | Type
---------+-------------------
- a | integer
- b | character varying
- c | text
+ Composite type "public.test_type2"
+ Column | Type | Modifiers
+--------+-------------------+-----------
+ a | integer |
+ b | character varying |
+ c | text |
\d test_tbl2
Table "public.test_tbl2"
@@ -1900,6 +1903,7 @@ Composite type "public.test_type2"
a | integer |
b | character varying |
c | text |
+Number of child tables: 1 (Use \d+ to list them.)
Typed table of type: test_type2
ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails
@@ -1908,10 +1912,10 @@ HINT: Use ALTER ... CASCADE to alter the typed tables too.
ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE;
\d test_type2
Composite type "public.test_type2"
- Column | Type
---------+---------
- a | integer
- c | text
+ Column | Type | Modifiers
+--------+---------+-----------
+ a | integer |
+ c | text |
\d test_tbl2
Table "public.test_tbl2"
@@ -1919,6 +1923,7 @@ Composite type "public.test_type2"
--------+---------+-----------
a | integer |
c | text |
+Number of child tables: 1 (Use \d+ to list them.)
Typed table of type: test_type2
ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails
@@ -1927,10 +1932,10 @@ HINT: Use ALTER ... CASCADE to alter the typed tables too.
ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE;
\d test_type2
Composite type "public.test_type2"
- Column | Type
---------+---------
- aa | integer
- c | text
+ Column | Type | Modifiers
+--------+---------+-----------
+ aa | integer |
+ c | text |
\d test_tbl2
Table "public.test_tbl2"
@@ -1938,7 +1943,55 @@ Composite type "public.test_type2"
--------+---------+-----------
aa | integer |
c | text |
+Number of child tables: 1 (Use \d+ to list them.)
Typed table of type: test_type2
+\d test_tbl2_subclass
+Table "public.test_tbl2_subclass"
+ Column | Type | Modifiers
+--------+---------+-----------
+ aa | integer |
+ c | text |
+Inherits: test_tbl2
+
+DROP TABLE test_tbl2_subclass;
CREATE TYPE test_type_empty AS ();
DROP TYPE test_type_empty;
+--
+-- typed tables: OF / NOT OF
+--
+CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2));
+ALTER TYPE tt_t0 DROP ATTRIBUTE z;
+CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK
+CREATE TABLE tt1 (x int, y bigint); -- wrong base type
+CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod
+CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order
+CREATE TABLE tt4 (x int); -- too few columns
+CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too many columns
+CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent
+CREATE TABLE tt7 (x int, q text, y numeric(8,2)) WITH OIDS;
+ALTER TABLE tt7 DROP q; -- OK
+ALTER TABLE tt0 OF tt_t0;
+ALTER TABLE tt1 OF tt_t0;
+ERROR: table "tt1" has different type for column "y"
+ALTER TABLE tt2 OF tt_t0;
+ERROR: table "tt2" has different type for column "y"
+ALTER TABLE tt3 OF tt_t0;
+ERROR: table has column "y" where type requires "x"
+ALTER TABLE tt4 OF tt_t0;
+ERROR: table is missing column "y"
+ALTER TABLE tt5 OF tt_t0;
+ERROR: table has extra column "z"
+ALTER TABLE tt6 OF tt_t0;
+ERROR: typed tables cannot inherit
+ALTER TABLE tt7 OF tt_t0;
+CREATE TYPE tt_t1 AS (x int, y numeric(8,2));
+ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table
+ALTER TABLE tt7 NOT OF;
+\d tt7
+ Table "public.tt7"
+ Column | Type | Modifiers
+--------+--------------+-----------
+ x | integer |
+ y | numeric(8,2) |
+
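The new tt0..tt7 cases exercise ALTER TABLE ... OF / NOT OF: a stand-alone table can be attached to a composite type only when its columns match the type exactly (names, types, typmods, order, and no inheritance parent), and NOT OF detaches it again. A distilled sketch of the happy path, with made-up names, not part of the patch:

CREATE TYPE pair_t AS (x int, y numeric(8,2));
CREATE TABLE pair_tbl (x int, y numeric(8,2));  -- matches pair_t column for column

ALTER TABLE pair_tbl OF pair_t;     -- the table becomes a typed table of pair_t
ALTER TABLE pair_tbl NOT OF;        -- detaches it again; the columns are kept

DROP TABLE pair_tbl;
DROP TYPE pair_t;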
diff --git a/src/test/regress/expected/collate.linux.utf8.out b/src/test/regress/expected/collate.linux.utf8.out
index f0008ddf14..9813b6847c 100644
--- a/src/test/regress/expected/collate.linux.utf8.out
+++ b/src/test/regress/expected/collate.linux.utf8.out
@@ -825,6 +825,46 @@ ORDER BY a.b, b.b;
bbc | bbc | f | f | f | f
(16 rows)
+-- collation override in plpgsql
+CREATE FUNCTION mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$
+declare
+ xx text := x;
+ yy text := y;
+begin
+ return xx < yy;
+end
+$$;
+SELECT mylt2('a', 'B' collate "en_US") as t, mylt2('a', 'B' collate "C") as f;
+ t | f
+---+---
+ t | f
+(1 row)
+
+CREATE OR REPLACE FUNCTION
+ mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$
+declare
+ xx text COLLATE "POSIX" := x;
+ yy text := y;
+begin
+ return xx < yy;
+end
+$$;
+SELECT mylt2('a', 'B') as f;
+ f
+---
+ f
+(1 row)
+
+SELECT mylt2('a', 'B' collate "C") as fail; -- conflicting collations
+ERROR: could not determine which collation to use for string comparison
+HINT: Use the COLLATE clause to set the collation explicitly.
+CONTEXT: PL/pgSQL function "mylt2" line 6 at RETURN
+SELECT mylt2('a', 'B' collate "POSIX") as f;
+ f
+---
+ f
+(1 row)
+
-- polymorphism
SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test1)) ORDER BY 1;
unnest
@@ -1001,9 +1041,9 @@ Table "public.collate_dep_test1"
\d collate_dep_test2
Composite type "public.collate_dep_test2"
- Column | Type
---------+---------
- x | integer
+ Column | Type | Modifiers
+--------+---------+-----------
+ x | integer |
DROP TABLE collate_dep_test1, collate_dep_test4t;
DROP TYPE collate_dep_test2;
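
The mylt2 tests above show that plpgsql variables initialized from parameters normally pick up the collation of the actual arguments, and that a COLLATE clause on the declaration overrides this. A distilled sketch of the override, with an illustrative function name, not part of the patch:

CREATE FUNCTION lt_c(a text, b text) RETURNS boolean LANGUAGE plpgsql AS $$
DECLARE
    aa text COLLATE "C" := a;   -- explicit collation pins the comparison below
BEGIN
    RETURN aa < b;
END
$$;

SELECT lt_c('a', 'B');   -- false: under "C", 'B' (0x42) sorts before 'a' (0x61)
-- Passing an argument with a conflicting explicit COLLATE, as in the tests above,
-- should raise "could not determine which collation to use".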
diff --git a/src/test/regress/expected/collate.out b/src/test/regress/expected/collate.out
index 251a8a5178..627ae1f3f8 100644
--- a/src/test/regress/expected/collate.out
+++ b/src/test/regress/expected/collate.out
@@ -460,6 +460,14 @@ ERROR: recursive query "foo" column 1 has collation "C" in non-recursive term b
LINE 2: (SELECT x FROM (VALUES('a' COLLATE "C"),('b')) t(x)
^
HINT: Use the COLLATE clause to set the collation of the non-recursive term.
+SELECT a, b, a < b as lt FROM
+ (VALUES ('a', 'B'), ('A', 'b' COLLATE "C")) v(a,b);
+ a | b | lt
+---+---+----
+ a | B | f
+ A | b | t
+(2 rows)
+
-- casting
SELECT CAST('42' AS text COLLATE "C");
ERROR: syntax error at or near "COLLATE"
diff --git a/src/test/regress/expected/typed_table.out b/src/test/regress/expected/typed_table.out
index 0874a64d55..1fe426d6e3 100644
--- a/src/test/regress/expected/typed_table.out
+++ b/src/test/regress/expected/typed_table.out
@@ -91,6 +91,8 @@ DETAIL: drop cascades to table persons
drop cascades to function get_all_persons()
drop cascades to table persons2
drop cascades to table persons3
+CREATE TABLE persons5 OF stuff; -- only CREATE TYPE AS types may be used
+ERROR: type stuff is not a composite type
DROP TABLE stuff;
-- implicit casting
CREATE TYPE person_type AS (id int, name text);
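The new persons5 case checks that CREATE TABLE ... OF accepts only a composite type created with CREATE TYPE ... AS; a table's own row type is rejected. A short sketch of the distinction, with illustrative names, not part of the patch:

CREATE TABLE plain_tbl (id int);
CREATE TABLE typed_nope OF plain_tbl;        -- expected to fail: a table, not a
                                             -- stand-alone composite type
CREATE TYPE person_t AS (id int, name text);
CREATE TABLE typed_ok OF person_t;           -- accepted: CREATE TYPE ... AS type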
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index b9ae622b04..5fe3724c82 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -2277,8 +2277,14 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
postmaster_running = true;
+#ifdef WIN64
+/* need a series of two casts to convert HANDLE without compiler warning */
+#define ULONGPID(x) (unsigned long) (unsigned long long) (x)
+#else
+#define ULONGPID(x) (unsigned long) (x)
+#endif
printf(_("running on port %d with pid %lu\n"),
- port, (unsigned long) postmaster_pid);
+ port, ULONGPID(postmaster_pid));
}
else
{
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
index 43a9ce971f..4b2afe8bc9 100644
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -1344,6 +1344,7 @@ ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails
CREATE TYPE test_type2 AS (a int, b text);
CREATE TABLE test_tbl2 OF test_type2;
+CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2);
\d test_type2
\d test_tbl2
@@ -1366,6 +1367,39 @@ ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails
ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE;
\d test_type2
\d test_tbl2
+\d test_tbl2_subclass
+
+DROP TABLE test_tbl2_subclass;
CREATE TYPE test_type_empty AS ();
DROP TYPE test_type_empty;
+
+--
+-- typed tables: OF / NOT OF
+--
+
+CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2));
+ALTER TYPE tt_t0 DROP ATTRIBUTE z;
+CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK
+CREATE TABLE tt1 (x int, y bigint); -- wrong base type
+CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod
+CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order
+CREATE TABLE tt4 (x int); -- too few columns
+CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too many columns
+CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent
+CREATE TABLE tt7 (x int, q text, y numeric(8,2)) WITH OIDS;
+ALTER TABLE tt7 DROP q; -- OK
+
+ALTER TABLE tt0 OF tt_t0;
+ALTER TABLE tt1 OF tt_t0;
+ALTER TABLE tt2 OF tt_t0;
+ALTER TABLE tt3 OF tt_t0;
+ALTER TABLE tt4 OF tt_t0;
+ALTER TABLE tt5 OF tt_t0;
+ALTER TABLE tt6 OF tt_t0;
+ALTER TABLE tt7 OF tt_t0;
+
+CREATE TYPE tt_t1 AS (x int, y numeric(8,2));
+ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table
+ALTER TABLE tt7 NOT OF;
+\d tt7
diff --git a/src/test/regress/sql/collate.linux.utf8.sql b/src/test/regress/sql/collate.linux.utf8.sql
index 51d65cf0da..dfb10e4d15 100644
--- a/src/test/regress/sql/collate.linux.utf8.sql
+++ b/src/test/regress/sql/collate.linux.utf8.sql
@@ -256,6 +256,34 @@ FROM collate_test1 a, collate_test1 b
ORDER BY a.b, b.b;
+-- collation override in plpgsql
+
+CREATE FUNCTION mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$
+declare
+ xx text := x;
+ yy text := y;
+begin
+ return xx < yy;
+end
+$$;
+
+SELECT mylt2('a', 'B' collate "en_US") as t, mylt2('a', 'B' collate "C") as f;
+
+CREATE OR REPLACE FUNCTION
+ mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$
+declare
+ xx text COLLATE "POSIX" := x;
+ yy text := y;
+begin
+ return xx < yy;
+end
+$$;
+
+SELECT mylt2('a', 'B') as f;
+SELECT mylt2('a', 'B' collate "C") as fail; -- conflicting collations
+SELECT mylt2('a', 'B' collate "POSIX") as f;
+
+
-- polymorphism
SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test1)) ORDER BY 1;
diff --git a/src/test/regress/sql/collate.sql b/src/test/regress/sql/collate.sql
index 150ad2c5bc..9585852bc5 100644
--- a/src/test/regress/sql/collate.sql
+++ b/src/test/regress/sql/collate.sql
@@ -154,6 +154,9 @@ WITH RECURSIVE foo(x) AS
SELECT (x || 'c') COLLATE "POSIX" FROM foo WHERE length(x) < 10)
SELECT * FROM foo;
+SELECT a, b, a < b as lt FROM
+ (VALUES ('a', 'B'), ('A', 'b' COLLATE "C")) v(a,b);
+
-- casting
diff --git a/src/test/regress/sql/typed_table.sql b/src/test/regress/sql/typed_table.sql
index b0d452c387..25aaccb8bc 100644
--- a/src/test/regress/sql/typed_table.sql
+++ b/src/test/regress/sql/typed_table.sql
@@ -46,6 +46,8 @@ CREATE TABLE persons4 OF person_type (
DROP TYPE person_type RESTRICT;
DROP TYPE person_type CASCADE;
+CREATE TABLE persons5 OF stuff; -- only CREATE TYPE AS types may be used
+
DROP TABLE stuff;
diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl
index eea4a70fe7..971d740a9e 100644
--- a/src/tools/msvc/config_default.pl
+++ b/src/tools/msvc/config_default.pl
@@ -8,7 +8,7 @@ our $config = {
# float4byval=>1, # --disable-float4-byval, on by default
# float8byval=>0, # --disable-float8-byval, off by default
# blocksize => 8, # --with-blocksize, 8kB by default
- # wal_blocksize => 8, # --with-wal-blocksize, 8kb by default
+ # wal_blocksize => 8, # --with-wal-blocksize, 8kB by default
# wal_segsize => 16, # --with-wal-segsize, 16MB by default
ldap=>1, # --with-ldap
nls=>undef, # --enable-nls=<path>
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index ac7e6aac2a..8c920b883a 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -13,7 +13,8 @@ my $startdir = getcwd();
chdir "../../.." if (-d "../../../src/tools/msvc");
-require 'src/tools/msvc/config.pl';
+require 'src/tools/msvc/config_default.pl';
+require 'src/tools/msvc/config.pl' if (-f 'src/tools/msvc/config.pl');
# buildenv.pl is for specifying the build environment settings
# it should contain lines like:
@@ -227,6 +228,14 @@ sub fetchRegressOpts
# ignore anything that isn't an option starting with --
@opts = grep { $_ !~ /\$\(/ && $_ =~ /^--/ } split(/\s+/,$1);
}
+ if ($m =~ /^\s*ENCODING\s*=\s*(\S+)/m)
+ {
+ push @opts, "--encoding=$1";
+ }
+ if ($m =~ /^\s*NO_LOCALE\s*=\s*\S+/m)
+ {
+ push @opts, "--no-locale";
+ }
return @opts;
}