summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPavan Deolasee2015-06-05 13:35:08 +0000
committerPavan Deolasee2015-06-05 13:35:08 +0000
commitbbf2fec2f194a40b25561c2d2d62c432b49bdd1a (patch)
treeaef582c5cba2ab7b111c0e20e9ebf71dab32c91f
parent633da80d8080348ae59dcdd1404a061abc8d0ead (diff)
parent38d500ac2e5d4d4f3468b505962fb85850c1ff4b (diff)
Merge remote-tracking branch 'remotes/PGSQL/master' into XL_NEW_MASTER
Conflicts: .gitignore contrib/Makefile src/backend/access/common/heaptuple.c src/backend/access/transam/rmgr.c src/backend/access/transam/xact.c src/backend/catalog/Makefile src/backend/catalog/catalog.c src/backend/catalog/genbki.pl src/backend/catalog/namespace.c src/backend/commands/sequence.c src/backend/executor/execMain.c src/backend/executor/functions.c src/backend/executor/nodeAgg.c src/backend/executor/nodeModifyTable.c src/backend/nodes/copyfuncs.c src/backend/nodes/outfuncs.c src/backend/nodes/readfuncs.c src/backend/optimizer/plan/createplan.c src/backend/optimizer/plan/planner.c src/backend/optimizer/plan/setrefs.c src/backend/optimizer/util/pathnode.c src/backend/parser/gram.y src/backend/parser/parse_agg.c src/backend/parser/parse_utilcmd.c src/backend/postmaster/postmaster.c src/backend/replication/logical/decode.c src/backend/storage/file/fd.c src/backend/storage/ipc/procsignal.c src/backend/tcop/utility.c src/backend/utils/adt/lockfuncs.c src/backend/utils/adt/ruleutils.c src/backend/utils/sort/tuplesort.c src/backend/utils/time/snapmgr.c src/include/access/rmgrlist.h src/include/catalog/pg_aggregate.h src/include/catalog/pg_proc.h src/include/nodes/execnodes.h src/include/nodes/plannodes.h src/include/nodes/primnodes.h src/include/nodes/relation.h src/include/storage/lwlock.h src/include/storage/procsignal.h src/include/utils/plancache.h src/include/utils/snapshot.h src/test/regress/expected/foreign_key.out src/test/regress/expected/triggers.out src/test/regress/expected/with.out src/test/regress/input/constraints.source src/test/regress/output/constraints.source src/test/regress/pg_regress.c src/test/regress/serial_schedule src/test/regress/sql/returning.sql
-rw-r--r--.gitignore2
-rw-r--r--GNUmakefile.in6
-rw-r--r--config/python.m418
-rwxr-xr-xconfigure67
-rw-r--r--configure.in46
-rw-r--r--contrib/Makefile17
-rw-r--r--contrib/btree_gin/btree_gin.c37
-rw-r--r--contrib/btree_gist/btree_utils_num.c2
-rw-r--r--contrib/btree_gist/btree_utils_var.c2
-rw-r--r--contrib/citext/Makefile2
-rw-r--r--contrib/citext/citext--1.0--1.1.sql21
-rw-r--r--contrib/citext/citext--1.1.sql (renamed from contrib/citext/citext--1.0.sql)10
-rw-r--r--contrib/citext/citext.control2
-rw-r--r--contrib/citext/expected/citext.out17
-rw-r--r--contrib/citext/expected/citext_1.out17
-rw-r--r--contrib/citext/sql/citext.sql4
-rw-r--r--contrib/cube/cube.c2
-rw-r--r--contrib/earthdistance/Makefile2
-rw-r--r--contrib/file_fdw/file_fdw.c12
-rw-r--r--contrib/fuzzystrmatch/dmetaphone.c2
-rw-r--r--contrib/hstore/hstore_gin.c2
-rw-r--r--contrib/hstore/hstore_gist.c4
-rw-r--r--contrib/hstore_plperl/.gitignore4
-rw-r--r--contrib/hstore_plperl/Makefile37
-rw-r--r--contrib/hstore_plperl/expected/create_transform.out75
-rw-r--r--contrib/hstore_plperl/expected/hstore_plperl.out48
-rw-r--r--contrib/hstore_plperl/expected/hstore_plperlu.out180
-rw-r--r--contrib/hstore_plperl/hstore_plperl--1.0.sql17
-rw-r--r--contrib/hstore_plperl/hstore_plperl.c88
-rw-r--r--contrib/hstore_plperl/hstore_plperl.control6
-rw-r--r--contrib/hstore_plperl/hstore_plperlu--1.0.sql17
-rw-r--r--contrib/hstore_plperl/hstore_plperlu.control6
-rw-r--r--contrib/hstore_plperl/sql/create_transform.sql49
-rw-r--r--contrib/hstore_plperl/sql/hstore_plperl.sql43
-rw-r--r--contrib/hstore_plperl/sql/hstore_plperlu.sql121
-rw-r--r--contrib/hstore_plpython/.gitignore6
-rw-r--r--contrib/hstore_plpython/Makefile37
-rw-r--r--contrib/hstore_plpython/expected/hstore_plpython.out128
-rw-r--r--contrib/hstore_plpython/hstore_plpython.c114
-rw-r--r--contrib/hstore_plpython/hstore_plpython2u--1.0.sql19
-rw-r--r--contrib/hstore_plpython/hstore_plpython2u.control6
-rw-r--r--contrib/hstore_plpython/hstore_plpython3u--1.0.sql19
-rw-r--r--contrib/hstore_plpython/hstore_plpython3u.control6
-rw-r--r--contrib/hstore_plpython/hstore_plpythonu--1.0.sql19
-rw-r--r--contrib/hstore_plpython/hstore_plpythonu.control6
-rw-r--r--contrib/hstore_plpython/sql/hstore_plpython.sql107
-rw-r--r--contrib/intarray/_int_gin.c3
-rw-r--r--contrib/intarray/_int_gist.c2
-rw-r--r--contrib/intarray/_intbig_gist.c2
-rw-r--r--contrib/isn/isn.c2
-rw-r--r--contrib/ltree/_ltree_gist.c2
-rw-r--r--contrib/ltree/crc32.c5
-rw-r--r--contrib/ltree/ltree_gist.c2
-rw-r--r--contrib/ltree_plpython/.gitignore6
-rw-r--r--contrib/ltree_plpython/Makefile37
-rw-r--r--contrib/ltree_plpython/expected/ltree_plpython.out45
-rw-r--r--contrib/ltree_plpython/ltree_plpython.c31
-rw-r--r--contrib/ltree_plpython/ltree_plpython2u--1.0.sql12
-rw-r--r--contrib/ltree_plpython/ltree_plpython2u.control6
-rw-r--r--contrib/ltree_plpython/ltree_plpython3u--1.0.sql12
-rw-r--r--contrib/ltree_plpython/ltree_plpython3u.control6
-rw-r--r--contrib/ltree_plpython/ltree_plpythonu--1.0.sql12
-rw-r--r--contrib/ltree_plpython/ltree_plpythonu.control6
-rw-r--r--contrib/ltree_plpython/sql/ltree_plpython.sql37
-rw-r--r--contrib/pageinspect/brinfuncs.c40
-rw-r--r--contrib/pageinspect/ginfuncs.c2
-rw-r--r--contrib/pg_buffercache/pg_buffercache_pages.c3
-rw-r--r--contrib/pg_stat_statements/pg_stat_statements.c64
-rw-r--r--contrib/pg_test_fsync/Makefile18
-rw-r--r--contrib/pg_test_timing/Makefile18
-rw-r--r--contrib/pg_trgm/trgm_gin.c3
-rw-r--r--contrib/pg_trgm/trgm_gist.c3
-rw-r--r--contrib/pgcrypto/expected/pgp-decrypt.out51
-rw-r--r--contrib/pgcrypto/expected/pgp-pubkey-decrypt.out4
-rw-r--r--contrib/pgcrypto/mbuf.c2
-rw-r--r--contrib/pgcrypto/pgp-armor.c2
-rw-r--r--contrib/pgcrypto/pgp-decrypt.c70
-rw-r--r--contrib/pgcrypto/pgp-pgsql.c33
-rw-r--r--contrib/pgcrypto/pgp.h12
-rw-r--r--contrib/pgcrypto/px.c3
-rw-r--r--contrib/pgcrypto/px.h2
-rw-r--r--contrib/pgcrypto/sql/pgp-decrypt.sql45
-rw-r--r--contrib/pgstattuple/Makefile4
-rw-r--r--contrib/pgstattuple/pgstatapprox.c274
-rw-r--r--contrib/pgstattuple/pgstattuple--1.2--1.3.sql18
-rw-r--r--contrib/pgstattuple/pgstattuple--1.3.sql (renamed from contrib/pgstattuple/pgstattuple--1.2.sql)18
-rw-r--r--contrib/pgstattuple/pgstattuple.control2
-rw-r--r--contrib/postgres_fdw/connection.c3
-rw-r--r--contrib/postgres_fdw/deparse.c7
-rw-r--r--contrib/postgres_fdw/expected/postgres_fdw.out57
-rw-r--r--contrib/postgres_fdw/postgres_fdw.c42
-rw-r--r--contrib/postgres_fdw/postgres_fdw.h2
-rw-r--r--contrib/postgres_fdw/sql/postgres_fdw.sql3
-rw-r--r--contrib/seg/seg.c3
-rw-r--r--contrib/sepgsql/dml.c31
-rw-r--r--contrib/spi/insert_username.c2
-rw-r--r--contrib/spi/timetravel.c4
-rw-r--r--contrib/start-scripts/linux2
-rw-r--r--contrib/test_decoding/Makefile19
-rw-r--r--contrib/test_decoding/expected/ddl.out64
-rw-r--r--contrib/test_decoding/expected/decoding_in_xact.out2
-rw-r--r--contrib/test_decoding/expected/replorigin.out141
-rw-r--r--contrib/test_decoding/expected/toast.out9
-rw-r--r--contrib/test_decoding/specs/ondisk_startup.spec2
-rw-r--r--contrib/test_decoding/sql/ddl.sql19
-rw-r--r--contrib/test_decoding/sql/decoding_in_xact.sql2
-rw-r--r--contrib/test_decoding/sql/replorigin.sql64
-rw-r--r--contrib/test_decoding/sql/toast.sql5
-rw-r--r--contrib/test_decoding/test_decoding.c36
-rw-r--r--contrib/tsm_system_rows/.gitignore4
-rw-r--r--contrib/tsm_system_rows/Makefile21
-rw-r--r--contrib/tsm_system_rows/expected/tsm_system_rows.out31
-rw-r--r--contrib/tsm_system_rows/sql/tsm_system_rows.sql14
-rw-r--r--contrib/tsm_system_rows/tsm_system_rows--1.0.sql44
-rw-r--r--contrib/tsm_system_rows/tsm_system_rows.c271
-rw-r--r--contrib/tsm_system_rows/tsm_system_rows.control5
-rw-r--r--contrib/tsm_system_time/.gitignore4
-rw-r--r--contrib/tsm_system_time/Makefile21
-rw-r--r--contrib/tsm_system_time/expected/tsm_system_time.out54
-rw-r--r--contrib/tsm_system_time/sql/tsm_system_time.sql14
-rw-r--r--contrib/tsm_system_time/tsm_system_time--1.0.sql39
-rw-r--r--contrib/tsm_system_time/tsm_system_time.c317
-rw-r--r--contrib/tsm_system_time/tsm_system_time.control5
-rw-r--r--doc/src/sgml/Makefile3
-rw-r--r--doc/src/sgml/backup.sgml32
-rw-r--r--doc/src/sgml/brin.sgml57
-rw-r--r--doc/src/sgml/btree-gin.sgml2
-rw-r--r--doc/src/sgml/btree-gist.sgml4
-rw-r--r--doc/src/sgml/catalogs.sgml433
-rw-r--r--doc/src/sgml/charset.sgml2
-rw-r--r--doc/src/sgml/citext.sgml5
-rw-r--r--doc/src/sgml/client-auth.sgml74
-rw-r--r--doc/src/sgml/config.sgml15
-rw-r--r--doc/src/sgml/contrib.sgml5
-rw-r--r--doc/src/sgml/custom-scan.sgml124
-rw-r--r--doc/src/sgml/datatype.sgml32
-rw-r--r--doc/src/sgml/ddl.sgml2
-rw-r--r--doc/src/sgml/event-trigger.sgml13
-rw-r--r--doc/src/sgml/fdwhandler.sgml363
-rw-r--r--doc/src/sgml/filelist.sgml7
-rw-r--r--doc/src/sgml/func.sgml534
-rw-r--r--doc/src/sgml/gist.sgml55
-rw-r--r--doc/src/sgml/high-availability.sgml40
-rw-r--r--doc/src/sgml/hstore.sgml19
-rw-r--r--doc/src/sgml/indices.sgml2
-rw-r--r--doc/src/sgml/information_schema.sgml85
-rw-r--r--doc/src/sgml/installation.sgml61
-rw-r--r--doc/src/sgml/json.sgml6
-rw-r--r--doc/src/sgml/keywords.sgml7
-rw-r--r--doc/src/sgml/logicaldecoding.sgml45
-rw-r--r--doc/src/sgml/ltree.sgml19
-rw-r--r--doc/src/sgml/maintenance.sgml11
-rw-r--r--doc/src/sgml/mvcc.sgml90
-rw-r--r--doc/src/sgml/pgcrypto.sgml8
-rw-r--r--doc/src/sgml/pgstatstatements.sgml26
-rw-r--r--doc/src/sgml/pgstattuple.sgml136
-rw-r--r--doc/src/sgml/pgtrgm.sgml6
-rw-r--r--doc/src/sgml/plpgsql.sgml14
-rw-r--r--doc/src/sgml/postgres-fdw.sgml25
-rw-r--r--doc/src/sgml/postgres.sgml2
-rw-r--r--doc/src/sgml/protocol.sgml15
-rw-r--r--doc/src/sgml/queries.sgml175
-rw-r--r--doc/src/sgml/rangetypes.sgml6
-rw-r--r--doc/src/sgml/ref/allfiles.sgml5
-rw-r--r--doc/src/sgml/ref/alter_extension.sgml21
-rw-r--r--doc/src/sgml/ref/alter_foreign_table.sgml4
-rw-r--r--doc/src/sgml/ref/alter_policy.sgml2
-rw-r--r--doc/src/sgml/ref/alter_table.sgml26
-rw-r--r--doc/src/sgml/ref/comment.sgml22
-rw-r--r--doc/src/sgml/ref/create_foreign_table.sgml4
-rw-r--r--doc/src/sgml/ref/create_function.sgml18
-rw-r--r--doc/src/sgml/ref/create_index.sgml2
-rw-r--r--doc/src/sgml/ref/create_policy.sgml73
-rw-r--r--doc/src/sgml/ref/create_rule.sgml6
-rw-r--r--doc/src/sgml/ref/create_table.sgml4
-rw-r--r--doc/src/sgml/ref/create_tablespace.sgml2
-rw-r--r--doc/src/sgml/ref/create_transform.sgml207
-rw-r--r--doc/src/sgml/ref/create_trigger.sgml5
-rw-r--r--doc/src/sgml/ref/create_view.sgml9
-rw-r--r--doc/src/sgml/ref/drop_owned.sgml2
-rw-r--r--doc/src/sgml/ref/drop_transform.sgml123
-rw-r--r--doc/src/sgml/ref/insert.sgml404
-rw-r--r--doc/src/sgml/ref/lock.sgml8
-rw-r--r--doc/src/sgml/ref/pg_basebackup.sgml14
-rw-r--r--doc/src/sgml/ref/pg_dumpall.sgml4
-rw-r--r--doc/src/sgml/ref/pg_xlogdump.sgml (renamed from doc/src/sgml/pg_xlogdump.sgml)0
-rw-r--r--doc/src/sgml/ref/pgtestfsync.sgml (renamed from doc/src/sgml/pgtestfsync.sgml)10
-rw-r--r--doc/src/sgml/ref/pgtesttiming.sgml (renamed from doc/src/sgml/pgtesttiming.sgml)10
-rw-r--r--doc/src/sgml/ref/reassign_owned.sgml3
-rw-r--r--doc/src/sgml/ref/reindex.sgml11
-rw-r--r--doc/src/sgml/ref/reindexdb.sgml19
-rw-r--r--doc/src/sgml/ref/select.sgml94
-rw-r--r--doc/src/sgml/reference.sgml5
-rw-r--r--doc/src/sgml/release-9.0.sgml538
-rw-r--r--doc/src/sgml/release-9.1.sgml614
-rw-r--r--doc/src/sgml/release-9.2.sgml677
-rw-r--r--doc/src/sgml/release-9.3.sgml698
-rw-r--r--doc/src/sgml/release-9.4.sgml1424
-rw-r--r--doc/src/sgml/replication-origins.sgml93
-rw-r--r--doc/src/sgml/rules.sgml11
-rw-r--r--doc/src/sgml/storage.sgml42
-rw-r--r--doc/src/sgml/stylesheet-fo.xsl7
-rw-r--r--doc/src/sgml/tablesample-method.sgml139
-rw-r--r--doc/src/sgml/textsearch.sgml12
-rw-r--r--doc/src/sgml/trigger.sgml52
-rw-r--r--doc/src/sgml/tsm-system-rows.sgml50
-rw-r--r--doc/src/sgml/tsm-system-time.sgml51
-rw-r--r--doc/src/sgml/xfunc.sgml5
-rw-r--r--doc/src/sgml/xtypes.sgml71
-rw-r--r--src/Makefile.global.in34
-rw-r--r--src/Makefile.shlib2
-rw-r--r--src/backend/access/Makefile3
-rw-r--r--src/backend/access/brin/Makefile2
-rw-r--r--src/backend/access/brin/brin.c107
-rw-r--r--src/backend/access/brin/brin_inclusion.c696
-rw-r--r--src/backend/access/brin/brin_minmax.c169
-rw-r--r--src/backend/access/brin/brin_pageops.c8
-rw-r--r--src/backend/access/brin/brin_revmap.c18
-rw-r--r--src/backend/access/brin/brin_tuple.c23
-rw-r--r--src/backend/access/common/heaptuple.c48
-rw-r--r--src/backend/access/gin/ginarrayproc.c2
-rw-r--r--src/backend/access/gin/ginbtree.c2
-rw-r--r--src/backend/access/gin/ginget.c3
-rw-r--r--src/backend/access/gin/ginpostinglist.c14
-rw-r--r--src/backend/access/gin/ginutil.c2
-rw-r--r--src/backend/access/gin/ginxlog.c2
-rw-r--r--src/backend/access/gist/gist.c2
-rw-r--r--src/backend/access/gist/gistget.c81
-rw-r--r--src/backend/access/gist/gistproc.c39
-rw-r--r--src/backend/access/gist/gistscan.c29
-rw-r--r--src/backend/access/gist/gistutil.c2
-rw-r--r--src/backend/access/heap/README.tuplock20
-rw-r--r--src/backend/access/heap/heapam.c588
-rw-r--r--src/backend/access/heap/hio.c27
-rw-r--r--src/backend/access/heap/rewriteheap.c2
-rw-r--r--src/backend/access/heap/tuptoaster.c44
-rw-r--r--src/backend/access/index/genam.c20
-rw-r--r--src/backend/access/nbtree/README2
-rw-r--r--src/backend/access/nbtree/nbtinsert.c31
-rw-r--r--src/backend/access/nbtree/nbtpage.c11
-rw-r--r--src/backend/access/nbtree/nbtree.c5
-rw-r--r--src/backend/access/nbtree/nbtsearch.c10
-rw-r--r--src/backend/access/nbtree/nbtsort.c2
-rw-r--r--src/backend/access/nbtree/nbtutils.c6
-rw-r--r--src/backend/access/rmgrdesc/Makefile6
-rw-r--r--src/backend/access/rmgrdesc/barrierdesc.c32
-rw-r--r--src/backend/access/rmgrdesc/committsdesc.c8
-rw-r--r--src/backend/access/rmgrdesc/heapdesc.c9
-rw-r--r--src/backend/access/rmgrdesc/replorigindesc.c63
-rw-r--r--src/backend/access/rmgrdesc/smgrdesc.c2
-rw-r--r--src/backend/access/rmgrdesc/xactdesc.c39
-rw-r--r--src/backend/access/rmgrdesc/xlogdesc.c4
-rw-r--r--src/backend/access/spgist/spgkdtreeproc.c2
-rw-r--r--src/backend/access/spgist/spgquadtreeproc.c2
-rw-r--r--src/backend/access/spgist/spgscan.c1
-rw-r--r--src/backend/access/tablesample/Makefile17
-rw-r--r--src/backend/access/tablesample/bernoulli.c233
-rw-r--r--src/backend/access/tablesample/system.c186
-rw-r--r--src/backend/access/tablesample/tablesample.c371
-rw-r--r--src/backend/access/transam/Makefile2
-rw-r--r--src/backend/access/transam/README.parallel223
-rw-r--r--src/backend/access/transam/commit_ts.c125
-rw-r--r--src/backend/access/transam/multixact.c396
-rw-r--r--src/backend/access/transam/parallel.c1010
-rw-r--r--src/backend/access/transam/rmgr.c1
-rw-r--r--src/backend/access/transam/twophase.c47
-rw-r--r--src/backend/access/transam/varsup.c7
-rw-r--r--src/backend/access/transam/xact.c647
-rw-r--r--src/backend/access/transam/xlog.c773
-rw-r--r--src/backend/access/transam/xlogarchive.c40
-rw-r--r--src/backend/access/transam/xlogfuncs.c12
-rw-r--r--src/backend/access/transam/xloginsert.c51
-rw-r--r--src/backend/access/transam/xlogreader.c33
-rw-r--r--src/backend/bootstrap/bootparse.y1
-rw-r--r--src/backend/bootstrap/bootscanner.l1
-rw-r--r--src/backend/bootstrap/bootstrap.c14
-rw-r--r--src/backend/catalog/Catalog.pm3
-rw-r--r--src/backend/catalog/Makefile6
-rw-r--r--src/backend/catalog/aclchk.c53
-rw-r--r--src/backend/catalog/catalog.c10
-rw-r--r--src/backend/catalog/dependency.c30
-rw-r--r--src/backend/catalog/genbki.pl17
-rw-r--r--src/backend/catalog/index.c74
-rw-r--r--src/backend/catalog/indexing.c2
-rw-r--r--src/backend/catalog/information_schema.sql34
-rw-r--r--src/backend/catalog/namespace.c13
-rw-r--r--src/backend/catalog/objectaddress.c436
-rw-r--r--src/backend/catalog/pg_aggregate.c1
-rw-r--r--src/backend/catalog/pg_enum.c2
-rw-r--r--src/backend/catalog/pg_proc.c46
-rw-r--r--src/backend/catalog/pg_shdepend.c8
-rw-r--r--src/backend/catalog/pg_type.c2
-rw-r--r--src/backend/catalog/sql_features.txt10
-rw-r--r--src/backend/catalog/storage.c2
-rw-r--r--src/backend/catalog/system_views.sql22
-rw-r--r--src/backend/catalog/toasting.c12
-rw-r--r--src/backend/commands/analyze.c226
-rw-r--r--src/backend/commands/cluster.c2
-rw-r--r--src/backend/commands/constraint.c19
-rw-r--r--src/backend/commands/copy.c41
-rw-r--r--src/backend/commands/createas.c4
-rw-r--r--src/backend/commands/dbcommands.c8
-rw-r--r--src/backend/commands/dropcmds.c12
-rw-r--r--src/backend/commands/event_trigger.c727
-rw-r--r--src/backend/commands/explain.c268
-rw-r--r--src/backend/commands/functioncmds.c354
-rw-r--r--src/backend/commands/indexcmds.c27
-rw-r--r--src/backend/commands/lockcmds.c12
-rw-r--r--src/backend/commands/matview.c2
-rw-r--r--src/backend/commands/opclasscmds.c63
-rw-r--r--src/backend/commands/policy.c260
-rw-r--r--src/backend/commands/proclang.c3
-rw-r--r--src/backend/commands/schemacmds.c16
-rw-r--r--src/backend/commands/sequence.c20
-rw-r--r--src/backend/commands/tablecmds.c114
-rw-r--r--src/backend/commands/tablespace.c6
-rw-r--r--src/backend/commands/trigger.c47
-rw-r--r--src/backend/commands/tsearchcmds.c5
-rw-r--r--src/backend/commands/typecmds.c43
-rw-r--r--src/backend/commands/user.c27
-rw-r--r--src/backend/commands/vacuum.c23
-rw-r--r--src/backend/commands/vacuumlazy.c13
-rw-r--r--src/backend/executor/Makefile6
-rw-r--r--src/backend/executor/execAmi.c16
-rw-r--r--src/backend/executor/execCurrent.c1
-rw-r--r--src/backend/executor/execIndexing.c897
-rw-r--r--src/backend/executor/execMain.c373
-rw-r--r--src/backend/executor/execProcnode.c14
-rw-r--r--src/backend/executor/execQual.c75
-rw-r--r--src/backend/executor/execScan.c17
-rw-r--r--src/backend/executor/execTuples.c47
-rw-r--r--src/backend/executor/execUtils.c556
-rw-r--r--src/backend/executor/functions.c3
-rw-r--r--src/backend/executor/nodeAgg.c1394
-rw-r--r--src/backend/executor/nodeBitmapHeapscan.c6
-rw-r--r--src/backend/executor/nodeCustom.c61
-rw-r--r--src/backend/executor/nodeForeignscan.c50
-rw-r--r--src/backend/executor/nodeGroup.c7
-rw-r--r--src/backend/executor/nodeHash.c58
-rw-r--r--src/backend/executor/nodeIndexonlyscan.c23
-rw-r--r--src/backend/executor/nodeIndexscan.c411
-rw-r--r--src/backend/executor/nodeLockRows.c146
-rw-r--r--src/backend/executor/nodeMaterial.c12
-rw-r--r--src/backend/executor/nodeMergeAppend.c8
-rw-r--r--src/backend/executor/nodeMergejoin.c4
-rw-r--r--src/backend/executor/nodeModifyTable.c517
-rw-r--r--src/backend/executor/nodeSamplescan.c257
-rw-r--r--src/backend/executor/nodeSort.c8
-rw-r--r--src/backend/executor/nodeSubqueryscan.c8
-rw-r--r--src/backend/executor/nodeWindowAgg.c5
-rw-r--r--src/backend/executor/spi.c49
-rw-r--r--src/backend/foreign/foreign.c49
-rw-r--r--src/backend/lib/Makefile3
-rw-r--r--src/backend/lib/bipartite_match.c163
-rw-r--r--src/backend/lib/hyperloglog.c6
-rw-r--r--src/backend/lib/pairingheap.c4
-rw-r--r--src/backend/libpq/auth.c11
-rw-r--r--src/backend/libpq/be-secure-openssl.c38
-rw-r--r--src/backend/libpq/be-secure.c14
-rw-r--r--src/backend/libpq/hba.c13
-rw-r--r--src/backend/libpq/pqcomm.c29
-rw-r--r--src/backend/libpq/pqmq.c64
-rw-r--r--src/backend/nodes/copyfuncs.c211
-rw-r--r--src/backend/nodes/equalfuncs.c153
-rw-r--r--src/backend/nodes/list.c6
-rw-r--r--src/backend/nodes/makefuncs.c15
-rw-r--r--src/backend/nodes/nodeFuncs.c169
-rw-r--r--src/backend/nodes/outfuncs.c133
-rw-r--r--src/backend/nodes/readfuncs.c138
-rw-r--r--src/backend/optimizer/README11
-rw-r--r--src/backend/optimizer/geqo/geqo_erx.c2
-rw-r--r--src/backend/optimizer/path/allpaths.c107
-rw-r--r--src/backend/optimizer/path/costsize.c191
-rw-r--r--src/backend/optimizer/path/equivclass.c2
-rw-r--r--src/backend/optimizer/path/indxpath.c7
-rw-r--r--src/backend/optimizer/path/joinpath.c348
-rw-r--r--src/backend/optimizer/path/pathkeys.c2
-rw-r--r--src/backend/optimizer/plan/analyzejoins.c28
-rw-r--r--src/backend/optimizer/plan/createplan.c199
-rw-r--r--src/backend/optimizer/plan/initsplan.c7
-rw-r--r--src/backend/optimizer/plan/planagg.c2
-rw-r--r--src/backend/optimizer/plan/planner.c927
-rw-r--r--src/backend/optimizer/plan/setrefs.c308
-rw-r--r--src/backend/optimizer/plan/subselect.c64
-rw-r--r--src/backend/optimizer/prep/prepjointree.c7
-rw-r--r--src/backend/optimizer/prep/prepsecurity.c6
-rw-r--r--src/backend/optimizer/prep/preptlist.c47
-rw-r--r--src/backend/optimizer/prep/prepunion.c15
-rw-r--r--src/backend/optimizer/util/clauses.c97
-rw-r--r--src/backend/optimizer/util/pathnode.c125
-rw-r--r--src/backend/optimizer/util/plancat.c369
-rw-r--r--src/backend/optimizer/util/predtest.c2
-rw-r--r--src/backend/optimizer/util/relnode.c15
-rw-r--r--src/backend/optimizer/util/tlist.c22
-rw-r--r--src/backend/optimizer/util/var.c25
-rw-r--r--src/backend/parser/analyze.c158
-rw-r--r--src/backend/parser/gram.y429
-rw-r--r--src/backend/parser/parse_agg.c731
-rw-r--r--src/backend/parser/parse_clause.c760
-rw-r--r--src/backend/parser/parse_coerce.c16
-rw-r--r--src/backend/parser/parse_collate.c2
-rw-r--r--src/backend/parser/parse_expr.c5
-rw-r--r--src/backend/parser/parse_func.c146
-rw-r--r--src/backend/parser/parse_relation.c77
-rw-r--r--src/backend/parser/parse_target.c15
-rw-r--r--src/backend/parser/parse_type.c2
-rw-r--r--src/backend/parser/parse_utilcmd.c23
-rw-r--r--src/backend/pgxc/barrier/barrier.c9
-rw-r--r--src/backend/port/atomics.c17
-rw-r--r--src/backend/port/sysv_shmem.c2
-rw-r--r--src/backend/port/win32_latch.c2
-rw-r--r--src/backend/port/win32_sema.c1
-rw-r--r--src/backend/postmaster/autovacuum.c30
-rw-r--r--src/backend/postmaster/bgworker.c73
-rw-r--r--src/backend/postmaster/bgwriter.c2
-rw-r--r--src/backend/postmaster/pgstat.c2
-rw-r--r--src/backend/postmaster/postmaster.c54
-rw-r--r--src/backend/replication/basebackup.c169
-rw-r--r--src/backend/replication/libpqwalreceiver/libpqwalreceiver.c14
-rw-r--r--src/backend/replication/logical/Makefile3
-rw-r--r--src/backend/replication/logical/decode.c119
-rw-r--r--src/backend/replication/logical/logical.c39
-rw-r--r--src/backend/replication/logical/logicalfuncs.c4
-rw-r--r--src/backend/replication/logical/origin.c1487
-rw-r--r--src/backend/replication/logical/reorderbuffer.c181
-rw-r--r--src/backend/replication/logical/snapbuild.c48
-rw-r--r--src/backend/replication/repl_gram.y16
-rw-r--r--src/backend/replication/repl_scanner.l1
-rw-r--r--src/backend/replication/slot.c40
-rw-r--r--src/backend/replication/slotfuncs.c19
-rw-r--r--src/backend/replication/walreceiver.c10
-rw-r--r--src/backend/replication/walreceiverfuncs.c2
-rw-r--r--src/backend/replication/walsender.c11
-rw-r--r--src/backend/rewrite/rewriteDefine.c1
-rw-r--r--src/backend/rewrite/rewriteHandler.c272
-rw-r--r--src/backend/rewrite/rewriteManip.c38
-rw-r--r--src/backend/rewrite/rowsecurity.c452
-rw-r--r--src/backend/snowball/dict_snowball.c2
-rw-r--r--src/backend/storage/buffer/buf_init.c8
-rw-r--r--src/backend/storage/buffer/bufmgr.c48
-rw-r--r--src/backend/storage/buffer/freelist.c15
-rw-r--r--src/backend/storage/file/fd.c285
-rw-r--r--src/backend/storage/file/reinit.c10
-rw-r--r--src/backend/storage/ipc/dsm.c2
-rw-r--r--src/backend/storage/ipc/dsm_impl.c8
-rw-r--r--src/backend/storage/ipc/ipci.c3
-rw-r--r--src/backend/storage/ipc/procarray.c52
-rw-r--r--src/backend/storage/ipc/procsignal.c3
-rw-r--r--src/backend/storage/ipc/shm_mq.c22
-rw-r--r--src/backend/storage/ipc/sinval.c6
-rw-r--r--src/backend/storage/lmgr/README6
-rw-r--r--src/backend/storage/lmgr/lmgr.c91
-rw-r--r--src/backend/storage/lmgr/lwlock.c76
-rw-r--r--src/backend/storage/lmgr/predicate.c12
-rw-r--r--src/backend/storage/lmgr/proc.c1
-rw-r--r--src/backend/storage/page/bufpage.c10
-rw-r--r--src/backend/storage/page/itemptr.c7
-rw-r--r--src/backend/storage/smgr/md.c4
-rw-r--r--src/backend/tcop/postgres.c24
-rw-r--r--src/backend/tcop/utility.c346
-rw-r--r--src/backend/tsearch/dict_synonym.c2
-rw-r--r--src/backend/tsearch/spell.c4
-rw-r--r--src/backend/utils/adt/Makefile9
-rw-r--r--src/backend/utils/adt/acl.c6
-rw-r--r--src/backend/utils/adt/array_expanded.c455
-rw-r--r--src/backend/utils/adt/array_selfuncs.c4
-rw-r--r--src/backend/utils/adt/array_typanalyze.c2
-rw-r--r--src/backend/utils/adt/array_userfuncs.c124
-rw-r--r--src/backend/utils/adt/arrayfuncs.c993
-rw-r--r--src/backend/utils/adt/cash.c2
-rw-r--r--src/backend/utils/adt/datum.c89
-rw-r--r--src/backend/utils/adt/enum.c15
-rw-r--r--src/backend/utils/adt/expandeddatum.c163
-rw-r--r--src/backend/utils/adt/format_type.c3
-rw-r--r--src/backend/utils/adt/formatting.c60
-rw-r--r--src/backend/utils/adt/geo_ops.c66
-rw-r--r--src/backend/utils/adt/json.c6
-rw-r--r--src/backend/utils/adt/jsonb.c182
-rw-r--r--src/backend/utils/adt/jsonb_gin.c2
-rw-r--r--src/backend/utils/adt/jsonb_util.c49
-rw-r--r--src/backend/utils/adt/jsonfuncs.c741
-rw-r--r--src/backend/utils/adt/levenshtein.c4
-rw-r--r--src/backend/utils/adt/lockfuncs.c59
-rw-r--r--src/backend/utils/adt/misc.c10
-rw-r--r--src/backend/utils/adt/name.c4
-rw-r--r--src/backend/utils/adt/network.c52
-rw-r--r--src/backend/utils/adt/network_gist.c32
-rw-r--r--src/backend/utils/adt/numeric.c59
-rw-r--r--src/backend/utils/adt/orderedsetaggs.c4
-rw-r--r--src/backend/utils/adt/pg_locale.c74
-rw-r--r--src/backend/utils/adt/pg_upgrade_support.c26
-rw-r--r--src/backend/utils/adt/pgstatfuncs.c198
-rw-r--r--src/backend/utils/adt/pseudotypes.c65
-rw-r--r--src/backend/utils/adt/rangetypes.c54
-rw-r--r--src/backend/utils/adt/rangetypes_gist.c2
-rw-r--r--src/backend/utils/adt/rangetypes_selfuncs.c4
-rw-r--r--src/backend/utils/adt/rangetypes_spgist.c32
-rw-r--r--src/backend/utils/adt/regexp.c5
-rw-r--r--src/backend/utils/adt/regproc.c204
-rw-r--r--src/backend/utils/adt/ri_triggers.c16
-rw-r--r--src/backend/utils/adt/ruleutils.c505
-rw-r--r--src/backend/utils/adt/selfuncs.c17
-rw-r--r--src/backend/utils/adt/timestamp.c6
-rw-r--r--src/backend/utils/adt/tsginidx.c2
-rw-r--r--src/backend/utils/adt/tsquery_gist.c2
-rw-r--r--src/backend/utils/adt/tsquery_op.c5
-rw-r--r--src/backend/utils/adt/tsvector_op.c4
-rw-r--r--src/backend/utils/adt/txid.c6
-rw-r--r--src/backend/utils/adt/varlena.c188
-rw-r--r--src/backend/utils/adt/xml.c4
-rw-r--r--src/backend/utils/cache/catcache.c4
-rw-r--r--src/backend/utils/cache/inval.c13
-rw-r--r--src/backend/utils/cache/lsyscache.c98
-rw-r--r--src/backend/utils/cache/plancache.c6
-rw-r--r--src/backend/utils/cache/relcache.c16
-rw-r--r--src/backend/utils/cache/syscache.c69
-rw-r--r--src/backend/utils/error/elog.c4
-rw-r--r--src/backend/utils/fmgr/dfmgr.c54
-rw-r--r--src/backend/utils/fmgr/funcapi.c42
-rw-r--r--src/backend/utils/init/miscinit.c30
-rw-r--r--src/backend/utils/init/postinit.c2
-rw-r--r--src/backend/utils/mb/Unicode/ISO10646-GB18030.TXT63488
-rw-r--r--src/backend/utils/mb/Unicode/Makefile8
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_BIG5.pl4
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl4
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl8
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl4
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl4
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl4
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_GB18030.pl85
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl8
-rwxr-xr-xsrc/backend/utils/mb/Unicode/UCS_to_SJIS.pl4
-rw-r--r--src/backend/utils/mb/Unicode/UCS_to_most.pl4
-rw-r--r--src/backend/utils/mb/Unicode/big5_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/euc_cn_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/euc_jis_2004_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/euc_jis_2004_to_utf8_combined.map2
-rw-r--r--src/backend/utils/mb/Unicode/euc_jp_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/euc_kr_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/euc_tw_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/gb-18030-2000.xml30916
-rw-r--r--src/backend/utils/mb/Unicode/gb18030_to_utf8.map32633
-rw-r--r--src/backend/utils/mb/Unicode/gbk_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_10_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_13_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_14_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_15_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_16_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_2_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_3_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_4_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_5_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_6_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_7_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_8_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/iso8859_9_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/johab_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/koi8r_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/koi8u_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/shift_jis_2004_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/shift_jis_2004_to_utf8_combined.map2
-rw-r--r--src/backend/utils/mb/Unicode/sjis_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/uhc_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_big5.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_euc_cn.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_euc_jis_2004.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_euc_jis_2004_combined.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_euc_jp.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_euc_kr.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_euc_tw.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_gb18030.map32631
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_gbk.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_10.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_13.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_14.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_15.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_16.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_2.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_3.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_4.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_5.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_6.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_7.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_8.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_iso8859_9.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_johab.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_koi8r.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_koi8u.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_shift_jis_2004.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_shift_jis_2004_combined.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_sjis.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_uhc.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1250.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1251.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1252.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1253.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1254.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1255.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1256.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1257.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win1258.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win866.map2
-rw-r--r--src/backend/utils/mb/Unicode/utf8_to_win874.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1250_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1251_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1252_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1253_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1254_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1255_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1256_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1257_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win1258_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win866_to_utf8.map2
-rw-r--r--src/backend/utils/mb/Unicode/win874_to_utf8.map2
-rw-r--r--src/backend/utils/mb/conv.c327
-rw-r--r--src/backend/utils/mb/conversion_procs/ascii_and_mic/ascii_and_mic.c3
-rw-r--r--src/backend/utils/mb/conversion_procs/cyrillic_and_mic/cyrillic_and_mic.c21
-rw-r--r--src/backend/utils/mb/conversion_procs/euc2004_sjis2004/euc2004_sjis2004.c3
-rw-r--r--src/backend/utils/mb/conversion_procs/euc_cn_and_mic/euc_cn_and_mic.c3
-rw-r--r--src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c7
-rw-r--r--src/backend/utils/mb/conversion_procs/euc_kr_and_mic/euc_kr_and_mic.c3
-rw-r--r--src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c14
-rw-r--r--src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c7
-rw-r--r--src/backend/utils/mb/conversion_procs/latin2_and_win1250/latin2_and_win1250.c7
-rw-r--r--src/backend/utils/mb/conversion_procs/latin_and_mic/latin_and_mic.c7
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_ascii/utf8_and_ascii.c3
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c34
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc2004/utf8_and_euc2004.c21
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c170
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_gbk/utf8_and_gbk.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c83
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c3
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_sjis/utf8_and_sjis.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_sjis2004/utf8_and_sjis2004.c21
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_uhc/utf8_and_uhc.c17
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c75
-rw-r--r--src/backend/utils/mb/wchar.c52
-rw-r--r--src/backend/utils/misc/Makefile2
-rw-r--r--src/backend/utils/misc/guc-file.l58
-rw-r--r--src/backend/utils/misc/guc.c293
-rw-r--r--src/backend/utils/misc/postgresql.conf.sample2
-rw-r--r--src/backend/utils/misc/rls.c35
-rw-r--r--src/backend/utils/misc/sampling.c285
-rw-r--r--src/backend/utils/mmgr/mcxt.c4
-rw-r--r--src/backend/utils/sort/sortsupport.c4
-rw-r--r--src/backend/utils/sort/tuplesort.c230
-rw-r--r--src/backend/utils/time/combocid.c77
-rw-r--r--src/backend/utils/time/snapmgr.c227
-rw-r--r--src/backend/utils/time/tqual.c32
-rw-r--r--src/bin/Makefile3
-rw-r--r--src/bin/initdb/Makefile2
-rw-r--r--src/bin/initdb/initdb.c309
-rw-r--r--src/bin/initdb/nls.mk2
-rw-r--r--src/bin/initdb/t/001_initdb.pl48
-rw-r--r--src/bin/pg_archivecleanup/pg_archivecleanup.c2
-rw-r--r--src/bin/pg_basebackup/Makefile2
-rw-r--r--src/bin/pg_basebackup/pg_basebackup.c24
-rw-r--r--src/bin/pg_basebackup/pg_receivexlog.c49
-rw-r--r--src/bin/pg_basebackup/pg_recvlogical.c6
-rw-r--r--src/bin/pg_basebackup/receivelog.c115
-rw-r--r--src/bin/pg_basebackup/receivelog.h2
-rw-r--r--src/bin/pg_basebackup/streamutil.c14
-rw-r--r--src/bin/pg_basebackup/streamutil.h12
-rw-r--r--src/bin/pg_basebackup/t/010_pg_basebackup.pl26
-rw-r--r--src/bin/pg_config/Makefile2
-rw-r--r--src/bin/pg_controldata/Makefile2
-rw-r--r--src/bin/pg_ctl/Makefile2
-rw-r--r--src/bin/pg_ctl/pg_ctl.c35
-rw-r--r--src/bin/pg_ctl/t/001_start_stop.pl4
-rw-r--r--src/bin/pg_ctl/t/002_status.pl2
-rw-r--r--src/bin/pg_dump/common.c5
-rw-r--r--src/bin/pg_dump/pg_backup_archiver.c2
-rw-r--r--src/bin/pg_dump/pg_dump.c375
-rw-r--r--src/bin/pg_dump/pg_dump.h13
-rw-r--r--src/bin/pg_dump/pg_dump_sort.c11
-rw-r--r--src/bin/pg_dump/pg_dumpall.c4
-rw-r--r--src/bin/pg_resetxlog/nls.mk2
-rw-r--r--src/bin/pg_resetxlog/pg_resetxlog.c11
-rw-r--r--src/bin/pg_rewind/Makefile3
-rw-r--r--src/bin/pg_rewind/RewindTest.pm200
-rw-r--r--src/bin/pg_rewind/copy_fetch.c26
-rw-r--r--src/bin/pg_rewind/filemap.c19
-rw-r--r--src/bin/pg_rewind/filemap.h28
-rw-r--r--src/bin/pg_rewind/parsexlog.c10
-rw-r--r--src/bin/pg_rewind/pg_rewind.c8
-rw-r--r--src/bin/pg_rewind/t/001_basic.pl20
-rw-r--r--src/bin/pg_rewind/t/002_databases.pl8
-rw-r--r--src/bin/pg_rewind/t/003_extrafiles.pl56
-rw-r--r--src/bin/pg_test_fsync/.gitignore (renamed from contrib/pg_test_fsync/.gitignore)0
-rw-r--r--src/bin/pg_test_fsync/Makefile27
-rw-r--r--src/bin/pg_test_fsync/pg_test_fsync.c (renamed from contrib/pg_test_fsync/pg_test_fsync.c)0
-rw-r--r--src/bin/pg_test_timing/.gitignore (renamed from contrib/pg_test_timing/.gitignore)0
-rw-r--r--src/bin/pg_test_timing/Makefile27
-rw-r--r--src/bin/pg_test_timing/pg_test_timing.c (renamed from contrib/pg_test_timing/pg_test_timing.c)0
-rw-r--r--src/bin/pg_upgrade/Makefile2
-rw-r--r--src/bin/pg_upgrade/check.c93
-rw-r--r--src/bin/pg_upgrade/controldata.c54
-rw-r--r--src/bin/pg_upgrade/dump.c8
-rw-r--r--src/bin/pg_upgrade/info.c160
-rw-r--r--src/bin/pg_upgrade/option.c12
-rw-r--r--src/bin/pg_upgrade/parallel.c2
-rw-r--r--src/bin/pg_upgrade/pg_upgrade.c15
-rw-r--r--src/bin/pg_upgrade/pg_upgrade.h9
-rw-r--r--src/bin/pg_upgrade/relfilenode.c8
-rw-r--r--src/bin/pg_upgrade/server.c11
-rw-r--r--src/bin/pg_upgrade/test.sh2
-rw-r--r--src/bin/pg_upgrade/version.c4
-rw-r--r--src/bin/pg_xlogdump/.gitignore (renamed from contrib/pg_xlogdump/.gitignore)3
-rw-r--r--src/bin/pg_xlogdump/Makefile (renamed from contrib/pg_xlogdump/Makefile)35
-rw-r--r--src/bin/pg_xlogdump/compat.c (renamed from contrib/pg_xlogdump/compat.c)2
-rw-r--r--src/bin/pg_xlogdump/pg_xlogdump.c (renamed from contrib/pg_xlogdump/pg_xlogdump.c)81
-rw-r--r--src/bin/pg_xlogdump/rmgrdesc.c (renamed from contrib/pg_xlogdump/rmgrdesc.c)6
-rw-r--r--src/bin/pg_xlogdump/rmgrdesc.h (renamed from contrib/pg_xlogdump/rmgrdesc.h)2
-rw-r--r--src/bin/pgbench/pgbench.c142
-rw-r--r--src/bin/pgbench/pgbench.h34
-rw-r--r--src/bin/psql/command.c31
-rw-r--r--src/bin/psql/common.c9
-rw-r--r--src/bin/psql/common.h2
-rw-r--r--src/bin/psql/copy.c2
-rw-r--r--src/bin/psql/describe.c36
-rw-r--r--src/bin/psql/help.c40
-rw-r--r--src/bin/psql/print.c62
-rw-r--r--src/bin/psql/print.h8
-rw-r--r--src/bin/psql/startup.c2
-rw-r--r--src/bin/psql/tab-complete.c56
-rw-r--r--src/bin/scripts/Makefile2
-rw-r--r--src/bin/scripts/common.c2
-rw-r--r--src/bin/scripts/reindexdb.c52
-rw-r--r--src/bin/scripts/t/090_reindexdb.pl6
-rw-r--r--src/bin/scripts/t/102_vacuumdb_stages.pl2
-rw-r--r--src/bin/scripts/vacuumdb.c4
-rw-r--r--src/common/restricted_token.c2
-rw-r--r--src/include/access/attnum.h2
-rw-r--r--src/include/access/brin_internal.h13
-rw-r--r--src/include/access/brin_page.h4
-rw-r--r--src/include/access/commit_ts.h20
-rw-r--r--src/include/access/gin.h2
-rw-r--r--src/include/access/gin_private.h2
-rw-r--r--src/include/access/gist.h20
-rw-r--r--src/include/access/gist_private.h5
-rw-r--r--src/include/access/hash.h2
-rw-r--r--src/include/access/heapam.h9
-rw-r--r--src/include/access/heapam_xlog.h54
-rw-r--r--src/include/access/hio.h2
-rw-r--r--src/include/access/htup_details.h36
-rw-r--r--src/include/access/multixact.h1
-rw-r--r--src/include/access/nbtree.h2
-rw-r--r--src/include/access/parallel.h68
-rw-r--r--src/include/access/relscan.h16
-rw-r--r--src/include/access/rewriteheap.h2
-rw-r--r--src/include/access/rmgrlist.h3
-rw-r--r--src/include/access/skey.h23
-rw-r--r--src/include/access/stratnum.h75
-rw-r--r--src/include/access/tablesample.h61
-rw-r--r--src/include/access/xact.h81
-rw-r--r--src/include/access/xlog.h23
-rw-r--r--src/include/access/xlog_internal.h26
-rw-r--r--src/include/access/xlogdefs.h6
-rw-r--r--src/include/access/xloginsert.h16
-rw-r--r--src/include/access/xlogreader.h3
-rw-r--r--src/include/access/xlogrecord.h19
-rw-r--r--src/include/access/xlogutils.h10
-rw-r--r--src/include/bootstrap/bootstrap.h2
-rw-r--r--src/include/c.h2
-rw-r--r--src/include/catalog/binary_upgrade.h2
-rw-r--r--src/include/catalog/catversion.h2
-rw-r--r--src/include/catalog/dependency.h1
-rw-r--r--src/include/catalog/index.h8
-rw-r--r--src/include/catalog/indexing.h20
-rw-r--r--src/include/catalog/namespace.h2
-rw-r--r--src/include/catalog/objectaddress.h2
-rw-r--r--src/include/catalog/opfam_internal.h28
-rw-r--r--src/include/catalog/pg_aggregate.h56
-rw-r--r--src/include/catalog/pg_am.h3
-rw-r--r--src/include/catalog/pg_amop.h449
-rw-r--r--src/include/catalog/pg_amproc.h517
-rw-r--r--src/include/catalog/pg_attribute.h12
-rw-r--r--src/include/catalog/pg_cast.h17
-rw-r--r--src/include/catalog/pg_class.h4
-rw-r--r--src/include/catalog/pg_control.h6
-rw-r--r--src/include/catalog/pg_description.h2
-rw-r--r--src/include/catalog/pg_extension.h2
-rw-r--r--src/include/catalog/pg_largeobject.h2
-rw-r--r--src/include/catalog/pg_opclass.h60
-rw-r--r--src/include/catalog/pg_operator.h16
-rw-r--r--src/include/catalog/pg_opfamily.h3
-rw-r--r--src/include/catalog/pg_pltemplate.h5
-rw-r--r--src/include/catalog/pg_policy.h20
-rw-r--r--src/include/catalog/pg_proc.h5558
-rw-r--r--src/include/catalog/pg_proc_fn.h3
-rw-r--r--src/include/catalog/pg_replication_origin.h70
-rw-r--r--src/include/catalog/pg_seclabel.h4
-rw-r--r--src/include/catalog/pg_shdescription.h2
-rw-r--r--src/include/catalog/pg_shseclabel.h4
-rw-r--r--src/include/catalog/pg_tablesample_method.h81
-rw-r--r--src/include/catalog/pg_transform.h47
-rw-r--r--src/include/catalog/pg_trigger.h2
-rw-r--r--src/include/catalog/pg_type.h18
-rw-r--r--src/include/commands/defrem.h11
-rw-r--r--src/include/commands/event_trigger.h26
-rw-r--r--src/include/commands/explain.h2
-rw-r--r--src/include/commands/extension.h2
-rw-r--r--src/include/commands/vacuum.h22
-rw-r--r--src/include/common/fe_memutils.h4
-rw-r--r--src/include/common/pg_lzcompress.h2
-rw-r--r--src/include/common/restricted_token.h8
-rw-r--r--src/include/common/string.h4
-rw-r--r--src/include/executor/execdesc.h1
-rw-r--r--src/include/executor/executor.h34
-rw-r--r--src/include/executor/hashjoin.h18
-rw-r--r--src/include/executor/nodeSamplescan.h24
-rw-r--r--src/include/executor/spi.h1
-rw-r--r--src/include/executor/tuptable.h1
-rw-r--r--src/include/fmgr.h5
-rw-r--r--src/include/foreign/fdwapi.h24
-rw-r--r--src/include/foreign/foreign.h2
-rw-r--r--src/include/funcapi.h1
-rw-r--r--src/include/lib/bipartite_match.h44
-rw-r--r--src/include/lib/hyperloglog.h2
-rw-r--r--src/include/lib/pairingheap.h14
-rw-r--r--src/include/libpq/libpq-be.h4
-rw-r--r--src/include/libpq/libpq.h22
-rw-r--r--src/include/libpq/pqmq.h3
-rw-r--r--src/include/mb/pg_wchar.h43
-rw-r--r--src/include/miscadmin.h5
-rw-r--r--src/include/nodes/execnodes.h97
-rw-r--r--src/include/nodes/makefuncs.h2
-rw-r--r--src/include/nodes/nodes.h25
-rw-r--r--src/include/nodes/parsenodes.h233
-rw-r--r--src/include/nodes/pg_list.h3
-rw-r--r--src/include/nodes/plannodes.h89
-rw-r--r--src/include/nodes/primnodes.h91
-rw-r--r--src/include/nodes/relation.h54
-rw-r--r--src/include/optimizer/clauses.h2
-rw-r--r--src/include/optimizer/cost.h1
-rw-r--r--src/include/optimizer/pathnode.h2
-rw-r--r--src/include/optimizer/paths.h9
-rw-r--r--src/include/optimizer/plancat.h2
-rw-r--r--src/include/optimizer/planmain.h6
-rw-r--r--src/include/optimizer/prep.h3
-rw-r--r--src/include/optimizer/tlist.h3
-rw-r--r--src/include/parser/kwlist.h8
-rw-r--r--src/include/parser/parse_agg.h5
-rw-r--r--src/include/parser/parse_clause.h5
-rw-r--r--src/include/parser/parse_func.h5
-rw-r--r--src/include/parser/parse_relation.h12
-rw-r--r--src/include/pgstat.h32
-rw-r--r--src/include/pgxc/barrier.h6
-rw-r--r--src/include/port.h4
-rw-r--r--src/include/port/atomics.h39
-rw-r--r--src/include/port/atomics/generic-gcc.h2
-rw-r--r--src/include/port/pg_crc32c.h2
-rw-r--r--src/include/postgres.h30
-rw-r--r--src/include/postmaster/bgworker.h2
-rw-r--r--src/include/postmaster/pgarch.h2
-rw-r--r--src/include/regex/regguts.h5
-rw-r--r--src/include/replication/basebackup.h10
-rw-r--r--src/include/replication/logical.h2
-rw-r--r--src/include/replication/origin.h86
-rw-r--r--src/include/replication/output_plugin.h8
-rw-r--r--src/include/replication/reorderbuffer.h19
-rw-r--r--src/include/replication/slot.h4
-rw-r--r--src/include/replication/walsender.h2
-rw-r--r--src/include/rewrite/prs2lock.h2
-rw-r--r--src/include/rewrite/rowsecurity.h38
-rw-r--r--src/include/storage/fd.h1
-rw-r--r--src/include/storage/itemptr.h7
-rw-r--r--src/include/storage/lmgr.h5
-rw-r--r--src/include/storage/lock.h10
-rw-r--r--src/include/storage/lwlock.h5
-rw-r--r--src/include/storage/procarray.h1
-rw-r--r--src/include/storage/procsignal.h1
-rw-r--r--src/include/storage/s_lock.h4
-rw-r--r--src/include/storage/shm_mq.h6
-rw-r--r--src/include/tcop/deparse_utility.h105
-rw-r--r--src/include/tcop/fastpath.h2
-rw-r--r--src/include/utils/acl.h2
-rw-r--r--src/include/utils/aclchk_internal.h45
-rw-r--r--src/include/utils/array.h143
-rw-r--r--src/include/utils/arrayaccess.h133
-rw-r--r--src/include/utils/builtins.h24
-rw-r--r--src/include/utils/combocid.h3
-rw-r--r--src/include/utils/datum.h8
-rw-r--r--src/include/utils/expandeddatum.h151
-rw-r--r--src/include/utils/geo_decls.h5
-rw-r--r--src/include/utils/guc.h4
-rw-r--r--src/include/utils/guc_tables.h1
-rw-r--r--src/include/utils/jsonapi.h2
-rw-r--r--src/include/utils/jsonb.h23
-rw-r--r--src/include/utils/lsyscache.h4
-rw-r--r--src/include/utils/palloc.h2
-rw-r--r--src/include/utils/pg_crc.h2
-rw-r--r--src/include/utils/rangetypes.h21
-rw-r--r--src/include/utils/rel.h3
-rw-r--r--src/include/utils/rls.h18
-rw-r--r--src/include/utils/ruleutils.h2
-rw-r--r--src/include/utils/sampling.h65
-rw-r--r--src/include/utils/selfuncs.h2
-rw-r--r--src/include/utils/snapmgr.h5
-rw-r--r--src/include/utils/snapshot.h24
-rw-r--r--src/include/utils/sortsupport.h83
-rw-r--r--src/include/utils/syscache.h6
-rw-r--r--src/include/utils/tuplesort.h8
-rw-r--r--src/interfaces/ecpg/Makefile2
-rw-r--r--src/interfaces/ecpg/compatlib/Makefile2
-rw-r--r--src/interfaces/ecpg/ecpglib/data.c3
-rw-r--r--src/interfaces/ecpg/ecpglib/execute.c14
-rw-r--r--src/interfaces/ecpg/ecpglib/memory.c2
-rw-r--r--src/interfaces/ecpg/pgtypeslib/Makefile2
-rw-r--r--src/interfaces/ecpg/pgtypeslib/datetime.c2
-rw-r--r--src/interfaces/ecpg/pgtypeslib/interval.c2
-rw-r--r--src/interfaces/ecpg/pgtypeslib/numeric.c2
-rw-r--r--src/interfaces/ecpg/preproc/ecpg.tokens2
-rw-r--r--src/interfaces/ecpg/preproc/ecpg.trailer5
-rw-r--r--src/interfaces/ecpg/preproc/ecpg_keywords.c2
-rw-r--r--src/interfaces/ecpg/preproc/parse.pl21
-rw-r--r--src/interfaces/ecpg/test/Makefile10
-rw-r--r--src/interfaces/libpq/fe-auth.c4
-rw-r--r--src/interfaces/libpq/fe-connect.c11
-rw-r--r--src/interfaces/libpq/fe-misc.c14
-rw-r--r--src/interfaces/libpq/fe-secure-openssl.c88
-rw-r--r--src/interfaces/libpq/fe-secure.c18
-rw-r--r--src/interfaces/libpq/nls.mk2
-rw-r--r--src/makefiles/pgxs.mk9
-rw-r--r--src/pl/plperl/GNUmakefile31
-rw-r--r--src/pl/plperl/plperl.c47
-rw-r--r--src/pl/plperl/plperl_helpers.h2
-rw-r--r--src/pl/plpgsql/src/pl_comp.c16
-rw-r--r--src/pl/plpgsql/src/pl_exec.c301
-rw-r--r--src/pl/plpgsql/src/pl_gram.y3
-rw-r--r--src/pl/plpgsql/src/plpgsql.h2
-rw-r--r--src/pl/plpython/Makefile88
-rw-r--r--src/pl/plpython/plpy_main.c1
-rw-r--r--src/pl/plpython/plpy_procedure.c23
-rw-r--r--src/pl/plpython/plpy_procedure.h2
-rw-r--r--src/pl/plpython/plpy_spi.c3
-rw-r--r--src/pl/plpython/plpy_typeio.c231
-rw-r--r--src/pl/plpython/plpy_typeio.h9
-rw-r--r--src/pl/plpython/plpy_util.c21
-rw-r--r--src/pl/plpython/plpy_util.h1
-rw-r--r--src/pl/plpython/plpython.h1
-rw-r--r--src/pl/plpython/regress-python3-mangle.mk35
-rw-r--r--src/pl/tcl/Makefile30
-rw-r--r--src/port/erand48.c3
-rw-r--r--src/port/gettimeofday.c22
-rw-r--r--src/port/pg_crc32c_choose.c4
-rw-r--r--src/port/pg_crc32c_sse42.c3
-rw-r--r--src/port/pgmkdirp.c2
-rw-r--r--src/port/snprintf.c94
-rw-r--r--src/port/win32setlocale.c8
-rw-r--r--src/test/Makefile7
-rw-r--r--src/test/examples/.gitignore6
-rw-r--r--src/test/examples/Makefile4
-rw-r--r--src/test/isolation/Makefile15
-rw-r--r--src/test/isolation/expected/insert-conflict-do-nothing.out23
-rw-r--r--src/test/isolation/expected/insert-conflict-do-update-2.out23
-rw-r--r--src/test/isolation/expected/insert-conflict-do-update-3.out26
-rw-r--r--src/test/isolation/expected/insert-conflict-do-update.out23
-rw-r--r--src/test/isolation/isolation_schedule4
-rw-r--r--src/test/isolation/specparse.y16
-rw-r--r--src/test/isolation/specs/insert-conflict-do-nothing.spec41
-rw-r--r--src/test/isolation/specs/insert-conflict-do-update-2.spec41
-rw-r--r--src/test/isolation/specs/insert-conflict-do-update-3.spec69
-rw-r--r--src/test/isolation/specs/insert-conflict-do-update.spec40
-rw-r--r--src/test/isolation/specscanner.l2
-rw-r--r--src/test/locale/.gitignore1
-rw-r--r--src/test/locale/Makefile4
-rw-r--r--src/test/mb/expected/big5.out1
-rw-r--r--src/test/mb/expected/euc_jp.out1
-rw-r--r--src/test/mb/expected/euc_kr.out1
-rw-r--r--src/test/mb/expected/euc_tw.out1
-rw-r--r--src/test/mb/expected/gb18030.out87
-rw-r--r--src/test/mb/expected/mule_internal.out2
-rw-r--r--src/test/mb/expected/sjis.out1
-rw-r--r--src/test/mb/expected/utf8.out1
-rwxr-xr-xsrc/test/mb/mbregress.sh11
-rw-r--r--src/test/mb/sql/gb18030.sql19
-rw-r--r--src/test/modules/Makefile6
-rw-r--r--src/test/modules/test_ddl_deparse/.gitignore1
-rw-r--r--src/test/modules/test_ddl_deparse/Makefile41
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_extension.out0
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_function.out15
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_sequence.out15
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_table.out18
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_type_enum.out7
-rw-r--r--src/test/modules/test_ddl_deparse/expected/comment_on.out25
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_conversion.out6
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_domain.out11
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_extension.out5
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_function.out0
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_operator.out0
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_rule.out30
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_schema.out19
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_sequence_1.out11
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_table.out160
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_trigger.out18
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_type.out24
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_view.out19
-rw-r--r--src/test/modules/test_ddl_deparse/expected/defprivs.out6
-rw-r--r--src/test/modules/test_ddl_deparse/expected/matviews.out8
-rw-r--r--src/test/modules/test_ddl_deparse/expected/opfamily.out67
-rw-r--r--src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out40
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_function.sql17
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_sequence.sql15
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_table.sql13
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_type_enum.sql6
-rw-r--r--src/test/modules/test_ddl_deparse/sql/comment_on.sql17
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_conversion.sql6
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_domain.sql10
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_extension.sql5
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_rule.sql31
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_schema.sql17
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_sequence_1.sql11
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_table.sql142
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_trigger.sql18
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_type.sql21
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_view.sql17
-rw-r--r--src/test/modules/test_ddl_deparse/sql/defprivs.sql6
-rw-r--r--src/test/modules/test_ddl_deparse/sql/matviews.sql8
-rw-r--r--src/test/modules/test_ddl_deparse/sql/opfamily.sql52
-rw-r--r--src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql42
-rw-r--r--src/test/modules/test_ddl_deparse/test_ddl_deparse--1.0.sql16
-rw-r--r--src/test/modules/test_ddl_deparse/test_ddl_deparse.c267
-rw-r--r--src/test/modules/test_ddl_deparse/test_ddl_deparse.control4
-rw-r--r--src/test/modules/test_rls_hooks/.gitignore4
-rw-r--r--src/test/modules/test_rls_hooks/Makefile22
-rw-r--r--src/test/modules/test_rls_hooks/README16
-rw-r--r--src/test/modules/test_rls_hooks/expected/test_rls_hooks.out198
-rw-r--r--src/test/modules/test_rls_hooks/rls_hooks.conf1
-rw-r--r--src/test/modules/test_rls_hooks/sql/test_rls_hooks.sql168
-rw-r--r--src/test/modules/test_rls_hooks/test_rls_hooks.c172
-rw-r--r--src/test/modules/test_rls_hooks/test_rls_hooks.control4
-rw-r--r--src/test/modules/test_rls_hooks/test_rls_hooks.h25
-rw-r--r--src/test/perl/TestLib.pm13
-rw-r--r--src/test/regress/GNUmakefile4
-rw-r--r--src/test/regress/expected/brin.out216
-rw-r--r--src/test/regress/expected/conversion.out16
-rw-r--r--src/test/regress/expected/create_index.out86
-rw-r--r--src/test/regress/expected/create_type.out6
-rw-r--r--src/test/regress/expected/errors.out4
-rw-r--r--src/test/regress/expected/event_trigger.out2
-rw-r--r--src/test/regress/expected/foreign_key.out45
-rw-r--r--src/test/regress/expected/geometry.out34
-rw-r--r--src/test/regress/expected/geometry_1.out34
-rw-r--r--src/test/regress/expected/geometry_2.out34
-rw-r--r--src/test/regress/expected/gist.out28
-rw-r--r--src/test/regress/expected/groupingsets.out590
-rw-r--r--src/test/regress/expected/inet.out28
-rw-r--r--src/test/regress/expected/insert_conflict.out492
-rw-r--r--src/test/regress/expected/join.out94
-rw-r--r--src/test/regress/expected/jsonb.out549
-rw-r--r--src/test/regress/expected/jsonb_1.out549
-rw-r--r--src/test/regress/expected/opr_sanity.out110
-rw-r--r--src/test/regress/expected/polymorphism.out12
-rw-r--r--src/test/regress/expected/privileges.out29
-rw-r--r--src/test/regress/expected/rangetypes.out20
-rw-r--r--src/test/regress/expected/regproc.out48
-rw-r--r--src/test/regress/expected/returning.out24
-rw-r--r--src/test/regress/expected/rolenames.out4
-rw-r--r--src/test/regress/expected/rowsecurity.out582
-rw-r--r--src/test/regress/expected/rules.out279
-rw-r--r--src/test/regress/expected/sanity_check.out3
-rw-r--r--src/test/regress/expected/select_views.out46
-rw-r--r--src/test/regress/expected/select_views_1.out46
-rw-r--r--src/test/regress/expected/subselect.out22
-rw-r--r--src/test/regress/expected/tablesample.out231
-rw-r--r--src/test/regress/expected/triggers.out106
-rw-r--r--src/test/regress/expected/updatable_views.out63
-rw-r--r--src/test/regress/expected/update.out34
-rw-r--r--src/test/regress/expected/with.out109
-rw-r--r--src/test/regress/input/constraints.source22
-rw-r--r--src/test/regress/output/constraints.source29
-rw-r--r--src/test/regress/parallel_schedule3
-rw-r--r--src/test/regress/pg_regress.c269
-rw-r--r--src/test/regress/pg_regress.h6
-rw-r--r--src/test/regress/pg_regress_main.c4
-rw-r--r--src/test/regress/regress.c22
-rw-r--r--src/test/regress/serial_schedule7
-rw-r--r--src/test/regress/sql/brin.sql215
-rw-r--r--src/test/regress/sql/conversion.sql4
-rw-r--r--src/test/regress/sql/create_index.sql20
-rw-r--r--src/test/regress/sql/create_type.sql6
-rw-r--r--src/test/regress/sql/event_trigger.sql2
-rw-r--r--src/test/regress/sql/foreign_key.sql47
-rw-r--r--src/test/regress/sql/geometry.sql6
-rw-r--r--src/test/regress/sql/gist.sql8
-rw-r--r--src/test/regress/sql/groupingsets.sql165
-rw-r--r--src/test/regress/sql/inet.sql7
-rw-r--r--src/test/regress/sql/insert_conflict.sql286
-rw-r--r--src/test/regress/sql/join.sql17
-rw-r--r--src/test/regress/sql/jsonb.sql121
-rw-r--r--src/test/regress/sql/opr_sanity.sql74
-rw-r--r--src/test/regress/sql/privileges.sql19
-rw-r--r--src/test/regress/sql/rangetypes.sql6
-rw-r--r--src/test/regress/sql/regproc.sql11
-rw-r--r--src/test/regress/sql/returning.sql6
-rw-r--r--src/test/regress/sql/rowsecurity.sql215
-rw-r--r--src/test/regress/sql/rules.sql119
-rw-r--r--src/test/regress/sql/select_views.sql14
-rw-r--r--src/test/regress/sql/subselect.sql14
-rw-r--r--src/test/regress/sql/tablesample.sql74
-rw-r--r--src/test/regress/sql/triggers.sql69
-rw-r--r--src/test/regress/sql/updatable_views.sql11
-rw-r--r--src/test/regress/sql/update.sql21
-rw-r--r--src/test/regress/sql/with.sql57
-rw-r--r--src/test/ssl/ServerSetup.pm105
-rw-r--r--src/test/ssl/t/001_ssltests.pl110
-rw-r--r--src/test/thread/.gitignore1
-rw-r--r--src/timezone/data/africa67
-rw-r--r--src/timezone/data/antarctica51
-rw-r--r--src/timezone/data/asia35
-rw-r--r--src/timezone/data/australasia27
-rw-r--r--src/timezone/data/backward1
-rw-r--r--src/timezone/data/backzone76
-rw-r--r--src/timezone/data/europe4
-rw-r--r--src/timezone/data/northamerica198
-rw-r--r--src/timezone/data/southamerica170
-rw-r--r--src/timezone/known_abbrevs.txt8
-rw-r--r--src/timezone/tznames/America.txt6
-rw-r--r--src/timezone/tznames/Asia.txt9
-rw-r--r--src/timezone/tznames/Default5
-rw-r--r--src/timezone/tznames/Pacific.txt2
-rw-r--r--src/tools/msvc/Install.pm33
-rw-r--r--src/tools/msvc/MSBuildProject.pm4
-rw-r--r--src/tools/msvc/Mkvcbuild.pm682
-rw-r--r--src/tools/msvc/Project.pm41
-rw-r--r--src/tools/msvc/Solution.pm149
-rw-r--r--src/tools/msvc/VCBuildProject.pm35
-rw-r--r--src/tools/msvc/VSObjectFactory.pm5
-rw-r--r--src/tools/msvc/config_default.pl34
-rw-r--r--src/tools/msvc/vcregress.pl131
-rw-r--r--src/tools/pgindent/README9
-rw-r--r--src/tools/pgindent/exclude_file_patterns1
-rwxr-xr-xsrc/tools/pgindent/pgindent2
-rw-r--r--src/tools/pgindent/typedefs.list231
-rw-r--r--src/tutorial/complex.c17
-rw-r--r--src/tutorial/funcs_new.c9
1143 files changed, 86723 insertions, 142377 deletions
diff --git a/.gitignore b/.gitignore
index d42475b167..13c4bde5e6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,4 +40,4 @@ lib*.pc
/StormDB*
/cscope*
/.gitignore
-
+/tmp_install/
diff --git a/GNUmakefile.in b/GNUmakefile.in
index 332ab7c36b..8bbd1cb83d 100644
--- a/GNUmakefile.in
+++ b/GNUmakefile.in
@@ -47,6 +47,7 @@ $(call recurse,distprep,doc-xc src config contrib)
# it's not built by default
$(call recurse,clean,doc-xc contrib src config)
clean:
+ rm -rf tmp_install/
# Garbage from autoconf:
@rm -rf autom4te.cache/
@@ -57,11 +58,10 @@ distclean maintainer-clean:
$(MAKE) -C contrib $@
$(MAKE) -C config $@
$(MAKE) -C src $@
- rm -f config.cache config.log config.status GNUmakefile
+ rm -rf tmp_install/
# Garbage from autoconf:
@rm -rf autom4te.cache/
-
-check check-tests: all
+ rm -f config.cache config.log config.status GNUmakefile
check check-tests installcheck installcheck-parallel installcheck-tests:
$(MAKE) -C src/test/regress $@
diff --git a/config/python.m4 b/config/python.m4
index 7012c536d7..b95c8ed3b3 100644
--- a/config/python.m4
+++ b/config/python.m4
@@ -44,6 +44,9 @@ if a == b:
print(a)
else:
print(a + ' ' + b)"`
+if test "$PORTNAME" = win32 ; then
+ python_includespec=`echo $python_includespec | sed 's,[[\]],/,g'`
+fi
AC_MSG_RESULT([$python_includespec])
AC_SUBST(python_majorversion)[]dnl
@@ -93,20 +96,5 @@ AC_MSG_RESULT([${python_libspec} ${python_additional_libs}])
AC_SUBST(python_libdir)[]dnl
AC_SUBST(python_libspec)[]dnl
AC_SUBST(python_additional_libs)[]dnl
-AC_SUBST(python_enable_shared)[]dnl
-
-# threaded python is not supported on OpenBSD
-AC_MSG_CHECKING(whether Python is compiled with thread support)
-pythreads=`${PYTHON} -c "import sys; print(int('thread' in sys.builtin_module_names))"`
-if test "$pythreads" = "1"; then
- AC_MSG_RESULT(yes)
- case $host_os in
- openbsd*)
- AC_MSG_ERROR([threaded Python not supported on this platform])
- ;;
- esac
-else
- AC_MSG_RESULT(no)
-fi
])# PGAC_CHECK_PYTHON_EMBED_SETUP
diff --git a/configure b/configure
index 7b9a6ed87b..01a31aaf76 100755
--- a/configure
+++ b/configure
@@ -642,7 +642,6 @@ TCL_SHLIB_LD_LIBS
TCL_SHARED_BUILD
TCL_LIB_SPEC
TCL_LIBS
-TCL_LIB_FILE
TCL_INCLUDE_SPEC
TCL_CONFIG_SH
TCLSH
@@ -663,7 +662,6 @@ HAVE_IPV6
LIBOBJS
UUID_LIBS
ZIC
-python_enable_shared
python_additional_libs
python_libspec
python_libdir
@@ -7396,6 +7394,12 @@ perl_useshrplib=`$PERL -MConfig -e 'print $Config{useshrplib}'`
test "$PORTNAME" = "win32" && perl_useshrplib=`echo $perl_useshrplib | sed 's,\\\\,/,g'`
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $perl_useshrplib" >&5
$as_echo "$perl_useshrplib" >&6; }
+ if test "$perl_useshrplib" != yes && test "$perl_useshrplib" != true; then
+ as_fn_error $? "cannot build PL/Perl because libperl is not a shared library
+You might have to rebuild your Perl installation. Refer to the
+documentation for details. Use --without-perl to disable building
+PL/Perl." "$LINENO" 5
+ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for flags to link embedded Perl" >&5
$as_echo_n "checking for flags to link embedded Perl... " >&6; }
@@ -7495,6 +7499,9 @@ if a == b:
print(a)
else:
print(a + ' ' + b)"`
+if test "$PORTNAME" = win32 ; then
+ python_includespec=`echo $python_includespec | sed 's,[\],/,g'`
+fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $python_includespec" >&5
$as_echo "$python_includespec" >&6; }
@@ -7531,24 +7538,41 @@ python_additional_libs=`${PYTHON} -c "import distutils.sysconfig; print(' '.join
$as_echo "${python_libspec} ${python_additional_libs}" >&6; }
-# threaded python is not supported on OpenBSD
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Python is compiled with thread support" >&5
-$as_echo_n "checking whether Python is compiled with thread support... " >&6; }
-pythreads=`${PYTHON} -c "import sys; print(int('thread' in sys.builtin_module_names))"`
-if test "$pythreads" = "1"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
- case $host_os in
- openbsd*)
- as_fn_error $? "threaded Python not supported on this platform" "$LINENO" 5
- ;;
- esac
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
+ # We need libpython as a shared library. With Python >=2.5, we
+ # check the Py_ENABLE_SHARED setting. On Debian, the setting is not
+ # correct before the jessie release (http://bugs.debian.org/695979).
+ # We also want to support older Python versions. So as a fallback
+ # we see if there is a file that is named like a shared library.
+
+ if test "$python_enable_shared" != 1; then
+ if test "$PORTNAME" = darwin; then
+ # OS X does supply a .dylib even though Py_ENABLE_SHARED does
+ # not get set. The file detection logic below doesn't succeed
+ # on older OS X versions, so make it explicit.
+ python_enable_shared=1
+ elif test "$PORTNAME" = win32; then
+ # Windows also needs an explicit override.
+ python_enable_shared=1
+ else
+ # We don't know the platform shared library extension here yet,
+ # so we try some candidates.
+ for dlsuffix in .so .sl; do
+ if ls "$python_libdir"/libpython*${dlsuffix}* >/dev/null 2>&1; then
+ python_enable_shared=1
+ break
+ fi
+ done
+ fi
+ fi
+
+ if test "$python_enable_shared" != 1; then
+ as_fn_error $? "cannot build PL/Python because libpython is not a shared library
+You might have to rebuild your Python installation. Refer to the
+documentation for details. Use --without-python to disable building
+PL/Python." "$LINENO" 5
+ fi
fi
if test "$cross_compiling" = yes && test -z "$with_system_tzdata"; then
@@ -14748,12 +14772,15 @@ fi
. "$TCL_CONFIG_SH"
eval TCL_INCLUDE_SPEC=\"$TCL_INCLUDE_SPEC\"
-eval TCL_LIB_FILE=\"$TCL_LIB_FILE\"
eval TCL_LIBS=\"$TCL_LIBS\"
eval TCL_LIB_SPEC=\"$TCL_LIB_SPEC\"
eval TCL_SHARED_BUILD=\"$TCL_SHARED_BUILD\"
- # now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h>
+ if test "$TCL_SHARED_BUILD" != 1; then
+ as_fn_error $? "cannot build PL/Tcl because Tcl is not a shared library
+Use --without-tcl to disable building PL/Tcl." "$LINENO" 5
+ fi
+ # now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h>
ac_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS"
ac_fn_c_check_header_mongrel "$LINENO" "tcl.h" "ac_cv_header_tcl_h" "$ac_includes_default"
diff --git a/configure.in b/configure.in
index 6de315e2b5..e0941e52bc 100644
--- a/configure.in
+++ b/configure.in
@@ -889,12 +889,52 @@ if test "$with_perl" = yes; then
AC_MSG_ERROR([Perl not found])
fi
PGAC_CHECK_PERL_CONFIGS([archlibexp,privlibexp,useshrplib])
+ if test "$perl_useshrplib" != yes && test "$perl_useshrplib" != true; then
+ AC_MSG_ERROR([cannot build PL/Perl because libperl is not a shared library
+You might have to rebuild your Perl installation. Refer to the
+documentation for details. Use --without-perl to disable building
+PL/Perl.])
+ fi
PGAC_CHECK_PERL_EMBED_LDFLAGS
fi
if test "$with_python" = yes; then
PGAC_PATH_PYTHON
PGAC_CHECK_PYTHON_EMBED_SETUP
+
+ # We need libpython as a shared library. With Python >=2.5, we
+ # check the Py_ENABLE_SHARED setting. On Debian, the setting is not
+ # correct before the jessie release (http://bugs.debian.org/695979).
+ # We also want to support older Python versions. So as a fallback
+ # we see if there is a file that is named like a shared library.
+
+ if test "$python_enable_shared" != 1; then
+ if test "$PORTNAME" = darwin; then
+ # OS X does supply a .dylib even though Py_ENABLE_SHARED does
+ # not get set. The file detection logic below doesn't succeed
+ # on older OS X versions, so make it explicit.
+ python_enable_shared=1
+ elif test "$PORTNAME" = win32; then
+ # Windows also needs an explicit override.
+ python_enable_shared=1
+ else
+ # We don't know the platform shared library extension here yet,
+ # so we try some candidates.
+ for dlsuffix in .so .sl; do
+ if ls "$python_libdir"/libpython*${dlsuffix}* >/dev/null 2>&1; then
+ python_enable_shared=1
+ break
+ fi
+ done
+ fi
+ fi
+
+ if test "$python_enable_shared" != 1; then
+ AC_MSG_ERROR([cannot build PL/Python because libpython is not a shared library
+You might have to rebuild your Python installation. Refer to the
+documentation for details. Use --without-python to disable building
+PL/Python.])
+ fi
fi
if test "$cross_compiling" = yes && test -z "$with_system_tzdata"; then
@@ -1942,8 +1982,12 @@ fi
if test "$with_tcl" = yes; then
PGAC_PATH_TCLCONFIGSH([$with_tclconfig])
PGAC_EVAL_TCLCONFIGSH([$TCL_CONFIG_SH],
- [TCL_INCLUDE_SPEC,TCL_LIB_FILE,TCL_LIBS,TCL_LIB_SPEC,TCL_SHARED_BUILD])
+ [TCL_INCLUDE_SPEC,TCL_LIBS,TCL_LIB_SPEC,TCL_SHARED_BUILD])
AC_SUBST(TCL_SHLIB_LD_LIBS)dnl don't want to double-evaluate that one
+ if test "$TCL_SHARED_BUILD" != 1; then
+ AC_MSG_ERROR([cannot build PL/Tcl because Tcl is not a shared library
+Use --without-tcl to disable building PL/Tcl.])
+ fi
# now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h>
ac_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS"
diff --git a/contrib/Makefile b/contrib/Makefile
index b8230aadf7..0eedf440a6 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -33,21 +33,20 @@ SUBDIRS = \
pg_prewarm \
pg_standby \
pg_stat_statements \
- pg_test_fsync \
- pg_test_timing \
pg_trgm \
pgcrypto \
pgrowlocks \
pgstattuple \
pgxc_clean \
pgxc_ctl \
- pg_xlogdump \
postgres_fdw \
seg \
spi \
tablefunc \
tcn \
test_decoding \
+ tsm_system_rows \
+ tsm_system_time \
tsearch2 \
unaccent \
vacuumlo \
@@ -77,6 +76,18 @@ else
ALWAYS_SUBDIRS += sepgsql
endif
+ifeq ($(with_perl),yes)
+SUBDIRS += hstore_plperl
+else
+ALWAYS_SUBDIRS += hstore_plperl
+endif
+
+ifeq ($(with_python),yes)
+SUBDIRS += hstore_plpython ltree_plpython
+else
+ALWAYS_SUBDIRS += hstore_plpython ltree_plpython
+endif
+
# Missing:
# start-scripts \ (does not have a makefile)
diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c
index 1a5bb3cdc6..f74e912ed7 100644
--- a/contrib/btree_gin/btree_gin.c
+++ b/contrib/btree_gin/btree_gin.c
@@ -5,7 +5,7 @@
#include <limits.h>
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "utils/builtins.h"
#include "utils/bytea.h"
#include "utils/cash.h"
@@ -113,12 +113,12 @@ gin_btree_compare_prefix(FunctionCallInfo fcinfo)
cmp;
cmp = DatumGetInt32(DirectFunctionCall2Coll(
- data->typecmp,
- PG_GET_COLLATION(),
- (data->strategy == BTLessStrategyNumber ||
- data->strategy == BTLessEqualStrategyNumber)
- ? data->datum : a,
- b));
+ data->typecmp,
+ PG_GET_COLLATION(),
+ (data->strategy == BTLessStrategyNumber ||
+ data->strategy == BTLessEqualStrategyNumber)
+ ? data->datum : a,
+ b));
switch (data->strategy)
{
@@ -186,14 +186,14 @@ Datum \
gin_extract_value_##type(PG_FUNCTION_ARGS) \
{ \
return gin_btree_extract_value(fcinfo, is_varlena); \
-} \
+} \
PG_FUNCTION_INFO_V1(gin_extract_query_##type); \
Datum \
gin_extract_query_##type(PG_FUNCTION_ARGS) \
{ \
return gin_btree_extract_query(fcinfo, \
is_varlena, leftmostvalue, typecmp); \
-} \
+} \
PG_FUNCTION_INFO_V1(gin_compare_prefix_##type); \
Datum \
gin_compare_prefix_##type(PG_FUNCTION_ARGS) \
@@ -209,6 +209,7 @@ leftmostvalue_int2(void)
{
return Int16GetDatum(SHRT_MIN);
}
+
GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp)
static Datum
@@ -216,6 +217,7 @@ leftmostvalue_int4(void)
{
return Int32GetDatum(INT_MIN);
}
+
GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp)
static Datum
@@ -226,6 +228,7 @@ leftmostvalue_int8(void)
*/
return Int64GetDatum(SEQ_MINVALUE);
}
+
GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp)
static Datum
@@ -233,6 +236,7 @@ leftmostvalue_float4(void)
{
return Float4GetDatum(-get_float4_infinity());
}
+
GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp)
static Datum
@@ -240,6 +244,7 @@ leftmostvalue_float8(void)
{
return Float8GetDatum(-get_float8_infinity());
}
+
GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp)
static Datum
@@ -250,6 +255,7 @@ leftmostvalue_money(void)
*/
return Int64GetDatum(SEQ_MINVALUE);
}
+
GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp)
static Datum
@@ -257,6 +263,7 @@ leftmostvalue_oid(void)
{
return ObjectIdGetDatum(0);
}
+
GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp)
static Datum
@@ -264,6 +271,7 @@ leftmostvalue_timestamp(void)
{
return TimestampGetDatum(DT_NOBEGIN);
}
+
GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp)
GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp)
@@ -273,6 +281,7 @@ leftmostvalue_time(void)
{
return TimeADTGetDatum(0);
}
+
GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp)
static Datum
@@ -285,6 +294,7 @@ leftmostvalue_timetz(void)
return TimeTzADTPGetDatum(v);
}
+
GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp)
static Datum
@@ -292,6 +302,7 @@ leftmostvalue_date(void)
{
return DateADTGetDatum(DATEVAL_NOBEGIN);
}
+
GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp)
static Datum
@@ -304,6 +315,7 @@ leftmostvalue_interval(void)
v->month = 0;
return IntervalPGetDatum(v);
}
+
GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp)
static Datum
@@ -313,6 +325,7 @@ leftmostvalue_macaddr(void)
return MacaddrPGetDatum(v);
}
+
GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp)
static Datum
@@ -320,6 +333,7 @@ leftmostvalue_inet(void)
{
return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0"));
}
+
GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp)
GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp)
@@ -329,6 +343,7 @@ leftmostvalue_text(void)
{
return PointerGetDatum(cstring_to_text_with_len("", 0));
}
+
GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp)
static Datum
@@ -336,6 +351,7 @@ leftmostvalue_char(void)
{
return CharGetDatum(SCHAR_MIN);
}
+
GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp)
GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp)
@@ -348,6 +364,7 @@ leftmostvalue_bit(void)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
+
GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp)
static Datum
@@ -358,6 +375,7 @@ leftmostvalue_varbit(void)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
+
GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp)
/*
@@ -402,4 +420,5 @@ leftmostvalue_numeric(void)
{
return PointerGetDatum(NULL);
}
+
GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 5bfe659f91..99cb41f5f5 100644
--- a/contrib/btree_gist/btree_utils_num.c
+++ b/contrib/btree_gist/btree_utils_num.c
@@ -13,7 +13,7 @@
GISTENTRY *
gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
{
- GISTENTRY *retval;
+ GISTENTRY *retval;
if (entry->leafkey)
{
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index 78e8662add..8105a3b035 100644
--- a/contrib/btree_gist/btree_utils_var.c
+++ b/contrib/btree_gist/btree_utils_var.c
@@ -71,7 +71,7 @@ gbt_var_key_readable(const GBT_VARKEY *k)
* Create a leaf-entry to store in the index, from a single Datum.
*/
static GBT_VARKEY *
-gbt_var_key_from_datum(const struct varlena *u)
+gbt_var_key_from_datum(const struct varlena * u)
{
int32 lowersize = VARSIZE(u);
GBT_VARKEY *r;
diff --git a/contrib/citext/Makefile b/contrib/citext/Makefile
index 267854b5de..61e04bce7a 100644
--- a/contrib/citext/Makefile
+++ b/contrib/citext/Makefile
@@ -3,7 +3,7 @@
MODULES = citext
EXTENSION = citext
-DATA = citext--1.0.sql citext--unpackaged--1.0.sql
+DATA = citext--1.1.sql citext--1.0--1.1.sql citext--unpackaged--1.0.sql
PGFILEDESC = "citext - case-insensitive character string data type"
REGRESS = citext
diff --git a/contrib/citext/citext--1.0--1.1.sql b/contrib/citext/citext--1.0--1.1.sql
new file mode 100644
index 0000000000..e06627e025
--- /dev/null
+++ b/contrib/citext/citext--1.0--1.1.sql
@@ -0,0 +1,21 @@
+/* contrib/citext/citext--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION citext UPDATE TO '1.1'" to load this file. \quit
+
+/* First we have to remove them from the extension */
+ALTER EXTENSION citext DROP FUNCTION regexp_matches( citext, citext );
+ALTER EXTENSION citext DROP FUNCTION regexp_matches( citext, citext, text );
+
+/* Then we can drop them */
+DROP FUNCTION regexp_matches( citext, citext );
+DROP FUNCTION regexp_matches( citext, citext, text );
+
+/* Now redefine */
+CREATE FUNCTION regexp_matches( citext, citext ) RETURNS SETOF TEXT[] AS $$
+ SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, 'i' );
+$$ LANGUAGE SQL IMMUTABLE STRICT ROWS 1;
+
+CREATE FUNCTION regexp_matches( citext, citext, text ) RETURNS SETOF TEXT[] AS $$
+ SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, CASE WHEN pg_catalog.strpos($3, 'c') = 0 THEN $3 || 'i' ELSE $3 END );
+$$ LANGUAGE SQL IMMUTABLE STRICT ROWS 10;
diff --git a/contrib/citext/citext--1.0.sql b/contrib/citext/citext--1.1.sql
index 7db2e8d0ae..9ea7c64709 100644
--- a/contrib/citext/citext--1.0.sql
+++ b/contrib/citext/citext--1.1.sql
@@ -1,4 +1,4 @@
-/* contrib/citext/citext--1.0.sql */
+/* contrib/citext/citext--1.1.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION citext" to load this file. \quit
@@ -440,13 +440,13 @@ CREATE OPERATOR !~~* (
-- XXX TODO Ideally these would be implemented in C.
--
-CREATE FUNCTION regexp_matches( citext, citext ) RETURNS TEXT[] AS $$
+CREATE FUNCTION regexp_matches( citext, citext ) RETURNS SETOF TEXT[] AS $$
SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, 'i' );
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT ROWS 1;
-CREATE FUNCTION regexp_matches( citext, citext, text ) RETURNS TEXT[] AS $$
+CREATE FUNCTION regexp_matches( citext, citext, text ) RETURNS SETOF TEXT[] AS $$
SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, CASE WHEN pg_catalog.strpos($3, 'c') = 0 THEN $3 || 'i' ELSE $3 END );
-$$ LANGUAGE SQL IMMUTABLE STRICT;
+$$ LANGUAGE SQL IMMUTABLE STRICT ROWS 10;
CREATE FUNCTION regexp_replace( citext, citext, text ) returns TEXT AS $$
SELECT pg_catalog.regexp_replace( $1::pg_catalog.text, $2::pg_catalog.text, $3, 'i');
diff --git a/contrib/citext/citext.control b/contrib/citext/citext.control
index 3eb01a3360..ef90a97bc9 100644
--- a/contrib/citext/citext.control
+++ b/contrib/citext/citext.control
@@ -1,5 +1,5 @@
# citext extension
comment = 'data type for case-insensitive character strings'
-default_version = '1.0'
+default_version = '1.1'
module_pathname = '$libdir/citext'
relocatable = true
diff --git a/contrib/citext/expected/citext.out b/contrib/citext/expected/citext.out
index 411b689b4b..373fe6da54 100644
--- a/contrib/citext/expected/citext.out
+++ b/contrib/citext/expected/citext.out
@@ -1846,11 +1846,18 @@ SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, ''::cite
(1 row)
-- c forces case-sensitive
-SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, 'c'::citext) = ARRAY[ 'bar', 'beque' ] AS "null";
- null
-------
-
-(1 row)
+SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, 'c'::citext) = ARRAY[ 'bar', 'beque' ] AS "no rows";
+ no rows
+---------
+(0 rows)
+
+-- g allows multiple output rows
+SELECT regexp_matches('foobarbequebazmorebarbequetoo'::citext, '(BAR)(BEQUE)'::citext, 'g'::citext) AS "two rows";
+ two rows
+-------------
+ {bar,beque}
+ {bar,beque}
+(2 rows)
SELECT regexp_replace('Thomas'::citext, '.[mN]a.', 'M') = 'ThM' AS t;
t
diff --git a/contrib/citext/expected/citext_1.out b/contrib/citext/expected/citext_1.out
index da3862f49b..fcadd8d392 100644
--- a/contrib/citext/expected/citext_1.out
+++ b/contrib/citext/expected/citext_1.out
@@ -1846,11 +1846,18 @@ SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, ''::cite
(1 row)
-- c forces case-sensitive
-SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, 'c'::citext) = ARRAY[ 'bar', 'beque' ] AS "null";
- null
-------
-
-(1 row)
+SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, 'c'::citext) = ARRAY[ 'bar', 'beque' ] AS "no rows";
+ no rows
+---------
+(0 rows)
+
+-- g allows multiple output rows
+SELECT regexp_matches('foobarbequebazmorebarbequetoo'::citext, '(BAR)(BEQUE)'::citext, 'g'::citext) AS "two rows";
+ two rows
+-------------
+ {bar,beque}
+ {bar,beque}
+(2 rows)
SELECT regexp_replace('Thomas'::citext, '.[mN]a.', 'M') = 'ThM' AS t;
t
diff --git a/contrib/citext/sql/citext.sql b/contrib/citext/sql/citext.sql
index 27678fab5d..950895baea 100644
--- a/contrib/citext/sql/citext.sql
+++ b/contrib/citext/sql/citext.sql
@@ -601,7 +601,9 @@ SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)', '') = ARRAY[ 'ba
SELECT regexp_matches('foobarbequebaz', '(BAR)(BEQUE)'::citext, '') = ARRAY[ 'bar', 'beque' ] AS t;
SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, ''::citext) = ARRAY[ 'bar', 'beque' ] AS t;
-- c forces case-sensitive
-SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, 'c'::citext) = ARRAY[ 'bar', 'beque' ] AS "null";
+SELECT regexp_matches('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, 'c'::citext) = ARRAY[ 'bar', 'beque' ] AS "no rows";
+-- g allows multiple output rows
+SELECT regexp_matches('foobarbequebazmorebarbequetoo'::citext, '(BAR)(BEQUE)'::citext, 'g'::citext) AS "two rows";
SELECT regexp_replace('Thomas'::citext, '.[mN]a.', 'M') = 'ThM' AS t;
SELECT regexp_replace('Thomas'::citext, '.[MN]A.', 'M') = 'ThM' AS t;
diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c
index b9ccad994a..113c66383a 100644
--- a/contrib/cube/cube.c
+++ b/contrib/cube/cube.c
@@ -12,7 +12,7 @@
#include <math.h>
#include "access/gist.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "utils/array.h"
#include "utils/builtins.h"
diff --git a/contrib/earthdistance/Makefile b/contrib/earthdistance/Makefile
index 93dcbe3a31..cde1ae630f 100644
--- a/contrib/earthdistance/Makefile
+++ b/contrib/earthdistance/Makefile
@@ -7,7 +7,7 @@ DATA = earthdistance--1.0.sql earthdistance--unpackaged--1.0.sql
PGFILEDESC = "earthdistance - calculate distances on the surface of the Earth"
REGRESS = earthdistance
-REGRESS_OPTS = --extra-install=contrib/cube
+EXTRA_INSTALL = contrib/cube
LDFLAGS_SL += $(filter -lm, $(LIBS))
diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index 4368897581..499f24ff28 100644
--- a/contrib/file_fdw/file_fdw.c
+++ b/contrib/file_fdw/file_fdw.c
@@ -34,6 +34,7 @@
#include "optimizer/var.h"
#include "utils/memutils.h"
#include "utils/rel.h"
+#include "utils/sampling.h"
PG_MODULE_MAGIC;
@@ -561,7 +562,8 @@ fileGetForeignPlan(PlannerInfo *root,
scan_clauses,
scan_relid,
NIL, /* no expressions to evaluate */
- best_path->fdw_private);
+ best_path->fdw_private,
+ NIL /* no custom tlist */ );
}
/*
@@ -1005,7 +1007,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
{
int numrows = 0;
double rowstoskip = -1; /* -1 means not set yet */
- double rstate;
+ ReservoirStateData rstate;
TupleDesc tupDesc;
Datum *values;
bool *nulls;
@@ -1043,7 +1045,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
ALLOCSET_DEFAULT_MAXSIZE);
/* Prepare for sampling rows */
- rstate = anl_init_selection_state(targrows);
+ reservoir_init_selection_state(&rstate, targrows);
/* Set up callback to identify error line number. */
errcallback.callback = CopyFromErrorCallback;
@@ -1087,7 +1089,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
* not-yet-incremented value of totalrows as t.
*/
if (rowstoskip < 0)
- rowstoskip = anl_get_next_S(*totalrows, targrows, &rstate);
+ rowstoskip = reservoir_get_next_S(&rstate, *totalrows, targrows);
if (rowstoskip <= 0)
{
@@ -1095,7 +1097,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
* Found a suitable tuple, so save it, replacing one old tuple
* at random
*/
- int k = (int) (targrows * anl_random_fract());
+ int k = (int) (targrows * sampler_random_fract(rstate.randstate));
Assert(k >= 0 && k < targrows);
heap_freetuple(rows[k]);
diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c
index 7c8457e734..147c8501ee 100644
--- a/contrib/fuzzystrmatch/dmetaphone.c
+++ b/contrib/fuzzystrmatch/dmetaphone.c
@@ -195,7 +195,7 @@ dmetaphone_alt(PG_FUNCTION_ARGS)
* in a case like this.
*/
-#define META_FREE(x) ((void)true) /* pfree((x)) */
+#define META_FREE(x) ((void)true) /* pfree((x)) */
#else /* not defined DMETAPHONE_MAIN */
/* use the standard malloc library when not running in PostgreSQL */
diff --git a/contrib/hstore/hstore_gin.c b/contrib/hstore/hstore_gin.c
index 68f9061db1..919181d375 100644
--- a/contrib/hstore/hstore_gin.c
+++ b/contrib/hstore/hstore_gin.c
@@ -4,7 +4,7 @@
#include "postgres.h"
#include "access/gin.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "catalog/pg_type.h"
#include "hstore.h"
diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c
index 06f3c9359b..0fb769de7d 100644
--- a/contrib/hstore/hstore_gist.c
+++ b/contrib/hstore/hstore_gist.c
@@ -4,7 +4,7 @@
#include "postgres.h"
#include "access/gist.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "catalog/pg_type.h"
#include "utils/pg_crc.h"
@@ -72,7 +72,7 @@ typedef struct
static pg_crc32
crc32_sz(char *buf, int size)
{
- pg_crc32 crc;
+ pg_crc32 crc;
INIT_TRADITIONAL_CRC32(crc);
COMP_TRADITIONAL_CRC32(crc, buf, size);
diff --git a/contrib/hstore_plperl/.gitignore b/contrib/hstore_plperl/.gitignore
new file mode 100644
index 0000000000..5dcb3ff972
--- /dev/null
+++ b/contrib/hstore_plperl/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/contrib/hstore_plperl/Makefile b/contrib/hstore_plperl/Makefile
new file mode 100644
index 0000000000..19a8ab4493
--- /dev/null
+++ b/contrib/hstore_plperl/Makefile
@@ -0,0 +1,37 @@
+# contrib/hstore_plperl/Makefile
+
+MODULE_big = hstore_plperl
+OBJS = hstore_plperl.o $(WIN32RES)
+PGFILEDESC = "hstore_plperl - hstore transform for plperl"
+
+PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plperl -I$(top_srcdir)/contrib/hstore
+
+EXTENSION = hstore_plperl hstore_plperlu
+DATA = hstore_plperl--1.0.sql hstore_plperlu--1.0.sql
+
+REGRESS = hstore_plperl hstore_plperlu create_transform
+EXTRA_INSTALL = contrib/hstore
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/hstore_plperl
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
+
+ifeq ($(PORTNAME), win32)
+# these settings are the same as for plperl
+override CPPFLAGS += -DPLPERL_HAVE_UID_GID -Wno-comment
+# This means we need an in-tree build on Windows, not a pgxs build
+SHLIB_LINK += ../hstore/libhstore.a $(wildcard ../../src/pl/plperl/libperl*.a)
+endif
+
+# As with plperl we need to make sure that the CORE directory is included
+# last, probably because it sometimes contains some header files with names
+# that clash with some of ours, or with some that we include, notably on
+# Windows.
+override CPPFLAGS := $(CPPFLAGS) -I$(perl_archlibexp)/CORE
diff --git a/contrib/hstore_plperl/expected/create_transform.out b/contrib/hstore_plperl/expected/create_transform.out
new file mode 100644
index 0000000000..c588d33ab8
--- /dev/null
+++ b/contrib/hstore_plperl/expected/create_transform.out
@@ -0,0 +1,75 @@
+-- general regression test for transforms
+DROP EXTENSION IF EXISTS hstore CASCADE;
+NOTICE: extension "hstore" does not exist, skipping
+DROP EXTENSION IF EXISTS plperl CASCADE;
+NOTICE: extension "plperl" does not exist, skipping
+DROP EXTENSION IF EXISTS hstore_plperl CASCADE;
+NOTICE: extension "hstore_plperl" does not exist, skipping
+CREATE EXTENSION hstore;
+CREATE EXTENSION plperl;
+CREATE FUNCTION hstore_to_plperl(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS '$libdir/hstore_plperl';
+CREATE FUNCTION plperl_to_hstore(val internal) RETURNS hstore
+LANGUAGE C STRICT IMMUTABLE
+AS '$libdir/hstore_plperl';
+CREATE TRANSFORM FOR foo LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+ERROR: type "foo" does not exist
+CREATE TRANSFORM FOR hstore LANGUAGE foo (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+ERROR: language "foo" does not exist
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_out(hstore), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+ERROR: return data type of FROM SQL function must be "internal"
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION internal_in(cstring), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+ERROR: first argument of transform function must be type "internal"
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- ok
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+ERROR: transform for type hstore language "plperl" already exists
+CREATE OR REPLACE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- ok
+CREATE OR REPLACE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal)); -- ok
+CREATE OR REPLACE TRANSFORM FOR hstore LANGUAGE plperl (TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- ok
+COMMENT ON TRANSFORM FOR hstore LANGUAGE plperl IS 'test';
+DROP TRANSFORM IF EXISTS FOR fake_type LANGUAGE plperl;
+NOTICE: type "fake_type" does not exist, skipping
+DROP TRANSFORM IF EXISTS FOR hstore LANGUAGE fake_lang;
+NOTICE: transform for type hstore language "fake_lang" does not exist, skipping
+DROP TRANSFORM FOR foo LANGUAGE plperl;
+ERROR: type "foo" does not exist
+DROP TRANSFORM FOR hstore LANGUAGE foo;
+ERROR: language "foo" does not exist
+DROP TRANSFORM FOR hstore LANGUAGE plperl;
+DROP TRANSFORM IF EXISTS FOR hstore LANGUAGE plperl;
+NOTICE: transform for type hstore language "plperl" does not exist, skipping
+DROP FUNCTION hstore_to_plperl(val internal);
+DROP FUNCTION plperl_to_hstore(val internal);
+CREATE EXTENSION hstore_plperl;
+\dx+ hstore_plperl
+ Objects in extension "hstore_plperl"
+ Object Description
+--------------------------------------
+ function hstore_to_plperl(internal)
+ function plperl_to_hstore(internal)
+ transform for hstore language plperl
+(3 rows)
+
+ALTER EXTENSION hstore_plperl DROP TRANSFORM FOR hstore LANGUAGE plperl;
+\dx+ hstore_plperl
+Objects in extension "hstore_plperl"
+ Object Description
+-------------------------------------
+ function hstore_to_plperl(internal)
+ function plperl_to_hstore(internal)
+(2 rows)
+
+ALTER EXTENSION hstore_plperl ADD TRANSFORM FOR hstore LANGUAGE plperl;
+\dx+ hstore_plperl
+ Objects in extension "hstore_plperl"
+ Object Description
+--------------------------------------
+ function hstore_to_plperl(internal)
+ function plperl_to_hstore(internal)
+ transform for hstore language plperl
+(3 rows)
+
+DROP EXTENSION hstore CASCADE;
+NOTICE: drop cascades to extension hstore_plperl
+DROP EXTENSION plperl CASCADE;
diff --git a/contrib/hstore_plperl/expected/hstore_plperl.out b/contrib/hstore_plperl/expected/hstore_plperl.out
new file mode 100644
index 0000000000..cf384eba64
--- /dev/null
+++ b/contrib/hstore_plperl/expected/hstore_plperl.out
@@ -0,0 +1,48 @@
+CREATE EXTENSION hstore;
+CREATE EXTENSION plperl;
+CREATE EXTENSION hstore_plperl;
+SELECT transforms.udt_schema, transforms.udt_name,
+ routine_schema, routine_name,
+ group_name, transform_type
+FROM information_schema.transforms JOIN information_schema.routines
+ USING (specific_catalog, specific_schema, specific_name)
+ORDER BY 1, 2, 5, 6;
+ udt_schema | udt_name | routine_schema | routine_name | group_name | transform_type
+------------+----------+----------------+------------------+------------+----------------
+ public | hstore | public | hstore_to_plperl | plperl | FROM SQL
+ public | hstore | public | plperl_to_hstore | plperl | TO SQL
+(2 rows)
+
+-- test perl -> hstore
+CREATE FUNCTION test2() RETURNS hstore
+LANGUAGE plperl
+TRANSFORM FOR TYPE hstore
+AS $$
+$val = {a => 1, b => 'boo', c => undef};
+return $val;
+$$;
+SELECT test2();
+ test2
+---------------------------------
+ "a"=>"1", "b"=>"boo", "c"=>NULL
+(1 row)
+
+-- test perl -> hstore[]
+CREATE FUNCTION test2arr() RETURNS hstore[]
+LANGUAGE plperl
+TRANSFORM FOR TYPE hstore
+AS $$
+$val = [{a => 1, b => 'boo', c => undef}, {d => 2}];
+return $val;
+$$;
+SELECT test2arr();
+ test2arr
+--------------------------------------------------------------
+ {"\"a\"=>\"1\", \"b\"=>\"boo\", \"c\"=>NULL","\"d\"=>\"2\""}
+(1 row)
+
+DROP FUNCTION test2();
+DROP FUNCTION test2arr();
+DROP EXTENSION hstore_plperl;
+DROP EXTENSION hstore;
+DROP EXTENSION plperl;
diff --git a/contrib/hstore_plperl/expected/hstore_plperlu.out b/contrib/hstore_plperl/expected/hstore_plperlu.out
new file mode 100644
index 0000000000..8c689ad3ad
--- /dev/null
+++ b/contrib/hstore_plperl/expected/hstore_plperlu.out
@@ -0,0 +1,180 @@
+CREATE EXTENSION hstore;
+CREATE EXTENSION plperlu;
+CREATE EXTENSION hstore_plperlu;
+SELECT transforms.udt_schema, transforms.udt_name,
+ routine_schema, routine_name,
+ group_name, transform_type
+FROM information_schema.transforms JOIN information_schema.routines
+ USING (specific_catalog, specific_schema, specific_name)
+ORDER BY 1, 2, 5, 6;
+ udt_schema | udt_name | routine_schema | routine_name | group_name | transform_type
+------------+----------+----------------+-------------------+------------+----------------
+ public | hstore | public | hstore_to_plperlu | plperlu | FROM SQL
+ public | hstore | public | plperlu_to_hstore | plperlu | TO SQL
+(2 rows)
+
+-- test hstore -> perl
+CREATE FUNCTION test1(val hstore) RETURNS int
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_[0]));
+return scalar(keys %{$_[0]});
+$$;
+SELECT test1('aa=>bb, cc=>NULL'::hstore);
+INFO: $VAR1 = {
+ 'aa' => 'bb',
+ 'cc' => undef
+ };
+
+CONTEXT: PL/Perl function "test1"
+ test1
+-------
+ 2
+(1 row)
+
+CREATE FUNCTION test1none(val hstore) RETURNS int
+LANGUAGE plperlu
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_[0]));
+return scalar(keys %{$_[0]});
+$$;
+SELECT test1none('aa=>bb, cc=>NULL'::hstore);
+INFO: $VAR1 = '"aa"=>"bb", "cc"=>NULL';
+
+CONTEXT: PL/Perl function "test1none"
+ test1none
+-----------
+ 0
+(1 row)
+
+CREATE FUNCTION test1list(val hstore) RETURNS int
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_[0]));
+return scalar(keys %{$_[0]});
+$$;
+SELECT test1list('aa=>bb, cc=>NULL'::hstore);
+INFO: $VAR1 = {
+ 'aa' => 'bb',
+ 'cc' => undef
+ };
+
+CONTEXT: PL/Perl function "test1list"
+ test1list
+-----------
+ 2
+(1 row)
+
+-- test hstore[] -> perl
+CREATE FUNCTION test1arr(val hstore[]) RETURNS int
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_[0]->[0], $_[0]->[1]));
+return scalar(keys %{$_[0]});
+$$;
+SELECT test1arr(array['aa=>bb, cc=>NULL'::hstore, 'dd=>ee']);
+INFO: $VAR1 = {
+ 'aa' => 'bb',
+ 'cc' => undef
+ };
+$VAR2 = {
+ 'dd' => 'ee'
+ };
+
+CONTEXT: PL/Perl function "test1arr"
+ test1arr
+----------
+ 2
+(1 row)
+
+-- test as part of prepare/execute
+CREATE FUNCTION test3() RETURNS void
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+
+$rv = spi_exec_query(q{SELECT 'aa=>bb, cc=>NULL'::hstore AS col1});
+elog(INFO, Dumper($rv->{rows}[0]->{col1}));
+
+$val = {a => 1, b => 'boo', c => undef};
+$plan = spi_prepare(q{SELECT $1::text AS col1}, "hstore");
+$rv = spi_exec_prepared($plan, {}, $val);
+elog(INFO, Dumper($rv->{rows}[0]->{col1}));
+$$;
+SELECT test3();
+INFO: $VAR1 = {
+ 'aa' => 'bb',
+ 'cc' => undef
+ };
+
+CONTEXT: PL/Perl function "test3"
+INFO: $VAR1 = '"a"=>"1", "b"=>"boo", "c"=>NULL';
+
+CONTEXT: PL/Perl function "test3"
+ test3
+-------
+
+(1 row)
+
+-- test trigger
+CREATE TABLE test1 (a int, b hstore);
+INSERT INTO test1 VALUES (1, 'aa=>bb, cc=>NULL');
+SELECT * FROM test1;
+ a | b
+---+------------------------
+ 1 | "aa"=>"bb", "cc"=>NULL
+(1 row)
+
+CREATE FUNCTION test4() RETURNS trigger
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_TD->{new}));
+if ($_TD->{new}{a} == 1) {
+ $_TD->{new}{b} = {a => 1, b => 'boo', c => undef};
+}
+
+return "MODIFY";
+$$;
+CREATE TRIGGER test4 BEFORE UPDATE ON test1 FOR EACH ROW EXECUTE PROCEDURE test4();
+UPDATE test1 SET a = a;
+INFO: $VAR1 = {
+ 'a' => '1',
+ 'b' => {
+ 'aa' => 'bb',
+ 'cc' => undef
+ }
+ };
+
+CONTEXT: PL/Perl function "test4"
+SELECT * FROM test1;
+ a | b
+---+---------------------------------
+ 1 | "a"=>"1", "b"=>"boo", "c"=>NULL
+(1 row)
+
+DROP TABLE test1;
+DROP FUNCTION test1(hstore);
+DROP FUNCTION test1none(hstore);
+DROP FUNCTION test1list(hstore);
+DROP FUNCTION test1arr(hstore[]);
+DROP FUNCTION test3();
+DROP FUNCTION test4();
+DROP EXTENSION hstore_plperlu;
+DROP EXTENSION hstore;
+DROP EXTENSION plperlu;
diff --git a/contrib/hstore_plperl/hstore_plperl--1.0.sql b/contrib/hstore_plperl/hstore_plperl--1.0.sql
new file mode 100644
index 0000000000..ea0ad7688d
--- /dev/null
+++ b/contrib/hstore_plperl/hstore_plperl--1.0.sql
@@ -0,0 +1,17 @@
+-- make sure the prerequisite libraries are loaded
+DO '' LANGUAGE plperl;
+SELECT NULL::hstore;
+
+
+CREATE FUNCTION hstore_to_plperl(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME';
+
+CREATE FUNCTION plperl_to_hstore(val internal) RETURNS hstore
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME';
+
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (
+ FROM SQL WITH FUNCTION hstore_to_plperl(internal),
+ TO SQL WITH FUNCTION plperl_to_hstore(internal)
+);
diff --git a/contrib/hstore_plperl/hstore_plperl.c b/contrib/hstore_plperl/hstore_plperl.c
new file mode 100644
index 0000000000..fbbb4c8e76
--- /dev/null
+++ b/contrib/hstore_plperl/hstore_plperl.c
@@ -0,0 +1,88 @@
+#include "postgres.h"
+#undef _
+#include "fmgr.h"
+#include "plperl.h"
+#include "plperl_helpers.h"
+#include "hstore.h"
+
+PG_MODULE_MAGIC;
+
+
+PG_FUNCTION_INFO_V1(hstore_to_plperl);
+
+Datum
+hstore_to_plperl(PG_FUNCTION_ARGS)
+{
+ HStore *in = PG_GETARG_HS(0);
+ int i;
+ int count = HS_COUNT(in);
+ char *base = STRPTR(in);
+ HEntry *entries = ARRPTR(in);
+ HV *hv;
+
+ hv = newHV();
+
+ for (i = 0; i < count; i++)
+ {
+ const char *key;
+ SV *value;
+
+ key = pnstrdup(HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
+ value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base, i), HS_VALLEN(entries, i)));
+
+ (void) hv_store(hv, key, strlen(key), value, 0);
+ }
+
+ return PointerGetDatum(newRV((SV *) hv));
+}
+
+
+PG_FUNCTION_INFO_V1(plperl_to_hstore);
+
+Datum
+plperl_to_hstore(PG_FUNCTION_ARGS)
+{
+ HV *hv;
+ HE *he;
+ int32 buflen;
+ int32 i;
+ int32 pcount;
+ HStore *out;
+ Pairs *pairs;
+
+ hv = (HV *) SvRV((SV *) PG_GETARG_POINTER(0));
+
+ pcount = hv_iterinit(hv);
+
+ pairs = palloc(pcount * sizeof(Pairs));
+
+ i = 0;
+ while ((he = hv_iternext(hv)))
+ {
+ char *key = sv2cstr(HeSVKEY_force(he));
+ SV *value = HeVAL(he);
+
+ pairs[i].key = pstrdup(key);
+ pairs[i].keylen = hstoreCheckKeyLen(strlen(pairs[i].key));
+ pairs[i].needfree = true;
+
+ if (!SvOK(value))
+ {
+ pairs[i].val = NULL;
+ pairs[i].vallen = 0;
+ pairs[i].isnull = true;
+ }
+ else
+ {
+ pairs[i].val = pstrdup(sv2cstr(value));
+ pairs[i].vallen = hstoreCheckValLen(strlen(pairs[i].val));
+ pairs[i].isnull = false;
+ }
+
+ i++;
+ }
+
+ pcount = hstoreUniquePairs(pairs, pcount, &buflen);
+ out = hstorePairs(pairs, pcount, buflen);
+ PG_RETURN_POINTER(out);
+}
diff --git a/contrib/hstore_plperl/hstore_plperl.control b/contrib/hstore_plperl/hstore_plperl.control
new file mode 100644
index 0000000000..16277f68c1
--- /dev/null
+++ b/contrib/hstore_plperl/hstore_plperl.control
@@ -0,0 +1,6 @@
+# hstore_plperl extension
+comment = 'transform between hstore and plperl'
+default_version = '1.0'
+module_pathname = '$libdir/hstore_plperl'
+relocatable = true
+requires = 'hstore,plperl'
diff --git a/contrib/hstore_plperl/hstore_plperlu--1.0.sql b/contrib/hstore_plperl/hstore_plperlu--1.0.sql
new file mode 100644
index 0000000000..46ad35c487
--- /dev/null
+++ b/contrib/hstore_plperl/hstore_plperlu--1.0.sql
@@ -0,0 +1,17 @@
+-- make sure the prerequisite libraries are loaded
+DO '' LANGUAGE plperlu;
+SELECT NULL::hstore;
+
+
+CREATE FUNCTION hstore_to_plperlu(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME', 'hstore_to_plperl';
+
+CREATE FUNCTION plperlu_to_hstore(val internal) RETURNS hstore
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME', 'plperl_to_hstore';
+
+CREATE TRANSFORM FOR hstore LANGUAGE plperlu (
+ FROM SQL WITH FUNCTION hstore_to_plperlu(internal),
+ TO SQL WITH FUNCTION plperlu_to_hstore(internal)
+);
diff --git a/contrib/hstore_plperl/hstore_plperlu.control b/contrib/hstore_plperl/hstore_plperlu.control
new file mode 100644
index 0000000000..c8d43b41a5
--- /dev/null
+++ b/contrib/hstore_plperl/hstore_plperlu.control
@@ -0,0 +1,6 @@
+# hstore_plperlu extension
+comment = 'transform between hstore and plperlu'
+default_version = '1.0'
+module_pathname = '$libdir/hstore_plperl'
+relocatable = true
+requires = 'hstore,plperlu'
diff --git a/contrib/hstore_plperl/sql/create_transform.sql b/contrib/hstore_plperl/sql/create_transform.sql
new file mode 100644
index 0000000000..d0a12ada9f
--- /dev/null
+++ b/contrib/hstore_plperl/sql/create_transform.sql
@@ -0,0 +1,49 @@
+-- general regression test for transforms
+
+DROP EXTENSION IF EXISTS hstore CASCADE;
+DROP EXTENSION IF EXISTS plperl CASCADE;
+DROP EXTENSION IF EXISTS hstore_plperl CASCADE;
+
+CREATE EXTENSION hstore;
+CREATE EXTENSION plperl;
+
+CREATE FUNCTION hstore_to_plperl(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS '$libdir/hstore_plperl';
+
+CREATE FUNCTION plperl_to_hstore(val internal) RETURNS hstore
+LANGUAGE C STRICT IMMUTABLE
+AS '$libdir/hstore_plperl';
+
+CREATE TRANSFORM FOR foo LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+CREATE TRANSFORM FOR hstore LANGUAGE foo (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_out(hstore), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION internal_in(cstring), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- ok
+CREATE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- fail
+CREATE OR REPLACE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal), TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- ok
+CREATE OR REPLACE TRANSFORM FOR hstore LANGUAGE plperl (FROM SQL WITH FUNCTION hstore_to_plperl(internal)); -- ok
+CREATE OR REPLACE TRANSFORM FOR hstore LANGUAGE plperl (TO SQL WITH FUNCTION plperl_to_hstore(internal)); -- ok
+
+COMMENT ON TRANSFORM FOR hstore LANGUAGE plperl IS 'test';
+
+DROP TRANSFORM IF EXISTS FOR fake_type LANGUAGE plperl;
+DROP TRANSFORM IF EXISTS FOR hstore LANGUAGE fake_lang;
+DROP TRANSFORM FOR foo LANGUAGE plperl;
+DROP TRANSFORM FOR hstore LANGUAGE foo;
+DROP TRANSFORM FOR hstore LANGUAGE plperl;
+DROP TRANSFORM IF EXISTS FOR hstore LANGUAGE plperl;
+
+DROP FUNCTION hstore_to_plperl(val internal);
+DROP FUNCTION plperl_to_hstore(val internal);
+
+CREATE EXTENSION hstore_plperl;
+\dx+ hstore_plperl
+ALTER EXTENSION hstore_plperl DROP TRANSFORM FOR hstore LANGUAGE plperl;
+\dx+ hstore_plperl
+ALTER EXTENSION hstore_plperl ADD TRANSFORM FOR hstore LANGUAGE plperl;
+\dx+ hstore_plperl
+
+DROP EXTENSION hstore CASCADE;
+DROP EXTENSION plperl CASCADE;
diff --git a/contrib/hstore_plperl/sql/hstore_plperl.sql b/contrib/hstore_plperl/sql/hstore_plperl.sql
new file mode 100644
index 0000000000..0f70f149f5
--- /dev/null
+++ b/contrib/hstore_plperl/sql/hstore_plperl.sql
@@ -0,0 +1,43 @@
+CREATE EXTENSION hstore;
+CREATE EXTENSION plperl;
+CREATE EXTENSION hstore_plperl;
+
+SELECT transforms.udt_schema, transforms.udt_name,
+ routine_schema, routine_name,
+ group_name, transform_type
+FROM information_schema.transforms JOIN information_schema.routines
+ USING (specific_catalog, specific_schema, specific_name)
+ORDER BY 1, 2, 5, 6;
+
+
+-- test perl -> hstore
+CREATE FUNCTION test2() RETURNS hstore
+LANGUAGE plperl
+TRANSFORM FOR TYPE hstore
+AS $$
+$val = {a => 1, b => 'boo', c => undef};
+return $val;
+$$;
+
+SELECT test2();
+
+
+-- test perl -> hstore[]
+CREATE FUNCTION test2arr() RETURNS hstore[]
+LANGUAGE plperl
+TRANSFORM FOR TYPE hstore
+AS $$
+$val = [{a => 1, b => 'boo', c => undef}, {d => 2}];
+return $val;
+$$;
+
+SELECT test2arr();
+
+
+DROP FUNCTION test2();
+DROP FUNCTION test2arr();
+
+
+DROP EXTENSION hstore_plperl;
+DROP EXTENSION hstore;
+DROP EXTENSION plperl;
diff --git a/contrib/hstore_plperl/sql/hstore_plperlu.sql b/contrib/hstore_plperl/sql/hstore_plperlu.sql
new file mode 100644
index 0000000000..3cfb2fdd77
--- /dev/null
+++ b/contrib/hstore_plperl/sql/hstore_plperlu.sql
@@ -0,0 +1,121 @@
+CREATE EXTENSION hstore;
+CREATE EXTENSION plperlu;
+CREATE EXTENSION hstore_plperlu;
+
+SELECT transforms.udt_schema, transforms.udt_name,
+ routine_schema, routine_name,
+ group_name, transform_type
+FROM information_schema.transforms JOIN information_schema.routines
+ USING (specific_catalog, specific_schema, specific_name)
+ORDER BY 1, 2, 5, 6;
+
+
+-- test hstore -> perl
+CREATE FUNCTION test1(val hstore) RETURNS int
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_[0]));
+return scalar(keys %{$_[0]});
+$$;
+
+SELECT test1('aa=>bb, cc=>NULL'::hstore);
+
+CREATE FUNCTION test1none(val hstore) RETURNS int
+LANGUAGE plperlu
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_[0]));
+return scalar(keys %{$_[0]});
+$$;
+
+SELECT test1none('aa=>bb, cc=>NULL'::hstore);
+
+CREATE FUNCTION test1list(val hstore) RETURNS int
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_[0]));
+return scalar(keys %{$_[0]});
+$$;
+
+SELECT test1list('aa=>bb, cc=>NULL'::hstore);
+
+
+-- test hstore[] -> perl
+CREATE FUNCTION test1arr(val hstore[]) RETURNS int
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_[0]->[0], $_[0]->[1]));
+return scalar(keys %{$_[0]});
+$$;
+
+SELECT test1arr(array['aa=>bb, cc=>NULL'::hstore, 'dd=>ee']);
+
+
+-- test as part of prepare/execute
+CREATE FUNCTION test3() RETURNS void
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+
+$rv = spi_exec_query(q{SELECT 'aa=>bb, cc=>NULL'::hstore AS col1});
+elog(INFO, Dumper($rv->{rows}[0]->{col1}));
+
+$val = {a => 1, b => 'boo', c => undef};
+$plan = spi_prepare(q{SELECT $1::text AS col1}, "hstore");
+$rv = spi_exec_prepared($plan, {}, $val);
+elog(INFO, Dumper($rv->{rows}[0]->{col1}));
+$$;
+
+SELECT test3();
+
+
+-- test trigger
+CREATE TABLE test1 (a int, b hstore);
+INSERT INTO test1 VALUES (1, 'aa=>bb, cc=>NULL');
+SELECT * FROM test1;
+
+CREATE FUNCTION test4() RETURNS trigger
+LANGUAGE plperlu
+TRANSFORM FOR TYPE hstore
+AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+elog(INFO, Dumper($_TD->{new}));
+if ($_TD->{new}{a} == 1) {
+ $_TD->{new}{b} = {a => 1, b => 'boo', c => undef};
+}
+
+return "MODIFY";
+$$;
+
+CREATE TRIGGER test4 BEFORE UPDATE ON test1 FOR EACH ROW EXECUTE PROCEDURE test4();
+
+UPDATE test1 SET a = a;
+SELECT * FROM test1;
+
+
+DROP TABLE test1;
+
+DROP FUNCTION test1(hstore);
+DROP FUNCTION test1none(hstore);
+DROP FUNCTION test1list(hstore);
+DROP FUNCTION test1arr(hstore[]);
+DROP FUNCTION test3();
+DROP FUNCTION test4();
+
+
+DROP EXTENSION hstore_plperlu;
+DROP EXTENSION hstore;
+DROP EXTENSION plperlu;
diff --git a/contrib/hstore_plpython/.gitignore b/contrib/hstore_plpython/.gitignore
new file mode 100644
index 0000000000..ce6fab94a0
--- /dev/null
+++ b/contrib/hstore_plpython/.gitignore
@@ -0,0 +1,6 @@
+# Generated subdirectories
+/expected/python3/
+/log/
+/results/
+/sql/python3/
+/tmp_check/
diff --git a/contrib/hstore_plpython/Makefile b/contrib/hstore_plpython/Makefile
new file mode 100644
index 0000000000..6ee434bafa
--- /dev/null
+++ b/contrib/hstore_plpython/Makefile
@@ -0,0 +1,37 @@
+# contrib/hstore_plpython/Makefile
+
+MODULE_big = hstore_plpython$(python_majorversion)
+OBJS = hstore_plpython.o $(WIN32RES)
+PGFILEDESC = "hstore_plpython - hstore transform for plpython"
+
+PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plpython $(python_includespec) -I$(top_srcdir)/contrib/hstore
+
+EXTENSION = hstore_plpythonu hstore_plpython2u hstore_plpython3u
+DATA = hstore_plpythonu--1.0.sql hstore_plpython2u--1.0.sql hstore_plpython3u--1.0.sql
+
+REGRESS = hstore_plpython
+REGRESS_PLPYTHON3_MANGLE := $(REGRESS)
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/hstore_plpython
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
+
+ifeq ($(PORTNAME), win32)
+# This means we need an in-tree build on Windows, not a pgxs build
+SHLIB_LINK += ../hstore/libhstore.a $(wildcard ../../src/pl/plpython/libpython*.a) $(wildcard ../../src/pl/plpython/libplpython*.a)
+endif
+
+REGRESS_OPTS += --load-extension=hstore
+ifeq ($(python_majorversion),2)
+REGRESS_OPTS += --load-extension=plpythonu --load-extension=hstore_plpythonu
+endif
+EXTRA_INSTALL += contrib/hstore
+
+include $(top_srcdir)/src/pl/plpython/regress-python3-mangle.mk
diff --git a/contrib/hstore_plpython/expected/hstore_plpython.out b/contrib/hstore_plpython/expected/hstore_plpython.out
new file mode 100644
index 0000000000..b7a6a92ac6
--- /dev/null
+++ b/contrib/hstore_plpython/expected/hstore_plpython.out
@@ -0,0 +1,128 @@
+CREATE EXTENSION plpython2u;
+CREATE EXTENSION hstore_plpython2u;
+-- test hstore -> python
+CREATE FUNCTION test1(val hstore) RETURNS int
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+assert isinstance(val, dict)
+i = list(val.items())
+i.sort()
+plpy.info(i)
+return len(val)
+$$;
+SELECT test1('aa=>bb, cc=>NULL'::hstore);
+INFO: [('aa', 'bb'), ('cc', None)]
+CONTEXT: PL/Python function "test1"
+ test1
+-------
+ 2
+(1 row)
+
+-- the same with the versioned language name
+CREATE FUNCTION test1n(val hstore) RETURNS int
+LANGUAGE plpython2u
+TRANSFORM FOR TYPE hstore
+AS $$
+assert isinstance(val, dict)
+i = list(val.items())
+i.sort()
+plpy.info(i)
+return len(val)
+$$;
+SELECT test1n('aa=>bb, cc=>NULL'::hstore);
+INFO: [('aa', 'bb'), ('cc', None)]
+CONTEXT: PL/Python function "test1n"
+ test1n
+--------
+ 2
+(1 row)
+
+-- test hstore[] -> python
+CREATE FUNCTION test1arr(val hstore[]) RETURNS int
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+assert(val == [{'aa': 'bb', 'cc': None}, {'dd': 'ee'}])
+return len(val)
+$$;
+SELECT test1arr(array['aa=>bb, cc=>NULL'::hstore, 'dd=>ee']);
+ test1arr
+----------
+ 2
+(1 row)
+
+-- test python -> hstore
+CREATE FUNCTION test2() RETURNS hstore
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+val = {'a': 1, 'b': 'boo', 'c': None}
+return val
+$$;
+SELECT test2();
+ test2
+---------------------------------
+ "a"=>"1", "b"=>"boo", "c"=>NULL
+(1 row)
+
+-- test python -> hstore[]
+CREATE FUNCTION test2arr() RETURNS hstore[]
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+val = [{'a': 1, 'b': 'boo', 'c': None}, {'d': 2}]
+return val
+$$;
+ SELECT test2arr();
+ test2arr
+--------------------------------------------------------------
+ {"\"a\"=>\"1\", \"b\"=>\"boo\", \"c\"=>NULL","\"d\"=>\"2\""}
+(1 row)
+
+-- test as part of prepare/execute
+CREATE FUNCTION test3() RETURNS void
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+rv = plpy.execute("SELECT 'aa=>bb, cc=>NULL'::hstore AS col1")
+assert(rv[0]["col1"] == {'aa': 'bb', 'cc': None})
+
+val = {'a': 1, 'b': 'boo', 'c': None}
+plan = plpy.prepare("SELECT $1::text AS col1", ["hstore"])
+rv = plpy.execute(plan, [val])
+assert(rv[0]["col1"] == '"a"=>"1", "b"=>"boo", "c"=>NULL')
+$$;
+SELECT test3();
+ test3
+-------
+
+(1 row)
+
+-- test trigger
+CREATE TABLE test1 (a int, b hstore);
+INSERT INTO test1 VALUES (1, 'aa=>bb, cc=>NULL');
+SELECT * FROM test1;
+ a | b
+---+------------------------
+ 1 | "aa"=>"bb", "cc"=>NULL
+(1 row)
+
+CREATE FUNCTION test4() RETURNS trigger
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+assert(TD["new"] == {'a': 1, 'b': {'aa': 'bb', 'cc': None}})
+if TD["new"]["a"] == 1:
+ TD["new"]["b"] = {'a': 1, 'b': 'boo', 'c': None}
+
+return "MODIFY"
+$$;
+CREATE TRIGGER test4 BEFORE UPDATE ON test1 FOR EACH ROW EXECUTE PROCEDURE test4();
+UPDATE test1 SET a = a;
+SELECT * FROM test1;
+ a | b
+---+---------------------------------
+ 1 | "a"=>"1", "b"=>"boo", "c"=>NULL
+(1 row)
+
diff --git a/contrib/hstore_plpython/hstore_plpython.c b/contrib/hstore_plpython/hstore_plpython.c
new file mode 100644
index 0000000000..a3316dd9eb
--- /dev/null
+++ b/contrib/hstore_plpython/hstore_plpython.c
@@ -0,0 +1,114 @@
+#include "postgres.h"
+#include "fmgr.h"
+#include "plpython.h"
+#include "plpy_typeio.h"
+#include "hstore.h"
+
+PG_MODULE_MAGIC;
+
+
+PG_FUNCTION_INFO_V1(hstore_to_plpython);
+
+Datum
+hstore_to_plpython(PG_FUNCTION_ARGS)
+{
+ HStore *in = PG_GETARG_HS(0);
+ int i;
+ int count = HS_COUNT(in);
+ char *base = STRPTR(in);
+ HEntry *entries = ARRPTR(in);
+ PyObject *dict;
+
+ dict = PyDict_New();
+
+ for (i = 0; i < count; i++)
+ {
+ PyObject *key;
+
+ key = PyString_FromStringAndSize(HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
+ if (HS_VALISNULL(entries, i))
+ PyDict_SetItem(dict, key, Py_None);
+ else
+ {
+ PyObject *value;
+
+ value = PyString_FromStringAndSize(HS_VAL(entries, base, i), HS_VALLEN(entries, i));
+ PyDict_SetItem(dict, key, value);
+ Py_XDECREF(value);
+ }
+ Py_XDECREF(key);
+ }
+
+ return PointerGetDatum(dict);
+}
+
+
+PG_FUNCTION_INFO_V1(plpython_to_hstore);
+
+Datum
+plpython_to_hstore(PG_FUNCTION_ARGS)
+{
+ PyObject *dict;
+ volatile PyObject *items_v = NULL;
+ int32 pcount;
+ HStore *out;
+
+ dict = (PyObject *) PG_GETARG_POINTER(0);
+ if (!PyMapping_Check(dict))
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("not a Python mapping")));
+
+ pcount = PyMapping_Size(dict);
+ items_v = PyMapping_Items(dict);
+
+ PG_TRY();
+ {
+ int32 buflen;
+ int32 i;
+ Pairs *pairs;
+ PyObject *items = (PyObject *) items_v;
+
+ pairs = palloc(pcount * sizeof(*pairs));
+
+ for (i = 0; i < pcount; i++)
+ {
+ PyObject *tuple;
+ PyObject *key;
+ PyObject *value;
+
+ tuple = PyList_GetItem(items, i);
+ key = PyTuple_GetItem(tuple, 0);
+ value = PyTuple_GetItem(tuple, 1);
+
+ pairs[i].key = PLyObject_AsString(key);
+ pairs[i].keylen = hstoreCheckKeyLen(strlen(pairs[i].key));
+ pairs[i].needfree = true;
+
+ if (value == Py_None)
+ {
+ pairs[i].val = NULL;
+ pairs[i].vallen = 0;
+ pairs[i].isnull = true;
+ }
+ else
+ {
+ pairs[i].val = PLyObject_AsString(value);
+ pairs[i].vallen = hstoreCheckValLen(strlen(pairs[i].val));
+ pairs[i].isnull = false;
+ }
+ }
+ Py_DECREF(items_v);
+
+ pcount = hstoreUniquePairs(pairs, pcount, &buflen);
+ out = hstorePairs(pairs, pcount, buflen);
+ }
+ PG_CATCH();
+ {
+ Py_DECREF(items_v);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ PG_RETURN_POINTER(out);
+}
diff --git a/contrib/hstore_plpython/hstore_plpython2u--1.0.sql b/contrib/hstore_plpython/hstore_plpython2u--1.0.sql
new file mode 100644
index 0000000000..c998de51c9
--- /dev/null
+++ b/contrib/hstore_plpython/hstore_plpython2u--1.0.sql
@@ -0,0 +1,19 @@
+-- make sure the prerequisite libraries are loaded
+DO '1' LANGUAGE plpython2u;
+SELECT NULL::hstore;
+
+
+CREATE FUNCTION hstore_to_plpython2(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME', 'hstore_to_plpython';
+
+CREATE FUNCTION plpython2_to_hstore(val internal) RETURNS hstore
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME', 'plpython_to_hstore';
+
+CREATE TRANSFORM FOR hstore LANGUAGE plpython2u (
+ FROM SQL WITH FUNCTION hstore_to_plpython2(internal),
+ TO SQL WITH FUNCTION plpython2_to_hstore(internal)
+);
+
+COMMENT ON TRANSFORM FOR hstore LANGUAGE plpython2u IS 'transform between hstore and Python dict';
diff --git a/contrib/hstore_plpython/hstore_plpython2u.control b/contrib/hstore_plpython/hstore_plpython2u.control
new file mode 100644
index 0000000000..ed90567112
--- /dev/null
+++ b/contrib/hstore_plpython/hstore_plpython2u.control
@@ -0,0 +1,6 @@
+# hstore_plpython2u extension
+comment = 'transform between hstore and plpython2u'
+default_version = '1.0'
+module_pathname = '$libdir/hstore_plpython2'
+relocatable = true
+requires = 'hstore,plpython2u'
diff --git a/contrib/hstore_plpython/hstore_plpython3u--1.0.sql b/contrib/hstore_plpython/hstore_plpython3u--1.0.sql
new file mode 100644
index 0000000000..61d0e47793
--- /dev/null
+++ b/contrib/hstore_plpython/hstore_plpython3u--1.0.sql
@@ -0,0 +1,19 @@
+-- make sure the prerequisite libraries are loaded
+DO '1' LANGUAGE plpython3u;
+SELECT NULL::hstore;
+
+
+CREATE FUNCTION hstore_to_plpython3(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME', 'hstore_to_plpython';
+
+CREATE FUNCTION plpython3_to_hstore(val internal) RETURNS hstore
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME', 'plpython_to_hstore';
+
+CREATE TRANSFORM FOR hstore LANGUAGE plpython3u (
+ FROM SQL WITH FUNCTION hstore_to_plpython3(internal),
+ TO SQL WITH FUNCTION plpython3_to_hstore(internal)
+);
+
+COMMENT ON TRANSFORM FOR hstore LANGUAGE plpython3u IS 'transform between hstore and Python dict';
diff --git a/contrib/hstore_plpython/hstore_plpython3u.control b/contrib/hstore_plpython/hstore_plpython3u.control
new file mode 100644
index 0000000000..d86f38e9a7
--- /dev/null
+++ b/contrib/hstore_plpython/hstore_plpython3u.control
@@ -0,0 +1,6 @@
+# hstore_plpython3u extension
+comment = 'transform between hstore and plpython3u'
+default_version = '1.0'
+module_pathname = '$libdir/hstore_plpython3'
+relocatable = true
+requires = 'hstore,plpython3u'
diff --git a/contrib/hstore_plpython/hstore_plpythonu--1.0.sql b/contrib/hstore_plpython/hstore_plpythonu--1.0.sql
new file mode 100644
index 0000000000..6acb97aab9
--- /dev/null
+++ b/contrib/hstore_plpython/hstore_plpythonu--1.0.sql
@@ -0,0 +1,19 @@
+-- make sure the prerequisite libraries are loaded
+DO '1' LANGUAGE plpythonu;
+SELECT NULL::hstore;
+
+
+CREATE FUNCTION hstore_to_plpython(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME';
+
+CREATE FUNCTION plpython_to_hstore(val internal) RETURNS hstore
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME';
+
+CREATE TRANSFORM FOR hstore LANGUAGE plpythonu (
+ FROM SQL WITH FUNCTION hstore_to_plpython(internal),
+ TO SQL WITH FUNCTION plpython_to_hstore(internal)
+);
+
+COMMENT ON TRANSFORM FOR hstore LANGUAGE plpythonu IS 'transform between hstore and Python dict';
diff --git a/contrib/hstore_plpython/hstore_plpythonu.control b/contrib/hstore_plpython/hstore_plpythonu.control
new file mode 100644
index 0000000000..8e9b35e43b
--- /dev/null
+++ b/contrib/hstore_plpython/hstore_plpythonu.control
@@ -0,0 +1,6 @@
+# hstore_plpythonu extension
+comment = 'transform between hstore and plpythonu'
+default_version = '1.0'
+module_pathname = '$libdir/hstore_plpython2'
+relocatable = true
+requires = 'hstore,plpythonu'
diff --git a/contrib/hstore_plpython/sql/hstore_plpython.sql b/contrib/hstore_plpython/sql/hstore_plpython.sql
new file mode 100644
index 0000000000..9ff2ebcd83
--- /dev/null
+++ b/contrib/hstore_plpython/sql/hstore_plpython.sql
@@ -0,0 +1,107 @@
+CREATE EXTENSION plpython2u;
+CREATE EXTENSION hstore_plpython2u;
+
+
+-- test hstore -> python
+CREATE FUNCTION test1(val hstore) RETURNS int
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+assert isinstance(val, dict)
+i = list(val.items())
+i.sort()
+plpy.info(i)
+return len(val)
+$$;
+
+SELECT test1('aa=>bb, cc=>NULL'::hstore);
+
+
+-- the same with the versioned language name
+CREATE FUNCTION test1n(val hstore) RETURNS int
+LANGUAGE plpython2u
+TRANSFORM FOR TYPE hstore
+AS $$
+assert isinstance(val, dict)
+i = list(val.items())
+i.sort()
+plpy.info(i)
+return len(val)
+$$;
+
+SELECT test1n('aa=>bb, cc=>NULL'::hstore);
+
+
+-- test hstore[] -> python
+CREATE FUNCTION test1arr(val hstore[]) RETURNS int
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+assert(val == [{'aa': 'bb', 'cc': None}, {'dd': 'ee'}])
+return len(val)
+$$;
+
+SELECT test1arr(array['aa=>bb, cc=>NULL'::hstore, 'dd=>ee']);
+
+
+-- test python -> hstore
+CREATE FUNCTION test2() RETURNS hstore
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+val = {'a': 1, 'b': 'boo', 'c': None}
+return val
+$$;
+
+SELECT test2();
+
+
+-- test python -> hstore[]
+CREATE FUNCTION test2arr() RETURNS hstore[]
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+val = [{'a': 1, 'b': 'boo', 'c': None}, {'d': 2}]
+return val
+$$;
+
+ SELECT test2arr();
+
+
+-- test as part of prepare/execute
+CREATE FUNCTION test3() RETURNS void
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+rv = plpy.execute("SELECT 'aa=>bb, cc=>NULL'::hstore AS col1")
+assert(rv[0]["col1"] == {'aa': 'bb', 'cc': None})
+
+val = {'a': 1, 'b': 'boo', 'c': None}
+plan = plpy.prepare("SELECT $1::text AS col1", ["hstore"])
+rv = plpy.execute(plan, [val])
+assert(rv[0]["col1"] == '"a"=>"1", "b"=>"boo", "c"=>NULL')
+$$;
+
+SELECT test3();
+
+
+-- test trigger
+CREATE TABLE test1 (a int, b hstore);
+INSERT INTO test1 VALUES (1, 'aa=>bb, cc=>NULL');
+SELECT * FROM test1;
+
+CREATE FUNCTION test4() RETURNS trigger
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE hstore
+AS $$
+assert(TD["new"] == {'a': 1, 'b': {'aa': 'bb', 'cc': None}})
+if TD["new"]["a"] == 1:
+ TD["new"]["b"] = {'a': 1, 'b': 'boo', 'c': None}
+
+return "MODIFY"
+$$;
+
+CREATE TRIGGER test4 BEFORE UPDATE ON test1 FOR EACH ROW EXECUTE PROCEDURE test4();
+
+UPDATE test1 SET a = a;
+SELECT * FROM test1;
diff --git a/contrib/intarray/_int_gin.c b/contrib/intarray/_int_gin.c
index 58352cac80..fb16b66edb 100644
--- a/contrib/intarray/_int_gin.c
+++ b/contrib/intarray/_int_gin.c
@@ -4,8 +4,7 @@
#include "postgres.h"
#include "access/gin.h"
-#include "access/gist.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "_int.h"
diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c
index 07108eb15e..888c277e60 100644
--- a/contrib/intarray/_int_gist.c
+++ b/contrib/intarray/_int_gist.c
@@ -6,7 +6,7 @@
#include <limits.h>
#include "access/gist.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "_int.h"
diff --git a/contrib/intarray/_intbig_gist.c b/contrib/intarray/_intbig_gist.c
index 235db38957..6dae7c91c1 100644
--- a/contrib/intarray/_intbig_gist.c
+++ b/contrib/intarray/_intbig_gist.c
@@ -4,7 +4,7 @@
#include "postgres.h"
#include "access/gist.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "_int.h"
diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c
index 5fbd253491..40398245f6 100644
--- a/contrib/isn/isn.c
+++ b/contrib/isn/isn.c
@@ -511,7 +511,7 @@ str2ean(const char *num)
}
/*
- * ean2string --- Try to convert an ean13 number to an hyphenated string.
+ * ean2string --- Try to convert an ean13 number to a hyphenated string.
* Assumes there's enough space in result to hold
* the string (maximum MAXEAN13LEN+1 bytes)
* This doesn't verify for a valid check digit.
diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c
index 41be68d7ee..37cd991694 100644
--- a/contrib/ltree/_ltree_gist.c
+++ b/contrib/ltree/_ltree_gist.c
@@ -8,7 +8,7 @@
#include "postgres.h"
#include "access/gist.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "crc32.h"
#include "ltree.h"
diff --git a/contrib/ltree/crc32.c b/contrib/ltree/crc32.c
index 1c08d264f7..403dae0d7d 100644
--- a/contrib/ltree/crc32.c
+++ b/contrib/ltree/crc32.c
@@ -26,13 +26,14 @@
unsigned int
ltree_crc32_sz(char *buf, int size)
{
- pg_crc32 crc;
+ pg_crc32 crc;
char *p = buf;
INIT_TRADITIONAL_CRC32(crc);
while (size > 0)
{
- char c = (char) TOLOWER(*p);
+ char c = (char) TOLOWER(*p);
+
COMP_TRADITIONAL_CRC32(crc, &c, 1);
size--;
p++;
diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c
index 2d89f1aed4..83da62018e 100644
--- a/contrib/ltree/ltree_gist.c
+++ b/contrib/ltree/ltree_gist.c
@@ -6,7 +6,7 @@
#include "postgres.h"
#include "access/gist.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
#include "crc32.h"
#include "ltree.h"
diff --git a/contrib/ltree_plpython/.gitignore b/contrib/ltree_plpython/.gitignore
new file mode 100644
index 0000000000..ce6fab94a0
--- /dev/null
+++ b/contrib/ltree_plpython/.gitignore
@@ -0,0 +1,6 @@
+# Generated subdirectories
+/expected/python3/
+/log/
+/results/
+/sql/python3/
+/tmp_check/
diff --git a/contrib/ltree_plpython/Makefile b/contrib/ltree_plpython/Makefile
new file mode 100644
index 0000000000..64ca1275f1
--- /dev/null
+++ b/contrib/ltree_plpython/Makefile
@@ -0,0 +1,37 @@
+# contrib/ltree_plpython/Makefile
+
+MODULE_big = ltree_plpython$(python_majorversion)
+OBJS = ltree_plpython.o $(WIN32RES)
+PGFILEDESC = "ltree_plpython - ltree transform for plpython"
+
+PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plpython $(python_includespec) -I$(top_srcdir)/contrib/ltree
+
+EXTENSION = ltree_plpythonu ltree_plpython2u ltree_plpython3u
+DATA = ltree_plpythonu--1.0.sql ltree_plpython2u--1.0.sql ltree_plpython3u--1.0.sql
+
+REGRESS = ltree_plpython
+REGRESS_PLPYTHON3_MANGLE := $(REGRESS)
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/ltree_plpython
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
+
+ifeq ($(PORTNAME), win32)
+# This means we need an in-tree build on Windows, not a pgxs build
+SHLIB_LINK += $(wildcard ../../src/pl/plpython/libpython*.a)
+endif
+
+REGRESS_OPTS += --load-extension=ltree
+ifeq ($(python_majorversion),2)
+REGRESS_OPTS += --load-extension=plpythonu --load-extension=ltree_plpythonu
+endif
+EXTRA_INSTALL += contrib/ltree
+
+include $(top_srcdir)/src/pl/plpython/regress-python3-mangle.mk
diff --git a/contrib/ltree_plpython/expected/ltree_plpython.out b/contrib/ltree_plpython/expected/ltree_plpython.out
new file mode 100644
index 0000000000..934529ee0f
--- /dev/null
+++ b/contrib/ltree_plpython/expected/ltree_plpython.out
@@ -0,0 +1,45 @@
+CREATE EXTENSION plpython2u;
+CREATE EXTENSION ltree_plpython2u;
+CREATE FUNCTION test1(val ltree) RETURNS int
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE ltree
+AS $$
+plpy.info(repr(val))
+return len(val)
+$$;
+SELECT test1('aa.bb.cc'::ltree);
+INFO: ['aa', 'bb', 'cc']
+CONTEXT: PL/Python function "test1"
+ test1
+-------
+ 3
+(1 row)
+
+CREATE FUNCTION test1n(val ltree) RETURNS int
+LANGUAGE plpython2u
+TRANSFORM FOR TYPE ltree
+AS $$
+plpy.info(repr(val))
+return len(val)
+$$;
+SELECT test1n('aa.bb.cc'::ltree);
+INFO: ['aa', 'bb', 'cc']
+CONTEXT: PL/Python function "test1n"
+ test1n
+--------
+ 3
+(1 row)
+
+CREATE FUNCTION test2() RETURNS ltree
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE ltree
+AS $$
+return ['foo', 'bar', 'baz']
+$$;
+-- plpython to ltree is not yet implemented, so this will fail,
+-- because it will try to parse the Python list as an ltree input
+-- string.
+SELECT test2();
+ERROR: syntax error at position 0
+CONTEXT: while creating return value
+PL/Python function "test2"
diff --git a/contrib/ltree_plpython/ltree_plpython.c b/contrib/ltree_plpython/ltree_plpython.c
new file mode 100644
index 0000000000..26b7b3c275
--- /dev/null
+++ b/contrib/ltree_plpython/ltree_plpython.c
@@ -0,0 +1,31 @@
+#include "postgres.h"
+#include "fmgr.h"
+#include "plpython.h"
+#include "ltree.h"
+
+PG_MODULE_MAGIC;
+
+
+PG_FUNCTION_INFO_V1(ltree_to_plpython);
+
+Datum
+ltree_to_plpython(PG_FUNCTION_ARGS)
+{
+ ltree *in = PG_GETARG_LTREE(0);
+ int i;
+ PyObject *list;
+ ltree_level *curlevel;
+
+ list = PyList_New(in->numlevel);
+
+ curlevel = LTREE_FIRST(in);
+ for (i = 0; i < in->numlevel; i++)
+ {
+ PyList_SetItem(list, i, PyString_FromStringAndSize(curlevel->name, curlevel->len));
+ curlevel = LEVEL_NEXT(curlevel);
+ }
+
+ PG_FREE_IF_COPY(in, 0);
+
+ return PointerGetDatum(list);
+}
diff --git a/contrib/ltree_plpython/ltree_plpython2u--1.0.sql b/contrib/ltree_plpython/ltree_plpython2u--1.0.sql
new file mode 100644
index 0000000000..29a12d45a2
--- /dev/null
+++ b/contrib/ltree_plpython/ltree_plpython2u--1.0.sql
@@ -0,0 +1,12 @@
+-- make sure the prerequisite libraries are loaded
+DO '1' LANGUAGE plpython2u;
+SELECT NULL::ltree;
+
+
+CREATE FUNCTION ltree_to_plpython2(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME', 'ltree_to_plpython';
+
+CREATE TRANSFORM FOR ltree LANGUAGE plpython2u (
+ FROM SQL WITH FUNCTION ltree_to_plpython2(internal)
+);
diff --git a/contrib/ltree_plpython/ltree_plpython2u.control b/contrib/ltree_plpython/ltree_plpython2u.control
new file mode 100644
index 0000000000..bedfd0acba
--- /dev/null
+++ b/contrib/ltree_plpython/ltree_plpython2u.control
@@ -0,0 +1,6 @@
+# ltree_plpython2u extension
+comment = 'transform between ltree and plpython2u'
+default_version = '1.0'
+module_pathname = '$libdir/ltree_plpython2'
+relocatable = true
+requires = 'ltree,plpython2u'
diff --git a/contrib/ltree_plpython/ltree_plpython3u--1.0.sql b/contrib/ltree_plpython/ltree_plpython3u--1.0.sql
new file mode 100644
index 0000000000..1300a78c66
--- /dev/null
+++ b/contrib/ltree_plpython/ltree_plpython3u--1.0.sql
@@ -0,0 +1,12 @@
+-- make sure the prerequisite libraries are loaded
+DO '1' LANGUAGE plpython3u;
+SELECT NULL::ltree;
+
+
+CREATE FUNCTION ltree_to_plpython3(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME', 'ltree_to_plpython';
+
+CREATE TRANSFORM FOR ltree LANGUAGE plpython3u (
+ FROM SQL WITH FUNCTION ltree_to_plpython3(internal)
+);
diff --git a/contrib/ltree_plpython/ltree_plpython3u.control b/contrib/ltree_plpython/ltree_plpython3u.control
new file mode 100644
index 0000000000..96c9764331
--- /dev/null
+++ b/contrib/ltree_plpython/ltree_plpython3u.control
@@ -0,0 +1,6 @@
+# ltree_plpython3u extension
+comment = 'transform between ltree and plpython3u'
+default_version = '1.0'
+module_pathname = '$libdir/ltree_plpython3'
+relocatable = true
+requires = 'ltree,plpython3u'
diff --git a/contrib/ltree_plpython/ltree_plpythonu--1.0.sql b/contrib/ltree_plpython/ltree_plpythonu--1.0.sql
new file mode 100644
index 0000000000..1d1af28f4e
--- /dev/null
+++ b/contrib/ltree_plpython/ltree_plpythonu--1.0.sql
@@ -0,0 +1,12 @@
+-- make sure the prerequisite libraries are loaded
+DO '1' LANGUAGE plpythonu;
+SELECT NULL::ltree;
+
+
+CREATE FUNCTION ltree_to_plpython(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS 'MODULE_PATHNAME';
+
+CREATE TRANSFORM FOR ltree LANGUAGE plpythonu (
+ FROM SQL WITH FUNCTION ltree_to_plpython(internal)
+);
diff --git a/contrib/ltree_plpython/ltree_plpythonu.control b/contrib/ltree_plpython/ltree_plpythonu.control
new file mode 100644
index 0000000000..b03c89a2e6
--- /dev/null
+++ b/contrib/ltree_plpython/ltree_plpythonu.control
@@ -0,0 +1,6 @@
+# ltree_plpythonu extension
+comment = 'transform between ltree and plpythonu'
+default_version = '1.0'
+module_pathname = '$libdir/ltree_plpython2'
+relocatable = true
+requires = 'ltree,plpythonu'
diff --git a/contrib/ltree_plpython/sql/ltree_plpython.sql b/contrib/ltree_plpython/sql/ltree_plpython.sql
new file mode 100644
index 0000000000..f08ff6a3f0
--- /dev/null
+++ b/contrib/ltree_plpython/sql/ltree_plpython.sql
@@ -0,0 +1,37 @@
+CREATE EXTENSION plpython2u;
+CREATE EXTENSION ltree_plpython2u;
+
+
+CREATE FUNCTION test1(val ltree) RETURNS int
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE ltree
+AS $$
+plpy.info(repr(val))
+return len(val)
+$$;
+
+SELECT test1('aa.bb.cc'::ltree);
+
+
+CREATE FUNCTION test1n(val ltree) RETURNS int
+LANGUAGE plpython2u
+TRANSFORM FOR TYPE ltree
+AS $$
+plpy.info(repr(val))
+return len(val)
+$$;
+
+SELECT test1n('aa.bb.cc'::ltree);
+
+
+CREATE FUNCTION test2() RETURNS ltree
+LANGUAGE plpythonu
+TRANSFORM FOR TYPE ltree
+AS $$
+return ['foo', 'bar', 'baz']
+$$;
+
+-- plpython to ltree is not yet implemented, so this will fail,
+-- because it will try to parse the Python list as an ltree input
+-- string.
+SELECT test2();
diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c
index 1b15a7bdfe..7adcfa8937 100644
--- a/contrib/pageinspect/brinfuncs.c
+++ b/contrib/pageinspect/brinfuncs.c
@@ -58,7 +58,7 @@ brin_page_type(PG_FUNCTION_ARGS)
{
bytea *raw_page = PG_GETARG_BYTEA_P(0);
Page page = VARDATA(raw_page);
- char *type;
+ char *type;
switch (BrinPageType(page))
{
@@ -86,8 +86,8 @@ brin_page_type(PG_FUNCTION_ARGS)
static Page
verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
{
- Page page;
- int raw_page_size;
+ Page page;
+ int raw_page_size;
raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
@@ -95,7 +95,7 @@ verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("input page too small"),
- errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
+ errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
page = VARDATA(raw_page);
@@ -153,7 +153,7 @@ brin_page_items(PG_FUNCTION_ARGS)
indexRel = index_open(indexRelid, AccessShareLock);
state = palloc(offsetof(brin_page_state, columns) +
- sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
+ sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
state->bdesc = brin_build_desc(indexRel);
state->page = page;
@@ -168,10 +168,10 @@ brin_page_items(PG_FUNCTION_ARGS)
*/
for (attno = 1; attno <= state->bdesc->bd_tupdesc->natts; attno++)
{
- Oid output;
- bool isVarlena;
+ Oid output;
+ bool isVarlena;
BrinOpcInfo *opcinfo;
- int i;
+ int i;
brin_column_state *column;
opcinfo = state->bdesc->bd_info[attno - 1];
@@ -181,7 +181,7 @@ brin_page_items(PG_FUNCTION_ARGS)
column->nstored = opcinfo->oi_nstored;
for (i = 0; i < opcinfo->oi_nstored; i++)
{
- getTypeOutputInfo(opcinfo->oi_typids[i], &output, &isVarlena);
+ getTypeOutputInfo(opcinfo->oi_typcache[i]->type_id, &output, &isVarlena);
fmgr_info(output, &column->outputFn[i]);
}
@@ -213,7 +213,7 @@ brin_page_items(PG_FUNCTION_ARGS)
*/
if (state->dtup == NULL)
{
- BrinTuple *tup;
+ BrinTuple *tup;
MemoryContext mctx;
ItemId itemId;
@@ -225,8 +225,8 @@ brin_page_items(PG_FUNCTION_ARGS)
if (ItemIdIsUsed(itemId))
{
tup = (BrinTuple *) PageGetItem(state->page,
- PageGetItemId(state->page,
- state->offset));
+ PageGetItemId(state->page,
+ state->offset));
state->dtup = brin_deform_tuple(state->bdesc, tup);
state->attno = 1;
state->unusedItem = false;
@@ -253,7 +253,7 @@ brin_page_items(PG_FUNCTION_ARGS)
}
else
{
- int att = state->attno - 1;
+ int att = state->attno - 1;
values[0] = UInt16GetDatum(state->offset);
values[1] = UInt32GetDatum(state->dtup->bt_blkno);
@@ -263,8 +263,8 @@ brin_page_items(PG_FUNCTION_ARGS)
values[5] = BoolGetDatum(state->dtup->bt_placeholder);
if (!state->dtup->bt_columns[att].bv_allnulls)
{
- BrinValues *bvalues = &state->dtup->bt_columns[att];
- StringInfoData s;
+ BrinValues *bvalues = &state->dtup->bt_columns[att];
+ StringInfoData s;
bool first;
int i;
@@ -274,7 +274,7 @@ brin_page_items(PG_FUNCTION_ARGS)
first = true;
for (i = 0; i < state->columns[att]->nstored; i++)
{
- char *val;
+ char *val;
if (!first)
appendStringInfoString(&s, " .. ");
@@ -312,8 +312,8 @@ brin_page_items(PG_FUNCTION_ARGS)
}
/*
- * If we're beyond the end of the page, set flag to end the function in
- * the following iteration.
+ * If we're beyond the end of the page, set flag to end the function
+ * in the following iteration.
*/
if (state->offset > PageGetMaxOffsetNumber(state->page))
state->done = true;
@@ -366,8 +366,8 @@ brin_revmap_data(PG_FUNCTION_ARGS)
struct
{
ItemPointerData *tids;
- int idx;
- } *state;
+ int idx;
+ } *state;
FuncCallContext *fctx;
if (!superuser())
diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c
index 701b2ca763..c0de3be8df 100644
--- a/contrib/pageinspect/ginfuncs.c
+++ b/contrib/pageinspect/ginfuncs.c
@@ -167,7 +167,7 @@ typedef struct gin_leafpage_items_state
TupleDesc tupd;
GinPostingList *seg;
GinPostingList *lastseg;
-} gin_leafpage_items_state;
+} gin_leafpage_items_state;
Datum
gin_leafpage_items(PG_FUNCTION_ARGS)
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index 98016fc365..6622d22f5f 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -34,6 +34,7 @@ typedef struct
bool isvalid;
bool isdirty;
uint16 usagecount;
+
/*
* An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
* being pinned by too many backends and each backend will only pin once
@@ -142,7 +143,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
LWLockAcquire(BufMappingPartitionLockByIndex(i), LW_SHARED);
/*
- * Scan though all the buffers, saving the relevant fields in the
+ * Scan through all the buffers, saving the relevant fields in the
* fctx->record structure.
*/
for (i = 0; i < NBuffers; i++)
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 2963c58998..170b6f42c6 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -138,10 +138,10 @@ typedef struct Counters
{
int64 calls; /* # of times executed */
double total_time; /* total execution time, in msec */
- double min_time; /* minimim execution time in msec */
- double max_time; /* maximum execution time in msec */
- double mean_time; /* mean execution time in msec */
- double sum_var_time; /* sum of variances in execution time in msec */
+ double min_time; /* minimim execution time in msec */
+ double max_time; /* maximum execution time in msec */
+ double mean_time; /* mean execution time in msec */
+ double sum_var_time; /* sum of variances in execution time in msec */
int64 rows; /* total # of retrieved or affected rows */
int64 shared_blks_hit; /* # of shared buffer hits */
int64 shared_blks_read; /* # of shared disk blocks read */
@@ -1255,10 +1255,10 @@ pgss_store(const char *query, uint32 queryId,
else
{
/*
- * Welford's method for accurately computing variance.
- * See <http://www.johndcook.com/blog/standard_deviation/>
+ * Welford's method for accurately computing variance. See
+ * <http://www.johndcook.com/blog/standard_deviation/>
*/
- double old_mean = e->counters.mean_time;
+ double old_mean = e->counters.mean_time;
e->counters.mean_time +=
(total_time - old_mean) / e->counters.calls;
@@ -1596,10 +1596,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
values[i++] = Float8GetDatumFast(tmp.min_time);
values[i++] = Float8GetDatumFast(tmp.max_time);
values[i++] = Float8GetDatumFast(tmp.mean_time);
+
/*
* Note we are calculating the population variance here, not the
- * sample variance, as we have data for the whole population,
- * so Bessel's correction is not used, and we don't divide by
+ * sample variance, as we have data for the whole population, so
+ * Bessel's correction is not used, and we don't divide by
* tmp.calls - 1.
*/
if (tmp.calls > 1)
@@ -2288,8 +2289,10 @@ JumbleQuery(pgssJumbleState *jstate, Query *query)
JumbleRangeTable(jstate, query->rtable);
JumbleExpr(jstate, (Node *) query->jointree);
JumbleExpr(jstate, (Node *) query->targetList);
+ JumbleExpr(jstate, (Node *) query->onConflict);
JumbleExpr(jstate, (Node *) query->returningList);
JumbleExpr(jstate, (Node *) query->groupClause);
+ JumbleExpr(jstate, (Node *) query->groupingSets);
JumbleExpr(jstate, query->havingQual);
JumbleExpr(jstate, (Node *) query->windowClause);
JumbleExpr(jstate, (Node *) query->distinctClause);
@@ -2420,6 +2423,13 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
JumbleExpr(jstate, (Node *) expr->aggfilter);
}
break;
+ case T_GroupingFunc:
+ {
+ GroupingFunc *grpnode = (GroupingFunc *) node;
+
+ JumbleExpr(jstate, (Node *) grpnode->refs);
+ }
+ break;
case T_WindowFunc:
{
WindowFunc *expr = (WindowFunc *) node;
@@ -2655,6 +2665,15 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
APP_JUMB(ce->cursor_param);
}
break;
+ case T_InferenceElem:
+ {
+ InferenceElem *ie = (InferenceElem *) node;
+
+ APP_JUMB(ie->infercollid);
+ APP_JUMB(ie->inferopclass);
+ JumbleExpr(jstate, ie->expr);
+ }
+ break;
case T_TargetEntry:
{
TargetEntry *tle = (TargetEntry *) node;
@@ -2691,12 +2710,32 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
JumbleExpr(jstate, from->quals);
}
break;
+ case T_OnConflictExpr:
+ {
+ OnConflictExpr *conf = (OnConflictExpr *) node;
+
+ APP_JUMB(conf->action);
+ JumbleExpr(jstate, (Node *) conf->arbiterElems);
+ JumbleExpr(jstate, conf->arbiterWhere);
+ JumbleExpr(jstate, (Node *) conf->onConflictSet);
+ JumbleExpr(jstate, conf->onConflictWhere);
+ APP_JUMB(conf->constraint);
+ APP_JUMB(conf->exclRelIndex);
+ JumbleExpr(jstate, (Node *) conf->exclRelTlist);
+ }
+ break;
case T_List:
foreach(temp, (List *) node)
{
JumbleExpr(jstate, (Node *) lfirst(temp));
}
break;
+ case T_IntList:
+ foreach(temp, (List *) node)
+ {
+ APP_JUMB(lfirst_int(temp));
+ }
+ break;
case T_SortGroupClause:
{
SortGroupClause *sgc = (SortGroupClause *) node;
@@ -2707,6 +2746,13 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
APP_JUMB(sgc->nulls_first);
}
break;
+ case T_GroupingSet:
+ {
+ GroupingSet *gsnode = (GroupingSet *) node;
+
+ JumbleExpr(jstate, (Node *) gsnode->content);
+ }
+ break;
case T_WindowClause:
{
WindowClause *wc = (WindowClause *) node;
diff --git a/contrib/pg_test_fsync/Makefile b/contrib/pg_test_fsync/Makefile
deleted file mode 100644
index 15afba7682..0000000000
--- a/contrib/pg_test_fsync/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# contrib/pg_test_fsync/Makefile
-
-PGFILEDESC = "pg_test_fsync - test various disk sync methods"
-PGAPPICON = win32
-
-PROGRAM = pg_test_fsync
-OBJS = pg_test_fsync.o $(WIN32RES)
-
-ifdef USE_PGXS
-PG_CONFIG = pg_config
-PGXS := $(shell $(PG_CONFIG) --pgxs)
-include $(PGXS)
-else
-subdir = contrib/pg_test_fsync
-top_builddir = ../..
-include $(top_builddir)/src/Makefile.global
-include $(top_srcdir)/contrib/contrib-global.mk
-endif
diff --git a/contrib/pg_test_timing/Makefile b/contrib/pg_test_timing/Makefile
deleted file mode 100644
index 8b37aa8249..0000000000
--- a/contrib/pg_test_timing/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# contrib/pg_test_timing/Makefile
-
-PGFILEDESC = "pg_test_timing - test timing overhead"
-PGAPPICON = win32
-
-PROGRAM = pg_test_timing
-OBJS = pg_test_timing.o $(WIN32RES)
-
-ifdef USE_PGXS
-PG_CONFIG = pg_config
-PGXS := $(shell $(PG_CONFIG) --pgxs)
-include $(PGXS)
-else
-subdir = contrib/pg_test_timing
-top_builddir = ../..
-include $(top_builddir)/src/Makefile.global
-include $(top_srcdir)/contrib/contrib-global.mk
-endif
diff --git a/contrib/pg_trgm/trgm_gin.c b/contrib/pg_trgm/trgm_gin.c
index c59925c575..d524ceaa19 100644
--- a/contrib/pg_trgm/trgm_gin.c
+++ b/contrib/pg_trgm/trgm_gin.c
@@ -6,7 +6,8 @@
#include "trgm.h"
#include "access/gin.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
+#include "fmgr.h"
PG_FUNCTION_INFO_V1(gin_extract_trgm);
diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c
index 69dc7f71f0..07d1dc308b 100644
--- a/contrib/pg_trgm/trgm_gist.c
+++ b/contrib/pg_trgm/trgm_gist.c
@@ -5,7 +5,8 @@
#include "trgm.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
+#include "fmgr.h"
typedef struct
diff --git a/contrib/pgcrypto/expected/pgp-decrypt.out b/contrib/pgcrypto/expected/pgp-decrypt.out
index 7193dca026..2dabfaf7b0 100644
--- a/contrib/pgcrypto/expected/pgp-decrypt.out
+++ b/contrib/pgcrypto/expected/pgp-decrypt.out
@@ -372,3 +372,54 @@ select pgp_sym_decrypt(pgp_sym_encrypt(repeat('x',65530),'1'),'1') = repeat('x',
(1 row)
-- expected: true
+-- Negative tests
+-- Decryption with a certain incorrect key yields an apparent Literal Data
+-- packet reporting its content to be binary data. Ciphertext source:
+-- iterative pgp_sym_encrypt('secret', 'key') until the random prefix gave
+-- rise to that property.
+select pgp_sym_decrypt(dearmor('
+-----BEGIN PGP MESSAGE-----
+
+ww0EBwMCxf8PTrQBmJdl0jcB6y2joE7GSLKRv7trbNsF5Z8ou5NISLUg31llVH/S0B2wl4bvzZjV
+VsxxqLSPzNLAeIspJk5G
+=mSd/
+-----END PGP MESSAGE-----
+'), 'wrong-key', 'debug=1');
+NOTICE: dbg: prefix_init: corrupt prefix
+NOTICE: dbg: parse_literal_data: data type=b
+NOTICE: dbg: mdcbuf_finish: bad MDC pkt hdr
+ERROR: Wrong key or corrupt data
+-- Routine text/binary mismatch.
+select pgp_sym_decrypt(pgp_sym_encrypt_bytea('P', 'key'), 'key', 'debug=1');
+NOTICE: dbg: parse_literal_data: data type=b
+ERROR: Not text data
+-- Decryption with a certain incorrect key yields an apparent BZip2-compressed
+-- plaintext. Ciphertext source: iterative pgp_sym_encrypt('secret', 'key')
+-- until the random prefix gave rise to that property.
+select pgp_sym_decrypt(dearmor('
+-----BEGIN PGP MESSAGE-----
+
+ww0EBwMC9rK/dMkF5Zlt0jcBlzAQ1mQY2qYbKYbw8h3EZ5Jk0K2IiY92R82TRhWzBIF/8cmXDPtP
+GXsd65oYJZp3Khz0qfyn
+=Nmpq
+-----END PGP MESSAGE-----
+'), 'wrong-key', 'debug=1');
+NOTICE: dbg: prefix_init: corrupt prefix
+NOTICE: dbg: parse_compressed_data: bzip2 unsupported
+NOTICE: dbg: mdcbuf_finish: bad MDC pkt hdr
+ERROR: Wrong key or corrupt data
+-- Routine use of BZip2 compression. Ciphertext source:
+-- echo x | gpg --homedir /nonexistent --personal-compress-preferences bzip2 \
+-- --personal-cipher-preferences aes --no-emit-version --batch \
+-- --symmetric --passphrase key --armor
+select pgp_sym_decrypt(dearmor('
+-----BEGIN PGP MESSAGE-----
+
+jA0EBwMCRhFrAKNcLVJg0mMBLJG1cCASNk/x/3dt1zJ+2eo7jHfjgg3N6wpB3XIe
+QCwkWJwlBG5pzbO5gu7xuPQN+TbPJ7aQ2sLx3bAHhtYb0i3vV9RO10Gw++yUyd4R
+UCAAw2JRIISttRHMfDpDuZJpvYo=
+=AZ9M
+-----END PGP MESSAGE-----
+'), 'key', 'debug=1');
+NOTICE: dbg: parse_compressed_data: bzip2 unsupported
+ERROR: Unsupported compression algorithm
diff --git a/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out b/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out
index d290a1349f..b4b6810a3c 100644
--- a/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out
+++ b/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out
@@ -625,7 +625,7 @@ ERROR: No encryption key found
-- rsa: password-protected secret key, wrong password
select pgp_pub_decrypt(dearmor(data), dearmor(seckey), '123')
from keytbl, encdata where keytbl.id=7 and encdata.id=4;
-ERROR: Corrupt data
+ERROR: Wrong key or corrupt data
-- rsa: password-protected secret key, right password
select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'parool')
from keytbl, encdata where keytbl.id=7 and encdata.id=4;
@@ -641,7 +641,7 @@ ERROR: Need password for secret key
-- password-protected secret key, wrong password
select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'foo')
from keytbl, encdata where keytbl.id=5 and encdata.id=1;
-ERROR: Corrupt data
+ERROR: Wrong key or corrupt data
-- password-protected secret key, right password
select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'parool')
from keytbl, encdata where keytbl.id=5 and encdata.id=1;
diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c
index c59691ed2c..44d9adcd2a 100644
--- a/contrib/pgcrypto/mbuf.c
+++ b/contrib/pgcrypto/mbuf.c
@@ -325,7 +325,7 @@ pullf_read_fixed(PullFilter *src, int len, uint8 *dst)
if (res != len)
{
px_debug("pullf_read_fixed: need=%d got=%d", len, res);
- return PXE_MBUF_SHORT_READ;
+ return PXE_PGP_CORRUPT_DATA;
}
if (p != dst)
memcpy(dst, p, len);
diff --git a/contrib/pgcrypto/pgp-armor.c b/contrib/pgcrypto/pgp-armor.c
index 24eb42fa89..5c8355808a 100644
--- a/contrib/pgcrypto/pgp-armor.c
+++ b/contrib/pgcrypto/pgp-armor.c
@@ -399,7 +399,7 @@ pgp_extract_armor_headers(const uint8 *src, unsigned len,
char *line;
char *nextline;
char *eol,
- *colon;
+ *colon;
int hlen;
char *buf;
int hdrlines;
diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c
index c0c5773e66..5c69745156 100644
--- a/contrib/pgcrypto/pgp-decrypt.c
+++ b/contrib/pgcrypto/pgp-decrypt.c
@@ -236,6 +236,8 @@ pgp_create_pkt_reader(PullFilter **pf_p, PullFilter *src, int len,
/*
* Prefix check filter
+ * https://tools.ietf.org/html/rfc4880#section-5.7
+ * https://tools.ietf.org/html/rfc4880#section-5.13
*/
static int
@@ -264,20 +266,7 @@ prefix_init(void **priv_p, void *arg, PullFilter *src)
if (buf[len - 2] != buf[len] || buf[len - 1] != buf[len + 1])
{
px_debug("prefix_init: corrupt prefix");
-
- /*
- * The original purpose of the 2-byte check was to show user a
- * friendly "wrong key" message. This made following possible:
- *
- * "An Attack on CFB Mode Encryption As Used By OpenPGP" by Serge
- * Mister and Robert Zuccherato
- *
- * To avoid being 'oracle', we delay reporting, which basically means
- * we prefer to run into corrupt packet header.
- *
- * We _could_ throw PXE_PGP_CORRUPT_DATA here, but there is
- * possibility of attack via timing, so we don't.
- */
+ /* report error in pgp_decrypt() */
ctx->corrupt_prefix = 1;
}
px_memset(tmpbuf, 0, sizeof(tmpbuf));
@@ -788,12 +777,15 @@ parse_literal_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt)
}
px_memset(tmpbuf, 0, 4);
- /* check if text */
+ /*
+ * If called from an SQL function that returns text, pgp_decrypt() rejects
+ * inputs not self-identifying as text.
+ */
if (ctx->text_mode)
if (type != 't' && type != 'u')
{
px_debug("parse_literal_data: data type=%c", type);
- return PXE_PGP_NOT_TEXT;
+ ctx->unexpected_binary = true;
}
ctx->unicode_mode = (type == 'u') ? 1 : 0;
@@ -827,6 +819,7 @@ parse_compressed_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt)
int res;
uint8 type;
PullFilter *pf_decompr;
+ uint8 *discard_buf;
GETBYTE(pkt, type);
@@ -850,7 +843,20 @@ parse_compressed_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt)
case PGP_COMPR_BZIP2:
px_debug("parse_compressed_data: bzip2 unsupported");
- res = PXE_PGP_UNSUPPORTED_COMPR;
+ /* report error in pgp_decrypt() */
+ ctx->unsupported_compr = 1;
+
+ /*
+ * Discard the compressed data, allowing it to first affect any
+ * MDC digest computation.
+ */
+ while (1)
+ {
+ res = pullf_read(pkt, 32 * 1024, &discard_buf);
+ if (res <= 0)
+ break;
+ }
+
break;
default:
@@ -1168,8 +1174,36 @@ pgp_decrypt(PGP_Context *ctx, MBuf *msrc, MBuf *mdst)
if (res < 0)
return res;
+ /*
+ * Report a failure of the prefix_init() "quick check" now, rather than
+ * upon detection, to hinder timing attacks. pgcrypto is not generally
+ * secure against timing attacks, but this helps.
+ */
if (!got_data || ctx->corrupt_prefix)
- res = PXE_PGP_CORRUPT_DATA;
+ return PXE_PGP_CORRUPT_DATA;
+
+ /*
+ * Code interpreting purportedly-decrypted data prior to this stage shall
+ * report no error other than PXE_PGP_CORRUPT_DATA. (PXE_BUG is okay so
+ * long as it remains unreachable.) This ensures that an attacker able to
+ * choose a ciphertext and receive a corresponding decryption error
+ * message cannot use that oracle to gather clues about the decryption
+ * key. See "An Attack on CFB Mode Encryption As Used By OpenPGP" by
+ * Serge Mister and Robert Zuccherato.
+ *
+ * A problematic value in the first octet of a Literal Data or Compressed
+ * Data packet may indicate a simple user error, such as the need to call
+ * pgp_sym_decrypt_bytea instead of pgp_sym_decrypt. Occasionally,
+ * though, it is the first symptom of the encryption key not matching the
+ * decryption key. When this was the only problem encountered, report a
+ * specific error to guide the user; otherwise, we will have reported
+ * PXE_PGP_CORRUPT_DATA before now. A key mismatch makes the other errors
+ * into red herrings, and this avoids leaking clues to attackers.
+ */
+ if (ctx->unsupported_compr)
+ return PXE_PGP_UNSUPPORTED_COMPR;
+ if (ctx->unexpected_binary)
+ return PXE_PGP_NOT_TEXT;
return res;
}
diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c
index d0da05cd13..1842985e53 100644
--- a/contrib/pgcrypto/pgp-pgsql.c
+++ b/contrib/pgcrypto/pgp-pgsql.c
@@ -259,6 +259,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
res = pgp_set_convert_crlf(ctx, atoi(val));
else if (strcmp(key, "unicode-mode") == 0)
res = pgp_set_unicode_mode(ctx, atoi(val));
+
/*
* The remaining options are for debugging/testing and are therefore not
* documented in the user-facing docs.
@@ -834,22 +835,22 @@ static int
parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
char ***p_keys, char ***p_values)
{
- int nkdims = ARR_NDIM(key_array);
- int nvdims = ARR_NDIM(val_array);
- char **keys,
- **values;
- Datum *key_datums,
- *val_datums;
- bool *key_nulls,
- *val_nulls;
- int key_count,
- val_count;
- int i;
+ int nkdims = ARR_NDIM(key_array);
+ int nvdims = ARR_NDIM(val_array);
+ char **keys,
+ **values;
+ Datum *key_datums,
+ *val_datums;
+ bool *key_nulls,
+ *val_nulls;
+ int key_count,
+ val_count;
+ int i;
if (nkdims > 1 || nkdims != nvdims)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("wrong number of array subscripts")));
+ errmsg("wrong number of array subscripts")));
if (nkdims == 0)
return 0;
@@ -871,7 +872,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
for (i = 0; i < key_count; i++)
{
- char *v;
+ char *v;
/* Check that the key doesn't contain anything funny */
if (key_nulls[i])
@@ -884,7 +885,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
if (!string_is_ascii(v))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("header key must not contain non-ASCII characters")));
+ errmsg("header key must not contain non-ASCII characters")));
if (strstr(v, ": "))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -906,7 +907,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
if (!string_is_ascii(v))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("header value must not contain non-ASCII characters")));
+ errmsg("header value must not contain non-ASCII characters")));
if (strchr(v, '\n'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -1045,7 +1046,7 @@ pgp_armor_headers(PG_FUNCTION_ARGS)
SRF_RETURN_DONE(funcctx);
else
{
- char *values[2];
+ char *values[2];
/* we assume that the keys (and values) are in UTF-8. */
utf8key = state->keys[funcctx->call_cntr];
diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h
index 398f21bca2..62b8517c27 100644
--- a/contrib/pgcrypto/pgp.h
+++ b/contrib/pgcrypto/pgp.h
@@ -153,7 +153,9 @@ struct PGP_Context
* internal variables
*/
int mdc_checked;
- int corrupt_prefix;
+ int corrupt_prefix; /* prefix failed RFC 4880 "quick check" */
+ int unsupported_compr; /* has bzip2 compression */
+ int unexpected_binary; /* binary data seen in text_mode */
int in_mdc_pkt;
int use_mdcbuf_filter;
PX_MD *mdc_ctx;
@@ -276,11 +278,11 @@ void pgp_cfb_free(PGP_CFB *ctx);
int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
int pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
-void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
- int num_headers, char **keys, char **values);
+void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
+ int num_headers, char **keys, char **values);
int pgp_armor_decode(const uint8 *src, int len, StringInfo dst);
-int pgp_extract_armor_headers(const uint8 *src, unsigned len,
- int *nheaders, char ***keys, char ***values);
+int pgp_extract_armor_headers(const uint8 *src, unsigned len,
+ int *nheaders, char ***keys, char ***values);
int pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst);
int pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src);
diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c
index 93c436daa0..cfb3b50985 100644
--- a/contrib/pgcrypto/px.c
+++ b/contrib/pgcrypto/px.c
@@ -87,9 +87,6 @@ static const struct error_desc px_err_list[] = {
{PXE_PGP_UNSUPPORTED_PUBALGO, "Unsupported public key algorithm"},
{PXE_PGP_MULTIPLE_SUBKEYS, "Several subkeys not supported"},
- /* fake this as PXE_PGP_CORRUPT_DATA */
- {PXE_MBUF_SHORT_READ, "Corrupt data"},
-
{0, NULL},
};
diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h
index 7255ebf04c..0f6bbd7a8d 100644
--- a/contrib/pgcrypto/px.h
+++ b/contrib/pgcrypto/px.h
@@ -80,8 +80,6 @@ void px_free(void *p);
#define PXE_NO_RANDOM -17
#define PXE_DECRYPT_FAILED -18
-#define PXE_MBUF_SHORT_READ -50
-
#define PXE_PGP_CORRUPT_DATA -100
#define PXE_PGP_CORRUPT_ARMOR -101
#define PXE_PGP_UNSUPPORTED_COMPR -102
diff --git a/contrib/pgcrypto/sql/pgp-decrypt.sql b/contrib/pgcrypto/sql/pgp-decrypt.sql
index 5457152ccf..f46a18f8cf 100644
--- a/contrib/pgcrypto/sql/pgp-decrypt.sql
+++ b/contrib/pgcrypto/sql/pgp-decrypt.sql
@@ -268,3 +268,48 @@ a3nsOzKTXUfS9VyaXo8IrncM6n7fdaXpwba/3tNsAhJG4lDv1k4g9v8Ix2dfv6Rs
-- check BUG #11905, problem with messages 6 less than a power of 2.
select pgp_sym_decrypt(pgp_sym_encrypt(repeat('x',65530),'1'),'1') = repeat('x',65530);
-- expected: true
+
+
+-- Negative tests
+
+-- Decryption with a certain incorrect key yields an apparent Literal Data
+-- packet reporting its content to be binary data. Ciphertext source:
+-- iterative pgp_sym_encrypt('secret', 'key') until the random prefix gave
+-- rise to that property.
+select pgp_sym_decrypt(dearmor('
+-----BEGIN PGP MESSAGE-----
+
+ww0EBwMCxf8PTrQBmJdl0jcB6y2joE7GSLKRv7trbNsF5Z8ou5NISLUg31llVH/S0B2wl4bvzZjV
+VsxxqLSPzNLAeIspJk5G
+=mSd/
+-----END PGP MESSAGE-----
+'), 'wrong-key', 'debug=1');
+
+-- Routine text/binary mismatch.
+select pgp_sym_decrypt(pgp_sym_encrypt_bytea('P', 'key'), 'key', 'debug=1');
+
+-- Decryption with a certain incorrect key yields an apparent BZip2-compressed
+-- plaintext. Ciphertext source: iterative pgp_sym_encrypt('secret', 'key')
+-- until the random prefix gave rise to that property.
+select pgp_sym_decrypt(dearmor('
+-----BEGIN PGP MESSAGE-----
+
+ww0EBwMC9rK/dMkF5Zlt0jcBlzAQ1mQY2qYbKYbw8h3EZ5Jk0K2IiY92R82TRhWzBIF/8cmXDPtP
+GXsd65oYJZp3Khz0qfyn
+=Nmpq
+-----END PGP MESSAGE-----
+'), 'wrong-key', 'debug=1');
+
+-- Routine use of BZip2 compression. Ciphertext source:
+-- echo x | gpg --homedir /nonexistent --personal-compress-preferences bzip2 \
+-- --personal-cipher-preferences aes --no-emit-version --batch \
+-- --symmetric --passphrase key --armor
+select pgp_sym_decrypt(dearmor('
+-----BEGIN PGP MESSAGE-----
+
+jA0EBwMCRhFrAKNcLVJg0mMBLJG1cCASNk/x/3dt1zJ+2eo7jHfjgg3N6wpB3XIe
+QCwkWJwlBG5pzbO5gu7xuPQN+TbPJ7aQ2sLx3bAHhtYb0i3vV9RO10Gw++yUyd4R
+UCAAw2JRIISttRHMfDpDuZJpvYo=
+=AZ9M
+-----END PGP MESSAGE-----
+'), 'key', 'debug=1');
diff --git a/contrib/pgstattuple/Makefile b/contrib/pgstattuple/Makefile
index 862585cc01..6083dabefd 100644
--- a/contrib/pgstattuple/Makefile
+++ b/contrib/pgstattuple/Makefile
@@ -1,10 +1,10 @@
# contrib/pgstattuple/Makefile
MODULE_big = pgstattuple
-OBJS = pgstattuple.o pgstatindex.o $(WIN32RES)
+OBJS = pgstattuple.o pgstatindex.o pgstatapprox.o $(WIN32RES)
EXTENSION = pgstattuple
-DATA = pgstattuple--1.2.sql pgstattuple--1.1--1.2.sql pgstattuple--1.0--1.1.sql pgstattuple--unpackaged--1.0.sql
+DATA = pgstattuple--1.3.sql pgstattuple--1.2--1.3.sql pgstattuple--1.1--1.2.sql pgstattuple--1.0--1.1.sql pgstattuple--unpackaged--1.0.sql
PGFILEDESC = "pgstattuple - tuple-level statistics"
REGRESS = pgstattuple
diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c
new file mode 100644
index 0000000000..22c5f7a9ee
--- /dev/null
+++ b/contrib/pgstattuple/pgstatapprox.c
@@ -0,0 +1,274 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgstatapproc.c
+ * Bloat estimation functions
+ *
+ * Copyright (c) 2014-2015, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/pgstattuple/pgstatapprox.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/visibilitymap.h"
+#include "access/transam.h"
+#include "access/xact.h"
+#include "access/multixact.h"
+#include "access/htup_details.h"
+#include "catalog/namespace.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "storage/bufmgr.h"
+#include "storage/freespace.h"
+#include "storage/procarray.h"
+#include "storage/lmgr.h"
+#include "utils/builtins.h"
+#include "utils/tqual.h"
+#include "commands/vacuum.h"
+
+PG_FUNCTION_INFO_V1(pgstattuple_approx);
+
+typedef struct output_type
+{
+ uint64 table_len;
+ uint64 scanned_percent;
+ uint64 tuple_count;
+ uint64 tuple_len;
+ double tuple_percent;
+ uint64 dead_tuple_count;
+ uint64 dead_tuple_len;
+ double dead_tuple_percent;
+ uint64 free_space;
+ double free_percent;
+} output_type;
+
+#define NUM_OUTPUT_COLUMNS 10
+
+/*
+ * This function takes an already open relation and scans its pages,
+ * skipping those that have the corresponding visibility map bit set.
+ * For pages we skip, we find the free space from the free space map
+ * and approximate tuple_len on that basis. For the others, we count
+ * the exact number of dead tuples etc.
+ *
+ * This scan is loosely based on vacuumlazy.c:lazy_scan_heap(), but
+ * we do not try to avoid skipping single pages.
+ */
+static void
+statapprox_heap(Relation rel, output_type *stat)
+{
+ BlockNumber scanned,
+ nblocks,
+ blkno;
+ Buffer vmbuffer = InvalidBuffer;
+ BufferAccessStrategy bstrategy;
+ TransactionId OldestXmin;
+ uint64 misc_count = 0;
+
+ OldestXmin = GetOldestXmin(rel, true);
+ bstrategy = GetAccessStrategy(BAS_BULKREAD);
+
+ nblocks = RelationGetNumberOfBlocks(rel);
+ scanned = 0;
+
+ for (blkno = 0; blkno < nblocks; blkno++)
+ {
+ Buffer buf;
+ Page page;
+ OffsetNumber offnum,
+ maxoff;
+ Size freespace;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * If the page has only visible tuples, then we can find out the free
+ * space from the FSM and move on.
+ */
+ if (visibilitymap_test(rel, blkno, &vmbuffer))
+ {
+ freespace = GetRecordedFreeSpace(rel, blkno);
+ stat->tuple_len += BLCKSZ - freespace;
+ stat->free_space += freespace;
+ continue;
+ }
+
+ buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, bstrategy);
+
+ LockBuffer(buf, BUFFER_LOCK_SHARE);
+
+ page = BufferGetPage(buf);
+
+ /*
+ * It's not safe to call PageGetHeapFreeSpace() on new pages, so we
+ * treat them as being free space for our purposes.
+ */
+ if (!PageIsNew(page))
+ stat->free_space += PageGetHeapFreeSpace(page);
+ else
+ stat->free_space += BLCKSZ - SizeOfPageHeaderData;
+
+ if (PageIsNew(page) || PageIsEmpty(page))
+ {
+ UnlockReleaseBuffer(buf);
+ continue;
+ }
+
+ scanned++;
+
+ /*
+ * Look at each tuple on the page and decide whether it's live or
+ * dead, then count it and its size. Unlike lazy_scan_heap, we can
+ * afford to ignore problems and special cases.
+ */
+ maxoff = PageGetMaxOffsetNumber(page);
+
+ for (offnum = FirstOffsetNumber;
+ offnum <= maxoff;
+ offnum = OffsetNumberNext(offnum))
+ {
+ ItemId itemid;
+ HeapTupleData tuple;
+
+ itemid = PageGetItemId(page, offnum);
+
+ if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid) ||
+ ItemIdIsDead(itemid))
+ {
+ continue;
+ }
+
+ Assert(ItemIdIsNormal(itemid));
+
+ ItemPointerSet(&(tuple.t_self), blkno, offnum);
+
+ tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
+ tuple.t_len = ItemIdGetLength(itemid);
+ tuple.t_tableOid = RelationGetRelid(rel);
+
+ /*
+ * We count live and dead tuples, but we also need to add up
+ * others in order to feed vac_estimate_reltuples.
+ */
+ switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
+ {
+ case HEAPTUPLE_RECENTLY_DEAD:
+ misc_count++;
+ /* Fall through */
+ case HEAPTUPLE_DEAD:
+ stat->dead_tuple_len += tuple.t_len;
+ stat->dead_tuple_count++;
+ break;
+ case HEAPTUPLE_LIVE:
+ stat->tuple_len += tuple.t_len;
+ stat->tuple_count++;
+ break;
+ case HEAPTUPLE_INSERT_IN_PROGRESS:
+ case HEAPTUPLE_DELETE_IN_PROGRESS:
+ misc_count++;
+ break;
+ default:
+ elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
+ break;
+ }
+ }
+
+ UnlockReleaseBuffer(buf);
+ }
+
+ stat->table_len = (uint64) nblocks *BLCKSZ;
+
+ stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned,
+ stat->tuple_count + misc_count);
+
+ /*
+ * Calculate percentages if the relation has one or more pages.
+ */
+ if (nblocks != 0)
+ {
+ stat->scanned_percent = 100 * scanned / nblocks;
+ stat->tuple_percent = 100.0 * stat->tuple_len / stat->table_len;
+ stat->dead_tuple_percent = 100.0 * stat->dead_tuple_len / stat->table_len;
+ stat->free_percent = 100.0 * stat->free_space / stat->table_len;
+ }
+
+ if (BufferIsValid(vmbuffer))
+ {
+ ReleaseBuffer(vmbuffer);
+ vmbuffer = InvalidBuffer;
+ }
+}
+
+/*
+ * Returns estimated live/dead tuple statistics for the given relid.
+ */
+Datum
+pgstattuple_approx(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ Relation rel;
+ output_type stat = {0};
+ TupleDesc tupdesc;
+ bool nulls[NUM_OUTPUT_COLUMNS];
+ Datum values[NUM_OUTPUT_COLUMNS];
+ HeapTuple ret;
+ int i = 0;
+
+ if (!superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errmsg("must be superuser to use pgstattuple functions"))));
+
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ if (tupdesc->natts != NUM_OUTPUT_COLUMNS)
+ elog(ERROR, "incorrect number of output arguments");
+
+ rel = relation_open(relid, AccessShareLock);
+
+ /*
+ * Reject attempts to read non-local temporary relations; we would be
+ * likely to get wrong data since we have no visibility into the owning
+ * session's local buffers.
+ */
+ if (RELATION_IS_OTHER_TEMP(rel))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot access temporary tables of other sessions")));
+
+ /*
+ * We support only ordinary relations and materialised views, because we
+ * depend on the visibility map and free space map for our estimates about
+ * unscanned pages.
+ */
+ if (!(rel->rd_rel->relkind == RELKIND_RELATION ||
+ rel->rd_rel->relkind == RELKIND_MATVIEW))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("\"%s\" is not a table or materialized view",
+ RelationGetRelationName(rel))));
+
+ statapprox_heap(rel, &stat);
+
+ relation_close(rel, AccessShareLock);
+
+ memset(nulls, 0, sizeof(nulls));
+
+ values[i++] = Int64GetDatum(stat.table_len);
+ values[i++] = Float8GetDatum(stat.scanned_percent);
+ values[i++] = Int64GetDatum(stat.tuple_count);
+ values[i++] = Int64GetDatum(stat.tuple_len);
+ values[i++] = Float8GetDatum(stat.tuple_percent);
+ values[i++] = Int64GetDatum(stat.dead_tuple_count);
+ values[i++] = Int64GetDatum(stat.dead_tuple_len);
+ values[i++] = Float8GetDatum(stat.dead_tuple_percent);
+ values[i++] = Int64GetDatum(stat.free_space);
+ values[i++] = Float8GetDatum(stat.free_percent);
+
+ ret = heap_form_tuple(tupdesc, values, nulls);
+ return HeapTupleGetDatum(ret);
+}
diff --git a/contrib/pgstattuple/pgstattuple--1.2--1.3.sql b/contrib/pgstattuple/pgstattuple--1.2--1.3.sql
new file mode 100644
index 0000000000..99301a27ca
--- /dev/null
+++ b/contrib/pgstattuple/pgstattuple--1.2--1.3.sql
@@ -0,0 +1,18 @@
+/* contrib/pgstattuple/pgstattuple--1.2--1.3.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pgstattuple UPDATE TO '1.3'" to load this file. \quit
+
+CREATE FUNCTION pgstattuple_approx(IN reloid regclass,
+ OUT table_len BIGINT, -- physical table length in bytes
+ OUT scanned_percent FLOAT8, -- what percentage of the table's pages was scanned
+ OUT approx_tuple_count BIGINT, -- estimated number of live tuples
+ OUT approx_tuple_len BIGINT, -- estimated total length in bytes of live tuples
+ OUT approx_tuple_percent FLOAT8, -- live tuples in % (based on estimate)
+ OUT dead_tuple_count BIGINT, -- exact number of dead tuples
+ OUT dead_tuple_len BIGINT, -- exact total length in bytes of dead tuples
+ OUT dead_tuple_percent FLOAT8, -- dead tuples in % (based on estimate)
+ OUT approx_free_space BIGINT, -- estimated free space in bytes
+ OUT approx_free_percent FLOAT8) -- free space in % (based on estimate)
+AS 'MODULE_PATHNAME', 'pgstattuple_approx'
+LANGUAGE C STRICT;
diff --git a/contrib/pgstattuple/pgstattuple--1.2.sql b/contrib/pgstattuple/pgstattuple--1.3.sql
index e5fa2f58da..f3996e74a8 100644
--- a/contrib/pgstattuple/pgstattuple--1.2.sql
+++ b/contrib/pgstattuple/pgstattuple--1.3.sql
@@ -1,4 +1,4 @@
-/* contrib/pgstattuple/pgstattuple--1.2.sql */
+/* contrib/pgstattuple/pgstattuple--1.3.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION pgstattuple" to load this file. \quit
@@ -77,3 +77,19 @@ CREATE FUNCTION pg_relpages(IN relname regclass)
RETURNS BIGINT
AS 'MODULE_PATHNAME', 'pg_relpagesbyid'
LANGUAGE C STRICT;
+
+/* New stuff in 1.3 begins here */
+
+CREATE FUNCTION pgstattuple_approx(IN reloid regclass,
+ OUT table_len BIGINT, -- physical table length in bytes
+ OUT scanned_percent FLOAT8, -- what percentage of the table's pages was scanned
+ OUT approx_tuple_count BIGINT, -- estimated number of live tuples
+ OUT approx_tuple_len BIGINT, -- estimated total length in bytes of live tuples
+ OUT approx_tuple_percent FLOAT8, -- live tuples in % (based on estimate)
+ OUT dead_tuple_count BIGINT, -- exact number of dead tuples
+ OUT dead_tuple_len BIGINT, -- exact total length in bytes of dead tuples
+ OUT dead_tuple_percent FLOAT8, -- dead tuples in % (based on estimate)
+ OUT approx_free_space BIGINT, -- estimated free space in bytes
+ OUT approx_free_percent FLOAT8) -- free space in % (based on estimate)
+AS 'MODULE_PATHNAME', 'pgstattuple_approx'
+LANGUAGE C STRICT;
diff --git a/contrib/pgstattuple/pgstattuple.control b/contrib/pgstattuple/pgstattuple.control
index a7cf47fd92..c03b180143 100644
--- a/contrib/pgstattuple/pgstattuple.control
+++ b/contrib/pgstattuple/pgstattuple.control
@@ -1,5 +1,5 @@
# pgstattuple extension
comment = 'show tuple-level statistics'
-default_version = '1.2'
+default_version = '1.3'
module_pathname = '$libdir/pgstattuple'
relocatable = true
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 4e02cb289d..1a1e5b5eae 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -546,6 +546,7 @@ pgfdw_xact_callback(XactEvent event, void *arg)
switch (event)
{
+ case XACT_EVENT_PARALLEL_PRE_COMMIT:
case XACT_EVENT_PRE_COMMIT:
/* Commit all remote transactions during pre-commit */
do_sql_command(entry->conn, "COMMIT TRANSACTION");
@@ -588,11 +589,13 @@ pgfdw_xact_callback(XactEvent event, void *arg)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot prepare a transaction that modified remote tables")));
break;
+ case XACT_EVENT_PARALLEL_COMMIT:
case XACT_EVENT_COMMIT:
case XACT_EVENT_PREPARE:
/* Pre-commit should have closed the open transaction */
elog(ERROR, "missed cleaning up connection during pre-commit");
break;
+ case XACT_EVENT_PARALLEL_ABORT:
case XACT_EVENT_ABORT:
/* Assume we might have lost track of prepared statements */
entry->have_error = true;
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
index 94fab18c42..81cb2b447d 100644
--- a/contrib/postgres_fdw/deparse.c
+++ b/contrib/postgres_fdw/deparse.c
@@ -847,8 +847,8 @@ appendWhereClause(StringInfo buf,
void
deparseInsertSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
- List *targetAttrs, List *returningList,
- List **retrieved_attrs)
+ List *targetAttrs, bool doNothing,
+ List *returningList, List **retrieved_attrs)
{
AttrNumber pindex;
bool first;
@@ -892,6 +892,9 @@ deparseInsertSql(StringInfo buf, PlannerInfo *root,
else
appendStringInfoString(buf, " DEFAULT VALUES");
+ if (doNothing)
+ appendStringInfoString(buf, " ON CONFLICT DO NOTHING");
+
deparseReturningList(buf, root, rtindex, rel,
rel->trigdesc && rel->trigdesc->trig_insert_after_row,
returningList, retrieved_attrs);
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 783cb41571..1f417b30be 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -2327,6 +2327,11 @@ INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
ERROR: duplicate key value violates unique constraint "t1_pkey"
DETAIL: Key ("C 1")=(11) already exists.
CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT DO NOTHING; -- works
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO NOTHING; -- unsupported
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO UPDATE SET c3 = 'ffg'; -- unsupported
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
ERROR: new row for relation "T 1" violates check constraint "c2positive"
DETAIL: Failing row contains (1111, -2, null, null, null, null, ft1 , null).
@@ -3193,26 +3198,26 @@ select * from bar where f1 in (select f1 from foo) for update;
QUERY PLAN
----------------------------------------------------------------------------------------------
LockRows
- Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*, foo.ctid, foo.tableoid, foo.*
+ Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid, foo.ctid, foo.*, foo.tableoid
-> Hash Join
- Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*, foo.ctid, foo.tableoid, foo.*
+ Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid, foo.ctid, foo.*, foo.tableoid
Hash Cond: (bar.f1 = foo.f1)
-> Append
-> Seq Scan on public.bar
- Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*
+ Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid
-> Foreign Scan on public.bar2
- Output: bar2.f1, bar2.f2, bar2.ctid, bar2.tableoid, bar2.*
+ Output: bar2.f1, bar2.f2, bar2.ctid, bar2.*, bar2.tableoid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE
-> Hash
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
-> HashAggregate
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
Group Key: foo.f1
-> Append
-> Seq Scan on public.foo
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
-> Foreign Scan on public.foo2
- Output: foo2.ctid, foo2.tableoid, foo2.*, foo2.f1
+ Output: foo2.ctid, foo2.*, foo2.tableoid, foo2.f1
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
(22 rows)
@@ -3230,26 +3235,26 @@ select * from bar where f1 in (select f1 from foo) for share;
QUERY PLAN
----------------------------------------------------------------------------------------------
LockRows
- Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*, foo.ctid, foo.tableoid, foo.*
+ Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid, foo.ctid, foo.*, foo.tableoid
-> Hash Join
- Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*, foo.ctid, foo.tableoid, foo.*
+ Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid, foo.ctid, foo.*, foo.tableoid
Hash Cond: (bar.f1 = foo.f1)
-> Append
-> Seq Scan on public.bar
- Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*
+ Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid
-> Foreign Scan on public.bar2
- Output: bar2.f1, bar2.f2, bar2.ctid, bar2.tableoid, bar2.*
+ Output: bar2.f1, bar2.f2, bar2.ctid, bar2.*, bar2.tableoid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR SHARE
-> Hash
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
-> HashAggregate
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
Group Key: foo.f1
-> Append
-> Seq Scan on public.foo
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
-> Foreign Scan on public.foo2
- Output: foo2.ctid, foo2.tableoid, foo2.*, foo2.f1
+ Output: foo2.ctid, foo2.*, foo2.tableoid, foo2.f1
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
(22 rows)
@@ -3272,37 +3277,37 @@ update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
Foreign Update on public.bar2
Remote SQL: UPDATE public.loct2 SET f2 = $2 WHERE ctid = $1
-> Hash Join
- Output: bar.f1, (bar.f2 + 100), bar.ctid, foo.ctid, foo.tableoid, foo.*
+ Output: bar.f1, (bar.f2 + 100), bar.ctid, foo.ctid, foo.*, foo.tableoid
Hash Cond: (bar.f1 = foo.f1)
-> Seq Scan on public.bar
Output: bar.f1, bar.f2, bar.ctid
-> Hash
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
-> HashAggregate
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
Group Key: foo.f1
-> Append
-> Seq Scan on public.foo
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
-> Foreign Scan on public.foo2
- Output: foo2.ctid, foo2.tableoid, foo2.*, foo2.f1
+ Output: foo2.ctid, foo2.*, foo2.tableoid, foo2.f1
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
-> Hash Join
- Output: bar2.f1, (bar2.f2 + 100), bar2.f3, bar2.ctid, foo.ctid, foo.tableoid, foo.*
+ Output: bar2.f1, (bar2.f2 + 100), bar2.f3, bar2.ctid, foo.ctid, foo.*, foo.tableoid
Hash Cond: (bar2.f1 = foo.f1)
-> Foreign Scan on public.bar2
Output: bar2.f1, bar2.f2, bar2.f3, bar2.ctid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE
-> Hash
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
-> HashAggregate
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
Group Key: foo.f1
-> Append
-> Seq Scan on public.foo
- Output: foo.ctid, foo.tableoid, foo.*, foo.f1
+ Output: foo.ctid, foo.*, foo.tableoid, foo.f1
-> Foreign Scan on public.foo2
- Output: foo2.ctid, foo2.tableoid, foo2.*, foo2.f1
+ Output: foo2.ctid, foo2.*, foo2.tableoid, foo2.f1
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
(37 rows)
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 478e12484b..6da01e1d6f 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -37,6 +37,7 @@
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
+#include "utils/sampling.h"
PG_MODULE_MAGIC;
@@ -202,7 +203,7 @@ typedef struct PgFdwAnalyzeState
/* for random sampling */
double samplerows; /* # of rows fetched */
double rowstoskip; /* # of rows to skip before next sample */
- double rstate; /* random state */
+ ReservoirStateData rstate; /* state for reservoir sampling */
/* working memory contexts */
MemoryContext anl_cxt; /* context for per-analyze lifespan data */
@@ -872,7 +873,8 @@ postgresGetForeignPlan(PlannerInfo *root,
local_exprs,
scan_relid,
params_list,
- fdw_private);
+ fdw_private,
+ NIL /* no custom tlist */ );
}
/*
@@ -1171,6 +1173,7 @@ postgresPlanForeignModify(PlannerInfo *root,
List *targetAttrs = NIL;
List *returningList = NIL;
List *retrieved_attrs = NIL;
+ bool doNothing = false;
initStringInfo(&sql);
@@ -1205,7 +1208,7 @@ postgresPlanForeignModify(PlannerInfo *root,
int col;
col = -1;
- while ((col = bms_next_member(rte->modifiedCols, col)) >= 0)
+ while ((col = bms_next_member(rte->updatedCols, col)) >= 0)
{
/* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */
AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
@@ -1223,13 +1226,25 @@ postgresPlanForeignModify(PlannerInfo *root,
returningList = (List *) list_nth(plan->returningLists, subplan_index);
/*
+ * ON CONFLICT DO UPDATE and DO NOTHING case with inference specification
+ * should have already been rejected in the optimizer, as presently there
+ * is no way to recognize an arbiter index on a foreign table. Only DO
+ * NOTHING is supported without an inference specification.
+ */
+ if (plan->onConflictAction == ONCONFLICT_NOTHING)
+ doNothing = true;
+ else if (plan->onConflictAction != ONCONFLICT_NONE)
+ elog(ERROR, "unexpected ON CONFLICT specification: %d",
+ (int) plan->onConflictAction);
+
+ /*
* Construct the SQL command string.
*/
switch (operation)
{
case CMD_INSERT:
deparseInsertSql(&sql, root, resultRelation, rel,
- targetAttrs, returningList,
+ targetAttrs, doNothing, returningList,
&retrieved_attrs);
break;
case CMD_UPDATE:
@@ -2397,7 +2412,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
astate.numrows = 0;
astate.samplerows = 0;
astate.rowstoskip = -1; /* -1 means not set yet */
- astate.rstate = anl_init_selection_state(targrows);
+ reservoir_init_selection_state(&astate.rstate, targrows);
/* Remember ANALYZE context, and create a per-tuple temp context */
astate.anl_cxt = CurrentMemoryContext;
@@ -2537,13 +2552,12 @@ analyze_row_processor(PGresult *res, int row, PgFdwAnalyzeState *astate)
* analyze.c; see Jeff Vitter's paper.
*/
if (astate->rowstoskip < 0)
- astate->rowstoskip = anl_get_next_S(astate->samplerows, targrows,
- &astate->rstate);
+ astate->rowstoskip = reservoir_get_next_S(&astate->rstate, astate->samplerows, targrows);
if (astate->rowstoskip <= 0)
{
/* Choose a random reservoir element to replace. */
- pos = (int) (targrows * anl_random_fract());
+ pos = (int) (targrows * sampler_random_fract(astate->rstate.randstate));
Assert(pos >= 0 && pos < targrows);
heap_freetuple(astate->rows[pos]);
}
@@ -2720,7 +2734,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
appendStringInfoString(&buf, ", ");
deparseStringLiteral(&buf, rv->relname);
}
- appendStringInfoString(&buf, ")");
+ appendStringInfoChar(&buf, ')');
}
/* Append ORDER BY at the end of query to ensure output ordering */
@@ -2784,7 +2798,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
*/
appendStringInfoString(&buf, " OPTIONS (column_name ");
deparseStringLiteral(&buf, attname);
- appendStringInfoString(&buf, ")");
+ appendStringInfoChar(&buf, ')');
/* Add COLLATE if needed */
if (import_collate && collname != NULL && collnamespace != NULL)
@@ -2950,8 +2964,14 @@ make_tuple_from_result_row(PGresult *res,
tuple = heap_form_tuple(tupdesc, values, nulls);
+ /*
+ * If we have a CTID to return, install it in both t_self and t_ctid.
+ * t_self is the normal place, but if the tuple is converted to a
+ * composite Datum, t_self will be lost; setting t_ctid allows CTID to be
+ * preserved during EvalPlanQual re-evaluations (see ROW_MARK_COPY code).
+ */
if (ctid)
- tuple->t_self = *ctid;
+ tuple->t_self = tuple->t_data->t_ctid = *ctid;
/* Clean up */
MemoryContextReset(temp_context);
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
index 950c6f79a2..3835ddb79a 100644
--- a/contrib/postgres_fdw/postgres_fdw.h
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -60,7 +60,7 @@ extern void appendWhereClause(StringInfo buf,
List **params);
extern void deparseInsertSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
- List *targetAttrs, List *returningList,
+ List *targetAttrs, bool doNothing, List *returningList,
List **retrieved_attrs);
extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index 4a23457e79..fcdd92e280 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -372,6 +372,9 @@ UPDATE ft2 SET c2 = c2 + 600 WHERE c1 % 10 = 8 AND c1 < 1200 RETURNING *;
ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0);
INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT DO NOTHING; -- works
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO NOTHING; -- unsupported
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO UPDATE SET c3 = 'ffg'; -- unsupported
INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c
index 8e2d5343ae..1e6c37d9e1 100644
--- a/contrib/seg/seg.c
+++ b/contrib/seg/seg.c
@@ -12,7 +12,8 @@
#include <float.h>
#include "access/gist.h"
-#include "access/skey.h"
+#include "access/stratnum.h"
+#include "fmgr.h"
#include "segdata.h"
diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c
index 36c6a37ac1..4a71753d3f 100644
--- a/contrib/sepgsql/dml.c
+++ b/contrib/sepgsql/dml.c
@@ -145,7 +145,8 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns)
static bool
check_relation_privileges(Oid relOid,
Bitmapset *selected,
- Bitmapset *modified,
+ Bitmapset *inserted,
+ Bitmapset *updated,
uint32 required,
bool abort_on_violation)
{
@@ -231,8 +232,9 @@ check_relation_privileges(Oid relOid,
* Check permissions on the columns
*/
selected = fixup_whole_row_references(relOid, selected);
- modified = fixup_whole_row_references(relOid, modified);
- columns = bms_union(selected, modified);
+ inserted = fixup_whole_row_references(relOid, inserted);
+ updated = fixup_whole_row_references(relOid, updated);
+ columns = bms_union(selected, bms_union(inserted, updated));
while ((index = bms_first_member(columns)) >= 0)
{
@@ -241,13 +243,16 @@ check_relation_privileges(Oid relOid,
if (bms_is_member(index, selected))
column_perms |= SEPG_DB_COLUMN__SELECT;
- if (bms_is_member(index, modified))
+ if (bms_is_member(index, inserted))
{
- if (required & SEPG_DB_TABLE__UPDATE)
- column_perms |= SEPG_DB_COLUMN__UPDATE;
if (required & SEPG_DB_TABLE__INSERT)
column_perms |= SEPG_DB_COLUMN__INSERT;
}
+ if (bms_is_member(index, updated))
+ {
+ if (required & SEPG_DB_TABLE__UPDATE)
+ column_perms |= SEPG_DB_COLUMN__UPDATE;
+ }
if (column_perms == 0)
continue;
@@ -304,7 +309,7 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort_on_violation)
required |= SEPG_DB_TABLE__INSERT;
if (rte->requiredPerms & ACL_UPDATE)
{
- if (!bms_is_empty(rte->modifiedCols))
+ if (!bms_is_empty(rte->updatedCols))
required |= SEPG_DB_TABLE__UPDATE;
else
required |= SEPG_DB_TABLE__LOCK;
@@ -333,7 +338,8 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort_on_violation)
{
Oid tableOid = lfirst_oid(li);
Bitmapset *selectedCols;
- Bitmapset *modifiedCols;
+ Bitmapset *insertedCols;
+ Bitmapset *updatedCols;
/*
* child table has different attribute numbers, so we need to fix
@@ -341,15 +347,18 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort_on_violation)
*/
selectedCols = fixup_inherited_columns(rte->relid, tableOid,
rte->selectedCols);
- modifiedCols = fixup_inherited_columns(rte->relid, tableOid,
- rte->modifiedCols);
+ insertedCols = fixup_inherited_columns(rte->relid, tableOid,
+ rte->insertedCols);
+ updatedCols = fixup_inherited_columns(rte->relid, tableOid,
+ rte->updatedCols);
/*
* check permissions on individual tables
*/
if (!check_relation_privileges(tableOid,
selectedCols,
- modifiedCols,
+ insertedCols,
+ updatedCols,
required, abort_on_violation))
return false;
}
diff --git a/contrib/spi/insert_username.c b/contrib/spi/insert_username.c
index 875207881a..3812525c4c 100644
--- a/contrib/spi/insert_username.c
+++ b/contrib/spi/insert_username.c
@@ -79,7 +79,7 @@ insert_username(PG_FUNCTION_ARGS)
args[0], relname)));
/* create fields containing name */
- newval = CStringGetTextDatum(GetUserNameFromId(GetUserId()));
+ newval = CStringGetTextDatum(GetUserNameFromId(GetUserId(), false));
/* construct new tuple */
rettuple = SPI_modifytuple(rel, rettuple, 1, &attnum, &newval, NULL);
diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c
index 0699438d6f..5a345841c6 100644
--- a/contrib/spi/timetravel.c
+++ b/contrib/spi/timetravel.c
@@ -51,7 +51,7 @@ static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans);
* and all other column values as in new tuple, and insert tuple
* with old data and stop_date eq current date
* ELSE - skip updation of tuple.
- * 2. IF an delete affects tuple with stop_date eq INFINITY
+ * 2. IF a delete affects tuple with stop_date eq INFINITY
* then insert the same tuple with stop_date eq current date
* [ and delete_user eq current user ]
* ELSE - skip deletion of tuple.
@@ -174,7 +174,7 @@ timetravel(PG_FUNCTION_ARGS)
}
/* create fields containing name */
- newuser = CStringGetTextDatum(GetUserNameFromId(GetUserId()));
+ newuser = CStringGetTextDatum(GetUserNameFromId(GetUserId(), false));
nulltext = (Datum) NULL;
diff --git a/contrib/start-scripts/linux b/contrib/start-scripts/linux
index 2dff0094cd..763a8064ab 100644
--- a/contrib/start-scripts/linux
+++ b/contrib/start-scripts/linux
@@ -81,7 +81,7 @@ test -x $DAEMON ||
# If we want to tell child processes to adjust their OOM scores, set up the
# necessary environment variables. Can't just export them through the "su".
-if [ -e "$PG_OOM_ADJUST_FILE" -a -n "PG_CHILD_OOM_SCORE_ADJ" ]
+if [ -e "$PG_OOM_ADJUST_FILE" -a -n "$PG_CHILD_OOM_SCORE_ADJ" ]
then
DAEMON_ENV="PG_OOM_ADJUST_FILE=$PG_OOM_ADJUST_FILE PG_OOM_ADJUST_VALUE=$PG_CHILD_OOM_SCORE_ADJ"
fi
diff --git a/contrib/test_decoding/Makefile b/contrib/test_decoding/Makefile
index 438be44afc..a362e69691 100644
--- a/contrib/test_decoding/Makefile
+++ b/contrib/test_decoding/Makefile
@@ -37,37 +37,36 @@ submake-isolation:
submake-test_decoding:
$(MAKE) -C $(top_builddir)/contrib/test_decoding
-REGRESSCHECKS=ddl rewrite toast permissions decoding_in_xact decoding_into_rel binary prepared
+REGRESSCHECKS=ddl rewrite toast permissions decoding_in_xact decoding_into_rel \
+ binary prepared replorigin
-regresscheck: all | submake-regress submake-test_decoding
+regresscheck: | submake-regress submake-test_decoding temp-install
$(MKDIR_P) regression_output
$(pg_regress_check) \
--temp-config $(top_srcdir)/contrib/test_decoding/logical.conf \
- --temp-install=./tmp_check \
- --extra-install=contrib/test_decoding \
+ --temp-instance=./tmp_check \
--outputdir=./regression_output \
$(REGRESSCHECKS)
-regresscheck-install-force: | submake-regress submake-test_decoding
+regresscheck-install-force: | submake-regress submake-test_decoding temp-install
$(pg_regress_installcheck) \
- --extra-install=contrib/test_decoding \
$(REGRESSCHECKS)
ISOLATIONCHECKS=mxact delayed_startup ondisk_startup concurrent_ddl_dml
-isolationcheck: all | submake-isolation submake-test_decoding
+isolationcheck: | submake-isolation submake-test_decoding temp-install
$(MKDIR_P) isolation_output
$(pg_isolation_regress_check) \
--temp-config $(top_srcdir)/contrib/test_decoding/logical.conf \
- --extra-install=contrib/test_decoding \
--outputdir=./isolation_output \
$(ISOLATIONCHECKS)
-isolationcheck-install-force: all | submake-isolation submake-test_decoding
+isolationcheck-install-force: all | submake-isolation submake-test_decoding temp-install
$(pg_isolation_regress_installcheck) \
- --extra-install=contrib/test_decoding \
$(ISOLATIONCHECKS)
PHONY: submake-test_decoding submake-regress check \
regresscheck regresscheck-install-force \
isolationcheck isolationcheck-install-force
+
+temp-install: EXTRA_INSTALL=contrib/test_decoding
diff --git a/contrib/test_decoding/expected/ddl.out b/contrib/test_decoding/expected/ddl.out
index 780120d731..3283fdfcd7 100644
--- a/contrib/test_decoding/expected/ddl.out
+++ b/contrib/test_decoding/expected/ddl.out
@@ -148,6 +148,50 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
COMMIT
(9 rows)
+-- ON CONFLICT DO UPDATE support
+BEGIN;
+INSERT INTO replication_example(id, somedata, somenum) SELECT i, i, i FROM generate_series(-15, 15) i
+ ON CONFLICT (id) DO UPDATE SET somenum = excluded.somenum + 1;
+COMMIT;
+/* display results */
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+ data
+--------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:-15 somedata[integer]:-15 somenum[integer]:-15 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-14 somedata[integer]:-14 somenum[integer]:-14 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-13 somedata[integer]:-13 somenum[integer]:-13 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-12 somedata[integer]:-12 somenum[integer]:-12 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-11 somedata[integer]:-11 somenum[integer]:-11 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-10 somedata[integer]:-10 somenum[integer]:-10 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-9 somedata[integer]:-9 somenum[integer]:-9 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-8 somedata[integer]:-8 somenum[integer]:-8 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-7 somedata[integer]:-7 somenum[integer]:-7 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-6 somedata[integer]:-6 somenum[integer]:-6 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-5 somedata[integer]:-5 somenum[integer]:-5 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-4 somedata[integer]:-4 somenum[integer]:-4 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-3 somedata[integer]:-3 somenum[integer]:-3 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-2 somedata[integer]:-2 somenum[integer]:-2 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:-1 somedata[integer]:-1 somenum[integer]:-1 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: INSERT: id[integer]:0 somedata[integer]:0 somenum[integer]:0 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:1 somedata[integer]:1 somenum[integer]:2 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:2 somedata[integer]:1 somenum[integer]:3 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:3 somedata[integer]:2 somenum[integer]:4 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:4 somedata[integer]:2 somenum[integer]:5 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:5 somedata[integer]:2 somenum[integer]:6 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:6 somedata[integer]:2 somenum[integer]:7 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:7 somedata[integer]:3 somenum[integer]:8 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:8 somedata[integer]:3 somenum[integer]:9 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:9 somedata[integer]:3 somenum[integer]:10 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:10 somedata[integer]:4 somenum[integer]:11 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:11 somedata[integer]:5 somenum[integer]:12 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:12 somedata[integer]:6 somenum[integer]:13 zaphod1[integer]:null zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:13 somedata[integer]:6 somenum[integer]:14 zaphod1[integer]:1 zaphod2[integer]:null
+ table public.replication_example: UPDATE: id[integer]:14 somedata[integer]:6 somenum[integer]:15 zaphod1[integer]:null zaphod2[integer]:1
+ table public.replication_example: UPDATE: id[integer]:15 somedata[integer]:6 somenum[integer]:16 zaphod1[integer]:2 zaphod2[integer]:null
+ COMMIT
+(33 rows)
+
-- hide changes bc of oid visible in full table rewrites
CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
INSERT INTO tr_unique(data) VALUES(10);
@@ -196,6 +240,22 @@ ORDER BY 1,2;
20467 | table public.tr_etoomuch: DELETE: id[integer]:1 | table public.tr_etoomuch: UPDATE: id[integer]:9999 data[integer]:-9999
(3 rows)
+-- check that a large, spooled, upsert works
+INSERT INTO tr_etoomuch (id, data)
+SELECT g.i, -g.i FROM generate_series(8000, 12000) g(i)
+ON CONFLICT(id) DO UPDATE SET data = EXCLUDED.data;
+SELECT substring(data, 1, 29), count(*)
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1')
+GROUP BY 1
+ORDER BY min(location - '0/0');
+ substring | count
+-------------------------------+-------
+ BEGIN | 1
+ table public.tr_etoomuch: UPD | 2235
+ table public.tr_etoomuch: INS | 1766
+ COMMIT | 1
+(4 rows)
+
/*
* check whether we decode subtransactions correctly in relation with each
* other
@@ -603,7 +663,7 @@ SELECT pg_drop_replication_slot('regression_slot');
/* check that the slot is gone */
SELECT * FROM pg_replication_slots;
- slot_name | plugin | slot_type | datoid | database | active | xmin | catalog_xmin | restart_lsn
------------+--------+-----------+--------+----------+--------+------+--------------+-------------
+ slot_name | plugin | slot_type | datoid | database | active | active_pid | xmin | catalog_xmin | restart_lsn
+-----------+--------+-----------+--------+----------+--------+------------+------+--------------+-------------
(0 rows)
diff --git a/contrib/test_decoding/expected/decoding_in_xact.out b/contrib/test_decoding/expected/decoding_in_xact.out
index 456840886a..ab4d3aee72 100644
--- a/contrib/test_decoding/expected/decoding_in_xact.out
+++ b/contrib/test_decoding/expected/decoding_in_xact.out
@@ -11,7 +11,7 @@ SELECT txid_current() = 0;
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
ERROR: cannot create logical replication slot in transaction that has performed writes
ROLLBACK;
--- fail because we're creating a slot while in an subxact whose topxact has a xid
+-- fail because we're creating a slot while in a subxact whose topxact has an xid
BEGIN;
SELECT txid_current() = 0;
?column?
diff --git a/contrib/test_decoding/expected/replorigin.out b/contrib/test_decoding/expected/replorigin.out
new file mode 100644
index 0000000000..c0f512579c
--- /dev/null
+++ b/contrib/test_decoding/expected/replorigin.out
@@ -0,0 +1,141 @@
+-- predictability
+SET synchronous_commit = on;
+CREATE TABLE origin_tbl(id serial primary key, data text);
+CREATE TABLE target_tbl(id serial primary key, data text);
+SELECT pg_replication_origin_create('test_decoding: regression_slot');
+ pg_replication_origin_create
+------------------------------
+ 1
+(1 row)
+
+-- ensure duplicate creations fail
+SELECT pg_replication_origin_create('test_decoding: regression_slot');
+ERROR: duplicate key value violates unique constraint "pg_replication_origin_roname_index"
+DETAIL: Key (roname)=(test_decoding: regression_slot) already exists.
+--ensure deletions work (once)
+SELECT pg_replication_origin_create('test_decoding: temp');
+ pg_replication_origin_create
+------------------------------
+ 2
+(1 row)
+
+SELECT pg_replication_origin_drop('test_decoding: temp');
+ pg_replication_origin_drop
+----------------------------
+
+(1 row)
+
+SELECT pg_replication_origin_drop('test_decoding: temp');
+ERROR: cache lookup failed for replication origin 'test_decoding: temp'
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+-- origin tx
+INSERT INTO origin_tbl(data) VALUES ('will be replicated and decoded and decoded again');
+INSERT INTO target_tbl(data)
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+-- as is normal, the insert into target_tbl shows up
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+ data
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ table public.target_tbl: INSERT: id[integer]:1 data[text]:'BEGIN'
+ table public.target_tbl: INSERT: id[integer]:2 data[text]:'table public.origin_tbl: INSERT: id[integer]:1 data[text]:''will be replicated and decoded and decoded again'''
+ table public.target_tbl: INSERT: id[integer]:3 data[text]:'COMMIT'
+ COMMIT
+(5 rows)
+
+INSERT INTO origin_tbl(data) VALUES ('will be replicated, but not decoded again');
+-- mark session as replaying
+SELECT pg_replication_origin_session_setup('test_decoding: regression_slot');
+ pg_replication_origin_session_setup
+-------------------------------------
+
+(1 row)
+
+-- ensure we prevent duplicate setup
+SELECT pg_replication_origin_session_setup('test_decoding: regression_slot');
+ERROR: cannot setup replication origin when one is already setup
+BEGIN;
+-- setup transaction origin
+SELECT pg_replication_origin_xact_setup('0/aabbccdd', '2013-01-01 00:00');
+ pg_replication_origin_xact_setup
+----------------------------------
+
+(1 row)
+
+INSERT INTO target_tbl(data)
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1');
+COMMIT;
+-- check replication progress for the session is correct
+SELECT pg_replication_origin_session_progress(false);
+ pg_replication_origin_session_progress
+----------------------------------------
+ 0/AABBCCDD
+(1 row)
+
+SELECT pg_replication_origin_session_progress(true);
+ pg_replication_origin_session_progress
+----------------------------------------
+ 0/AABBCCDD
+(1 row)
+
+SELECT pg_replication_origin_session_reset();
+ pg_replication_origin_session_reset
+-------------------------------------
+
+(1 row)
+
+SELECT local_id, external_id, remote_lsn, local_lsn <> '0/0' FROM pg_replication_origin_status;
+ local_id | external_id | remote_lsn | ?column?
+----------+--------------------------------+------------+----------
+ 1 | test_decoding: regression_slot | 0/AABBCCDD | t
+(1 row)
+
+-- check replication progress identified by name is correct
+SELECT pg_replication_origin_progress('test_decoding: regression_slot', false);
+ pg_replication_origin_progress
+--------------------------------
+ 0/AABBCCDD
+(1 row)
+
+SELECT pg_replication_origin_progress('test_decoding: regression_slot', true);
+ pg_replication_origin_progress
+--------------------------------
+ 0/AABBCCDD
+(1 row)
+
+-- ensure reset requires previously setup state
+SELECT pg_replication_origin_session_reset();
+ERROR: no replication origin is configured
+-- and magically the replayed xact will be filtered!
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1');
+ data
+------
+(0 rows)
+
+--but new original changes still show up
+INSERT INTO origin_tbl(data) VALUES ('will be replicated');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1');
+ data
+--------------------------------------------------------------------------------
+ BEGIN
+ table public.origin_tbl: INSERT: id[integer]:3 data[text]:'will be replicated'
+ COMMIT
+(3 rows)
+
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+SELECT pg_replication_origin_drop('test_decoding: regression_slot');
+ pg_replication_origin_drop
+----------------------------
+
+(1 row)
+
diff --git a/contrib/test_decoding/expected/toast.out b/contrib/test_decoding/expected/toast.out
index 0a850b7acd..735b14c978 100644
--- a/contrib/test_decoding/expected/toast.out
+++ b/contrib/test_decoding/expected/toast.out
@@ -23,6 +23,10 @@ INSERT INTO xpto (toasted_col2) SELECT repeat(string_agg(to_char(g.i, 'FM0000'),
-- update of existing column
UPDATE xpto SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i)) WHERE id = 1;
UPDATE xpto SET rand1 = 123.456 WHERE id = 1;
+-- updating external via INSERT ... ON CONFLICT DO UPDATE
+INSERT INTO xpto(id, toasted_col2) VALUES (2, 'toasted2-upsert')
+ON CONFLICT (id)
+DO UPDATE SET toasted_col2 = EXCLUDED.toasted_col2 || xpto.toasted_col2;
DELETE FROM xpto WHERE id = 1;
DROP TABLE IF EXISTS toasted_key;
NOTICE: table "toasted_key" does not exist, skipping
@@ -64,6 +68,9 @@ SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot',
table public.xpto: UPDATE: id[integer]:1 toasted_col1[text]:unchanged-toast-datum rand1[double precision]:123.456 toasted_col2[text]:unchanged-toast-datum rand2[double precision]:1578
COMMIT
BEGIN
+ table public.xpto: UPDATE: id[integer]:2 toasted_col1[text]:null rand1[double precision]:3077 toasted_col2[text]:'toasted2-upsert00010002000300040005000600070008000900100011001200130014001500160017001
+ COMMIT
+ BEGIN
table public.xpto: DELETE: id[integer]:1
COMMIT
BEGIN
@@ -283,7 +290,7 @@ SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot',
table public.toasted_copy: INSERT: id[integer]:202 data[text]:'untoasted199'
table public.toasted_copy: INSERT: id[integer]:203 data[text]:'untoasted200'
COMMIT
-(232 rows)
+(235 rows)
SELECT pg_drop_replication_slot('regression_slot');
pg_drop_replication_slot
diff --git a/contrib/test_decoding/specs/ondisk_startup.spec b/contrib/test_decoding/specs/ondisk_startup.spec
index 39c4a223ae..8223705639 100644
--- a/contrib/test_decoding/specs/ondisk_startup.spec
+++ b/contrib/test_decoding/specs/ondisk_startup.spec
@@ -36,7 +36,7 @@ step "s3txid" { BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS
step "s3c" { COMMIT; }
# Force usage of ondisk snapshot by starting and not finishing a
-# transaction with a assigned xid after consistency has been
+# transaction with an assigned xid after consistency has been
# reached. In combination with a checkpoint forcing a snapshot to be
# written and a new restart point computed that'll lead to the usage
# of the snapshot.
diff --git a/contrib/test_decoding/sql/ddl.sql b/contrib/test_decoding/sql/ddl.sql
index 03314d18ac..9c0502f155 100644
--- a/contrib/test_decoding/sql/ddl.sql
+++ b/contrib/test_decoding/sql/ddl.sql
@@ -84,6 +84,15 @@ COMMIT;
-- show changes
SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+-- ON CONFLICT DO UPDATE support
+BEGIN;
+INSERT INTO replication_example(id, somedata, somenum) SELECT i, i, i FROM generate_series(-15, 15) i
+ ON CONFLICT (id) DO UPDATE SET somenum = excluded.somenum + 1;
+COMMIT;
+
+/* display results */
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+
-- hide changes bc of oid visible in full table rewrites
CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
INSERT INTO tr_unique(data) VALUES(10);
@@ -114,6 +123,16 @@ FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids',
GROUP BY substring(data, 1, 24)
ORDER BY 1,2;
+-- check that a large, spooled, upsert works
+INSERT INTO tr_etoomuch (id, data)
+SELECT g.i, -g.i FROM generate_series(8000, 12000) g(i)
+ON CONFLICT(id) DO UPDATE SET data = EXCLUDED.data;
+
+SELECT substring(data, 1, 29), count(*)
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1')
+GROUP BY 1
+ORDER BY min(location - '0/0');
+
/*
* check whether we decode subtransactions correctly in relation with each
* other
diff --git a/contrib/test_decoding/sql/decoding_in_xact.sql b/contrib/test_decoding/sql/decoding_in_xact.sql
index 990f61885e..b524eb9a6e 100644
--- a/contrib/test_decoding/sql/decoding_in_xact.sql
+++ b/contrib/test_decoding/sql/decoding_in_xact.sql
@@ -7,7 +7,7 @@ SELECT txid_current() = 0;
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
ROLLBACK;
--- fail because we're creating a slot while in an subxact whose topxact has a xid
+-- fail because we're creating a slot while in a subxact whose topxact has an xid
BEGIN;
SELECT txid_current() = 0;
SAVEPOINT barf;
diff --git a/contrib/test_decoding/sql/replorigin.sql b/contrib/test_decoding/sql/replorigin.sql
new file mode 100644
index 0000000000..e12404e106
--- /dev/null
+++ b/contrib/test_decoding/sql/replorigin.sql
@@ -0,0 +1,64 @@
+-- predictability
+SET synchronous_commit = on;
+
+CREATE TABLE origin_tbl(id serial primary key, data text);
+CREATE TABLE target_tbl(id serial primary key, data text);
+
+SELECT pg_replication_origin_create('test_decoding: regression_slot');
+-- ensure duplicate creations fail
+SELECT pg_replication_origin_create('test_decoding: regression_slot');
+
+--ensure deletions work (once)
+SELECT pg_replication_origin_create('test_decoding: temp');
+SELECT pg_replication_origin_drop('test_decoding: temp');
+SELECT pg_replication_origin_drop('test_decoding: temp');
+
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+
+-- origin tx
+INSERT INTO origin_tbl(data) VALUES ('will be replicated and decoded and decoded again');
+INSERT INTO target_tbl(data)
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+
+-- as is normal, the insert into target_tbl shows up
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+
+INSERT INTO origin_tbl(data) VALUES ('will be replicated, but not decoded again');
+
+-- mark session as replaying
+SELECT pg_replication_origin_session_setup('test_decoding: regression_slot');
+
+-- ensure we prevent duplicate setup
+SELECT pg_replication_origin_session_setup('test_decoding: regression_slot');
+
+BEGIN;
+-- setup transaction origin
+SELECT pg_replication_origin_xact_setup('0/aabbccdd', '2013-01-01 00:00');
+INSERT INTO target_tbl(data)
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1');
+COMMIT;
+
+-- check replication progress for the session is correct
+SELECT pg_replication_origin_session_progress(false);
+SELECT pg_replication_origin_session_progress(true);
+
+SELECT pg_replication_origin_session_reset();
+
+SELECT local_id, external_id, remote_lsn, local_lsn <> '0/0' FROM pg_replication_origin_status;
+
+-- check replication progress identified by name is correct
+SELECT pg_replication_origin_progress('test_decoding: regression_slot', false);
+SELECT pg_replication_origin_progress('test_decoding: regression_slot', true);
+
+-- ensure reset requires previously setup state
+SELECT pg_replication_origin_session_reset();
+
+-- and magically the replayed xact will be filtered!
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1');
+
+--but new original changes still show up
+INSERT INTO origin_tbl(data) VALUES ('will be replicated');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1');
+
+SELECT pg_drop_replication_slot('regression_slot');
+SELECT pg_replication_origin_drop('test_decoding: regression_slot');
diff --git a/contrib/test_decoding/sql/toast.sql b/contrib/test_decoding/sql/toast.sql
index 09293865df..26d6b4fbdd 100644
--- a/contrib/test_decoding/sql/toast.sql
+++ b/contrib/test_decoding/sql/toast.sql
@@ -25,6 +25,11 @@ UPDATE xpto SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_s
UPDATE xpto SET rand1 = 123.456 WHERE id = 1;
+-- updating external via INSERT ... ON CONFLICT DO UPDATE
+INSERT INTO xpto(id, toasted_col2) VALUES (2, 'toasted2-upsert')
+ON CONFLICT (id)
+DO UPDATE SET toasted_col2 = EXCLUDED.toasted_col2 || xpto.toasted_col2;
+
DELETE FROM xpto WHERE id = 1;
DROP TABLE IF EXISTS toasted_key;
diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c
index 963d5df9da..32d5743018 100644
--- a/contrib/test_decoding/test_decoding.c
+++ b/contrib/test_decoding/test_decoding.c
@@ -21,6 +21,7 @@
#include "replication/output_plugin.h"
#include "replication/logical.h"
+#include "replication/origin.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
@@ -43,6 +44,7 @@ typedef struct
bool include_timestamp;
bool skip_empty_xacts;
bool xact_wrote_changes;
+ bool only_local;
} TestDecodingData;
static void pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
@@ -51,14 +53,16 @@ static void pg_decode_shutdown(LogicalDecodingContext *ctx);
static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn);
static void pg_output_begin(LogicalDecodingContext *ctx,
- TestDecodingData *data,
- ReorderBufferTXN *txn,
- bool last_write);
+ TestDecodingData *data,
+ ReorderBufferTXN *txn,
+ bool last_write);
static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
static void pg_decode_change(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
+static bool pg_decode_filter(LogicalDecodingContext *ctx,
+ RepOriginId origin_id);
void
_PG_init(void)
@@ -76,6 +80,7 @@ _PG_output_plugin_init(OutputPluginCallbacks *cb)
cb->begin_cb = pg_decode_begin_txn;
cb->change_cb = pg_decode_change;
cb->commit_cb = pg_decode_commit_txn;
+ cb->filter_by_origin_cb = pg_decode_filter;
cb->shutdown_cb = pg_decode_shutdown;
}
@@ -97,6 +102,7 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
data->include_xids = true;
data->include_timestamp = false;
data->skip_empty_xacts = false;
+ data->only_local = false;
ctx->output_plugin_private = data;
@@ -155,6 +161,17 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
errmsg("could not parse value \"%s\" for parameter \"%s\"",
strVal(elem->arg), elem->defname)));
}
+ else if (strcmp(elem->defname, "only-local") == 0)
+ {
+
+ if (elem->arg == NULL)
+ data->only_local = true;
+ else if (!parse_bool(strVal(elem->arg), &data->only_local))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("could not parse value \"%s\" for parameter \"%s\"",
+ strVal(elem->arg), elem->defname)));
+ }
else
{
ereport(ERROR,
@@ -223,6 +240,17 @@ pg_decode_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
OutputPluginWrite(ctx, true);
}
+static bool
+pg_decode_filter(LogicalDecodingContext *ctx,
+ RepOriginId origin_id)
+{
+ TestDecodingData *data = ctx->output_plugin_private;
+
+ if (data->only_local && origin_id != InvalidRepOriginId)
+ return true;
+ return false;
+}
+
/*
* Print literal `outputstr' already represented as string of type `typid'
* into stringbuf `s'.
@@ -391,7 +419,7 @@ pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
get_namespace_name(
get_rel_namespace(RelationGetRelid(relation))),
NameStr(class_form->relname)));
- appendStringInfoString(ctx->out, ":");
+ appendStringInfoChar(ctx->out, ':');
switch (change->action)
{
diff --git a/contrib/tsm_system_rows/.gitignore b/contrib/tsm_system_rows/.gitignore
new file mode 100644
index 0000000000..5dcb3ff972
--- /dev/null
+++ b/contrib/tsm_system_rows/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/contrib/tsm_system_rows/Makefile b/contrib/tsm_system_rows/Makefile
new file mode 100644
index 0000000000..700ab276db
--- /dev/null
+++ b/contrib/tsm_system_rows/Makefile
@@ -0,0 +1,21 @@
+# src/test/modules/tsm_system_rows/Makefile
+
+MODULE_big = tsm_system_rows
+OBJS = tsm_system_rows.o $(WIN32RES)
+PGFILEDESC = "tsm_system_rows - SYSTEM TABLESAMPLE method which accepts number of rows as a limit"
+
+EXTENSION = tsm_system_rows
+DATA = tsm_system_rows--1.0.sql
+
+REGRESS = tsm_system_rows
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/tsm_system_rows
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/tsm_system_rows/expected/tsm_system_rows.out b/contrib/tsm_system_rows/expected/tsm_system_rows.out
new file mode 100644
index 0000000000..7e0f72b02b
--- /dev/null
+++ b/contrib/tsm_system_rows/expected/tsm_system_rows.out
@@ -0,0 +1,31 @@
+CREATE EXTENSION tsm_system_rows;
+CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages
+INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) FROM generate_series(0, 30) s(i) ORDER BY i;
+ANALYZE test_tablesample;
+SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (1000);
+ count
+-------
+ 31
+(1 row)
+
+SELECT id FROM test_tablesample TABLESAMPLE system_rows (8) REPEATABLE (5432);
+ id
+----
+ 7
+ 14
+ 21
+ 28
+ 4
+ 11
+ 18
+ 25
+(8 rows)
+
+EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE system_rows (20) REPEATABLE (10);
+ QUERY PLAN
+-----------------------------------------------------------------------------------
+ Sample Scan (system_rows) on test_tablesample (cost=0.00..80.20 rows=20 width=4)
+(1 row)
+
+-- done
+DROP TABLE test_tablesample CASCADE;
diff --git a/contrib/tsm_system_rows/sql/tsm_system_rows.sql b/contrib/tsm_system_rows/sql/tsm_system_rows.sql
new file mode 100644
index 0000000000..bd812220ed
--- /dev/null
+++ b/contrib/tsm_system_rows/sql/tsm_system_rows.sql
@@ -0,0 +1,14 @@
+CREATE EXTENSION tsm_system_rows;
+
+CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages
+
+INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) FROM generate_series(0, 30) s(i) ORDER BY i;
+ANALYZE test_tablesample;
+
+SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (1000);
+SELECT id FROM test_tablesample TABLESAMPLE system_rows (8) REPEATABLE (5432);
+
+EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE system_rows (20) REPEATABLE (10);
+
+-- done
+DROP TABLE test_tablesample CASCADE;
diff --git a/contrib/tsm_system_rows/tsm_system_rows--1.0.sql b/contrib/tsm_system_rows/tsm_system_rows--1.0.sql
new file mode 100644
index 0000000000..1a29c584b5
--- /dev/null
+++ b/contrib/tsm_system_rows/tsm_system_rows--1.0.sql
@@ -0,0 +1,44 @@
+/* src/test/modules/tablesample/tsm_system_rows--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION tsm_system_rows" to load this file. \quit
+
+CREATE FUNCTION tsm_system_rows_init(internal, int4, int4)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_rows_nextblock(internal)
+RETURNS int4
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_rows_nexttuple(internal, int4, int2)
+RETURNS int2
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_rows_examinetuple(internal, int4, internal, bool)
+RETURNS bool
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_rows_end(internal)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_rows_reset(internal)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_rows_cost(internal, internal, internal, internal, internal, internal, internal)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+INSERT INTO pg_tablesample_method VALUES('system_rows', false, true,
+ 'tsm_system_rows_init', 'tsm_system_rows_nextblock',
+ 'tsm_system_rows_nexttuple', 'tsm_system_rows_examinetuple',
+ 'tsm_system_rows_end', 'tsm_system_rows_reset', 'tsm_system_rows_cost');
diff --git a/contrib/tsm_system_rows/tsm_system_rows.c b/contrib/tsm_system_rows/tsm_system_rows.c
new file mode 100644
index 0000000000..e325eaff49
--- /dev/null
+++ b/contrib/tsm_system_rows/tsm_system_rows.c
@@ -0,0 +1,271 @@
+/*-------------------------------------------------------------------------
+ *
+ * tsm_system_rows.c
+ * interface routines for system_rows tablesample method
+ *
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *	  contrib/tsm_system_rows/tsm_system_rows.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "fmgr.h"
+
+#include "access/tablesample.h"
+#include "access/relscan.h"
+#include "miscadmin.h"
+#include "nodes/execnodes.h"
+#include "nodes/relation.h"
+#include "optimizer/clauses.h"
+#include "storage/bufmgr.h"
+#include "utils/sampling.h"
+
+PG_MODULE_MAGIC;
+
+/*
+ * State
+ */
+typedef struct
+{
+ SamplerRandomState randstate;
+ uint32 seed; /* random seed */
+	BlockNumber nblocks;		/* number of blocks in relation */
+ int32 ntuples; /* number of tuples to return */
+ int32 donetuples; /* tuples already returned */
+ OffsetNumber lt; /* last tuple returned from current block */
+ BlockNumber step; /* step size */
+ BlockNumber lb; /* last block visited */
+ BlockNumber doneblocks; /* number of already returned blocks */
+} SystemSamplerData;
+
+
+PG_FUNCTION_INFO_V1(tsm_system_rows_init);
+PG_FUNCTION_INFO_V1(tsm_system_rows_nextblock);
+PG_FUNCTION_INFO_V1(tsm_system_rows_nexttuple);
+PG_FUNCTION_INFO_V1(tsm_system_rows_examinetuple);
+PG_FUNCTION_INFO_V1(tsm_system_rows_end);
+PG_FUNCTION_INFO_V1(tsm_system_rows_reset);
+PG_FUNCTION_INFO_V1(tsm_system_rows_cost);
+
+static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
+
+/*
+ * Initializes the state.
+ */
+Datum
+tsm_system_rows_init(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ int32 ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
+
+ if (ntuples < 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ errmsg("invalid sample size"),
+ errhint("Sample size must be positive integer value.")));
+
+ sampler = palloc0(sizeof(SystemSamplerData));
+
+ /* Remember initial values for reinit */
+ sampler->seed = seed;
+ sampler->nblocks = scan->rs_nblocks;
+ sampler->ntuples = ntuples;
+ sampler->donetuples = 0;
+ sampler->lt = InvalidOffsetNumber;
+ sampler->doneblocks = 0;
+
+ sampler_random_init_state(sampler->seed, sampler->randstate);
+
+ /* Find relative prime as step size for linear probing. */
+ sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
+ /*
+ * Randomize start position so that blocks close to step size don't have
+ * higher probability of being chosen on very short scan.
+ */
+ sampler->lb = sampler_random_fract(sampler->randstate) *
+ (sampler->nblocks / sampler->step);
+
+ tsdesc->tsmdata = (void *) sampler;
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Get next block number or InvalidBlockNumber when we're done.
+ *
+ * Uses linear probing algorithm for picking next block.
+ */
+Datum
+tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+
+ sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
+ sampler->doneblocks++;
+
+ /* All blocks have been read, we're done */
+ if (sampler->doneblocks > sampler->nblocks ||
+ sampler->donetuples >= sampler->ntuples)
+ PG_RETURN_UINT32(InvalidBlockNumber);
+
+ PG_RETURN_UINT32(sampler->lb);
+}
+
+/*
+ * Get next tuple offset in current block or InvalidOffsetNumber if we are done
+ * with this block.
+ */
+Datum
+tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
+
+ if (tupoffset == InvalidOffsetNumber)
+ tupoffset = FirstOffsetNumber;
+ else
+ tupoffset++;
+
+ if (tupoffset > maxoffset ||
+ sampler->donetuples >= sampler->ntuples)
+ tupoffset = InvalidOffsetNumber;
+
+ sampler->lt = tupoffset;
+
+ PG_RETURN_UINT16(tupoffset);
+}
+
+/*
+ * Examine tuple and decide if it should be returned.
+ */
+Datum
+tsm_system_rows_examinetuple(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ bool visible = PG_GETARG_BOOL(3);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+
+ if (!visible)
+ PG_RETURN_BOOL(false);
+
+ sampler->donetuples++;
+
+ PG_RETURN_BOOL(true);
+}
+
+/*
+ * Cleanup method.
+ */
+Datum
+tsm_system_rows_end(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+
+ pfree(tsdesc->tsmdata);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Reset state (called by ReScan).
+ */
+Datum
+tsm_system_rows_reset(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+
+ sampler->lt = InvalidOffsetNumber;
+ sampler->donetuples = 0;
+ sampler->doneblocks = 0;
+
+ sampler_random_init_state(sampler->seed, sampler->randstate);
+ sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+ sampler->lb = sampler_random_fract(sampler->randstate) * (sampler->nblocks / sampler->step);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Costing function.
+ */
+Datum
+tsm_system_rows_cost(PG_FUNCTION_ARGS)
+{
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *limitnode;
+ int32 ntuples;
+
+ limitnode = linitial(args);
+ limitnode = estimate_expression_value(root, limitnode);
+
+ if (IsA(limitnode, RelabelType))
+ limitnode = (Node *) ((RelabelType *) limitnode)->arg;
+
+ if (IsA(limitnode, Const))
+ ntuples = DatumGetInt32(((Const *) limitnode)->constvalue);
+ else
+ {
+ /* Default ntuples if the estimation didn't return Const. */
+ ntuples = 1000;
+ }
+
+ *pages = Min(baserel->pages, ntuples);
+ *tuples = ntuples;
+ path->rows = *tuples;
+
+ PG_RETURN_VOID();
+}
+
+
+static uint32
+gcd(uint32 a, uint32 b)
+{
+ uint32 c;
+
+ while (a != 0)
+ {
+ c = a;
+ a = b % a;
+ b = c;
+ }
+
+ return b;
+}
+
+static uint32
+random_relative_prime(uint32 n, SamplerRandomState randstate)
+{
+ /* Pick random starting number, with some limits on what it can be. */
+ uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+ t;
+
+ /*
+ * This should only take 2 or 3 iterations as the probability of 2 numbers
+ * being relatively prime is ~61%.
+ */
+ while ((t = gcd(r, n)) > 1)
+ {
+ CHECK_FOR_INTERRUPTS();
+ r /= t;
+ }
+
+ return r;
+}
diff --git a/contrib/tsm_system_rows/tsm_system_rows.control b/contrib/tsm_system_rows/tsm_system_rows.control
new file mode 100644
index 0000000000..84ea7adb49
--- /dev/null
+++ b/contrib/tsm_system_rows/tsm_system_rows.control
@@ -0,0 +1,5 @@
+# tsm_system_rows extension
+comment = 'SYSTEM TABLESAMPLE method which accepts number of rows as a limit'
+default_version = '1.0'
+module_pathname = '$libdir/tsm_system_rows'
+relocatable = true
diff --git a/contrib/tsm_system_time/.gitignore b/contrib/tsm_system_time/.gitignore
new file mode 100644
index 0000000000..5dcb3ff972
--- /dev/null
+++ b/contrib/tsm_system_time/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/contrib/tsm_system_time/Makefile b/contrib/tsm_system_time/Makefile
new file mode 100644
index 0000000000..c42c1c6bb6
--- /dev/null
+++ b/contrib/tsm_system_time/Makefile
@@ -0,0 +1,21 @@
+# contrib/tsm_system_time/Makefile
+
+MODULE_big = tsm_system_time
+OBJS = tsm_system_time.o $(WIN32RES)
+PGFILEDESC = "tsm_system_time - SYSTEM TABLESAMPLE method which accepts time in milliseconds as a limit"
+
+EXTENSION = tsm_system_time
+DATA = tsm_system_time--1.0.sql
+
+REGRESS = tsm_system_time
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/tsm_system_time
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/tsm_system_time/expected/tsm_system_time.out b/contrib/tsm_system_time/expected/tsm_system_time.out
new file mode 100644
index 0000000000..32ad03c4bd
--- /dev/null
+++ b/contrib/tsm_system_time/expected/tsm_system_time.out
@@ -0,0 +1,54 @@
+CREATE EXTENSION tsm_system_time;
+CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages
+INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) FROM generate_series(0, 30) s(i) ORDER BY i;
+ANALYZE test_tablesample;
+SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (1000);
+ count
+-------
+ 31
+(1 row)
+
+SELECT id FROM test_tablesample TABLESAMPLE system_time (1000) REPEATABLE (5432);
+ id
+----
+ 7
+ 14
+ 21
+ 28
+ 4
+ 11
+ 18
+ 25
+ 1
+ 8
+ 15
+ 22
+ 29
+ 5
+ 12
+ 19
+ 26
+ 2
+ 9
+ 16
+ 23
+ 30
+ 6
+ 13
+ 20
+ 27
+ 3
+ 10
+ 17
+ 24
+ 0
+(31 rows)
+
+EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE system_time (100) REPEATABLE (10);
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Sample Scan (system_time) on test_tablesample (cost=0.00..100.25 rows=25 width=4)
+(1 row)
+
+-- done
+DROP TABLE test_tablesample CASCADE;
diff --git a/contrib/tsm_system_time/sql/tsm_system_time.sql b/contrib/tsm_system_time/sql/tsm_system_time.sql
new file mode 100644
index 0000000000..68dbbf98af
--- /dev/null
+++ b/contrib/tsm_system_time/sql/tsm_system_time.sql
@@ -0,0 +1,14 @@
+CREATE EXTENSION tsm_system_time;
+
+CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages
+
+INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) FROM generate_series(0, 30) s(i) ORDER BY i;
+ANALYZE test_tablesample;
+
+SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (1000);
+SELECT id FROM test_tablesample TABLESAMPLE system_time (1000) REPEATABLE (5432);
+
+EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE system_time (100) REPEATABLE (10);
+
+-- done
+DROP TABLE test_tablesample CASCADE;
diff --git a/contrib/tsm_system_time/tsm_system_time--1.0.sql b/contrib/tsm_system_time/tsm_system_time--1.0.sql
new file mode 100644
index 0000000000..1f390d6ed7
--- /dev/null
+++ b/contrib/tsm_system_time/tsm_system_time--1.0.sql
@@ -0,0 +1,39 @@
+/* contrib/tsm_system_time/tsm_system_time--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION tsm_system_time" to load this file. \quit
+
+CREATE FUNCTION tsm_system_time_init(internal, int4, int4)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_time_nextblock(internal)
+RETURNS int4
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_time_nexttuple(internal, int4, int2)
+RETURNS int2
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_time_end(internal)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_time_reset(internal)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION tsm_system_time_cost(internal, internal, internal, internal, internal, internal, internal)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+INSERT INTO pg_tablesample_method VALUES('system_time', false, true,
+ 'tsm_system_time_init', 'tsm_system_time_nextblock',
+ 'tsm_system_time_nexttuple', '-', 'tsm_system_time_end',
+ 'tsm_system_time_reset', 'tsm_system_time_cost');
diff --git a/contrib/tsm_system_time/tsm_system_time.c b/contrib/tsm_system_time/tsm_system_time.c
new file mode 100644
index 0000000000..7708fc0761
--- /dev/null
+++ b/contrib/tsm_system_time/tsm_system_time.c
@@ -0,0 +1,317 @@
+/*-------------------------------------------------------------------------
+ *
+ * tsm_system_time.c
+ * interface routines for system_time tablesample method
+ *
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *	  contrib/tsm_system_time/tsm_system_time.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "fmgr.h"
+
+#include "access/tablesample.h"
+#include "access/relscan.h"
+#include "miscadmin.h"
+#include "nodes/execnodes.h"
+#include "nodes/relation.h"
+#include "optimizer/clauses.h"
+#include "storage/bufmgr.h"
+#include "utils/sampling.h"
+#include "utils/spccache.h"
+#include "utils/timestamp.h"
+
+PG_MODULE_MAGIC;
+
+/*
+ * State
+ */
+typedef struct
+{
+ SamplerRandomState randstate;
+ uint32 seed; /* random seed */
+	BlockNumber nblocks;		/* number of blocks in relation */
+ int32 time; /* time limit for sampling */
+ TimestampTz start_time; /* start time of sampling */
+ TimestampTz end_time; /* end time of sampling */
+ OffsetNumber lt; /* last tuple returned from current block */
+ BlockNumber step; /* step size */
+ BlockNumber lb; /* last block visited */
+ BlockNumber estblocks; /* estimated number of returned blocks
+ * (moving) */
+ BlockNumber doneblocks; /* number of already returned blocks */
+} SystemSamplerData;
+
+
+PG_FUNCTION_INFO_V1(tsm_system_time_init);
+PG_FUNCTION_INFO_V1(tsm_system_time_nextblock);
+PG_FUNCTION_INFO_V1(tsm_system_time_nexttuple);
+PG_FUNCTION_INFO_V1(tsm_system_time_end);
+PG_FUNCTION_INFO_V1(tsm_system_time_reset);
+PG_FUNCTION_INFO_V1(tsm_system_time_cost);
+
+static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
+
+/*
+ * Initializes the state.
+ */
+Datum
+tsm_system_time_init(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
+
+ if (time < 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ errmsg("invalid time limit"),
+ errhint("Time limit must be positive integer value.")));
+
+ sampler = palloc0(sizeof(SystemSamplerData));
+
+ /* Remember initial values for reinit */
+ sampler->seed = seed;
+ sampler->nblocks = scan->rs_nblocks;
+ sampler->lt = InvalidOffsetNumber;
+ sampler->estblocks = 2;
+ sampler->doneblocks = 0;
+ sampler->time = time;
+ sampler->start_time = GetCurrentTimestamp();
+ sampler->end_time = TimestampTzPlusMilliseconds(sampler->start_time,
+ sampler->time);
+
+ sampler_random_init_state(sampler->seed, sampler->randstate);
+
+ /* Find relative prime as step size for linear probing. */
+ sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
+ /*
+ * Randomize start position so that blocks close to step size don't have
+ * higher probability of being chosen on very short scan.
+ */
+ sampler->lb = sampler_random_fract(sampler->randstate) * (sampler->nblocks / sampler->step);
+
+ tsdesc->tsmdata = (void *) sampler;
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Get next block number or InvalidBlockNumber when we're done.
+ *
+ * Uses linear probing algorithm for picking next block.
+ */
+Datum
+tsm_system_time_nextblock(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+
+ sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
+ sampler->doneblocks++;
+
+ /* All blocks have been read, we're done */
+ if (sampler->doneblocks > sampler->nblocks)
+ PG_RETURN_UINT32(InvalidBlockNumber);
+
+ /*
+ * Update the estimations for time limit at least 10 times per estimated
+ * number of returned blocks to handle variations in block read speed.
+ */
+ if (sampler->doneblocks % Max(sampler->estblocks / 10, 1) == 0)
+ {
+ TimestampTz now = GetCurrentTimestamp();
+ long secs;
+ int usecs;
+ int usecs_remaining;
+ int time_per_block;
+
+ TimestampDifference(sampler->start_time, now, &secs, &usecs);
+ usecs += (int) secs *1000000;
+
+ time_per_block = usecs / sampler->doneblocks;
+
+ /* No time left, end. */
+ TimestampDifference(now, sampler->end_time, &secs, &usecs);
+ if (secs <= 0 && usecs <= 0)
+ PG_RETURN_UINT32(InvalidBlockNumber);
+
+ /* Remaining microseconds */
+ usecs_remaining = usecs + (int) secs *1000000;
+
+ /* Recalculate estimated returned number of blocks */
+ if (time_per_block < usecs_remaining && time_per_block > 0)
+ sampler->estblocks = sampler->time * time_per_block;
+ }
+
+ PG_RETURN_UINT32(sampler->lb);
+}
+
+/*
+ * Get next tuple offset in current block or InvalidOffsetNumber if we are done
+ * with this block.
+ */
+Datum
+tsm_system_time_nexttuple(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
+
+ if (tupoffset == InvalidOffsetNumber)
+ tupoffset = FirstOffsetNumber;
+ else
+ tupoffset++;
+
+ if (tupoffset > maxoffset)
+ tupoffset = InvalidOffsetNumber;
+
+ sampler->lt = tupoffset;
+
+ PG_RETURN_UINT16(tupoffset);
+}
+
+/*
+ * Cleanup method.
+ */
+Datum
+tsm_system_time_end(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+
+ pfree(tsdesc->tsmdata);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Reset state (called by ReScan).
+ */
+Datum
+tsm_system_time_reset(PG_FUNCTION_ARGS)
+{
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+
+ sampler->lt = InvalidOffsetNumber;
+ sampler->start_time = GetCurrentTimestamp();
+ sampler->end_time = TimestampTzPlusMilliseconds(sampler->start_time,
+ sampler->time);
+ sampler->estblocks = 2;
+ sampler->doneblocks = 0;
+
+ sampler_random_init_state(sampler->seed, sampler->randstate);
+ sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+ sampler->lb = sampler_random_fract(sampler->randstate) * (sampler->nblocks / sampler->step);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Costing function.
+ */
+Datum
+tsm_system_time_cost(PG_FUNCTION_ARGS)
+{
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *limitnode;
+ int32 time;
+ BlockNumber relpages;
+ double reltuples;
+ double density;
+ double spc_random_page_cost;
+
+ limitnode = linitial(args);
+ limitnode = estimate_expression_value(root, limitnode);
+
+ if (IsA(limitnode, RelabelType))
+ limitnode = (Node *) ((RelabelType *) limitnode)->arg;
+
+ if (IsA(limitnode, Const))
+ time = DatumGetInt32(((Const *) limitnode)->constvalue);
+ else
+ {
+ /* Default time (1s) if the estimation didn't return Const. */
+ time = 1000;
+ }
+
+ relpages = baserel->pages;
+ reltuples = baserel->tuples;
+
+ /* estimate the tuple density */
+ if (relpages > 0)
+ density = reltuples / (double) relpages;
+ else
+ density = (BLCKSZ - SizeOfPageHeaderData) / baserel->width;
+
+ /*
+	 * We equate the random page cost value with the number of ms it takes to
+	 * read a random page here, which is far from accurate, but we don't have
+	 * anything better on which to base our predicted page reads.
+ */
+ get_tablespace_page_costs(baserel->reltablespace,
+ &spc_random_page_cost,
+ NULL);
+
+ /*
+ * Assumption here is that we'll never read less than 1% of table pages,
+ * this is here mainly because it is much less bad to overestimate than
+ * underestimate and using just spc_random_page_cost will probably lead to
+ * underestimations in general.
+ */
+ *pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100));
+ *tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
+ path->rows = *tuples;
+
+ PG_RETURN_VOID();
+}
+
+static uint32
+gcd(uint32 a, uint32 b)
+{
+ uint32 c;
+
+ while (a != 0)
+ {
+ c = a;
+ a = b % a;
+ b = c;
+ }
+
+ return b;
+}
+
+static uint32
+random_relative_prime(uint32 n, SamplerRandomState randstate)
+{
+ /* Pick random starting number, with some limits on what it can be. */
+ uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+ t;
+
+ /*
+ * This should only take 2 or 3 iterations as the probability of 2 numbers
+ * being relatively prime is ~61%.
+ */
+ while ((t = gcd(r, n)) > 1)
+ {
+ CHECK_FOR_INTERRUPTS();
+ r /= t;
+ }
+
+ return r;
+}
diff --git a/contrib/tsm_system_time/tsm_system_time.control b/contrib/tsm_system_time/tsm_system_time.control
new file mode 100644
index 0000000000..ebcee19d23
--- /dev/null
+++ b/contrib/tsm_system_time/tsm_system_time.control
@@ -0,0 +1,5 @@
+# tsm_system_time extension
+comment = 'SYSTEM TABLESAMPLE method which accepts time in milliseconds as a limit'
+default_version = '1.0'
+module_pathname = '$libdir/tsm_system_time'
+relocatable = true
diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile
index 8bdd26ce28..ac3a142bb2 100644
--- a/doc/src/sgml/Makefile
+++ b/doc/src/sgml/Makefile
@@ -17,6 +17,8 @@
# to want to use.
html:
+NO_TEMP_INSTALL=yes
+
subdir = doc/src/sgml
top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
@@ -331,6 +333,7 @@ endif
installdirs:
$(MKDIR_P) '$(DESTDIR)$(htmldir)'/html $(addprefix '$(DESTDIR)$(mandir)'/man, 1 3 $(sqlmansectnum))
+# If the install used a man directory shared with other applications, this will remove all files.
uninstall:
rm -f '$(DESTDIR)$(htmldir)/html/'* $(addprefix '$(DESTDIR)$(mandir)'/man, 1/* 3/* $(sqlmansectnum)/*)
diff --git a/doc/src/sgml/backup.sgml b/doc/src/sgml/backup.sgml
index e25e0d0edf..def43a21da 100644
--- a/doc/src/sgml/backup.sgml
+++ b/doc/src/sgml/backup.sgml
@@ -836,8 +836,11 @@ SELECT pg_start_backup('label');
<function>pg_start_backup</> creates a <firstterm>backup label</> file,
called <filename>backup_label</>, in the cluster directory with
information about your backup, including the start time and label
- string. The file is critical to the integrity of the backup, should
- you need to restore from it.
+ string. The function also creates a <firstterm>tablespace map</> file,
+ called <filename>tablespace_map</>, in the cluster directory with
+ information about tablespace symbolic links in <filename>pg_tblspc/</>
+ if one or more such link is present. Both files are critical to the
+ integrity of the backup, should you need to restore from it.
</para>
<para>
@@ -965,17 +968,20 @@ SELECT pg_stop_backup();
<para>
It's also worth noting that the <function>pg_start_backup</> function
- makes a file named <filename>backup_label</> in the database cluster
- directory, which is removed by <function>pg_stop_backup</>.
- This file will of course be archived as a part of your backup dump file.
- The backup label file includes the label string you gave to
- <function>pg_start_backup</>, as well as the time at which
- <function>pg_start_backup</> was run, and the name of the starting WAL
- file. In case of confusion it is therefore possible to look inside a
- backup dump file and determine exactly which backup session the dump file
- came from. However, this file is not merely for your information; its
- presence and contents are critical to the proper operation of the system's
- recovery process.
+ makes files named <filename>backup_label</> and
+   <filename>tablespace_map</> in the database cluster directory,
+ which are removed by <function>pg_stop_backup</>. These files will of
+ course be archived as a part of your backup dump file. The backup label
+ file includes the label string you gave to <function>pg_start_backup</>,
+ as well as the time at which <function>pg_start_backup</> was run, and
+ the name of the starting WAL file. In case of confusion it is therefore
+ possible to look inside a backup dump file and determine exactly which
+ backup session the dump file came from. The tablespace map file includes
+ the symbolic link names as they exist in the directory
+ <filename>pg_tblspc/</> and the full path of each symbolic link.
+ These files are not merely for your information; their presence and
+ contents are critical to the proper operation of the system's recovery
+ process.
</para>
<para>
diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml
index 1ac282c57a..4d8fd20c1c 100644
--- a/doc/src/sgml/brin.sgml
+++ b/doc/src/sgml/brin.sgml
@@ -72,7 +72,9 @@
<para>
The <firstterm>minmax</>
operator classes store the minimum and the maximum values appearing
- in the indexed column within the range.
+ in the indexed column within the range. The <firstterm>inclusion</>
+ operator classes store a value which includes the values in the indexed
+ column within the range.
</para>
<table id="brin-builtin-opclasses-table">
@@ -252,6 +254,18 @@
</entry>
</row>
<row>
+ <entry><literal>inet_inclusion_ops</literal></entry>
+ <entry><type>inet</type></entry>
+ <entry>
+ <literal>&amp;&amp;</>
+ <literal>&gt;&gt;</>
+ <literal>&gt;&gt;=</>
+ <literal>&lt;&lt;</literal>
+ <literal>&lt;&lt;=</literal>
+ <literal>=</literal>
+ </entry>
+ </row>
+ <row>
<entry><literal>bpchar_minmax_ops</literal></entry>
<entry><type>character</type></entry>
<entry>
@@ -373,6 +387,25 @@
</entry>
</row>
<row>
+ <entry><literal>range_inclusion_ops</></entry>
+ <entry><type>any range type</type></entry>
+ <entry>
+ <literal>&amp;&amp;</>
+ <literal>&amp;&gt;</>
+ <literal>&amp;&lt;</>
+ <literal>&gt;&gt;</>
+ <literal>&lt;&lt;</>
+ <literal>&lt;@</>
+ <literal>=</>
+ <literal>@&gt;</>
+ <literal>&lt;</literal>
+ <literal>&lt;=</literal>
+ <literal>=</literal>
+ <literal>&gt;=</literal>
+ <literal>&gt;</literal>
+ </entry>
+ </row>
+ <row>
<entry><literal>pg_lsn_minmax_ops</literal></entry>
<entry><type>pg_lsn</type></entry>
<entry>
@@ -383,6 +416,24 @@
<literal>&gt;</literal>
</entry>
</row>
+ <row>
+ <entry><literal>box_inclusion_ops</></entry>
+ <entry><type>box</type></entry>
+ <entry>
+ <literal>&amp;&amp;</>
+ <literal>&amp;&gt;</>
+ <literal>&amp;&lt;</>
+ <literal>&gt;&gt;</>
+ <literal>&lt;&lt;</>
+ <literal>&lt;@</>
+ <literal>~=</>
+ <literal>@&gt;</>
+ <literal>&amp;&gt;|</>
+ <literal>|&amp;&lt;</>
+ <literal>&gt;&gt;|</>
+ <literal>|&lt;&lt;</literal>
+ </entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -428,8 +479,8 @@ typedef struct BrinOpcInfo
/* Opaque pointer for the opclass' private use */
void *oi_opaque;
- /* Type IDs of the stored columns */
- Oid oi_typids[FLEXIBLE_ARRAY_MEMBER];
+ /* Type cache entries of the stored columns */
+ TypeCacheEntry *oi_typcache[FLEXIBLE_ARRAY_MEMBER];
} BrinOpcInfo;
</programlisting>
<structname>BrinOpcInfo</>.<structfield>oi_opaque</> can be used by the
diff --git a/doc/src/sgml/btree-gin.sgml b/doc/src/sgml/btree-gin.sgml
index 42b9e97b72..2b081db9d5 100644
--- a/doc/src/sgml/btree-gin.sgml
+++ b/doc/src/sgml/btree-gin.sgml
@@ -36,7 +36,7 @@
<programlisting>
CREATE TABLE test (a int4);
-- create index
-CREATE INDEX testidx ON test USING gin (a);
+CREATE INDEX testidx ON test USING GIN (a);
-- query
SELECT * FROM test WHERE a &lt; 10;
</programlisting>
diff --git a/doc/src/sgml/btree-gist.sgml b/doc/src/sgml/btree-gist.sgml
index 2275a997ba..f4afc09546 100644
--- a/doc/src/sgml/btree-gist.sgml
+++ b/doc/src/sgml/btree-gist.sgml
@@ -61,7 +61,7 @@
<programlisting>
CREATE TABLE test (a int4);
-- create index
-CREATE INDEX testidx ON test USING gist (a);
+CREATE INDEX testidx ON test USING GIST (a);
-- query
SELECT * FROM test WHERE a &lt; 10;
-- nearest-neighbor search: find the ten entries closest to "42"
@@ -78,7 +78,7 @@ SELECT *, a &lt;-&gt; 42 AS dist FROM test ORDER BY a &lt;-&gt; 42 LIMIT 10;
=&gt; CREATE TABLE zoo (
cage INTEGER,
animal TEXT,
- EXCLUDE USING gist (cage WITH =, animal WITH &lt;&gt;)
+ EXCLUDE USING GIST (cage WITH =, animal WITH &lt;&gt;)
);
=&gt; INSERT INTO zoo VALUES(123, 'zebra');
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index d0b78f2782..b6e0268e3f 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -239,6 +239,16 @@
</row>
<row>
+ <entry><link linkend="catalog-pg-replication-origin"><structname>pg_replication_origin</structname></link></entry>
+ <entry>registered replication origins</entry>
+ </row>
+
+ <row>
+ <entry><link linkend="catalog-pg-replication-origin-status"><structname>pg_replication_origin_status</structname></link></entry>
+ <entry>information about replication origins, including replication progress</entry>
+ </row>
+
+ <row>
<entry><link linkend="catalog-pg-replication-slots"><structname>pg_replication_slots</structname></link></entry>
<entry>replication slot information</entry>
</row>
@@ -269,11 +279,21 @@
</row>
<row>
+ <entry><link linkend="catalog-pg-tablesample-method"><structname>pg_tablesample_method</structname></link></entry>
+ <entry>table sampling methods</entry>
+ </row>
+
+ <row>
<entry><link linkend="catalog-pg-tablespace"><structname>pg_tablespace</structname></link></entry>
<entry>tablespaces within this database cluster</entry>
</row>
<row>
+ <entry><link linkend="catalog-pg-transform"><structname>pg_transform</structname></link></entry>
+ <entry>transforms (data type to procedural language conversions)</entry>
+ </row>
+
+ <row>
<entry><link linkend="catalog-pg-trigger"><structname>pg_trigger</structname></link></entry>
<entry>triggers</entry>
</row>
@@ -1995,11 +2015,11 @@
<entry><type>xid</type></entry>
<entry></entry>
<entry>
- All multitransaction IDs before this one have been replaced by a
+ All multixact IDs before this one have been replaced by a
transaction ID in this table. This is used to track
- whether the table needs to be vacuumed in order to prevent multitransaction ID
- ID wraparound or to allow <literal>pg_clog</> to be shrunk. Zero
- (<symbol>InvalidTransactionId</symbol>) if the relation is not a table.
+ whether the table needs to be vacuumed in order to prevent multixact ID
+ wraparound or to allow <literal>pg_multixact</> to be shrunk. Zero
+ (<symbol>InvalidMultiXactId</symbol>) if the relation is not a table.
</entry>
</row>
@@ -2663,10 +2683,10 @@
<entry><type>xid</type></entry>
<entry></entry>
<entry>
- All multitransaction IDs before this one have been replaced with a
+ All multixact IDs before this one have been replaced with a
transaction ID in this database. This is used to
track whether the database needs to be vacuumed in order to prevent
- transaction ID wraparound or to allow <literal>pg_clog</> to be shrunk.
+ multixact ID wraparound or to allow <literal>pg_multixact</> to be shrunk.
It is the minimum of the per-table
<structname>pg_class</>.<structfield>relminmxid</> values.
</entry>
@@ -5072,6 +5092,15 @@
</row>
<row>
+ <entry><structfield>protrftypes</structfield></entry>
+ <entry><type>oid[]</type></entry>
+ <entry></entry>
+ <entry>
+ Data type OIDs for which to apply transforms.
+ </entry>
+ </row>
+
+ <row>
<entry><structfield>prosrc</structfield></entry>
<entry><type>text</type></entry>
<entry></entry>
@@ -5323,6 +5352,119 @@
</sect1>
+ <sect1 id="catalog-pg-replication-origin">
+ <title><structname>pg_replication_origin</structname></title>
+
+ <indexterm zone="catalog-pg-replication-origin">
+ <primary>pg_replication_origin</primary>
+ </indexterm>
+
+ <para>
+ The <structname>pg_replication_origin</structname> catalog contains
+ all replication origins created. For more on replication origins
+ see <xref linkend="replication-origins">.
+ </para>
+
+ <table>
+
+ <title><structname>pg_replication_origin</structname> Columns</title>
+
+ <tgroup cols="4">
+ <thead>
+ <row>
+ <entry>Name</entry>
+ <entry>Type</entry>
+ <entry>References</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+
+ <tbody>
+ <row>
+ <entry><structfield>roident</structfield></entry>
+ <entry><type>Oid</type></entry>
+ <entry></entry>
+ <entry>A unique, cluster-wide identifier for the replication
+ origin. Should never leave the system.</entry>
+ </row>
+
+ <row>
+ <entry><structfield>roname</structfield></entry>
+ <entry><type>text</type></entry>
+ <entry></entry>
+ <entry>The external, user defined, name of a replication
+ origin.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </sect1>
+
+ <sect1 id="catalog-pg-replication-origin-status">
+ <title><structname>pg_replication_origin_status</structname></title>
+
+ <indexterm zone="catalog-pg-replication-origin-status">
+ <primary>pg_replication_origin_status</primary>
+ </indexterm>
+
+ <para>
+ The <structname>pg_replication_origin_status</structname> view
+ contains information about how far replay for a certain origin has
+ progressed. For more on replication origins
+ see <xref linkend="replication-origins">.
+ </para>
+
+ <table>
+
+ <title><structname>pg_replication_origin_status</structname> Columns</title>
+
+ <tgroup cols="4">
+ <thead>
+ <row>
+ <entry>Name</entry>
+ <entry>Type</entry>
+ <entry>References</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+
+ <tbody>
+ <row>
+ <entry><structfield>local_id</structfield></entry>
+ <entry><type>Oid</type></entry>
+ <entry><literal><link linkend="catalog-pg-replication-origin"><structname>pg_replication_origin</structname></link>.roident</literal></entry>
+ <entry>internal node identifier</entry>
+ </row>
+
+ <row>
+ <entry><structfield>external_id</structfield></entry>
+ <entry><type>text</type></entry>
+ <entry><literal><link linkend="catalog-pg-replication-origin"><structname>pg_replication_origin</structname></link>.roname</literal></entry>
+ <entry>external node identifier</entry>
+ </row>
+
+ <row>
+ <entry><structfield>remote_lsn</structfield></entry>
+ <entry><type>pg_lsn</type></entry>
+ <entry></entry>
+ <entry>The origin node's LSN up to which data has been replicated.</entry>
+ </row>
+
+
+ <row>
+ <entry><structfield>local_lsn</structfield></entry>
+ <entry><type>pg_lsn</type></entry>
+ <entry></entry>
+ <entry>This node's LSN that at
+ which <literal>remote_lsn</literal> has been replicated. Used to
+ flush commit records before persisting data to disk when using
+ asynchronous commits.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </sect1>
+
<sect1 id="catalog-pg-replication-slots">
<title><structname>pg_replication_slots</structname></title>
@@ -5401,6 +5543,16 @@
</row>
<row>
+ <entry><structfield>active_pid</structfield></entry>
+ <entry><type>integer</type></entry>
+ <entry></entry>
+ <entry>The process ID of the session using this slot if the slot
+ is currently actively being used. <literal>NULL</literal> if
+ inactive.
+ </entry>
+ </row>
+
+ <row>
<entry><structfield>xmin</structfield></entry>
<entry><type>xid</type></entry>
<entry></entry>
@@ -5980,6 +6132,121 @@
</sect1>
+ <sect1 id="catalog-pg-tablesample-method">
+ <title><structname>pg_tablesample_method</structname></title>
+
+ <indexterm zone="catalog-pg-tablesample-method">
+ <primary>pg_tablesample_method</primary>
+ </indexterm>
+
+ <para>
+ The catalog <structname>pg_tablesample_method</structname> stores
+ information about table sampling methods which can be used in
+ <command>TABLESAMPLE</command> clause of a <command>SELECT</command>
+ statement.
+ </para>
+
+ <table>
+ <title><structname>pg_tablesample_method</> Columns</title>
+
+ <tgroup cols="4">
+ <thead>
+ <row>
+ <entry>Name</entry>
+ <entry>Type</entry>
+ <entry>References</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+ <tbody>
+
+ <row>
+ <entry><structfield>oid</structfield></entry>
+ <entry><type>oid</type></entry>
+ <entry></entry>
+ <entry>Row identifier (hidden attribute; must be explicitly selected)</entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmname</structfield></entry>
+ <entry><type>name</type></entry>
+ <entry></entry>
+ <entry>Name of the sampling method</entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmseqscan</structfield></entry>
+ <entry><type>bool</type></entry>
+ <entry></entry>
+ <entry>If true, the sampling method scans the whole table sequentially.
+ </entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmpagemode</structfield></entry>
+ <entry><type>bool</type></entry>
+ <entry></entry>
+ <entry>If true, the sampling method always reads the pages completely.
+ </entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsminit</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry><quote>Initialize the sampling scan</quote> function</entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmnextblock</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry><quote>Get next block number</quote> function</entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmnexttuple</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry><quote>Get next tuple offset</quote> function</entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmexaminetuple</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry>Function which examines the tuple contents and decides whether to
+ return it, or zero if none</entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmend</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry><quote>End the sampling scan</quote> function</entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmreset</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry><quote>Restart the state of sampling scan</quote> function</entry>
+ </row>
+
+ <row>
+ <entry><structfield>tsmcost</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry>Costing function</entry>
+ </row>
+
+ </tbody>
+ </tgroup>
+ </table>
+
+ </sect1>
+
+
<sect1 id="catalog-pg-tablespace">
<title><structname>pg_tablespace</structname></title>
@@ -6061,6 +6328,74 @@
</sect1>
+ <sect1 id="catalog-pg-transform">
+ <title><structname>pg_transform</structname></title>
+
+ <indexterm zone="catalog-pg-transform">
+ <primary>pg_transform</primary>
+ </indexterm>
+
+ <para>
+ The catalog <structname>pg_transform</structname> stores information about
+ transforms, which are a mechanism to adapt data types to procedural
+ languages. See <xref linkend="sql-createtransform"> for more information.
+ </para>
+
+ <table>
+ <title><structname>pg_transform</> Columns</title>
+
+ <tgroup cols="4">
+ <thead>
+ <row>
+ <entry>Name</entry>
+ <entry>Type</entry>
+ <entry>References</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+
+ <tbody>
+ <row>
+ <entry><structfield>trftype</structfield></entry>
+ <entry><type>oid</type></entry>
+ <entry><literal><link linkend="catalog-pg-type"><structname>pg_type</structname></link>.oid</literal></entry>
+ <entry>OID of the data type this transform is for</entry>
+ </row>
+
+ <row>
+ <entry><structfield>trflang</structfield></entry>
+ <entry><type>oid</type></entry>
+ <entry><literal><link linkend="catalog-pg-language"><structname>pg_language</structname></link>.oid</literal></entry>
+ <entry>OID of the language this transform is for</entry>
+ </row>
+
+ <row>
+ <entry><structfield>trffromsql</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry>
+ The OID of the function to use when converting the data type for input
+ to the procedural language (e.g., function parameters). Zero is stored
+ if this operation is not supported.
+ </entry>
+ </row>
+
+ <row>
+ <entry><structfield>trftosql</structfield></entry>
+ <entry><type>regproc</type></entry>
+ <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+ <entry>
+ The OID of the function to use when converting output from the
+ procedural language (e.g., return values) to the data type. Zero is
+ stored if this operation is not supported.
+ </entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </sect1>
+
+
<sect1 id="catalog-pg-trigger">
<title><structname>pg_trigger</structname></title>
@@ -7345,6 +7680,11 @@
</row>
<row>
+ <entry><link linkend="view-pg-file-settings"><structname>pg_file_settings</structname></link></entry>
+ <entry>file location of parameter settings</entry>
+ </row>
+
+ <row>
<entry><link linkend="view-pg-shadow"><structname>pg_shadow</structname></link></entry>
<entry>database users</entry>
</row>
@@ -8822,6 +9162,14 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
or when examined by a non-superuser)
</entry>
</row>
+ <row>
+ <entry><structfield>pending_restart</structfield></entry>
+ <entry><type>boolean</type></entry>
+ <entry><literal>true</literal> if the value has been changed in the
+ configuration file but needs a restart; or <literal>false</literal>
+ otherwise.
+ </entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -8958,6 +9306,79 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</sect1>
+ <sect1 id="view-pg-file-settings">
+ <title><structname>pg_file_settings</structname></title>
+
+ <indexterm zone="view-pg-file-settings">
+ <primary>pg_file_settings</primary>
+ </indexterm>
+
+ <para>
+ The view <structname>pg_file_settings</structname> provides the file
+ name, line number and value of all parameters which are set through
+ configuration files.
+ In contrast to <structname>pg_settings</structname>, a row is provided for
+ each occurrence of the parameter across all configuration files. This is helpful
+ for discovering why one value may have been used in preference to another
+ when the parameters were loaded.
+ </para>
+
+ <table>
+ <title><structname>pg_file_settings</> Columns</title>
+
+ <tgroup cols="3">
+ <thead>
+ <row>
+ <entry>Name</entry>
+ <entry>Type</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+ <tbody>
+ <row>
+ <entry><structfield>sourcefile</structfield></entry>
+ <entry><structfield>text</structfield></entry>
+ <entry>Path to and name of the configuration file</entry>
+ </row>
+ <row>
+ <entry><structfield>sourceline</structfield></entry>
+ <entry><structfield>integer</structfield></entry>
+ <entry>
+ Line number within the configuration file where the value was set
+ </entry>
+ </row>
+ <row>
+ <entry><structfield>seqno</structfield></entry>
+ <entry><structfield>integer</structfield></entry>
+ <entry>Order in which the setting was loaded</entry>
+ </row>
+ <row>
+ <entry><structfield>name</structfield></entry>
+ <entry><structfield>text</structfield></entry>
+ <entry>Run-time configuration parameter name</entry>
+ </row>
+ <row>
+ <entry><structfield>setting</structfield></entry>
+ <entry><structfield>text</structfield></entry>
+ <entry>Value of the parameter</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <para>
+ See <xref linkend="config-setting"> for more information about the various
+ ways to change these parameters.
+ </para>
+
+ <para>
+ The <structname>pg_file_settings</structname> view cannot be modified
+ directly as it represents information, as read in at server start or
+ reload time, about all parameter settings across all configuration files.
+ </para>
+
+</sect1>
+
<sect1 id="view-pg-shadow">
<title><structname>pg_shadow</structname></title>
diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml
index 1bbd2f4415..f8c7ac3b16 100644
--- a/doc/src/sgml/charset.sgml
+++ b/doc/src/sgml/charset.sgml
@@ -694,7 +694,7 @@ SELECT a COLLATE "C" &lt; b COLLATE "POSIX" FROM test1;
<entry>National Standard</entry>
<entry>Chinese</entry>
<entry>No</entry>
- <entry>1-2</entry>
+ <entry>1-4</entry>
<entry></entry>
</row>
<row>
diff --git a/doc/src/sgml/citext.sgml b/doc/src/sgml/citext.sgml
index 0c6855fea6..7fdf30252a 100644
--- a/doc/src/sgml/citext.sgml
+++ b/doc/src/sgml/citext.sgml
@@ -126,6 +126,11 @@ SELECT * FROM users WHERE nick = 'Larry';
<itemizedlist>
<listitem>
<para>
+ <function>regexp_matches()</>
+ </para>
+ </listitem>
+ <listitem>
+ <para>
<function>regexp_replace()</>
</para>
</listitem>
diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml
index d27dd49145..5f72bebba8 100644
--- a/doc/src/sgml/client-auth.sgml
+++ b/doc/src/sgml/client-auth.sgml
@@ -947,15 +947,24 @@ omicron bryanh guest1
</para>
<para>
- Client principals must have their <productname>PostgreSQL</> database user
- name as their first component, for example
- <literal>pgusername@realm</>. Alternatively, you can use a user name
- mapping to map from the first component of the principal name to the
- database user name. By default, the realm of the client is
- not checked by <productname>PostgreSQL</>. If you have cross-realm
- authentication enabled and need to verify the realm, use the
- <literal>krb_realm</> parameter, or enable <literal>include_realm</>
- and use user name mapping to check the realm.
+ Client principals can be mapped to different <productname>PostgreSQL</>
+ database user names with <filename>pg_ident.conf</>. For example,
+ <literal>pgusername@realm</> could be mapped to just <literal>pgusername</>.
+ Alternatively, you can use the full <literal>username@realm</> principal as
+ the role name in <productname>PostgreSQL</> without any mapping.
+ </para>
+
+ <para>
+ <productname>PostgreSQL</> also supports a parameter to strip the realm from
+ the principal. This method is supported for backwards compatibility and is
+ strongly discouraged as it is then impossible to distinguish different users
+ with the same username but coming from different realms. To enable this,
+ set <literal>include_realm</> to 0. For simple single-realm
+ installations, <literal>include_realm</> combined with the
+ <literal>krb_realm</> parameter (which checks that the realm provided
+ matches exactly what is in the krb_realm parameter) would be a secure but
+ less capable option compared to specifying an explicit mapping in
+ <filename>pg_ident.conf</>.
</para>
<para>
@@ -997,10 +1006,13 @@ omicron bryanh guest1
<term><literal>include_realm</literal></term>
<listitem>
<para>
- If set to 1, the realm name from the authenticated user
- principal is included in the system user name that's passed through
- user name mapping (<xref linkend="auth-username-maps">). This is
- useful for handling users from multiple realms.
+ If set to 0, the realm name from the authenticated user principal is
+ stripped off before being passed through the user name mapping
+ (<xref linkend="auth-username-maps">). This is discouraged and is
+ primarily available for backwards compatibility as it is not secure
+ in multi-realm environments unless krb_realm is also used. Users
+ are recommended to leave include_realm set to the default (1) and to
+ provide an explicit mapping in <filename>pg_ident.conf</>.
</para>
</listitem>
</varlistentry>
@@ -1010,12 +1022,15 @@ omicron bryanh guest1
<listitem>
<para>
Allows for mapping between system and database user names. See
- <xref linkend="auth-username-maps"> for details. For a Kerberos
- principal <literal>username/[email protected]</literal>, the
- user name used for mapping is <literal>username/hostbased</literal>
- if <literal>include_realm</literal> is disabled, and
- <literal>username/[email protected]</literal> if
- <literal>include_realm</literal> is enabled.
+ <xref linkend="auth-username-maps"> for details. For a GSSAPI/Kerberos
+ principal, such as <literal>[email protected]</literal> (or, less
+ commonly, <literal>username/[email protected]</literal>), the
+ user name used for mapping is
+ <literal>[email protected]</literal> (or
+ <literal>username/[email protected]</literal>, respectively),
+ unless <literal>include_realm</literal> has been set to 0, in which case
+ <literal>username</literal> (or <literal>username/hostbased</literal>)
+ is what is seen as the system username when mapping.
</para>
</listitem>
</varlistentry>
@@ -1070,10 +1085,13 @@ omicron bryanh guest1
<term><literal>include_realm</literal></term>
<listitem>
<para>
- If set to 1, the realm name from the authenticated user
- principal is included in the system user name that's passed through
- user name mapping (<xref linkend="auth-username-maps">). This is
- useful for handling users from multiple realms.
+ If set to 0, the realm name from the authenticated user principal is
+ stripped off before being passed through the user name mapping
+ (<xref linkend="auth-username-maps">). This is discouraged and is
+ primarily available for backwards compatibility as it is not secure
+ in multi-realm environments unless krb_realm is also used. Users
+ are recommended to leave include_realm set to the default (1) and to
+ provide an explicit mapping in <filename>pg_ident.conf</>.
</para>
</listitem>
</varlistentry>
@@ -1083,7 +1101,15 @@ omicron bryanh guest1
<listitem>
<para>
Allows for mapping between system and database user names. See
- <xref linkend="auth-username-maps"> for details.
+ <xref linkend="auth-username-maps"> for details. For a SSPI/Kerberos
+ principal, such as <literal>[email protected]</literal> (or, less
+ commonly, <literal>username/[email protected]</literal>), the
+ user name used for mapping is
+ <literal>[email protected]</literal> (or
+ <literal>username/[email protected]</literal>, respectively),
+ unless <literal>include_realm</literal> has been set to 0, in which case
+ <literal>username</literal> (or <literal>username/hostbased</literal>)
+ is what is seen as the system username when mapping.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index b30c68dc13..5549b7dce6 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -2521,7 +2521,7 @@ include_dir 'conf.d'
<variablelist>
<varlistentry id="guc-archive-mode" xreflabel="archive_mode">
- <term><varname>archive_mode</varname> (<type>boolean</type>)
+ <term><varname>archive_mode</varname> (<type>enum</type>)
<indexterm>
<primary><varname>archive_mode</> configuration parameter</primary>
</indexterm>
@@ -2530,7 +2530,16 @@ include_dir 'conf.d'
<para>
When <varname>archive_mode</> is enabled, completed WAL segments
are sent to archive storage by setting
- <xref linkend="guc-archive-command">.
+ <xref linkend="guc-archive-command">. In addition to <literal>off</>,
+ to disable, there are two modes: <literal>on</>, and
+ <literal>always</>. During normal operation, there is no
+ difference between the two modes, but when set to <literal>always</>
+ the WAL archiver is enabled also during archive recovery or standby
+ mode. In <literal>always</> mode, all files restored from the archive
+ or streamed with streaming replication will be archived (again). See
+ <xref linkend="continuous-archiving-in-standby"> for details.
+ </para>
+ <para>
<varname>archive_mode</> and <varname>archive_command</> are
separate variables so that <varname>archive_command</> can be
changed without leaving archiving mode.
@@ -6752,7 +6761,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
determines whether OIDs will be included in tables created by
<command>SELECT INTO</command>. The parameter is <literal>off</>
by default; in <productname>PostgreSQL</> 8.0 and earlier, it
- was on by default.
+ was <literal>on</> by default.
</para>
<para>
diff --git a/doc/src/sgml/contrib.sgml b/doc/src/sgml/contrib.sgml
index adc21843db..1b3d2d93c7 100644
--- a/doc/src/sgml/contrib.sgml
+++ b/doc/src/sgml/contrib.sgml
@@ -141,6 +141,8 @@ CREATE EXTENSION <replaceable>module_name</> FROM unpackaged;
&tcn;
&test-decoding;
&tsearch2;
+ &tsm-system-rows;
+ &tsm-system-time;
&unaccent;
&uuid-ossp;
&xml2;
@@ -202,8 +204,5 @@ pages.
</para>
&pgstandby;
- &pgtestfsync;
- &pgtesttiming;
- &pgxlogdump;
</sect1>
</appendix>
diff --git a/doc/src/sgml/custom-scan.sgml b/doc/src/sgml/custom-scan.sgml
index 8a4a3dfcfe..62a8a3305b 100644
--- a/doc/src/sgml/custom-scan.sgml
+++ b/doc/src/sgml/custom-scan.sgml
@@ -32,12 +32,13 @@
</para>
<sect1 id="custom-scan-path">
- <title>Implementing Custom Paths</title>
+ <title>Creating Custom Scan Paths</title>
<para>
- A custom scan provider will typically add paths by setting the following
- hook, which is called after the core code has generated what it believes
- to be the complete and correct set of access paths for the relation.
+ A custom scan provider will typically add paths for a base relation by
+ setting the following hook, which is called after the core code has
+ generated what it believes to be the complete and correct set of access
+ paths for the relation.
<programlisting>
typedef void (*set_rel_pathlist_hook_type) (PlannerInfo *root,
RelOptInfo *rel,
@@ -74,15 +75,36 @@ typedef struct CustomPath
can support mark and restore. Both capabilities are optional.
<structfield>custom_private</> can be used to store the custom path's
private data. Private data should be stored in a form that can be handled
- by <literal>nodeToString</>, so that debugging routines which attempt to
+ by <literal>nodeToString</>, so that debugging routines that attempt to
print the custom path will work as designed. <structfield>methods</> must
point to a (usually statically allocated) object implementing the required
custom path methods, of which there are currently only two, as further
detailed below.
</para>
+ <para>
+ A custom scan provider can also provide join paths. Just as for base
+ relations, such a path must produce the same output as would normally be
+ produced by the join it replaces. To do this, the join provider should
+ set the following hook, and then within the hook function,
+ create <structname>CustomPath</> path(s) for the join relation.
+<programlisting>
+typedef void (*set_join_pathlist_hook_type) (PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra);
+extern PGDLLIMPORT set_join_pathlist_hook_type set_join_pathlist_hook;
+</programlisting>
+
+ This hook will be invoked repeatedly for the same join relation, with
+ different combinations of inner and outer relations; it is the
+ responsibility of the hook to minimize duplicated work.
+ </para>
+
<sect2 id="custom-scan-path-callbacks">
- <title>Custom Path Callbacks</title>
+ <title>Custom Scan Path Callbacks</title>
<para>
<programlisting>
@@ -103,7 +125,7 @@ void (*TextOutCustomPath) (StringInfo str,
const CustomPath *node);
</programlisting>
Generate additional output when <function>nodeToString</> is invoked on
- this custom path. This callback is optional. Since
+ this custom path. This callback is optional. Since
<function>nodeToString</> will automatically dump all fields in the
structure that it can see, including <structfield>custom_private</>, this
is only useful if the <structname>CustomPath</> is actually embedded in a
@@ -113,7 +135,7 @@ void (*TextOutCustomPath) (StringInfo str,
</sect1>
<sect1 id="custom-scan-plan">
- <title>Implementing Custom Plans</title>
+ <title>Creating Custom Scan Plans</title>
<para>
A custom scan is represented in a finished plan tree using the following
@@ -125,6 +147,8 @@ typedef struct CustomScan
uint32 flags;
List *custom_exprs;
List *custom_private;
+ List *custom_scan_tlist;
+ Bitmapset *custom_relids;
const CustomScanMethods *methods;
} CustomScan;
</programlisting>
@@ -134,20 +158,44 @@ typedef struct CustomScan
<structfield>scan</> must be initialized as for any other scan, including
estimated costs, target lists, qualifications, and so on.
<structfield>flags</> is a bitmask with the same meaning as in
- <structname>CustomPath</>. <structfield>custom_exprs</> should be used to
+ <structname>CustomPath</>.
+ <structfield>custom_exprs</> should be used to
store expression trees that will need to be fixed up by
<filename>setrefs.c</> and <filename>subselect.c</>, while
- <literal>custom_private</> should be used to store other private data that
- is only used by the custom scan provider itself. Plan trees must be able
- to be duplicated using <function>copyObject</>, so all the data stored
- within these two fields must consist of nodes that function can handle.
+ <structfield>custom_private</> should be used to store other private data
+ that is only used by the custom scan provider itself.
+ <structfield>custom_scan_tlist</> can be NIL when scanning a base
+ relation, indicating that the custom scan returns scan tuples that match
+ the base relation's rowtype. Otherwise it is a targetlist describing
+ the actual scan tuples. <structfield>custom_scan_tlist</> must be
+ provided for joins, and could be provided for scans if the custom scan
+ provider can compute some non-Var expressions.
+ <structfield>custom_relids</> is set by the core code to the set of
+ relations (rangetable indexes) that this scan node handles; except when
+ this scan is replacing a join, it will have only one member.
<structfield>methods</> must point to a (usually statically allocated)
object implementing the required custom scan methods, which are further
detailed below.
</para>
+ <para>
+ When a <structname>CustomScan</> scans a single relation,
+ <structfield>scan.scanrelid</> must be the range table index of the table
+ to be scanned. When it replaces a join, <structfield>scan.scanrelid</>
+ should be zero.
+ </para>
+
+ <para>
+ Plan trees must be able to be duplicated using <function>copyObject</>,
+ so all the data stored within the <quote>custom</> fields must consist of
+ nodes that that function can handle. Furthermore, custom scan providers
+ cannot substitute a larger structure that embeds
+ a <structname>CustomScan</> for the structure itself, as would be possible
+ for a <structname>CustomPath</> or <structname>CustomScanState</>.
+ </para>
+
<sect2 id="custom-scan-plan-callbacks">
- <title>Custom Scan Callbacks</title>
+ <title>Custom Scan Plan Callbacks</title>
<para>
<programlisting>
Node *(*CreateCustomScanState) (CustomScan *cscan);
@@ -155,12 +203,12 @@ Node *(*CreateCustomScanState) (CustomScan *cscan);
Allocate a <structname>CustomScanState</> for this
<structname>CustomScan</>. The actual allocation will often be larger than
required for an ordinary <structname>CustomScanState</>, because many
- scan types will wish to embed that as the first field of a large structure.
+ providers will wish to embed that as the first field of a larger structure.
The value returned must have the node tag and <structfield>methods</>
- set appropriately, but the other fields need not be initialized at this
+ set appropriately, but other fields should be left as zeroes at this
stage; after <function>ExecInitCustomScan</> performs basic initialization,
the <function>BeginCustomScan</> callback will be invoked to give the
- custom scan state a chance to do whatever else is needed.
+ custom scan provider a chance to do whatever else is needed.
</para>
<para>
@@ -169,23 +217,21 @@ void (*TextOutCustomScan) (StringInfo str,
const CustomScan *node);
</programlisting>
Generate additional output when <function>nodeToString</> is invoked on
- this custom plan. This callback is optional. Since a
- <structname>CustomScan</> must be copyable by <function>copyObject</>,
- custom scan providers cannot substitute a larger structure that embeds a
- <structname>CustomScan</> for the structure itself, as would be possible
- for a <structname>CustomPath</> or <structname>CustomScanState</>.
- Therefore, providing this callback is unlikely to be useful.
+ this custom plan node. This callback is optional. Since
+ <function>nodeToString</> will automatically dump all fields in the
+ structure, including the substructure of the <quote>custom</> fields,
+ there is usually not much need for this callback.
</para>
</sect2>
</sect1>
- <sect1 id="custom-scan-scan">
- <title>Implementing Custom Scans</title>
+ <sect1 id="custom-scan-execution">
+ <title>Executing Custom Scans</title>
<para>
When a <structfield>CustomScan</> is executed, its execution state is
represented by a <structfield>CustomScanState</>, which is declared as
- follows.
+ follows:
<programlisting>
typedef struct CustomScanState
{
@@ -197,7 +243,9 @@ typedef struct CustomScanState
</para>
<para>
- <structfield>ss</> must be initialized as for any other scanstate;
+ <structfield>ss</> is initialized as for any other scanstate,
+ except that if the scan is for a join rather than a base relation,
+ <literal>ss.ss_currentRelation</> is left NULL.
<structfield>flags</> is a bitmask with the same meaning as in
<structname>CustomPath</> and <structname>CustomScan</>.
<structfield>methods</> must point to a (usually statically allocated)
@@ -207,8 +255,8 @@ typedef struct CustomScanState
structure embedding the above as its first member.
</para>
- <sect2 id="custom-scan-scan-callbacks">
- <title>Custom Execution-Time Callbacks</title>
+ <sect2 id="custom-scan-execution-callbacks">
+ <title>Custom Scan Execution Callbacks</title>
<para>
<programlisting>
@@ -217,8 +265,8 @@ void (*BeginCustomScan) (CustomScanState *node,
int eflags);
</programlisting>
Complete initialization of the supplied <structname>CustomScanState</>.
- Some initialization is performed by <function>ExecInitCustomScan</>, but
- any private fields should be initialized here.
+ Standard fields have been initialized by <function>ExecInitCustomScan</>,
+ but any private fields should be initialized here.
</para>
<para>
@@ -236,8 +284,8 @@ TupleTableSlot *(*ExecCustomScan) (CustomScanState *node);
void (*EndCustomScan) (CustomScanState *node);
</programlisting>
Clean up any private data associated with the <literal>CustomScanState</>.
- This method is required, but may not need to do anything if the associated
- data does not exist or will be cleaned up automatically.
+ This method is required, but it does not need to do anything if there is
+ no associated data or it will be cleaned up automatically.
</para>
<para>
@@ -253,8 +301,8 @@ void (*ReScanCustomScan) (CustomScanState *node);
void (*MarkPosCustomScan) (CustomScanState *node);
</programlisting>
Save the current scan position so that it can subsequently be restored
- by the <function>RestrPosCustomScan</> callback. This calback is optional,
- and need only be supplied if
+ by the <function>RestrPosCustomScan</> callback. This callback is
+ optional, and need only be supplied if the
<literal>CUSTOMPATH_SUPPORT_MARK_RESTORE</> flag is set.
</para>
@@ -264,7 +312,7 @@ void (*RestrPosCustomScan) (CustomScanState *node);
</programlisting>
Restore the previous scan position as saved by the
<function>MarkPosCustomScan</> callback. This callback is optional,
- and need only be supplied if
+ and need only be supplied if the
<literal>CUSTOMPATH_SUPPORT_MARK_RESTORE</> flag is set.
</para>
@@ -274,8 +322,8 @@ void (*ExplainCustomScan) (CustomScanState *node,
List *ancestors,
ExplainState *es);
</programlisting>
- Output additional information on <command>EXPLAIN</> that involves
- custom-scan node. This callback is optional. Common data stored in the
+ Output additional information for <command>EXPLAIN</> of a custom-scan
+ plan node. This callback is optional. Common data stored in the
<structname>ScanState</>, such as the target list and scan relation, will
be shown even without this callback, but the callback allows the display
of additional, private state.
diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml
index da1f25fe28..9d5ce953f1 100644
--- a/doc/src/sgml/datatype.sgml
+++ b/doc/src/sgml/datatype.sgml
@@ -4321,7 +4321,8 @@ SET xmloption TO { DOCUMENT | CONTENT };
an object identifier. There are also several alias types for
<type>oid</>: <type>regproc</>, <type>regprocedure</>,
<type>regoper</>, <type>regoperator</>, <type>regclass</>,
- <type>regtype</>, <type>regconfig</>, and <type>regdictionary</>.
+ <type>regtype</>, <type>regrole</>, <type>regnamespace</>,
+ <type>regconfig</>, and <type>regdictionary</>.
<xref linkend="datatype-oid-table"> shows an overview.
</para>
@@ -4431,6 +4432,20 @@ SELECT * FROM pg_attribute
</row>
<row>
+ <entry><type>regrole</></entry>
+ <entry><structname>pg_authid</></entry>
+ <entry>role name</entry>
+ <entry><literal>smithee</></entry>
+ </row>
+
+ <row>
+ <entry><type>regnamespace</></entry>
+ <entry><structname>pg_namespace</></entry>
+ <entry>namespace name</entry>
+ <entry><literal>pg_catalog</></entry>
+ </row>
+
+ <row>
<entry><type>regconfig</></entry>
<entry><structname>pg_ts_config</></entry>
<entry>text search configuration</entry>
@@ -4448,7 +4463,8 @@ SELECT * FROM pg_attribute
</table>
<para>
- All of the OID alias types accept schema-qualified names, and will
+ All of the OID alias types for objects grouped by namespace accept
+ schema-qualified names, and will
display schema-qualified names on output if the object would not
be found in the current search path without being qualified.
The <type>regproc</> and <type>regoper</> alias types will only
@@ -4460,7 +4476,7 @@ SELECT * FROM pg_attribute
</para>
<para>
- An additional property of the OID alias types is the creation of
+ An additional property of most of the OID alias types is the creation of
dependencies. If a
constant of one of these types appears in a stored expression
(such as a column default expression or view), it creates a dependency
@@ -4470,8 +4486,18 @@ SELECT * FROM pg_attribute
understands that the default expression depends on the sequence
<literal>my_seq</>; the system will not let the sequence be dropped
without first removing the default expression.
+ <type>regrole</> is the only exception to this property. Constants of
+ this type are not allowed in such expressions.
</para>
+ <note>
+ <para>
+ The OID alias types do not completely follow transaction isolation
+ rules. The planner also treats them as simple constants, which may
+ result in sub-optimal planning.
+ </para>
+ </note>
+
<para>
Another identifier type used by the system is <type>xid</>, or transaction
(abbreviated <abbrev>xact</>) identifier. This is the data type of the system columns
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index 1c56f162de..0aa0c13c5c 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -1553,7 +1553,7 @@ REVOKE ALL ON accounts FROM PUBLIC;
both. The commands available are <literal>ALL</literal>,
<literal>SELECT</>, <literal>INSERT</>, <literal>UPDATE</>, and
<literal>DELETE</>. Multiple roles can be assigned to a given policy
- and normal role membership and inheiritance rules apply.
+ and normal role membership and inheritance rules apply.
</para>
<para>
diff --git a/doc/src/sgml/event-trigger.sgml b/doc/src/sgml/event-trigger.sgml
index f151eb7375..b6cbb1bc24 100644
--- a/doc/src/sgml/event-trigger.sgml
+++ b/doc/src/sgml/event-trigger.sgml
@@ -29,7 +29,8 @@
occurs in the database in which it is defined. Currently, the only
supported events are
<literal>ddl_command_start</>,
- <literal>ddl_command_end</>
+ <literal>ddl_command_end</>,
+ <literal>table_rewrite</>
and <literal>sql_drop</>.
Support for additional events may be added in future releases.
</para>
@@ -52,7 +53,13 @@
<para>
The <literal>ddl_command_end</> event occurs just after the execution of
- this same set of commands.
+ this same set of commands. To obtain more details on the <acronym>DDL</>
+ operations that took place, use the set-returning function
+ <literal>pg_event_trigger_ddl_commands()</> from the
+ <literal>ddl_command_end</> event trigger code (see
+ <xref linkend="functions-event-triggers">). Note that the trigger fires
+ after the actions have taken place (but before the transaction commits),
+ and thus the system catalogs can be read as already changed.
</para>
<para>
@@ -991,8 +998,6 @@ typedef struct EventTriggerData
PG_MODULE_MAGIC;
-Datum noddl(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(noddl);
Datum
diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml
index c1daa4be5a..236157743a 100644
--- a/doc/src/sgml/fdwhandler.sgml
+++ b/doc/src/sgml/fdwhandler.sgml
@@ -175,8 +175,11 @@ GetForeignPlan (PlannerInfo *root,
access path. This is called at the end of query planning.
The parameters are as for <function>GetForeignRelSize</>, plus
the selected <structname>ForeignPath</> (previously produced by
- <function>GetForeignPaths</>), the target list to be emitted by the
- plan node, and the restriction clauses to be enforced by the plan node.
+ <function>GetForeignPaths</> or <function>GetForeignJoinPaths</>),
+ the target list to be emitted by the plan node,
+ and the restriction clauses to be enforced by the plan node.
+ (If the path is for a join rather than a base
+ relation, <literal>foreigntableid</> is <literal>InvalidOid</>.)
</para>
<para>
@@ -235,20 +238,23 @@ IterateForeignScan (ForeignScanState *node);
</para>
<para>
- The rows returned must match the column signature of the foreign table
- being scanned. If you choose to optimize away fetching columns that
- are not needed, you should insert nulls in those column positions.
+ The rows returned must match the <structfield>fdw_scan_tlist</> target
+ list if one was supplied, otherwise they must match the rowtype of the
+ foreign table being scanned. If you choose to optimize away fetching
+ columns that are not needed, you should insert nulls in those column
+ positions, or else generate a <structfield>fdw_scan_tlist</> list with
+ those columns omitted.
</para>
<para>
Note that <productname>PostgreSQL</productname>'s executor doesn't care
- whether the rows returned violate any <literal>NOT NULL</literal>
- constraints that were defined on the foreign table columns &mdash; but
- the planner does care, and may optimize queries incorrectly if
- <literal>NULL</> values are present in a column declared not to contain
- them. If a <literal>NULL</> value is encountered when the user has
- declared that none should be present, it may be appropriate to raise an
- error (just as you would need to do in the case of a data type mismatch).
+ whether the rows returned violate any constraints that were defined on
+ the foreign table &mdash; but the planner does care, and may optimize
+ queries incorrectly if there are rows visible in the foreign table that
+ do not satisfy a declared constraint. If a constraint is violated when
+ the user has declared that the constraint should hold true, it may be
+ appropriate to raise an error (just as you would need to do in the case
+ of a data type mismatch).
</para>
<para>
@@ -275,6 +281,67 @@ EndForeignScan (ForeignScanState *node);
</sect2>
+ <sect2 id="fdw-callbacks-join-scan">
+ <title>FDW Routines For Scanning Foreign Joins</title>
+
+ <para>
+ If an FDW supports performing foreign joins remotely (rather than
+ by fetching both tables' data and doing the join locally), it should
+ provide this callback function:
+ </para>
+
+ <para>
+<programlisting>
+void
+GetForeignJoinPaths (PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra);
+</programlisting>
+ Create possible access paths for a join of two (or more) foreign tables
+ that all belong to the same foreign server. This optional
+ function is called during query planning. As
+ with <function>GetForeignPaths</>, this function should
+ generate <structname>ForeignPath</> path(s) for the
+ supplied <literal>joinrel</>, and call <function>add_path</> to add these
+ paths to the set of paths considered for the join. But unlike
+ <function>GetForeignPaths</>, it is not necessary that this function
+ succeed in creating at least one path, since paths involving local
+ joining are always possible.
+ </para>
+
+ <para>
+ Note that this function will be invoked repeatedly for the same join
+ relation, with different combinations of inner and outer relations; it is
+ the responsibility of the FDW to minimize duplicated work.
+ </para>
+
+ <para>
+ If a <structname>ForeignPath</> path is chosen for the join, it will
+ represent the entire join process; paths generated for the component
+ tables and subsidiary joins will not be used. Subsequent processing of
+ the join path proceeds much as it does for a path scanning a single
+ foreign table. One difference is that the <structfield>scanrelid</> of
+ the resulting <structname>ForeignScan</> plan node should be set to zero,
+ since there is no single relation that it represents; instead,
+ the <structfield>fs_relids</> field of the <structname>ForeignScan</>
+ node represents the set of relations that were joined. (The latter field
+ is set up automatically by the core planner code, and need not be filled
+ by the FDW.) Another difference is that, because the column list for a
+ remote join cannot be found from the system catalogs, the FDW must
+ fill <structfield>fdw_scan_tlist</> with an appropriate list
+ of <structfield>TargetEntry</> nodes, representing the set of columns
+ it will supply at runtime in the tuples it returns.
+ </para>
+
+ <para>
+ See <xref linkend="fdw-planning"> for additional information.
+ </para>
+
+ </sect2>
+
<sect2 id="fdw-callbacks-update">
<title>FDW Routines For Updating Foreign Tables</title>
@@ -598,6 +665,108 @@ IsForeignRelUpdatable (Relation rel);
</sect2>
+ <sect2 id="fdw-callbacks-row-locking">
+ <title>FDW Routines For Row Locking</title>
+
+ <para>
+ If an FDW wishes to support <firstterm>late row locking</> (as described
+ in <xref linkend="fdw-row-locking">), it must provide the following
+ callback functions:
+ </para>
+
+ <para>
+<programlisting>
+RowMarkType
+GetForeignRowMarkType (RangeTblEntry *rte,
+ LockClauseStrength strength);
+</programlisting>
+
+ Report which row-marking option to use for a foreign table.
+ <literal>rte</> is the <structname>RangeTblEntry</> node for the table
+ and <literal>strength</> describes the lock strength requested by the
+ relevant <literal>FOR UPDATE/SHARE</> clause, if any. The result must be
+ a member of the <literal>RowMarkType</> enum type.
+ </para>
+
+ <para>
+ This function is called during query planning for each foreign table that
+ appears in an <command>UPDATE</>, <command>DELETE</>, or <command>SELECT
+ FOR UPDATE/SHARE</> query and is not the target of <command>UPDATE</>
+ or <command>DELETE</>.
+ </para>
+
+ <para>
+ If the <function>GetForeignRowMarkType</> pointer is set to
+ <literal>NULL</>, the <literal>ROW_MARK_COPY</> option is always used.
+ (This implies that <function>RefetchForeignRow</> will never be called,
+ so it need not be provided either.)
+ </para>
+
+ <para>
+ See <xref linkend="fdw-row-locking"> for more information.
+ </para>
+
+ <para>
+<programlisting>
+HeapTuple
+RefetchForeignRow (EState *estate,
+ ExecRowMark *erm,
+ Datum rowid,
+ bool *updated);
+</programlisting>
+
+ Re-fetch one tuple from the foreign table, after locking it if required.
+ <literal>estate</> is global execution state for the query.
+ <literal>erm</> is the <structname>ExecRowMark</> struct describing
+ the target foreign table and the row lock type (if any) to acquire.
+ <literal>rowid</> identifies the tuple to be fetched.
+ <literal>updated</> is an output parameter.
+ </para>
+
+ <para>
+ This function should return a palloc'ed copy of the fetched tuple,
+ or <literal>NULL</> if the row lock couldn't be obtained. The row lock
+ type to acquire is defined by <literal>erm-&gt;markType</>, which is the
+ value previously returned by <function>GetForeignRowMarkType</>.
+ (<literal>ROW_MARK_REFERENCE</> means to just re-fetch the tuple without
+ acquiring any lock, and <literal>ROW_MARK_COPY</> will never be seen by
+ this routine.)
+ </para>
+
+ <para>
+ In addition, <literal>*updated</> should be set to <literal>true</>
+ if what was fetched was an updated version of the tuple rather than
+ the same version previously obtained. (If the FDW cannot be sure about
+ this, always returning <literal>true</> is recommended.)
+ </para>
+
+ <para>
+ Note that by default, failure to acquire a row lock should result in
+ raising an error; a <literal>NULL</> return is only appropriate if
+ the <literal>SKIP LOCKED</> option is specified
+ by <literal>erm-&gt;waitPolicy</>.
+ </para>
+
+ <para>
+ The <literal>rowid</> is the <structfield>ctid</> value previously read
+ for the row to be re-fetched. Although the <literal>rowid</> value is
+ passed as a <type>Datum</>, it can currently only be a <type>tid</>. The
+ function API is chosen in hopes that it may be possible to allow other
+ datatypes for row IDs in future.
+ </para>
+
+ <para>
+ If the <function>RefetchForeignRow</> pointer is set to
+ <literal>NULL</>, attempts to re-fetch rows will fail
+ with an error message.
+ </para>
+
+ <para>
+ See <xref linkend="fdw-row-locking"> for more information.
+ </para>
+
+ </sect2>
+
<sect2 id="fdw-callbacks-explain">
<title>FDW Routines for <command>EXPLAIN</></title>
@@ -868,10 +1037,10 @@ GetForeignServerByName(const char *name, bool missing_ok);
<para>
The FDW callback functions <function>GetForeignRelSize</>,
- <function>GetForeignPaths</>, <function>GetForeignPlan</>, and
- <function>PlanForeignModify</> must fit into the workings of the
- <productname>PostgreSQL</> planner. Here are some notes about what
- they must do.
+ <function>GetForeignPaths</>, <function>GetForeignPlan</>,
+ <function>PlanForeignModify</>, and <function>GetForeignJoinPaths</>
+ must fit into the workings of the <productname>PostgreSQL</> planner.
+ Here are some notes about what they must do.
</para>
<para>
@@ -898,7 +1067,7 @@ GetForeignServerByName(const char *name, bool missing_ok);
<literal>baserel-&gt;fdw_private</> is a <type>void</> pointer that is
available for FDW planning functions to store information relevant to
the particular foreign table. The core planner does not touch it except
- to initialize it to NULL when the <literal>baserel</> node is created.
+ to initialize it to NULL when the <literal>RelOptInfo</> node is created.
It is useful for passing information forward from
<function>GetForeignRelSize</> to <function>GetForeignPaths</> and/or
<function>GetForeignPaths</> to <function>GetForeignPlan</>, thereby
@@ -967,6 +1136,23 @@ GetForeignServerByName(const char *name, bool missing_ok);
</para>
<para>
+ Another <structname>ForeignScan</> field that can be filled by FDWs
+ is <structfield>fdw_scan_tlist</>, which describes the tuples returned by
+ the FDW for this plan node. For simple foreign table scans this can be
+ set to <literal>NIL</>, implying that the returned tuples have the
+ rowtype declared for the foreign table. A non-NIL value must be a
+ targetlist (list of <structname>TargetEntry</>s) containing Vars and/or
+ expressions representing the returned columns. This might be used, for
+ example, to show that the FDW has omitted some columns that it noticed
+ won't be needed for the query. Also, if the FDW can compute expressions
+ used by the query more cheaply than can be done locally, it could add
+ those expressions to <structfield>fdw_scan_tlist</>. Note that join
+ plans (created from paths made by <function>GetForeignJoinPaths</>) must
+ always supply <structfield>fdw_scan_tlist</> to describe the set of
+ columns they will return.
+ </para>
+
+ <para>
The FDW should always construct at least one path that depends only on
the table's restriction clauses. In join queries, it might also choose
to construct path(s) that depend on join clauses, for example
@@ -984,6 +1170,18 @@ GetForeignServerByName(const char *name, bool missing_ok);
</para>
<para>
+ If an FDW supports remote joins, <function>GetForeignJoinPaths</> should
+ produce <structname>ForeignPath</>s for potential remote joins in much
+ the same way as <function>GetForeignPaths</> works for base tables.
+ Information about the intended join can be passed forward
+ to <function>GetForeignPlan</> in the same ways described above.
+ However, <structfield>baserestrictinfo</> is not relevant for join
+ relations; instead, the relevant join clauses for a particular join are
+ passed to <function>GetForeignJoinPaths</> as a separate parameter
+ (<literal>extra-&gt;restrictlist</>).
+ </para>
+
+ <para>
When planning an <command>UPDATE</> or <command>DELETE</>,
<function>PlanForeignModify</> can look up the <structname>RelOptInfo</>
struct for the foreign table and make use of the
@@ -997,21 +1195,122 @@ GetForeignServerByName(const char *name, bool missing_ok);
</para>
<para>
- For an <command>UPDATE</> or <command>DELETE</> against an external data
- source that supports concurrent updates, it is recommended that the
- <literal>ForeignScan</> operation lock the rows that it fetches, perhaps
- via the equivalent of <command>SELECT FOR UPDATE</>. The FDW may also
- choose to lock rows at fetch time when the foreign table is referenced
- in a <command>SELECT FOR UPDATE/SHARE</>; if it does not, the
- <literal>FOR UPDATE</> or <literal>FOR SHARE</> option is essentially a
- no-op so far as the foreign table is concerned. This behavior may yield
- semantics slightly different from operations on local tables, where row
- locking is customarily delayed as long as possible: remote rows may get
- locked even though they subsequently fail locally-applied restriction or
- join conditions. However, matching the local semantics exactly would
- require an additional remote access for every row, and might be
- impossible anyway depending on what locking semantics the external data
- source provides.
+ <command>INSERT</> with an <literal>ON CONFLICT</> clause does not
+ support specifying the conflict target, as remote constraints are not
+ locally known. This in turn implies that <literal>ON CONFLICT DO
+ UPDATE</> is not supported, since the specification is mandatory there.
+ </para>
+
+ </sect1>
+
+ <sect1 id="fdw-row-locking">
+ <title>Row Locking in Foreign Data Wrappers</title>
+
+ <para>
+ If an FDW's underlying storage mechanism has a concept of locking
+ individual rows to prevent concurrent updates of those rows, it is
+ usually worthwhile for the FDW to perform row-level locking with as
+ close an approximation as practical to the semantics used in
+ ordinary <productname>PostgreSQL</> tables. There are multiple
+ considerations involved in this.
+ </para>
+
+ <para>
+ One key decision to be made is whether to perform <firstterm>early
+ locking</> or <firstterm>late locking</>. In early locking, a row is
+ locked when it is first retrieved from the underlying store, while in
+ late locking, the row is locked only when it is known that it needs to
+ be locked. (The difference arises because some rows may be discarded by
+ locally-checked restriction or join conditions.) Early locking is much
+ simpler and avoids extra round trips to a remote store, but it can cause
+ locking of rows that need not have been locked, resulting in reduced
+ concurrency or even unexpected deadlocks. Also, late locking is only
+ possible if the row to be locked can be uniquely re-identified later.
+ Preferably the row identifier should identify a specific version of the
+ row, as <productname>PostgreSQL</> TIDs do.
+ </para>
+
+ <para>
+ By default, <productname>PostgreSQL</> ignores locking considerations
+ when interfacing to FDWs, but an FDW can perform early locking without
+ any explicit support from the core code. The API functions described
+ in <xref linkend="fdw-callbacks-row-locking">, which were added
+ in <productname>PostgreSQL</> 9.5, allow an FDW to use late locking if
+ it wishes.
+ </para>
+
+ <para>
+ An additional consideration is that in <literal>READ COMMITTED</>
+ isolation mode, <productname>PostgreSQL</> may need to re-check
+ restriction and join conditions against an updated version of some
+ target tuple. Rechecking join conditions requires re-obtaining copies
+ of the non-target rows that were previously joined to the target tuple.
+ When working with standard <productname>PostgreSQL</> tables, this is
+ done by including the TIDs of the non-target tables in the column list
+ projected through the join, and then re-fetching non-target rows when
+ required. This approach keeps the join data set compact, but it
+ requires inexpensive re-fetch capability, as well as a TID that can
+ uniquely identify the row version to be re-fetched. By default,
+ therefore, the approach used with foreign tables is to include a copy of
+ the entire row fetched from a foreign table in the column list projected
+ through the join. This puts no special demands on the FDW but can
+ result in reduced performance of merge and hash joins. An FDW that is
+ capable of meeting the re-fetch requirements can choose to do it the
+ first way.
+ </para>
+
+ <para>
+ For an <command>UPDATE</> or <command>DELETE</> on a foreign table, it
+ is recommended that the <literal>ForeignScan</> operation on the target
+ table perform early locking on the rows that it fetches, perhaps via the
+ equivalent of <command>SELECT FOR UPDATE</>. An FDW can detect whether
+ a table is an <command>UPDATE</>/<command>DELETE</> target at plan time
+ by comparing its relid to <literal>root-&gt;parse-&gt;resultRelation</>,
+ or at execution time by using <function>ExecRelationIsTargetRelation()</>.
+ An alternative possibility is to perform late locking within the
+ <function>ExecForeignUpdate</> or <function>ExecForeignDelete</>
+ callback, but no special support is provided for this.
+ </para>
+
+ <para>
+ For foreign tables that are specified to be locked by a <command>SELECT
+ FOR UPDATE/SHARE</> command, the <literal>ForeignScan</> operation can
+ again perform early locking by fetching tuples with the equivalent
+ of <command>SELECT FOR UPDATE/SHARE</>. To perform late locking
+ instead, provide the callback functions defined
+ in <xref linkend="fdw-callbacks-row-locking">.
+ In <function>GetForeignRowMarkType</>, select rowmark option
+ <literal>ROW_MARK_EXCLUSIVE</>, <literal>ROW_MARK_NOKEYEXCLUSIVE</>,
+ <literal>ROW_MARK_SHARE</>, or <literal>ROW_MARK_KEYSHARE</> depending
+ on the requested lock strength. (The core code will act the same
+ regardless of which of these four options you choose.)
+ Elsewhere, you can detect whether a foreign table was specified to be
+ locked by this type of command by using <function>get_plan_rowmark</> at
+ plan time, or <function>ExecFindRowMark</> at execution time; you must
+ check not only whether a non-null rowmark struct is returned, but that
+ its <structfield>strength</> field is not <literal>LCS_NONE</>.
+ </para>
+
+ <para>
+ Lastly, for foreign tables that are used in an <command>UPDATE</>,
+ <command>DELETE</> or <command>SELECT FOR UPDATE/SHARE</> command but
+ are not specified to be row-locked, you can override the default choice
+ to copy entire rows by having <function>GetForeignRowMarkType</> select
+ option <literal>ROW_MARK_REFERENCE</> when it sees lock strength
+ <literal>LCS_NONE</>. This will cause <function>RefetchForeignRow</> to
+ be called with that value for <structfield>markType</>; it should then
+ re-fetch the row without acquiring any new lock. (If you have
+ a <function>GetForeignRowMarkType</> function but don't wish to re-fetch
+ unlocked rows, select option <literal>ROW_MARK_COPY</>
+ for <literal>LCS_NONE</>.)
+ </para>
+
+ <para>
+ See <filename>src/include/nodes/lockoptions.h</>, the comments
+ for <type>RowMarkType</> and <type>PlanRowMark</>
+ in <filename>src/include/nodes/plannodes.h</>, and the comments for
+ <type>ExecRowMark</> in <filename>src/include/nodes/execnodes.h</> for
+ additional information.
</para>
</sect1>
diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml
index 2d7514c3ea..a8d1281ab6 100644
--- a/doc/src/sgml/filelist.sgml
+++ b/doc/src/sgml/filelist.sgml
@@ -95,9 +95,11 @@
<!ENTITY fdwhandler SYSTEM "fdwhandler.sgml">
<!ENTITY custom-scan SYSTEM "custom-scan.sgml">
<!ENTITY logicaldecoding SYSTEM "logicaldecoding.sgml">
+<!ENTITY replication-origins SYSTEM "replication-origins.sgml">
<!ENTITY protocol SYSTEM "protocol.sgml">
<!ENTITY sources SYSTEM "sources.sgml">
<!ENTITY storage SYSTEM "storage.sgml">
+<!ENTITY tablesample-method SYSTEM "tablesample-method.sgml">
<!-- contrib information -->
<!ENTITY contrib SYSTEM "contrib.sgml">
@@ -133,10 +135,7 @@
<!ENTITY pgstandby SYSTEM "pgstandby.sgml">
<!ENTITY pgstatstatements SYSTEM "pgstatstatements.sgml">
<!ENTITY pgstattuple SYSTEM "pgstattuple.sgml">
-<!ENTITY pgtestfsync SYSTEM "pgtestfsync.sgml">
-<!ENTITY pgtesttiming SYSTEM "pgtesttiming.sgml">
<!ENTITY pgtrgm SYSTEM "pgtrgm.sgml">
-<!ENTITY pgxlogdump SYSTEM "pg_xlogdump.sgml">
<!ENTITY postgres-fdw SYSTEM "postgres-fdw.sgml">
<!ENTITY seg SYSTEM "seg.sgml">
<!ENTITY contrib-spi SYSTEM "contrib-spi.sgml">
@@ -148,6 +147,8 @@
<!ENTITY test-parser SYSTEM "test-parser.sgml">
<!ENTITY test-shm-mq SYSTEM "test-shm-mq.sgml">
<!ENTITY tsearch2 SYSTEM "tsearch2.sgml">
+<!ENTITY tsm-system-rows SYSTEM "tsm-system-rows.sgml">
+<!ENTITY tsm-system-time SYSTEM "tsm-system-time.sgml">
<!ENTITY unaccent SYSTEM "unaccent.sgml">
<!ENTITY uuid-ossp SYSTEM "uuid-ossp.sgml">
<!ENTITY vacuumlo SYSTEM "vacuumlo.sgml">
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index 5f7bf6aa13..c6e354054c 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -8296,6 +8296,12 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple
<entry><literal>box(circle '((0,0),2.0)')</literal></entry>
</row>
<row>
+ <entry><literal><function>box(<type>point</type>)</function></literal></entry>
+ <entry><type>box</type></entry>
+ <entry>point to empty box</entry>
+ <entry><literal>box(point '(0,0)')</literal></entry>
+ </row>
+ <row>
<entry><literal><function>box(<type>point</type>, <type>point</type>)</function></literal></entry>
<entry><type>box</type></entry>
<entry>points to box</entry>
@@ -8308,6 +8314,12 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple
<entry><literal>box(polygon '((0,0),(1,1),(2,0))')</literal></entry>
</row>
<row>
+ <entry><literal><function>bound_box(<type>box</type>, <type>box</type>)</function></literal></entry>
+ <entry><type>box</type></entry>
+ <entry>boxes to bounding box</entry>
+ <entry><literal>bound_box(box '((0,0),(1,1))', box '((3,3),(4,4))')</literal></entry>
+ </row>
+ <row>
<entry>
<indexterm>
<primary>circle</primary>
@@ -8734,6 +8746,30 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple
<entry><literal>text(inet '192.168.1.5')</literal></entry>
<entry><literal>192.168.1.5/32</literal></entry>
</row>
+ <row>
+ <entry>
+ <indexterm>
+ <primary>inet_same_family</primary>
+ </indexterm>
+ <literal><function>inet_same_family(<type>inet</type>, <type>inet</type>)</function></literal>
+ </entry>
+ <entry><type>boolean</type></entry>
+ <entry>are the addresses from the same family?</entry>
+ <entry><literal>inet_same_family('192.168.1.5/24', '::1')</literal></entry>
+ <entry><literal>false</literal></entry>
+ </row>
+ <row>
+ <entry>
+ <indexterm>
+ <primary>inet_merge</primary>
+ </indexterm>
+ <literal><function>inet_merge(<type>inet</type>, <type>inet</type>)</function></literal>
+ </entry>
+ <entry><type>cidr</type></entry>
+ <entry>the smallest network which includes both of the given networks</entry>
+ <entry><literal>inet_merge('192.168.1.5/24', '192.168.2.5/24')</literal></entry>
+ <entry><literal>192.168.0.0/22</literal></entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -10257,10 +10293,45 @@ table2-mapping
<entry>Do all of these key/element <emphasis>strings</emphasis> exist?</entry>
<entry><literal>'["a", "b"]'::jsonb ?&amp; array['a', 'b']</literal></entry>
</row>
+ <row>
+ <entry><literal>||</literal></entry>
+ <entry><type>jsonb</type></entry>
+ <entry>Concatenate two jsonb values into a new jsonb value</entry>
+ <entry><literal>'["a", "b"]'::jsonb || '["c", "d"]'::jsonb</literal></entry>
+ </row>
+ <row>
+ <entry><literal>-</literal></entry>
+ <entry><type>text</type></entry>
+ <entry>Delete the field with a specified key, or element with this
+ value</entry>
+ <entry><literal>'{"a": "b"}'::jsonb - 'a' </literal></entry>
+ </row>
+ <row>
+ <entry><literal>-</literal></entry>
+ <entry><type>integer</type></entry>
+ <entry>Delete the field or element with specified index (Negative
+ integers count from the end)</entry>
+ <entry><literal>'["a", "b"]'::jsonb - 1 </literal></entry>
+ </row>
+ <row>
+ <entry><literal>-</literal></entry>
+ <entry><type>text[]</type></entry>
+ <entry>Delete the field or element with specified path</entry>
+ <entry><literal>'["a", {"b":1}]'::jsonb - '{1,b}'::text[] </literal></entry>
+ </row>
</tbody>
</tgroup>
</table>
+ <note>
+ <para>
+ The <literal>||</> operator concatenates the elements at the top level of
+ each of its operands. It does not operate recursively. For example, if
+ both operands are objects with a common key field name, the value of the
+ field in the result will just be the value from the right hand operand.
+ </para>
+ </note>
+
<para>
<xref linkend="functions-json-creation-table"> shows the functions that are
available for creating <type>json</type> and <type>jsonb</type> values.
@@ -10767,6 +10838,49 @@ table2-mapping
<entry><literal>json_strip_nulls('[{"f1":1,"f2":null},2,null,3]')</literal></entry>
<entry><literal>[{"f1":1},2,null,3]</literal></entry>
</row>
+ <row>
+ <entry><para><literal>jsonb_set(target jsonb, path text[], new_value jsonb<optional>, <parameter>create_missing</parameter> <type>boolean</type></optional>)</literal>
+ </para></entry>
+ <entry><para><type>jsonb</type></para></entry>
+ <entry>
+ Returns <replaceable>target</replaceable>
+ with the section designated by <replaceable>path</replaceable>
+ replaced by <replaceable>new_value</replaceable>, or with
+ <replaceable>new_value</replaceable> added if
+ <replaceable>create_missing</replaceable> is true (default is
+ <literal>true</>) and the item
+ designated by <replaceable>path</replaceable> does not exist.
+ </entry>
+ <entry><para><literal>jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0,f1}','[2,3,4]', false)</literal>
+ </para><para><literal>jsonb_set('[{"f1":1,"f2":null},2]', '{0,f3}','[2,3,4]')</literal>
+ </para></entry>
+ <entry><para><literal>[{"f1":[2,3,4],"f2":null},2,null,3]</literal>
+ </para><para><literal>[{"f1": 1, "f2": null, "f3": [2, 3, 4]}, 2]</literal>
+ </para></entry>
+ </row>
+ <row>
+ <entry><para><literal>jsonb_pretty(from_json jsonb)</literal>
+ </para></entry>
+ <entry><para><type>text</type></para></entry>
+ <entry>
+ Returns <replaceable>from_json</replaceable>
+ as indented json text.
+ </entry>
+ <entry><literal>jsonb_pretty('[{"f1":1,"f2":null},2,null,3]')</literal></entry>
+ <entry>
+<programlisting>
+ [
+ {
+ "f1": 1,
+ "f2": null
+ },
+ 2,
+ null,
+ 3
+ ]
+</programlisting>
+ </entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -10795,6 +10909,27 @@ table2-mapping
<note>
<para>
+ All the items of the <literal>path</> parameter of <literal>jsonb_set</>
+ must be present in the <literal>target</>, unless
+ <literal>create_missing</> is true, in which case all but the last item
+ must be present. If these conditions are not met the <literal>target</>
+ is returned unchanged.
+ </para>
+ <para>
+ If the last path item is an object key, it will be created if it
+ is absent and given the new value. If the last path item is an array
+ index, if it is positive the item to set is found by counting from
+ the left, and if negative by counting from the right - <literal>-1</>
+ designates the rightmost element, and so on.
+ If the item is out of the range -array_length .. array_length -1,
+ and create_missing is true, the new value is added at the beginning
+ of the array if the item is negative, and at the end of the array if
+ it is positive.
+ </para>
+ </note>
+
+ <note>
+ <para>
The <literal>json_typeof</> function's <literal>null</> return value
should not be confused with a SQL NULL. While
calling <literal>json_typeof('null'::json)</> will
@@ -12090,6 +12225,17 @@ NULL baz</literallayout>(3 rows)</entry>
<entry><literal>upper_inf('(,)'::daterange)</literal></entry>
<entry><literal>true</literal></entry>
</row>
+ <row>
+ <entry>
+ <literal>
+ <function>range_merge</function>(<type>anyrange</type>, <type>anyrange</type>)
+ </literal>
+ </entry>
+ <entry><type>anyrange</type></entry>
+ <entry>the smallest range which includes both of the given ranges</entry>
+ <entry><literal>range_merge('[1,2)'::int4range, '[3,4)'::int4range)</literal></entry>
+ <entry><literal>[1,4)</literal></entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -12119,7 +12265,9 @@ NULL baz</literallayout>(3 rows)</entry>
<xref linkend="functions-aggregate-statistics-table">.
The built-in ordered-set aggregate functions
are listed in <xref linkend="functions-orderedset-table"> and
- <xref linkend="functions-hypothetical-table">.
+ <xref linkend="functions-hypothetical-table">. Grouping operations,
+ which are closely related to aggregate functions, are listed in
+ <xref linkend="functions-grouping-table">.
The special syntax considerations for aggregate
functions are explained in <xref linkend="syntax-aggregates">.
Consult <xref linkend="tutorial-agg"> for additional introductory
@@ -13217,6 +13365,72 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab;
to the rule specified in the <literal>ORDER BY</> clause.
</para>
+ <table id="functions-grouping-table">
+ <title>Grouping Operations</title>
+
+ <tgroup cols="3">
+ <thead>
+ <row>
+ <entry>Function</entry>
+ <entry>Return Type</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+
+ <tbody>
+
+ <row>
+ <entry>
+ <indexterm>
+ <primary>GROUPING</primary>
+ </indexterm>
+ <function>GROUPING(<replaceable class="parameter">args...</replaceable>)</function>
+ </entry>
+ <entry>
+ <type>integer</type>
+ </entry>
+ <entry>
+ Integer bitmask indicating which arguments are not being included in the current
+ grouping set
+ </entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <para>
+ Grouping operations are used in conjunction with grouping sets (see
+ <xref linkend="queries-grouping-sets">) to distinguish result rows. The
+ arguments to the <literal>GROUPING</> operation are not actually evaluated,
+ but they must match exactly expressions given in the <literal>GROUP BY</>
+ clause of the associated query level. Bits are assigned with the rightmost
+ argument being the least-significant bit; each bit is 0 if the corresponding
+ expression is included in the grouping criteria of the grouping set generating
+ the result row, and 1 if it is not. For example:
+<screen>
+<prompt>=&gt;</> <userinput>SELECT * FROM items_sold;</>
+ make | model | sales
+-------+-------+-------
+ Foo | GT | 10
+ Foo | Tour | 20
+ Bar | City | 15
+ Bar | Sport | 5
+(4 rows)
+
+<prompt>=&gt;</> <userinput>SELECT make, model, GROUPING(make,model), sum(sales) FROM items_sold GROUP BY ROLLUP(make,model);</>
+ make | model | grouping | sum
+-------+-------+----------+-----
+ Foo | GT | 0 | 10
+ Foo | Tour | 0 | 20
+ Bar | City | 0 | 15
+ Bar | Sport | 0 | 5
+ Foo | | 1 | 30
+ Bar | | 1 | 20
+ | | 3 | 50
+(7 rows)
+</screen>
+ </para>
+
</sect1>
<sect1 id="functions-window">
@@ -14617,7 +14831,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n);
<row>
<entry><literal><function>version()</function></literal></entry>
<entry><type>text</type></entry>
- <entry><productname>PostgreSQL</> version information</entry>
+ <entry><productname>PostgreSQL</> version information. See also <xref linkend="guc-server-version-num"> for a machine-readable version.</entry>
</row>
</tbody>
</tgroup>
@@ -14804,7 +15018,12 @@ SET search_path TO <replaceable>schema</> <optional>, <replaceable>schema</>, ..
<para>
<function>version</function> returns a string describing the
- <productname>PostgreSQL</productname> server's version.
+ <productname>PostgreSQL</productname> server's version. You can also
+ get this information from <xref linkend="guc-server-version"> or
+ for a machine-readable version, <xref linkend="guc-server-version-num">.
+ Software developers should use <literal>server_version_num</literal>
+ (available since 8.2) or <xref linkend="libpq-pqserverversion"> instead
+ of parsing the text version.
</para>
<indexterm>
@@ -16051,7 +16270,7 @@ SELECT collation for ('foo' COLLATE "de_DE");
<row>
<entry><literal><function>txid_current()</function></literal></entry>
<entry><type>bigint</type></entry>
- <entry>get current transaction ID</entry>
+ <entry>get current transaction ID, assigning a new one if the current transaction does not have one</entry>
</row>
<row>
<entry><literal><function>txid_current_snapshot()</function></literal></entry>
@@ -16539,11 +16758,12 @@ SELECT set_config('log_statement_stats', 'off', false);
<function>pg_start_backup</> accepts an
arbitrary user-defined label for the backup. (Typically this would be
the name under which the backup dump file will be stored.) The function
- writes a backup label file (<filename>backup_label</>) into the
- database cluster's data directory, performs a checkpoint,
- and then returns the backup's starting transaction log location as text.
- The user can ignore this result value, but it is
- provided in case it is useful.
+ writes a backup label file (<filename>backup_label</>) and, if there
+ are any links in the <filename>pg_tblspc/</> directory, a tablespace map
+ file (<filename>tablespace_map</>) into the database cluster's data
+ directory, performs a checkpoint, and then returns the backup's starting
+ transaction log location as text. The user can ignore this result value,
+ but it is provided in case it is useful.
<programlisting>
postgres=# select pg_start_backup('label_goes_here');
pg_start_backup
@@ -16558,7 +16778,8 @@ postgres=# select pg_start_backup('label_goes_here');
</para>
<para>
- <function>pg_stop_backup</> removes the label file created by
+ <function>pg_stop_backup</> removes the label file and, if it exists,
+ the <filename>tablespace_map</> file created by
<function>pg_start_backup</>, and creates a backup history file in
the transaction log archive area. The history file includes the label given to
<function>pg_start_backup</>, the starting and ending transaction log locations for
@@ -16874,11 +17095,13 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup());
<title>Replication Functions</title>
<para>
- The functions shown in <xref linkend="functions-replication-table"> are
- for controlling and interacting with replication features.
- See <xref linkend="streaming-replication">
- and <xref linkend="streaming-replication-slots"> for information about the
- underlying features. Use of these functions is restricted to superusers.
+ The functions shown
+ in <xref linkend="functions-replication-table"> are for
+ controlling and interacting with replication features.
+ See <xref linkend="streaming-replication">,
+ <xref linkend="streaming-replication-slots">, <xref linkend="replication-origins">
+ for information about the underlying features. Use of these
+ functions is restricted to superusers.
</para>
<para>
@@ -17035,6 +17258,194 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup());
on future calls.
</entry>
</row>
+
+ <row id="pg-replication-origin-create">
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_create</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_create(<parameter>node_name</parameter> <type>text</type>)</function></literal>
+ </entry>
+ <entry>
+ <parameter>internal_id</parameter> <type>oid</type>
+ </entry>
+ <entry>
+ Create a replication origin with the passed in external
+ name, and create an internal id for it.
+ </entry>
+ </row>
+
+ <row id="pg-replication-origin-drop">
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_drop</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_drop(<parameter>node_name</parameter> <type>text</type>)</function></literal>
+ </entry>
+ <entry>
+ void
+ </entry>
+ <entry>
+ Delete a previously created replication origin, including the
+ associated replay progress.
+ </entry>
+ </row>
+
+ <row>
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_oid</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_oid(<parameter>node_name</parameter> <type>text</type>)</function></literal>
+ </entry>
+ <entry>
+ <parameter>internal_id</parameter> <type>oid</type>
+ </entry>
+ <entry>
+ Lookup replication origin by name and return the internal id. If no
+ corresponding replication origin is found an error is thrown.
+ </entry>
+ </row>
+
+ <row id="pg-replication-origin-session-setup">
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_session_setup</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_session_setup(<parameter>node_name</parameter> <type>text</type>)</function></literal>
+ </entry>
+ <entry>
+ void
+ </entry>
+ <entry>
+ Configure the current session to be replaying from the passed in
+ origin, allowing replay progress to be tracked. Use
+ <function>pg_replication_origin_session_reset</function> to revert.
+ Can only be used if no previous origin is configured.
+ </entry>
+ </row>
+
+ <row>
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_session_reset</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_session_reset()</function></literal>
+ </entry>
+ <entry>
+ void
+ </entry>
+ <entry>
+ Cancel the effects
+ of <function>pg_replication_origin_session_setup()</function>.
+ </entry>
+ </row>
+
+ <row>
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_session_is_setup</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_session_is_setup()</function></literal>
+ </entry>
+ <entry>
+ bool
+ </entry>
+ <entry>
+ Has a replication origin been configured in the current session?
+ </entry>
+ </row>
+
+ <row id="pg-replication-origin-session-progress">
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_session_progress</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_session_progress(<parameter>flush</parameter> <type>bool</type>)</function></literal>
+ </entry>
+ <entry>
+ pg_lsn
+ </entry>
+ <entry>
+ Return the replay position for the replication origin configured in
+ the current session. The parameter <parameter>flush</parameter>
+ determines whether the corresponding local transaction will be
+ guaranteed to have been flushed to disk or not.
+ </entry>
+ </row>
+
+ <row id="pg-replication-origin-xact-setup">
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_xact_setup</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_xact_setup(<parameter>origin_lsn</parameter> <type>pg_lsn</type>, <parameter>origin_timestamp</parameter> <type>timestamptz</type>)</function></literal>
+ </entry>
+ <entry>
+ void
+ </entry>
+ <entry>
+ Mark the current transaction to be replaying a transaction that has
+ committed at the passed in <acronym>LSN</acronym> and timestamp. Can
+ only be called when a replication origin has previously been
+ configured using
+ <function>pg_replication_origin_session_setup()</function>.
+ </entry>
+ </row>
+
+ <row id="pg-replication-origin-xact-reset">
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_xact_reset</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_xact_reset()</function></literal>
+ </entry>
+ <entry>
+ void
+ </entry>
+ <entry>
+ Cancel the effects of
+ <function>pg_replication_origin_xact_setup()</function>.
+ </entry>
+ </row>
+
+ <row>
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_advance</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_advance(<parameter>node_name</parameter> <type>text</type>, <parameter>pos</parameter> <type>pg_lsn</type>)</function></literal>
+ </entry>
+ <entry>
+ void
+ </entry>
+ <entry>
+ Set replication progress for the passed in node to the passed in
+ position. This primarily is useful for setting up the initial position
+ or a new position after configuration changes and similar. Be aware
+ that careless use of this function can lead to inconsistently
+ replicated data.
+ </entry>
+ </row>
+
+ <row id="pg-replication-origin-progress">
+ <entry>
+ <indexterm>
+ <primary>pg_replication_origin_progress</primary>
+ </indexterm>
+ <literal><function>pg_replication_origin_progress(<parameter>node_name</parameter> <type>text</type>, <parameter>flush</parameter> <type>bool</type>)</function></literal>
+ </entry>
+ <entry>
+ pg_lsn
+ </entry>
+ <entry>
+ Return the replay position for the passed in replication origin. The
+ parameter <parameter>flush</parameter> determines whether the
+ corresponding local transaction will be guaranteed to have been
+ flushed to disk or not.
+ </entry>
+ </row>
+
</tbody>
</tgroup>
</table>
@@ -17824,8 +18235,99 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
see <xref linkend="event-triggers">.
</para>
+ <sect2 id="pg-event-trigger-ddl-command-end-functions">
+ <title>Capturing Changes at Command End</title>
+
+ <indexterm>
+ <primary>pg_event_trigger_ddl_commands</primary>
+ </indexterm>
+
+ <para>
+ <function>pg_event_trigger_ddl_commands</> returns a list of
+ <acronym>DDL</acronym> commands executed by each user action,
+ when invoked in a function attached to a
+ <literal>ddl_command_end</> event trigger. If called in any other
+ context, an error is raised.
+ <function>pg_event_trigger_ddl_commands</> returns one row for each
+ base command executed; some commands that are a single SQL sentence
+ may return more than one row. This function returns the following
+ columns:
+
+ <informaltable>
+ <tgroup cols="3">
+ <thead>
+ <row>
+ <entry>Name</entry>
+ <entry>Type</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+
+ <tbody>
+ <row>
+ <entry><literal>classid</literal></entry>
+ <entry><type>Oid</type></entry>
+ <entry>OID of catalog the object belongs in</entry>
+ </row>
+ <row>
+ <entry><literal>objid</literal></entry>
+ <entry><type>Oid</type></entry>
+ <entry>OID of the object in the catalog</entry>
+ </row>
+ <row>
+ <entry><literal>objsubid</literal></entry>
+ <entry><type>integer</type></entry>
+ <entry>Object sub-id (e.g. attribute number for columns)</entry>
+ </row>
+ <row>
+ <entry><literal>command_tag</literal></entry>
+ <entry><type>text</type></entry>
+ <entry>command tag</entry>
+ </row>
+ <row>
+ <entry><literal>object_type</literal></entry>
+ <entry><type>text</type></entry>
+ <entry>Type of the object</entry>
+ </row>
+ <row>
+ <entry><literal>schema_name</literal></entry>
+ <entry><type>text</type></entry>
+ <entry>
+ Name of the schema the object belongs in, if any; otherwise <literal>NULL</>.
+ No quoting is applied.
+ </entry>
+ </row>
+ <row>
+ <entry><literal>object_identity</literal></entry>
+ <entry><type>text</type></entry>
+ <entry>
+ Text rendering of the object identity, schema-qualified. Each and every
+ identifier present in the identity is quoted if necessary.
+ </entry>
+ </row>
+ <row>
+ <entry><literal>in_extension</literal></entry>
+ <entry><type>bool</type></entry>
+ <entry>whether the command is part of an extension script</entry>
+ </row>
+ <row>
+ <entry><literal>command</literal></entry>
+ <entry><type>pg_ddl_command</type></entry>
+ <entry>
+ A complete representation of the command, in internal format.
+ This cannot be output directly, but it can be passed to other
+ functions to obtain different pieces of information about the
+ command.
+ </entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </sect2>
+
<sect2 id="pg-event-trigger-sql-drop-functions">
- <title>Processing objects dropped by a DDL command.</title>
+ <title>Processing Objects Dropped by a DDL Command</title>
<indexterm>
<primary>pg_event_trigger_dropped_objects</primary>
diff --git a/doc/src/sgml/gist.sgml b/doc/src/sgml/gist.sgml
index e7d1ff9d83..2d1a5aa863 100644
--- a/doc/src/sgml/gist.sgml
+++ b/doc/src/sgml/gist.sgml
@@ -105,6 +105,7 @@
<literal>~=</>
</entry>
<entry>
+ <literal>&lt;-&gt;</>
</entry>
</row>
<row>
@@ -163,6 +164,7 @@
<literal>~=</>
</entry>
<entry>
+ <literal>&lt;-&gt;</>
</entry>
</row>
<row>
@@ -212,7 +214,7 @@
To use it, mention the class name in <command>CREATE INDEX</>,
for example
<programlisting>
-CREATE INDEX ON my_table USING gist (my_inet_column inet_ops);
+CREATE INDEX ON my_table USING GIST (my_inet_column inet_ops);
</programlisting>
</para>
@@ -325,7 +327,6 @@ LANGUAGE C STRICT;
And the matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_consistent(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_consistent);
Datum
@@ -388,7 +389,6 @@ LANGUAGE C STRICT;
And the matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_union(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_union);
Datum
@@ -462,7 +462,6 @@ LANGUAGE C STRICT;
And the matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_compress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_compress);
Datum
@@ -523,7 +522,6 @@ LANGUAGE C STRICT;
And the matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_decompress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_decompress);
Datum
@@ -563,7 +561,6 @@ LANGUAGE C STRICT; -- in some cases penalty functions need not be strict
And the matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_penalty(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_penalty);
Datum
@@ -612,7 +609,6 @@ LANGUAGE C STRICT;
And the matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_picksplit(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_picksplit);
Datum
@@ -719,7 +715,6 @@ LANGUAGE C STRICT;
And the matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_same(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_same);
Datum
@@ -770,7 +765,6 @@ LANGUAGE C STRICT;
And the matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_distance(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_distance);
Datum
@@ -780,6 +774,7 @@ my_distance(PG_FUNCTION_ARGS)
data_type *query = PG_GETARG_DATA_TYPE_P(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
/* Oid subtype = PG_GETARG_OID(3); */
+ /* bool *recheck = (bool *) PG_GETARG_POINTER(4); */
data_type *key = DatumGetDataType(entry-&gt;key);
double retval;
@@ -792,17 +787,34 @@ my_distance(PG_FUNCTION_ARGS)
</programlisting>
The arguments to the <function>distance</> function are identical to
- the arguments of the <function>consistent</> function, except that no
- recheck flag is used. The distance to a leaf index entry must always
- be determined exactly, since there is no way to re-order the tuples
- once they are returned. Some approximation is allowed when determining
- the distance to an internal tree node, so long as the result is never
- greater than any child's actual distance. Thus, for example, distance
- to a bounding box is usually sufficient in geometric applications. The
- result value can be any finite <type>float8</> value. (Infinity and
- minus infinity are used internally to handle cases such as nulls, so it
- is not recommended that <function>distance</> functions return these
- values.)
+ the arguments of the <function>consistent</> function.
+ </para>
+
+ <para>
+ Some approximation is allowed when determining the distance, so long
+ as the result is never greater than the entry's actual distance. Thus,
+ for example, distance to a bounding box is usually sufficient in
+ geometric applications. For an internal tree node, the distance
+ returned must not be greater than the distance to any of the child
+ nodes. If the returned distance is not exact, the function must set
+ <literal>*recheck</> to true. (This is not necessary for internal tree
+ nodes; for them, the calculation is always assumed to be inexact.) In
+ this case the executor will calculate the accurate distance after
+ fetching the tuple from the heap, and reorder the tuples if necessary.
+ </para>
+
+ <para>
+ If the distance function returns <literal>*recheck = true</> for any
+ leaf node, the original ordering operator's return type must
+ be <type>float8</> or <type>float4</>, and the distance function's
+ result values must be comparable to those of the original ordering
+ operator, since the executor will sort using both distance function
+ results and recalculated ordering-operator results. Otherwise, the
+ distance function's result values can be any finite <type>float8</>
+ values, so long as the relative order of the result values matches the
+ order returned by the ordering operator. (Infinity and minus infinity
+ are used internally to handle cases such as nulls, so it is not
+ recommended that <function>distance</> functions return these values.)
</para>
</listitem>
@@ -833,13 +845,12 @@ LANGUAGE C STRICT;
struct, whose 'key' field contains the same datum in the original,
uncompressed form. If the opclass' compress function does nothing for
leaf entries, the fetch method can return the argument as is.
- </para>
+ </para>
<para>
The matching code in the C module could then follow this skeleton:
<programlisting>
-Datum my_fetch(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(my_fetch);
Datum
diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml
index a17f55545c..d2f7fec523 100644
--- a/doc/src/sgml/high-availability.sgml
+++ b/doc/src/sgml/high-availability.sgml
@@ -1220,6 +1220,46 @@ primary_slot_name = 'node_a_slot'
</sect3>
</sect2>
+
+ <sect2 id="continuous-archiving-in-standby">
+ <title>Continuous Archiving in Standby</title>
+
+ <indexterm>
+ <primary>continuous archiving</primary>
+ <secondary>in standby</secondary>
+ </indexterm>
+
+ <para>
+ When continuous WAL archiving is used in a standby, there are two
+ different scenarios: the WAL archive can be shared between the primary
+ and the standby, or the standby can have its own WAL archive. When
+ the standby has its own WAL archive, set <varname>archive_mode</varname>
+ to <literal>always</literal>, and the standby will call the archive
+ command for every WAL segment it receives, whether it's by restoring
+ from the archive or by streaming replication. The shared archive can
+ be handled similarly, but the <varname>archive_command</varname> must
+ test if the file being archived exists already, and if the existing file
+ has identical contents. This requires more care in the
+ <varname>archive_command</varname>, as it must
+ be careful to not overwrite an existing file with different contents,
+ but return success if exactly the same file is archived twice. And
+ all that must be done free of race conditions, if two servers attempt
+ to archive the same file at the same time.
+ </para>
+
+ <para>
+ If <varname>archive_mode</varname> is set to <literal>on</>, the
+ archiver is not enabled during recovery or standby mode. If the standby
+ server is promoted, it will start archiving after the promotion, but
+ will not archive any WAL it did not generate itself. To get a complete
+ series of WAL files in the archive, you must ensure that all WAL is
+ archived before it reaches the standby. This is inherently true with
+ file-based log shipping, as the standby can only restore files that
+ are found in the archive, but not if streaming replication is enabled.
+ When a server is not in recovery mode, there is no difference between
+ <literal>on</literal> and <literal>always</literal> modes.
+ </para>
+ </sect2>
</sect1>
<sect1 id="warm-standby-failover">
diff --git a/doc/src/sgml/hstore.sgml b/doc/src/sgml/hstore.sgml
index fbe9543dfe..94f01f8dfe 100644
--- a/doc/src/sgml/hstore.sgml
+++ b/doc/src/sgml/hstore.sgml
@@ -597,6 +597,25 @@ ALTER TABLE tablename ALTER hstorecol TYPE hstore USING hstorecol || '';
</sect2>
<sect2>
+ <title>Transforms</title>
+
+ <para>
+ Additional extensions are available that implement transforms for
+ the <type>hstore</type> type for the languages PL/Perl and PL/Python. The
+ extensions for PL/Perl are called <literal>hstore_plperl</literal>
+ and <literal>hstore_plperlu</literal>, for trusted and untrusted PL/Perl.
+ If you install these transforms and specify them when creating a
+ function, <type>hstore</type> values are mapped to Perl hashes. The
+ extensions for PL/Python are
+ called <literal>hstore_plpythonu</literal>, <literal>hstore_plpython2u</literal>,
+ and <literal>hstore_plpython3u</literal>
+ (see <xref linkend="plpython-python23"> for the PL/Python naming
+ convention). If you use them, <type>hstore</type> values are mapped to
+ Python dictionaries.
+ </para>
+ </sect2>
+
+ <sect2>
<title>Authors</title>
<para>
diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml
index b73463a323..309fd1269b 100644
--- a/doc/src/sgml/indices.sgml
+++ b/doc/src/sgml/indices.sgml
@@ -189,7 +189,7 @@ CREATE INDEX test1_id_index ON test1 (id);
<literal>=</literal> operator.
The following command is used to create a hash index:
<synopsis>
-CREATE INDEX <replaceable>name</replaceable> ON <replaceable>table</replaceable> USING hash (<replaceable>column</replaceable>);
+CREATE INDEX <replaceable>name</replaceable> ON <replaceable>table</replaceable> USING HASH (<replaceable>column</replaceable>);
</synopsis>
</para>
diff --git a/doc/src/sgml/information_schema.sgml b/doc/src/sgml/information_schema.sgml
index 22f43c8a5b..ca1f20b338 100644
--- a/doc/src/sgml/information_schema.sgml
+++ b/doc/src/sgml/information_schema.sgml
@@ -5519,6 +5519,91 @@ ORDER BY c.ordinal_position;
</table>
</sect1>
+ <sect1 id="infoschema-transforms">
+ <title><literal>transforms</literal></title>
+
+ <para>
+ The view <literal>transforms</literal> contains information about the
+ transforms defined in the current database. More precisely, it contains a
+ row for each function contained in a transform (the <quote>from SQL</quote>
+ or <quote>to SQL</quote> function).
+ </para>
+
+ <table>
+ <title><literal>transforms</literal> Columns</title>
+
+ <tgroup cols="3">
+ <thead>
+ <row>
+ <entry>Name</entry>
+ <entry>Data Type</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+
+ <tbody>
+ <row>
+ <entry><literal>udt_catalog</literal></entry>
+ <entry><type>sql_identifier</type></entry>
+ <entry>Name of the database that contains the type the transform is for (always the current database)</entry>
+ </row>
+
+ <row>
+ <entry><literal>udt_schema</literal></entry>
+ <entry><type>sql_identifier</type></entry>
+ <entry>Name of the schema that contains the type the transform is for</entry>
+ </row>
+
+ <row>
+ <entry><literal>udt_name</literal></entry>
+ <entry><type>sql_identifier</type></entry>
+ <entry>Name of the type the transform is for</entry>
+ </row>
+
+ <row>
+ <entry><literal>specific_catalog</literal></entry>
+ <entry><literal>sql_identifier</literal></entry>
+ <entry>Name of the database containing the function (always the current database)</entry>
+ </row>
+
+ <row>
+ <entry><literal>specific_schema</literal></entry>
+ <entry><literal>sql_identifier</literal></entry>
+ <entry>Name of the schema containing the function</entry>
+ </row>
+
+ <row>
+ <entry><literal>specific_name</literal></entry>
+ <entry><literal>sql_identifier</literal></entry>
+ <entry>
+ The <quote>specific name</quote> of the function. See <xref
+ linkend="infoschema-routines"> for more information.
+ </entry>
+ </row>
+
+ <row>
+ <entry><literal>group_name</literal></entry>
+ <entry><literal>sql_identifier</literal></entry>
+ <entry>
+ The SQL standard allows defining transforms in <quote>groups</quote>,
+ and selecting a group at run time. PostgreSQL does not support this.
+ Instead, transforms are specific to a language. As a compromise, this
+ field contains the language the transform is for.
+ </entry>
+ </row>
+
+ <row>
+ <entry><literal>transform_type</literal></entry>
+ <entry><type>character_data</type></entry>
+ <entry>
+ <literal>FROM SQL</literal> or <literal>TO SQL</literal>
+ </entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </sect1>
+
<sect1 id="infoschema-triggered-update-columns">
<title><literal>triggered_update_columns</literal></title>
diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml
index 4968e09c99..89648349c2 100644
--- a/doc/src/sgml/installation.sgml
+++ b/doc/src/sgml/installation.sgml
@@ -169,32 +169,22 @@ su - postgres
also on most platforms. This appears to be the default in
recent <productname>Perl</productname> versions, but it was not
in earlier versions, and in any case it is the choice of whomever
- installed Perl at your site.
+ installed Perl at your site. <filename>configure</filename> will fail
+ if building <application>PL/Perl</application> is selected but it cannot
+ find a shared <filename>libperl</filename>. In that case, you will have
+ to rebuild and install <productname>Perl</productname> manually to be
+ able to build <application>PL/Perl</application>. During the
+ configuration process for <productname>Perl</productname>, request a
+ shared library.
+ </para>
+
+ <para>
If you intend to make more than incidental use of
<application>PL/Perl</application>, you should ensure that the
<productname>Perl</productname> installation was built with the
<literal>usemultiplicity</> option enabled (<literal>perl -V</>
will show whether this is the case).
</para>
-
- <para>
- If you don't have the shared library but you need one, a message
- like this will appear during the <productname>PostgreSQL</>
- build to point out this fact:
-<screen>
-*** Cannot build PL/Perl because libperl is not a shared library.
-*** You might have to rebuild your Perl installation. Refer to
-*** the documentation for details.
-</screen>
- (If you don't follow the on-screen output you will merely notice
- that the <application>PL/Perl</application> library object,
- <filename>plperl.so</filename> or similar, will not be
- installed.) If you see this, you will have to rebuild and
- install <productname>Perl</productname> manually to be able to
- build <application>PL/Perl</application>. During the
- configuration process for <productname>Perl</productname>,
- request a shared library.
- </para>
</listitem>
<listitem>
@@ -220,28 +210,15 @@ su - postgres
library, the <indexterm><primary>libpython</primary></indexterm>
<filename>libpython</filename> library must be a shared library
also on most platforms. This is not the case in a default
- <productname>Python</productname> installation. If after
- building and installing <productname>PostgreSQL</> you have a file called
- <filename>plpython.so</filename> (possibly a different
- extension), then everything went well. Otherwise you should
- have seen a notice like this flying by:
-<screen>
-*** Cannot build PL/Python because libpython is not a shared library.
-*** You might have to rebuild your Python installation. Refer to
-*** the documentation for details.
-</screen>
- That means you have to rebuild (part of) your
- <productname>Python</productname> installation to create this
- shared library.
- </para>
-
- <para>
- If you have problems, run <productname>Python</> 2.3 or later's
- configure using the <literal>--enable-shared</> flag. On some
- operating systems you don't have to build a shared library, but
- you will have to convince the <productname>PostgreSQL</> build
- system of this. Consult the <filename>Makefile</filename> in
- the <filename>src/pl/plpython</filename> directory for details.
+ <productname>Python</productname> installation built from source, but a
+ shared library is available in many operating system
+ distributions. <filename>configure</filename> will fail if
+ building <application>PL/Python</application> is selected but it cannot
+ find a shared <filename>libpython</filename>. That might mean that you
+ either have to install additional packages or rebuild (part of) your
+ <productname>Python</productname> installation to provide this shared
+ library. When building from source, run <productname>Python</>'s
+ configure with the <literal>--enable-shared</> flag.
</para>
</listitem>
diff --git a/doc/src/sgml/json.sgml b/doc/src/sgml/json.sgml
index 6282ab8853..1e78558e27 100644
--- a/doc/src/sgml/json.sgml
+++ b/doc/src/sgml/json.sgml
@@ -375,13 +375,13 @@ SELECT '"foo"'::jsonb ? 'foo';
implement, see <xref linkend="functions-jsonb-op-table">.)
An example of creating an index with this operator class is:
<programlisting>
-CREATE INDEX idxgin ON api USING gin (jdoc);
+CREATE INDEX idxgin ON api USING GIN (jdoc);
</programlisting>
The non-default GIN operator class <literal>jsonb_path_ops</>
supports indexing the <literal>@&gt;</> operator only.
An example of creating an index with this operator class is:
<programlisting>
-CREATE INDEX idxginp ON api USING gin (jdoc jsonb_path_ops);
+CREATE INDEX idxginp ON api USING GIN (jdoc jsonb_path_ops);
</programlisting>
</para>
@@ -426,7 +426,7 @@ SELECT jdoc-&gt;'guid', jdoc-&gt;'name' FROM api WHERE jdoc -&gt; 'tags' ? 'qui'
the <literal>"tags"</> key is common, defining an index like this
may be worthwhile:
<programlisting>
-CREATE INDEX idxgintags ON api USING gin ((jdoc -&gt; 'tags'));
+CREATE INDEX idxgintags ON api USING GIN ((jdoc -&gt; 'tags'));
</programlisting>
Now, the <literal>WHERE</> clause <literal>jdoc -&gt; 'tags' ? 'qui'</>
will be recognized as an application of the indexable
diff --git a/doc/src/sgml/keywords.sgml b/doc/src/sgml/keywords.sgml
index b0dfd5ff75..ea582116ab 100644
--- a/doc/src/sgml/keywords.sgml
+++ b/doc/src/sgml/keywords.sgml
@@ -854,6 +854,13 @@
<entry></entry>
</row>
<row>
+ <entry><token>CONFLICT</token></entry>
+ <entry>non-reserved</entry>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
<entry><token>CONNECT</token></entry>
<entry></entry>
<entry>reserved</entry>
diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml
index 3650567852..5fa2f77ea8 100644
--- a/doc/src/sgml/logicaldecoding.sgml
+++ b/doc/src/sgml/logicaldecoding.sgml
@@ -62,10 +62,10 @@ postgres=# SELECT * FROM pg_create_logical_replication_slot('regression_slot', '
regression_slot | 0/16B1970
(1 row)
-postgres=# SELECT * FROM pg_replication_slots;
- slot_name | plugin | slot_type | datoid | database | active | xmin | catalog_xmin | restart_lsn
------------------+---------------+-----------+--------+----------+--------+--------+--------------+-------------
- regression_slot | test_decoding | logical | 12052 | postgres | f | | 684 | 0/16A4408
+postgres=# SELECT slot_name, plugin, slot_type, database, active, restart_lsn FROM pg_replication_slots;
+ slot_name | plugin | slot_type | database | active | restart_lsn
+-----------------+---------------+-----------+----------+--------+-------------
+ regression_slot | test_decoding | logical | postgres | f | 0/16A4408
(1 row)
postgres=# -- There are no changes to see yet
@@ -363,6 +363,7 @@ typedef struct OutputPluginCallbacks
LogicalDecodeBeginCB begin_cb;
LogicalDecodeChangeCB change_cb;
LogicalDecodeCommitCB commit_cb;
+ LogicalDecodeFilterByOriginCB filter_by_origin_cb;
LogicalDecodeShutdownCB shutdown_cb;
} OutputPluginCallbacks;
@@ -370,7 +371,8 @@ typedef void (*LogicalOutputPluginInit)(struct OutputPluginCallbacks *cb);
</programlisting>
The <function>begin_cb</function>, <function>change_cb</function>
and <function>commit_cb</function> callbacks are required,
- while <function>startup_cb</function>
+ while <function>startup_cb</function>,
+ <function>filter_by_origin_cb</function>
and <function>shutdown_cb</function> are optional.
</para>
</sect2>
@@ -569,6 +571,37 @@ typedef void (*LogicalDecodeChangeCB) (
</para>
</note>
</sect3>
+
+ <sect3 id="logicaldecoding-output-plugin-filter-origin">
+ <title>Origin Filter Callback</title>
+
+ <para>
+ The optional <function>filter_by_origin_cb</function> callback
+ is called to determine whether data that has been replayed
+ from <parameter>origin_id</parameter> is of interest to the
+ output plugin.
+<programlisting>
+typedef bool (*LogicalDecodeFilterByOriginCB) (
+ struct LogicalDecodingContext *ctx,
+ RepNodeId origin_id
+);
+</programlisting>
+ The <parameter>ctx</parameter> parameter has the same contents
+ as for the other callbacks. No information but the origin is
+ available. To signal that changes originating on the passed in
+ node are irrelevant, return true, causing them to be filtered
+ away; false otherwise. The other callbacks will not be called
+ for transactions and changes that have been filtered away.
+ </para>
+ <para>
+ This is useful when implementing cascading or multidirectional
+ replication solutions. Filtering by the origin allows one to
+ prevent replicating the same changes back and forth in such
+ setups. While transactions and changes also carry information
+ about the origin, filtering via this callback is noticeably
+ more efficient.
+ </para>
+ </sect3>
</sect2>
<sect2 id="logicaldecoding-output-plugin-output">
@@ -616,7 +649,7 @@ OutputPluginWrite(ctx, true);
<title>Synchronous Replication Support for Logical Decoding</title>
<para>
- Logical decoding can be used to to build
+ Logical decoding can be used to build
<link linkend="synchronous-replication">synchronous
replication</link> solutions with the same user interface as synchronous
replication for <link linkend="streaming-replication">streaming
diff --git a/doc/src/sgml/ltree.sgml b/doc/src/sgml/ltree.sgml
index cd8a061c94..8a7a36390d 100644
--- a/doc/src/sgml/ltree.sgml
+++ b/doc/src/sgml/ltree.sgml
@@ -550,8 +550,8 @@ INSERT INTO test VALUES ('Top.Collections.Pictures.Astronomy');
INSERT INTO test VALUES ('Top.Collections.Pictures.Astronomy.Stars');
INSERT INTO test VALUES ('Top.Collections.Pictures.Astronomy.Galaxies');
INSERT INTO test VALUES ('Top.Collections.Pictures.Astronomy.Astronauts');
-CREATE INDEX path_gist_idx ON test USING gist(path);
-CREATE INDEX path_idx ON test USING btree(path);
+CREATE INDEX path_gist_idx ON test USING GIST (path);
+CREATE INDEX path_idx ON test USING BTREE (path);
</programlisting>
<para>
@@ -665,6 +665,21 @@ ltreetest=&gt; SELECT ins_label(path,2,'Space') FROM test WHERE path &lt;@ 'Top.
</sect2>
<sect2>
+ <title>Transforms</title>
+
+ <para>
+ Additional extensions are available that implement transforms for
+ the <type>ltree</type> type for PL/Python. The extensions are
+ called <literal>ltree_plpythonu</literal>, <literal>ltree_plpython2u</literal>,
+ and <literal>ltree_plpython3u</literal>
+ (see <xref linkend="plpython-python23"> for the PL/Python naming
+ convention). If you install these transforms and specify them when
+ creating a function, <type>ltree</type> values are mapped to Python lists.
+ (The reverse is currently not supported, however.)
+ </para>
+ </sect2>
+
+ <sect2>
<title>Authors</title>
<para>
diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml
index 8764e0091a..e34426ff18 100644
--- a/doc/src/sgml/maintenance.sgml
+++ b/doc/src/sgml/maintenance.sgml
@@ -628,6 +628,9 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
Like transaction IDs, multixact IDs are implemented as a
32-bit counter and corresponding storage, all of which requires
careful aging management, storage cleanup, and wraparound handling.
+ There is a separate storage area which holds the list of members in
+ each multixact, which also uses a 32-bit counter and which must also
+ be managed.
</para>
<para>
@@ -655,8 +658,12 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
<para>
As a safety device, a whole-table vacuum scan will occur for any table
whose multixact-age is greater than
- <xref linkend="guc-autovacuum-multixact-freeze-max-age">.
- This will occur even if autovacuum is nominally disabled.
+ <xref linkend="guc-autovacuum-multixact-freeze-max-age">. Whole-table
+ vacuum scans will also occur progressively for all tables, starting with
+ those that have the oldest multixact-age, if the amount of used member
+ storage space exceeds 50% of the addressable storage space.
+ Both of these kinds of whole-table scans will occur even if autovacuum is
+ nominally disabled.
</para>
</sect3>
</sect2>
diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml
index f88b16e778..385691e21e 100644
--- a/doc/src/sgml/mvcc.sgml
+++ b/doc/src/sgml/mvcc.sgml
@@ -143,6 +143,20 @@
</para>
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term>
+ serialization anomaly
+ <indexterm><primary>serialization anomaly</primary></indexterm>
+ </term>
+ <listitem>
+ <para>
+ The result of successfully committing a group of transactions
+ is inconsistent with all possible orderings of running those
+ transactions one at a time.
+ </para>
+ </listitem>
+ </varlistentry>
</variablelist>
</para>
@@ -150,13 +164,13 @@
<indexterm>
<primary>transaction isolation level</primary>
</indexterm>
- The four transaction isolation levels and the corresponding
- behaviors are described in <xref linkend="mvcc-isolevel-table">.
+ The SQL standard and PostgreSQL-implemented transaction isolation levels
+ are described in <xref linkend="mvcc-isolevel-table">.
</para>
<table tocentry="1" id="mvcc-isolevel-table">
- <title>Standard <acronym>SQL</acronym> Transaction Isolation Levels</title>
- <tgroup cols="4">
+ <title>Transaction Isolation Levels</title>
+ <tgroup cols="5">
<thead>
<row>
<entry>
@@ -171,6 +185,9 @@
<entry>
Phantom Read
</entry>
+ <entry>
+ Serialization Anomaly
+ </entry>
</row>
</thead>
<tbody>
@@ -179,6 +196,9 @@
Read uncommitted
</entry>
<entry>
+ Allowed, but not in PG
+ </entry>
+ <entry>
Possible
</entry>
<entry>
@@ -202,6 +222,9 @@
<entry>
Possible
</entry>
+ <entry>
+ Possible
+ </entry>
</row>
<row>
@@ -215,6 +238,9 @@
Not possible
</entry>
<entry>
+ Allowed, but not in PG
+ </entry>
+ <entry>
Possible
</entry>
</row>
@@ -232,27 +258,30 @@
<entry>
Not possible
</entry>
+ <entry>
+ Not possible
+ </entry>
</row>
</tbody>
</tgroup>
</table>
<para>
- In <productname>PostgreSQL</productname>, you can request any of the
- four standard transaction isolation levels. But internally, there are
- only three distinct isolation levels, which correspond to the levels Read
- Committed, Repeatable Read, and Serializable. When you select the level Read
- Uncommitted you really get Read Committed, and phantom reads are not possible
- in the <productname>PostgreSQL</productname> implementation of Repeatable
- Read, so the actual
- isolation level might be stricter than what you select. This is
- permitted by the SQL standard: the four isolation levels only
- define which phenomena must not happen, they do not define which
- phenomena must happen. The reason that <productname>PostgreSQL</>
- only provides three isolation levels is that this is the only
- sensible way to map the standard isolation levels to the multiversion
- concurrency control architecture. The behavior of the available
- isolation levels is detailed in the following subsections.
+ In <productname>PostgreSQL</productname>, you can request any of
+ the four standard transaction isolation levels, but internally only
+ three distinct isolation levels are implemented, i.e. PostgreSQL's
+ Read Uncommitted mode behaves like Read Committed. This is because
+ it is the only sensible way to map the standard isolation levels to
+ PostgreSQL's multiversion concurrency control architecture.
+ </para>
+
+ <para>
+ The table also shows that PostgreSQL's Repeatable Read implementation
+ does not allow phantom reads. Stricter behavior is permitted by the
+ SQL standard: the four isolation levels only define which phenomena
+ must not happen, not which phenomena <emphasis>must</> happen.
+ The behavior of the available isolation levels is detailed in the
+ following subsections.
</para>
<para>
@@ -326,8 +355,27 @@
</para>
<para>
- Because of the above rule, it is possible for an updating command to see an
- inconsistent snapshot: it can see the effects of concurrent updating
+ <command>INSERT</command> with an <literal>ON CONFLICT DO UPDATE</> clause
+ behaves similarly. In Read Committed mode, each row proposed for insertion
+ will either insert or update. Unless there are unrelated errors, one of
+ those two outcomes is guaranteed. If a conflict originates in another
+ transaction whose effects are not yet visible to the <command>INSERT
+ </command>, the <command>UPDATE</command> clause will affect that row,
+ even though possibly <emphasis>no</> version of that row is
+ conventionally visible to the command.
+ </para>
+
+ <para>
+ <command>INSERT</command> with an <literal>ON CONFLICT DO
+ NOTHING</> clause may have insertion not proceed for a row due to
+ the outcome of another transaction whose effects are not visible
+ to the <command>INSERT</command> snapshot. Again, this is only
+ the case in Read Committed mode.
+ </para>
+
+ <para>
+ Because of the above rules, it is possible for an updating command to see
+ an inconsistent snapshot: it can see the effects of concurrent updating
commands on the same rows it is trying to update, but it
does not see effects of those commands on other rows in the database.
This behavior makes Read Committed mode unsuitable for commands that
diff --git a/doc/src/sgml/pgcrypto.sgml b/doc/src/sgml/pgcrypto.sgml
index d409446c37..bfcbe02f85 100644
--- a/doc/src/sgml/pgcrypto.sgml
+++ b/doc/src/sgml/pgcrypto.sgml
@@ -1270,6 +1270,14 @@ gen_random_uuid() returns uuid
<para>
If you cannot, then better do crypto inside client application.
</para>
+
+ <para>
+ The implementation does not resist
+ <ulink url="http://en.wikipedia.org/wiki/Side-channel_attack">side-channel
+ attacks</ulink>. For example, the time required for
+ a <filename>pgcrypto</> decryption function to complete varies among
+ ciphertexts of a given size.
+ </para>
</sect3>
<sect3>
diff --git a/doc/src/sgml/pgstatstatements.sgml b/doc/src/sgml/pgstatstatements.sgml
index 04b3f01ed5..4d7a6e68ea 100644
--- a/doc/src/sgml/pgstatstatements.sgml
+++ b/doc/src/sgml/pgstatstatements.sgml
@@ -19,12 +19,22 @@
This means that a server restart is needed to add or remove the module.
</para>
+ <para>
+ When <filename>pg_stat_statements</filename> is loaded, it tracks
+ statistics across all databases of the server. To access and manipulate
+ these statistics, the module provides a view, <structname>pg_stat_statements</>,
+ and the utility functions <function>pg_stat_statements_reset</> and
+ <function>pg_stat_statements</>. These are not available globally but
+ can be enabled for a specific database with
+ <command>CREATE EXTENSION pg_stat_statements</>.
+ </para>
+
<sect2>
<title>The <structname>pg_stat_statements</structname> View</title>
<para>
The statistics gathered by the module are made available via a
- system view named <structname>pg_stat_statements</>. This view
+ view named <structname>pg_stat_statements</>. This view
contains one row for each distinct database ID, user ID and query
ID (up to the maximum number of distinct statements that the module
can track). The columns of the view are shown in
@@ -216,19 +226,9 @@
</table>
<para>
- This view, and the functions <function>pg_stat_statements_reset</>
- and <function>pg_stat_statements</>, are available only in
- databases they have been specifically installed into by installing
- the <literal>pg_stat_statements</> extension.
- However, statistics are tracked across all databases of the server
- whenever the <filename>pg_stat_statements</filename> module is loaded
- into the server, regardless of presence of the view.
- </para>
-
- <para>
For security reasons, non-superusers are not allowed to see the SQL
- text or <structfield>queryid</structfield> of queries executed by other users. They can see
- the statistics, however, if the view has been installed in their
+ text or <structfield>queryid</structfield> of queries executed by other users.
+ They can see the statistics, however, if the view has been installed in their
database.
</para>
diff --git a/doc/src/sgml/pgstattuple.sgml b/doc/src/sgml/pgstattuple.sgml
index 9cabd71166..40a9669c90 100644
--- a/doc/src/sgml/pgstattuple.sgml
+++ b/doc/src/sgml/pgstattuple.sgml
@@ -358,6 +358,140 @@ pending_tuples | 0
</listitem>
</varlistentry>
+ <varlistentry>
+ <term>
+ <indexterm>
+ <primary>pgstattuple_approx</primary>
+ </indexterm>
+ <function>pgstattuple_approx(regclass) returns record</>
+ </term>
+
+ <listitem>
+ <para>
+ <function>pgstattuple_approx</function> is a faster alternative to
+ <function>pgstattuple</function> that returns approximate results.
+ The argument is the target relation's OID.
+ For example:
+<programlisting>
+test=> SELECT * FROM pgstattuple_approx('pg_catalog.pg_proc'::regclass);
+-[ RECORD 1 ]--------+-------
+table_len | 573440
+scanned_percent | 2
+approx_tuple_count | 2740
+approx_tuple_len | 561210
+approx_tuple_percent | 97.87
+dead_tuple_count | 0
+dead_tuple_len | 0
+dead_tuple_percent | 0
+approx_free_space | 11996
+approx_free_percent | 2.09
+</programlisting>
+ The output columns are described in <xref linkend="pgstatapprox-columns">.
+ </para>
+
+ <para>
+ Whereas <function>pgstattuple</function> always performs a
+ full-table scan and returns an exact count of live and dead tuples
+ (and their sizes) and free space, <function>pgstattuple_approx</function>
+ tries to avoid the full-table scan and returns exact dead tuple
+ statistics along with an approximation of the number and
+ size of live tuples and free space.
+ </para>
+
+ <para>
+ It does this by skipping pages that have only visible tuples
+ according to the visibility map (if a page has the corresponding VM
+ bit set, then it is assumed to contain no dead tuples). For such
+ pages, it derives the free space value from the free space map, and
+ assumes that the rest of the space on the page is taken up by live
+ tuples.
+ </para>
+
+ <para>
+ For pages that cannot be skipped, it scans each tuple, recording its
+ presence and size in the appropriate counters, and adding up the
+ free space on the page. At the end, it estimates the total number of
+ live tuples based on the number of pages and tuples scanned (in the
+ same way that VACUUM estimates pg_class.reltuples).
+ </para>
+
+ <table id="pgstatapprox-columns">
+ <title><function>pgstattuple_approx</function> Output Columns</title>
+ <tgroup cols="3">
+ <thead>
+ <row>
+ <entry>Column</entry>
+ <entry>Type</entry>
+ <entry>Description</entry>
+ </row>
+ </thead>
+
+ <tbody>
+ <row>
+ <entry><structfield>table_len</structfield></entry>
+ <entry><type>bigint</type></entry>
+ <entry>Physical relation length in bytes (exact)</entry>
+ </row>
+ <row>
+ <entry><structfield>scanned_percent</structfield></entry>
+ <entry><type>float8</type></entry>
+ <entry>Percentage of table scanned</entry>
+ </row>
+ <row>
+ <entry><structfield>approx_tuple_count</structfield></entry>
+ <entry><type>bigint</type></entry>
+ <entry>Number of live tuples (estimated)</entry>
+ </row>
+ <row>
+ <entry><structfield>approx_tuple_len</structfield></entry>
+ <entry><type>bigint</type></entry>
+ <entry>Total length of live tuples in bytes (estimated)</entry>
+ </row>
+ <row>
+ <entry><structfield>approx_tuple_percent</structfield></entry>
+ <entry><type>float8</type></entry>
+ <entry>Percentage of live tuples</entry>
+ </row>
+ <row>
+ <entry><structfield>dead_tuple_count</structfield></entry>
+ <entry><type>bigint</type></entry>
+ <entry>Number of dead tuples (exact)</entry>
+ </row>
+ <row>
+ <entry><structfield>dead_tuple_len</structfield></entry>
+ <entry><type>bigint</type></entry>
+ <entry>Total length of dead tuples in bytes (exact)</entry>
+ </row>
+ <row>
+ <entry><structfield>dead_tuple_percent</structfield></entry>
+ <entry><type>float8</type></entry>
+ <entry>Percentage of dead tuples</entry>
+ </row>
+ <row>
+ <entry><structfield>approx_free_space</structfield></entry>
+ <entry><type>bigint</type></entry>
+ <entry>Total free space in bytes (estimated)</entry>
+ </row>
+ <row>
+ <entry><structfield>approx_free_percent</structfield></entry>
+ <entry><type>float8</type></entry>
+ <entry>Percentage of free space</entry>
+ </row>
+
+ </tbody>
+ </tgroup>
+ </table>
+
+ <para>
+ In the above output, the free space figures may not match the
+ <function>pgstattuple</function> output exactly, because the free
+ space map gives us an exact figure, but is not guaranteed to be
+ accurate to the byte.
+ </para>
+
+ </listitem>
+ </varlistentry>
+
</variablelist>
</sect2>
@@ -365,7 +499,7 @@ pending_tuples | 0
<title>Authors</title>
<para>
- Tatsuo Ishii and Satoshi Nagayasu
+ Tatsuo Ishii, Satoshi Nagayasu and Abhijit Menon-Sen
</para>
</sect2>
diff --git a/doc/src/sgml/pgtrgm.sgml b/doc/src/sgml/pgtrgm.sgml
index f66439523a..9eb2a6742e 100644
--- a/doc/src/sgml/pgtrgm.sgml
+++ b/doc/src/sgml/pgtrgm.sgml
@@ -168,11 +168,11 @@
<programlisting>
CREATE TABLE test_trgm (t text);
-CREATE INDEX trgm_idx ON test_trgm USING gist (t gist_trgm_ops);
+CREATE INDEX trgm_idx ON test_trgm USING GIST (t gist_trgm_ops);
</programlisting>
or
<programlisting>
-CREATE INDEX trgm_idx ON test_trgm USING gin (t gin_trgm_ops);
+CREATE INDEX trgm_idx ON test_trgm USING GIN (t gin_trgm_ops);
</programlisting>
</para>
@@ -274,7 +274,7 @@ CREATE TABLE words AS SELECT word FROM
Next, create a trigram index on the word column:
<programlisting>
-CREATE INDEX words_idx ON words USING gin(word gin_trgm_ops);
+CREATE INDEX words_idx ON words USING GIN (word gin_trgm_ops);
</programlisting>
Now, a <command>SELECT</command> query similar to the previous example can
diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml
index d36acf6d99..9a7763d18c 100644
--- a/doc/src/sgml/plpgsql.sgml
+++ b/doc/src/sgml/plpgsql.sgml
@@ -2623,7 +2623,11 @@ END;
<para>
This example uses exception handling to perform either
- <command>UPDATE</> or <command>INSERT</>, as appropriate:
+ <command>UPDATE</> or <command>INSERT</>, as appropriate. It is
+ recommended that applications use <command>INSERT</> with
+ <literal>ON CONFLICT DO UPDATE</> rather than actually using
+ this pattern. This example serves primarily to illustrate use of
+ <application>PL/pgSQL</application> control flow structures:
<programlisting>
CREATE TABLE db (a INT PRIMARY KEY, b TEXT);
@@ -3852,9 +3856,11 @@ ASSERT <replaceable class="parameter">condition</replaceable> <optional> , <repl
<command>INSERT</> and <command>UPDATE</> operations, the return value
should be <varname>NEW</>, which the trigger function may modify to
support <command>INSERT RETURNING</> and <command>UPDATE RETURNING</>
- (this will also affect the row value passed to any subsequent triggers).
- For <command>DELETE</> operations, the return value should be
- <varname>OLD</>.
+ (this will also affect the row value passed to any subsequent triggers,
+ or passed to a special <varname>EXCLUDED</> alias reference within
+ an <command>INSERT</> statement with an <literal>ON CONFLICT DO
+ UPDATE</> clause). For <command>DELETE</> operations, the return
+ value should be <varname>OLD</>.
</para>
<para>
diff --git a/doc/src/sgml/postgres-fdw.sgml b/doc/src/sgml/postgres-fdw.sgml
index 43adb61455..14b12e37dc 100644
--- a/doc/src/sgml/postgres-fdw.sgml
+++ b/doc/src/sgml/postgres-fdw.sgml
@@ -69,6 +69,14 @@
</para>
<para>
+ Note that <filename>postgres_fdw</> currently lacks support for
+ <command>INSERT</command> statements with an <literal>ON CONFLICT DO
+ UPDATE</> clause. However, the <literal>ON CONFLICT DO NOTHING</>
+ clause is supported, provided a unique index inference specification
+ is omitted.
+ </para>
+
+ <para>
It is generally recommended that the columns of a foreign table be declared
with exactly the same data types, and collations if applicable, as the
referenced columns of the remote table. Although <filename>postgres_fdw</>
@@ -301,8 +309,8 @@
using <xref linkend="sql-importforeignschema">. This command creates
foreign table definitions on the local server that match tables or
views present on the remote server. If the remote tables to be imported
- have columns of user-defined data types, the local server must have types
- of the same names.
+ have columns of user-defined data types, the local server must have
+ compatible types of the same names.
</para>
<para>
@@ -353,9 +361,16 @@
<para>
Note that constraints other than <literal>NOT NULL</> will never be
- imported from the remote tables, since <productname>PostgreSQL</>
- does not support any other type of constraint on a foreign table.
- Checking other types of constraints is always left to the remote server.
+ imported from the remote tables. Although <productname>PostgreSQL</>
+ does support <literal>CHECK</> constraints on foreign tables, there is no
+ provision for importing them automatically, because of the risk that a
+ constraint expression could evaluate differently on the local and remote
+ servers. Any such inconsistency in the behavior of a <literal>CHECK</>
+ constraint could lead to hard-to-detect errors in query optimization.
+ So if you wish to import <literal>CHECK</> constraints, you must do so
+ manually, and you should verify the semantics of each one carefully.
+ For more detail about the treatment of <literal>CHECK</> constraints on
+ foreign tables, see <xref linkend="sql-createforeigntable">.
</para>
</sect3>
</sect2>
diff --git a/doc/src/sgml/postgres.sgml b/doc/src/sgml/postgres.sgml
index e378d6978d..d1703e9c01 100644
--- a/doc/src/sgml/postgres.sgml
+++ b/doc/src/sgml/postgres.sgml
@@ -220,6 +220,7 @@
&spi;
&bgworker;
&logicaldecoding;
+ &replication-origins;
</part>
@@ -249,6 +250,7 @@
&spgist;
&gin;
&brin;
+ &tablesample-method;
&storage;
&bki;
&planstats;
diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml
index 3a753a0b9b..c7df697845 100644
--- a/doc/src/sgml/protocol.sgml
+++ b/doc/src/sgml/protocol.sgml
@@ -1882,7 +1882,7 @@ The commands accepted in walsender mode are:
</varlistentry>
<varlistentry>
- <term>BASE_BACKUP [<literal>LABEL</literal> <replaceable>'label'</replaceable>] [<literal>PROGRESS</literal>] [<literal>FAST</literal>] [<literal>WAL</literal>] [<literal>NOWAIT</literal>] [<literal>MAX_RATE</literal> <replaceable>rate</replaceable>]
+ <term>BASE_BACKUP [<literal>LABEL</literal> <replaceable>'label'</replaceable>] [<literal>PROGRESS</literal>] [<literal>FAST</literal>] [<literal>WAL</literal>] [<literal>NOWAIT</literal>] [<literal>MAX_RATE</literal> <replaceable>rate</replaceable>] [<literal>TABLESPACE_MAP</literal>]
<indexterm><primary>BASE_BACKUP</primary></indexterm>
</term>
<listitem>
@@ -1968,6 +1968,19 @@ The commands accepted in walsender mode are:
</para>
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term><literal>TABLESPACE_MAP</literal></term>
+ <listitem>
+ <para>
+ Include information about symbolic links present in the directory
+ <filename>pg_tblspc</filename> in a file named
+ <filename>tablespace_map</filename>. The tablespace map file includes
+ each symbolic link name as it exists in the directory
+ <filename>pg_tblspc/</> and the full path of that symbolic link.
+ </para>
+ </listitem>
+ </varlistentry>
</variablelist>
</para>
<para>
diff --git a/doc/src/sgml/queries.sgml b/doc/src/sgml/queries.sgml
index 7dbad462a5..ab49bd7e91 100644
--- a/doc/src/sgml/queries.sgml
+++ b/doc/src/sgml/queries.sgml
@@ -1183,6 +1183,181 @@ SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit
</para>
</sect2>
+ <sect2 id="queries-grouping-sets">
+ <title><literal>GROUPING SETS</>, <literal>CUBE</>, and <literal>ROLLUP</></title>
+
+ <indexterm zone="queries-grouping-sets">
+ <primary>GROUPING SETS</primary>
+ </indexterm>
+ <indexterm zone="queries-grouping-sets">
+ <primary>CUBE</primary>
+ </indexterm>
+ <indexterm zone="queries-grouping-sets">
+ <primary>ROLLUP</primary>
+ </indexterm>
+
+ <para>
+ More complex grouping operations than those described above are possible
+ using the concept of <firstterm>grouping sets</>. The data selected by
+ the <literal>FROM</> and <literal>WHERE</> clauses is grouped separately
+ by each specified grouping set, aggregates computed for each group just as
+ for simple <literal>GROUP BY</> clauses, and then the results returned.
+ For example:
+<screen>
+<prompt>=&gt;</> <userinput>SELECT * FROM items_sold;</>
+ brand | size | sales
+-------+------+-------
+ Foo | L | 10
+ Foo | M | 20
+ Bar | M | 15
+ Bar | L | 5
+(4 rows)
+
+<prompt>=&gt;</> <userinput>SELECT brand, size, sum(sales) FROM items_sold GROUP BY GROUPING SETS ((brand), (size), ());</>
+ brand | size | sum
+-------+------+-----
+ Foo | | 30
+ Bar | | 20
+ | L | 15
+ | M | 35
+ | | 50
+(5 rows)
+</screen>
+ </para>
+
+ <para>
+ Each sublist of <literal>GROUPING SETS</> may specify zero or more columns
+ or expressions and is interpreted the same way as though it were directly
+ in the <literal>GROUP BY</> clause. An empty grouping set means that all
+ rows are aggregated down to a single group (which is output even if no
+ input rows were present), as described above for the case of aggregate
+ functions with no <literal>GROUP BY</> clause.
+ </para>
+
+ <para>
+ References to the grouping columns or expressions are replaced
+ by <literal>NULL</> values in result rows for grouping sets in which those
+ columns do not appear. To distinguish which grouping a particular output
+ row resulted from, see <xref linkend="functions-grouping-table">.
+ </para>
+
+ <para>
+ A shorthand notation is provided for specifying two common types of grouping set.
+ A clause of the form
+<programlisting>
+ROLLUP ( <replaceable>e1</>, <replaceable>e2</>, <replaceable>e3</>, ... )
+</programlisting>
+ represents the given list of expressions and all prefixes of the list including
+ the empty list; thus it is equivalent to
+<programlisting>
+GROUPING SETS (
+ ( <replaceable>e1</>, <replaceable>e2</>, <replaceable>e3</>, ... ),
+ ...
+ ( <replaceable>e1</>, <replaceable>e2</> )
+ ( <replaceable>e1</> )
+ ( )
+)
+</programlisting>
+ This is commonly used for analysis over hierarchical data; e.g. total
+ salary by department, division, and company-wide total.
+ </para>
+
+ <para>
+ A clause of the form
+<programlisting>
+CUBE ( <replaceable>e1</>, <replaceable>e2</>, ... )
+</programlisting>
+ represents the given list and all of its possible subsets (i.e. the power
+ set). Thus
+<programlisting>
+CUBE ( a, b, c )
+</programlisting>
+ is equivalent to
+<programlisting>
+GROUPING SETS (
+ ( a, b, c ),
+ ( a, b ),
+ ( a, c ),
+ ( a ),
+ ( b, c ),
+ ( b ),
+ ( c ),
+ ( )
+)
+</programlisting>
+ </para>
+
+ <para>
+ The individual elements of a <literal>CUBE</> or <literal>ROLLUP</>
+ clause may be either individual expressions, or sub-lists of elements in
+ parentheses. In the latter case, the sub-lists are treated as single
+ units for the purposes of generating the individual grouping sets.
+ For example:
+<programlisting>
+CUBE ( (a,b), (c,d) )
+</programlisting>
+ is equivalent to
+<programlisting>
+GROUPING SETS (
+ ( a, b, c, d )
+ ( a, b )
+ ( c, d )
+ ( )
+)
+</programlisting>
+ and
+<programlisting>
+ROLLUP ( a, (b,c), d )
+</programlisting>
+ is equivalent to
+<programlisting>
+GROUPING SETS (
+ ( a, b, c, d )
+ ( a, b, c )
+ ( a )
+ ( )
+)
+</programlisting>
+ </para>
+
+ <para>
+ The <literal>CUBE</> and <literal>ROLLUP</> constructs can be used either
+ directly in the <literal>GROUP BY</> clause, or nested inside a
+ <literal>GROUPING SETS</> clause. If one <literal>GROUPING SETS</> clause
+ is nested inside another, the effect is the same as if all the elements of
+ the inner clause had been written directly in the outer clause.
+ </para>
+
+ <para>
+ If multiple grouping items are specified in a single <literal>GROUP BY</>
+ clause, then the final list of grouping sets is the cross product of the
+ individual items. For example:
+<programlisting>
+GROUP BY a, CUBE(b,c), GROUPING SETS ((d), (e))
+</programlisting>
+ is equivalent to
+<programlisting>
+GROUP BY GROUPING SETS (
+ (a,b,c,d), (a,b,c,e),
+ (a,b,d), (a,b,e),
+ (a,c,d), (a,c,e),
+ (a,d), (a,e)
+)
+</programlisting>
+ </para>
+
+ <note>
+ <para>
+ The construct <literal>(a,b)</> is normally recognized in expressions as
+ a <link linkend="sql-syntax-row-constructors">row constructor</link>.
+ Within the <literal>GROUP BY</> clause, this does not apply at the top
+ levels of expressions, and <literal>(a,b)</> is parsed as a list of
+ expressions as described above. If for some reason you <emphasis>need</>
+ a row constructor in a grouping expression, use <literal>ROW(a,b)</>.
+ </para>
+ </note>
+ </sect2>
+
<sect2 id="queries-window">
<title>Window Function Processing</title>
diff --git a/doc/src/sgml/rangetypes.sgml b/doc/src/sgml/rangetypes.sgml
index d1125618b4..260545711b 100644
--- a/doc/src/sgml/rangetypes.sgml
+++ b/doc/src/sgml/rangetypes.sgml
@@ -406,7 +406,7 @@ SELECT '[1.234, 5.678]'::floatrange;
GiST and SP-GiST indexes can be created for table columns of range types.
For instance, to create a GiST index:
<programlisting>
-CREATE INDEX reservation_idx ON reservation USING gist (during);
+CREATE INDEX reservation_idx ON reservation USING GIST (during);
</programlisting>
A GiST or SP-GiST index can accelerate queries involving these range operators:
<literal>=</>,
@@ -453,7 +453,7 @@ CREATE INDEX reservation_idx ON reservation USING gist (during);
<programlisting>
CREATE TABLE reservation (
during tsrange,
- EXCLUDE USING gist (during WITH &amp;&amp;)
+ EXCLUDE USING GIST (during WITH &amp;&amp;)
);
</programlisting>
@@ -486,7 +486,7 @@ CREATE EXTENSION btree_gist;
CREATE TABLE room_reservation (
room text,
during tsrange,
- EXCLUDE USING gist (room WITH =, during WITH &amp;&amp;)
+ EXCLUDE USING GIST (room WITH =, during WITH &amp;&amp;)
);
INSERT INTO room_reservation VALUES
diff --git a/doc/src/sgml/ref/allfiles.sgml b/doc/src/sgml/ref/allfiles.sgml
index 211a3c42bd..bf95453b6c 100644
--- a/doc/src/sgml/ref/allfiles.sgml
+++ b/doc/src/sgml/ref/allfiles.sgml
@@ -79,6 +79,7 @@ Complete list of usable sgml source files in this directory.
<!ENTITY createTable SYSTEM "create_table.sgml">
<!ENTITY createTableAs SYSTEM "create_table_as.sgml">
<!ENTITY createTableSpace SYSTEM "create_tablespace.sgml">
+<!ENTITY createTransform SYSTEM "create_transform.sgml">
<!ENTITY createTrigger SYSTEM "create_trigger.sgml">
<!ENTITY createTSConfig SYSTEM "create_tsconfig.sgml">
<!ENTITY createTSDictionary SYSTEM "create_tsdictionary.sgml">
@@ -120,6 +121,7 @@ Complete list of usable sgml source files in this directory.
<!ENTITY dropServer SYSTEM "drop_server.sgml">
<!ENTITY dropTable SYSTEM "drop_table.sgml">
<!ENTITY dropTableSpace SYSTEM "drop_tablespace.sgml">
+<!ENTITY dropTransform SYSTEM "drop_transform.sgml">
<!ENTITY dropTrigger SYSTEM "drop_trigger.sgml">
<!ENTITY dropTSConfig SYSTEM "drop_tsconfig.sgml">
<!ENTITY dropTSDictionary SYSTEM "drop_tsdictionary.sgml">
@@ -193,7 +195,10 @@ Complete list of usable sgml source files in this directory.
<!ENTITY pgResetxlog SYSTEM "pg_resetxlog.sgml">
<!ENTITY pgRestore SYSTEM "pg_restore.sgml">
<!ENTITY pgRewind SYSTEM "pg_rewind.sgml">
+<!ENTITY pgtestfsync SYSTEM "pgtestfsync.sgml">
+<!ENTITY pgtesttiming SYSTEM "pgtesttiming.sgml">
<!ENTITY pgupgrade SYSTEM "pgupgrade.sgml">
+<!ENTITY pgxlogdump SYSTEM "pg_xlogdump.sgml">
<!ENTITY postgres SYSTEM "postgres-ref.sgml">
<!ENTITY postmaster SYSTEM "postmaster.sgml">
<!ENTITY psqlRef SYSTEM "psql-ref.sgml">
diff --git a/doc/src/sgml/ref/alter_extension.sgml b/doc/src/sgml/ref/alter_extension.sgml
index 0d479c8ca2..7141ee352e 100644
--- a/doc/src/sgml/ref/alter_extension.sgml
+++ b/doc/src/sgml/ref/alter_extension.sgml
@@ -52,6 +52,7 @@ ALTER EXTENSION <replaceable class="PARAMETER">name</replaceable> DROP <replacea
TEXT SEARCH DICTIONARY <replaceable class="PARAMETER">object_name</replaceable> |
TEXT SEARCH PARSER <replaceable class="PARAMETER">object_name</replaceable> |
TEXT SEARCH TEMPLATE <replaceable class="PARAMETER">object_name</replaceable> |
+ TRANSFORM FOR <replaceable>type_name</replaceable> LANGUAGE <replaceable>lang_name</replaceable> |
TYPE <replaceable class="PARAMETER">object_name</replaceable> |
VIEW <replaceable class="PARAMETER">object_name</replaceable>
@@ -259,6 +260,26 @@ ALTER EXTENSION <replaceable class="PARAMETER">name</replaceable> DROP <replacea
</para>
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term><replaceable>type_name</replaceable></term>
+
+ <listitem>
+ <para>
+ The name of the data type of the transform.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable>lang_name</replaceable></term>
+
+ <listitem>
+ <para>
+ The name of the language of the transform.
+ </para>
+ </listitem>
+ </varlistentry>
</variablelist>
</para>
</refsect1>
diff --git a/doc/src/sgml/ref/alter_foreign_table.sgml b/doc/src/sgml/ref/alter_foreign_table.sgml
index ace0040a9b..4329d43a1e 100644
--- a/doc/src/sgml/ref/alter_foreign_table.sgml
+++ b/doc/src/sgml/ref/alter_foreign_table.sgml
@@ -169,7 +169,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceab
</varlistentry>
<varlistentry>
- <term><literal>ADD <replaceable class="PARAMETER">table_constraint</replaceable></literal> [ NOT VALID ]</term>
+ <term><literal>ADD <replaceable class="PARAMETER">table_constraint</replaceable> [ NOT VALID ]</literal></term>
<listitem>
<para>
This form adds a new constraint to a foreign table, using the same
@@ -541,7 +541,7 @@ ALTER FOREIGN TABLE distributors ALTER COLUMN street SET NOT NULL;
<para>
To change options of a foreign table:
<programlisting>
-ALTER FOREIGN TABLE myschema.distributors OPTIONS (ADD opt1 'value', SET opt2, 'value2', DROP opt3 'value3');
+ALTER FOREIGN TABLE myschema.distributors OPTIONS (ADD opt1 'value', SET opt2 'value2', DROP opt3 'value3');
</programlisting></para>
</refsect1>
diff --git a/doc/src/sgml/ref/alter_policy.sgml b/doc/src/sgml/ref/alter_policy.sgml
index 6d03db5547..2e63206014 100644
--- a/doc/src/sgml/ref/alter_policy.sgml
+++ b/doc/src/sgml/ref/alter_policy.sgml
@@ -23,7 +23,7 @@ PostgreSQL documentation
<synopsis>
ALTER POLICY <replaceable class="parameter">name</replaceable> ON <replaceable class="parameter">table_name</replaceable>
[ RENAME TO <replaceable class="PARAMETER">new_name</replaceable> ]
- [ TO { <replaceable class="parameter">role_name</replaceable> | PUBLIC } [, ...] ]
+ [ TO { <replaceable class="parameter">role_name</replaceable> | PUBLIC | CURRENT_USER | SESSION_USER } [, ...] ]
[ USING ( <replaceable class="parameter">using_expression</replaceable> ) ]
[ WITH CHECK ( <replaceable class="parameter">check_expression</replaceable> ) ]
</synopsis>
diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index 6a82730a4b..207fec1758 100644
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -66,7 +66,7 @@ ALTER TABLE ALL IN TABLESPACE <replaceable class="PARAMETER">name</replaceable>
SET WITH OIDS
SET WITHOUT OIDS
SET TABLESPACE <replaceable class="PARAMETER">new_tablespace</replaceable>
- SET {LOGGED | UNLOGGED}
+ SET { LOGGED | UNLOGGED }
SET ( <replaceable class="PARAMETER">storage_parameter</replaceable> = <replaceable class="PARAMETER">value</replaceable> [, ... ] )
RESET ( <replaceable class="PARAMETER">storage_parameter</replaceable> [, ... ] )
INHERIT <replaceable class="PARAMETER">parent_table</replaceable>
@@ -74,7 +74,7 @@ ALTER TABLE ALL IN TABLESPACE <replaceable class="PARAMETER">name</replaceable>
OF <replaceable class="PARAMETER">type_name</replaceable>
NOT OF
OWNER TO { <replaceable class="PARAMETER">new_owner</replaceable> | CURRENT_USER | SESSION_USER }
- REPLICA IDENTITY {DEFAULT | USING INDEX <replaceable class="PARAMETER">index_name</replaceable> | FULL | NOTHING}
+ REPLICA IDENTITY { DEFAULT | USING INDEX <replaceable class="PARAMETER">index_name</replaceable> | FULL | NOTHING }
<phrase>and <replaceable class="PARAMETER">table_constraint_using_index</replaceable> is:</phrase>
@@ -122,16 +122,6 @@ ALTER TABLE ALL IN TABLESPACE <replaceable class="PARAMETER">name</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>IF EXISTS</literal></term>
- <listitem>
- <para>
- Do not throw an error if the table does not exist. A notice is issued
- in this case.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><literal>SET DATA TYPE</literal></term>
<listitem>
<para>
@@ -526,7 +516,7 @@ ALTER TABLE ALL IN TABLESPACE <replaceable class="PARAMETER">name</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>SET {LOGGED | UNLOGGED}</literal></term>
+ <term><literal>SET { LOGGED | UNLOGGED }</literal></term>
<listitem>
<para>
This form changes the table from unlogged to logged or vice-versa
@@ -727,6 +717,16 @@ ALTER TABLE ALL IN TABLESPACE <replaceable class="PARAMETER">name</replaceable>
<variablelist>
<varlistentry>
+ <term><literal>IF EXISTS</literal></term>
+ <listitem>
+ <para>
+ Do not throw an error if the table does not exist. A notice is issued
+ in this case.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><replaceable class="PARAMETER">name</replaceable></term>
<listitem>
<para>
diff --git a/doc/src/sgml/ref/comment.sgml b/doc/src/sgml/ref/comment.sgml
index 62e1968c08..656f5aae5b 100644
--- a/doc/src/sgml/ref/comment.sgml
+++ b/doc/src/sgml/ref/comment.sgml
@@ -55,6 +55,7 @@ COMMENT ON
TEXT SEARCH DICTIONARY <replaceable class="PARAMETER">object_name</replaceable> |
TEXT SEARCH PARSER <replaceable class="PARAMETER">object_name</replaceable> |
TEXT SEARCH TEMPLATE <replaceable class="PARAMETER">object_name</replaceable> |
+ TRANSFORM FOR <replaceable>type_name</replaceable> LANGUAGE <replaceable>lang_name</replaceable> |
TRIGGER <replaceable class="PARAMETER">trigger_name</replaceable> ON <replaceable class="PARAMETER">table_name</replaceable> |
TYPE <replaceable class="PARAMETER">object_name</replaceable> |
VIEW <replaceable class="PARAMETER">object_name</replaceable>
@@ -225,6 +226,26 @@ COMMENT ON
</listitem>
</varlistentry>
+ <varlistentry>
+ <term><replaceable>type_name</replaceable></term>
+
+ <listitem>
+ <para>
+ The name of the data type of the transform.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable>lang_name</replaceable></term>
+
+ <listitem>
+ <para>
+ The name of the language of the transform.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry>
<term><replaceable class="parameter">text</replaceable></term>
<listitem>
@@ -305,6 +326,7 @@ COMMENT ON TEXT SEARCH CONFIGURATION my_config IS 'Special word filtering';
COMMENT ON TEXT SEARCH DICTIONARY swedish IS 'Snowball stemmer for Swedish language';
COMMENT ON TEXT SEARCH PARSER my_parser IS 'Splits text into words';
COMMENT ON TEXT SEARCH TEMPLATE snowball IS 'Snowball stemmer';
+COMMENT ON TRANSFORM FOR hstore LANGUAGE plpythonu IS 'Transform between hstore and Python dict';
COMMENT ON TRIGGER my_trigger ON my_table IS 'Used for RI';
COMMENT ON TYPE complex IS 'Complex number data type';
COMMENT ON VIEW my_view IS 'View of departmental costs';
diff --git a/doc/src/sgml/ref/create_foreign_table.sgml b/doc/src/sgml/ref/create_foreign_table.sgml
index abadd83fc3..413b033cb5 100644
--- a/doc/src/sgml/ref/create_foreign_table.sgml
+++ b/doc/src/sgml/ref/create_foreign_table.sgml
@@ -32,13 +32,13 @@ CREATE FOREIGN TABLE [ IF NOT EXISTS ] <replaceable class="PARAMETER">table_name
[ CONSTRAINT <replaceable class="PARAMETER">constraint_name</replaceable> ]
{ NOT NULL |
NULL |
- CHECK ( <replaceable class="PARAMETER">expression</replaceable> ) |
+ CHECK ( <replaceable class="PARAMETER">expression</replaceable> ) [ NO INHERIT ] |
DEFAULT <replaceable>default_expr</replaceable> }
<phrase>and <replaceable class="PARAMETER">table_constraint</replaceable> is:</phrase>
[ CONSTRAINT <replaceable class="PARAMETER">constraint_name</replaceable> ]
-CHECK ( <replaceable class="PARAMETER">expression</replaceable> )
+CHECK ( <replaceable class="PARAMETER">expression</replaceable> ) [ NO INHERIT ]
</synopsis>
</refsynopsisdiv>
diff --git a/doc/src/sgml/ref/create_function.sgml b/doc/src/sgml/ref/create_function.sgml
index 2001921535..c5beb166cf 100644
--- a/doc/src/sgml/ref/create_function.sgml
+++ b/doc/src/sgml/ref/create_function.sgml
@@ -25,6 +25,7 @@ CREATE [ OR REPLACE ] FUNCTION
[ RETURNS <replaceable class="parameter">rettype</replaceable>
| RETURNS TABLE ( <replaceable class="parameter">column_name</replaceable> <replaceable class="parameter">column_type</replaceable> [, ...] ) ]
{ LANGUAGE <replaceable class="parameter">lang_name</replaceable>
+ | TRANSFORM { FOR TYPE <replaceable class="parameter">type_name</replaceable> } [, ... ]
| WINDOW
| IMMUTABLE | STABLE | VOLATILE | [ NOT ] LEAKPROOF
| CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT
@@ -261,6 +262,23 @@ CREATE [ OR REPLACE ] FUNCTION
</varlistentry>
<varlistentry>
+ <term><literal>TRANSFORM { FOR TYPE <replaceable class="parameter">type_name</replaceable> } [, ... ]</literal></term>
+
+ <listitem>
+ <para>
+ Lists which transforms a call to the function should apply. Transforms
+ convert between SQL types and language-specific data types;
+ see <xref linkend="sql-createtransform">. Procedural language
+ implementations usually have hardcoded knowledge of the built-in types,
+ so those don't need to be listed here. If a procedural language
+ implementation does not know how to handle a type and no transform is
+ supplied, it will fall back to a default behavior for converting data
+ types, but this depends on the implementation.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><literal>WINDOW</literal></term>
<listitem>
diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml
index 6b2ee28181..ce36a1ba48 100644
--- a/doc/src/sgml/ref/create_index.sgml
+++ b/doc/src/sgml/ref/create_index.sgml
@@ -637,7 +637,7 @@ CREATE UNIQUE INDEX title_idx ON films (title) WITH (fillfactor = 70);
<para>
To create a <acronym>GIN</> index with fast updates disabled:
<programlisting>
-CREATE INDEX gin_idx ON documents_table USING gin (locations) WITH (fastupdate = off);
+CREATE INDEX gin_idx ON documents_table USING GIN (locations) WITH (fastupdate = off);
</programlisting>
</para>
diff --git a/doc/src/sgml/ref/create_policy.sgml b/doc/src/sgml/ref/create_policy.sgml
index 868a6c1cd3..e826984633 100644
--- a/doc/src/sgml/ref/create_policy.sgml
+++ b/doc/src/sgml/ref/create_policy.sgml
@@ -23,7 +23,7 @@ PostgreSQL documentation
<synopsis>
CREATE POLICY <replaceable class="parameter">name</replaceable> ON <replaceable class="parameter">table_name</replaceable>
[ FOR { ALL | SELECT | INSERT | UPDATE | DELETE } ]
- [ TO { <replaceable class="parameter">role_name</replaceable> | PUBLIC } [, ...] ]
+ [ TO { <replaceable class="parameter">role_name</replaceable> | PUBLIC | CURRENT_USER | SESSION_USER } [, ...] ]
[ USING ( <replaceable class="parameter">using_expression</replaceable> ) ]
[ WITH CHECK ( <replaceable class="parameter">check_expression</replaceable> ) ]
</synopsis>
@@ -61,6 +61,14 @@ CREATE POLICY <replaceable class="parameter">name</replaceable> ON <replaceable
</para>
<para>
+ For INSERT and UPDATE queries, WITH CHECK expressions are enforced after
+ BEFORE triggers are fired, and before any data modifications are made.
+ Thus a BEFORE ROW trigger may modify the data to be inserted, affecting
+ the result of the security policy check. WITH CHECK expressions are
+ enforced before any other constraints.
+ </para>
+
+ <para>
Policy names are per-table, therefore one policy name can be used for many
different tables and have a definition for each table which is appropriate to
that table.
@@ -70,11 +78,13 @@ CREATE POLICY <replaceable class="parameter">name</replaceable> ON <replaceable
Policies can be applied for specific commands or for specific roles. The
default for newly created policies is that they apply for all commands and
roles, unless otherwise specified. If multiple policies apply to a given
- query, they will be combined using OR. Further, for commands which can have
- both USING and WITH CHECK policies (ALL and UPDATE), if no WITH CHECK policy
- is defined then the USING policy will be used for both what rows are visible
- (normal USING case) and which rows will be allowed to be added (WITH CHECK
- case).
+ query, they will be combined using OR (although <literal>ON CONFLICT DO
+ UPDATE</> and <literal>INSERT</> policies are not combined in this way, but
+ rather enforced as noted at each stage of <literal>ON CONFLICT</> execution).
+ Further, for commands which can have both USING and WITH CHECK policies (ALL
+ and UPDATE), if no WITH CHECK policy is defined then the USING policy will be
+ used for both what rows are visible (normal USING case) and which rows will
+ be allowed to be added (WITH CHECK case).
</para>
<para>
@@ -255,6 +265,12 @@ CREATE POLICY <replaceable class="parameter">name</replaceable> ON <replaceable
as it only ever applies in cases where records are being added to the
relation.
</para>
+ <para>
+ Note that <literal>INSERT</literal> with <literal>ON CONFLICT DO
+ UPDATE</literal> requires that any <literal>INSERT</literal> policy
+ WITH CHECK expression passes for any rows appended to the relation by
+ the INSERT path only.
+ </para>
</listitem>
</varlistentry>
@@ -263,22 +279,39 @@ CREATE POLICY <replaceable class="parameter">name</replaceable> ON <replaceable
<listitem>
<para>
Using <literal>UPDATE</literal> for a policy means that it will apply
- to <literal>UPDATE</literal> commands. As <literal>UPDATE</literal>
- involves pulling an existing record and then making changes to some
- portion (but possibly not all) of the record, the
- <literal>UPDATE</literal> policy accepts both a USING expression and
- a WITH CHECK expression. The USING expression will be used to
- determine which records the <literal>UPDATE</literal> command will
- see to operate against, while the <literal>WITH CHECK</literal>
- expression defines what rows are allowed to be added back into the
- relation (similar to the <literal>INSERT</literal> policy).
- Any rows whose resulting values do not pass the
- <literal>WITH CHECK</literal> expression will cause an ERROR and the
- entire command will be aborted. Note that if only a
- <literal>USING</literal> clause is specified then that clause will be
- used for both <literal>USING</literal> and
+ to <literal>UPDATE</literal> commands (or auxiliary <literal>ON
+ CONFLICT DO UPDATE</literal> clauses of <literal>INSERT</literal>
+ commands). As <literal>UPDATE</literal> involves pulling an existing
+ record and then making changes to some portion (but possibly not all)
+ of the record, the <literal>UPDATE</literal> policy accepts both a
+ <literal>USING</literal> expression and a <literal>WITH CHECK</literal>
+ expression. The <literal>USING</literal> expression will be used to
+ determine which records the <literal>UPDATE</literal> command will see
+ to operate against, while the <literal>WITH CHECK</literal> expression
+ defines what rows are allowed to be added back into the relation
+ (similar to the <literal>INSERT</literal> policy). Any rows whose
+ resulting values do not pass the <literal>WITH CHECK</literal>
+ expression will cause an ERROR and the entire command will be aborted.
+ Note that if only a <literal>USING</literal> clause is specified then
+ that clause will be used for both <literal>USING</literal> and
<literal>WITH CHECK</literal> cases.
</para>
+ <para>
+ Note, however, that <literal>INSERT</literal> with <literal>ON CONFLICT
+ DO UPDATE</literal> requires that an <literal>UPDATE</literal> policy
+ <literal>USING</literal> expression always be enforced as a
+ <literal>WITH CHECK</literal> expression. This
+ <literal>UPDATE</literal> policy must always pass when the
+ <literal>UPDATE</literal> path is taken. Any existing row that
+ necessitates that the <literal>UPDATE</literal> path be taken must pass
+ the (UPDATE or ALL) <literal>USING</literal> qualifications (combined
+ using <literal>OR</literal>), which are always enforced as WITH CHECK
+ options in this context (the <literal>UPDATE</literal> path will
+ <emphasis>never</> be silently avoided; an error will be thrown
+ instead). Finally, the final row appended to the relation must pass
+ any <literal>WITH CHECK</literal> options that a conventional
+ <literal>UPDATE</literal> is required to pass.
+ </para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/create_rule.sgml b/doc/src/sgml/ref/create_rule.sgml
index 677766a2d5..53fdf56621 100644
--- a/doc/src/sgml/ref/create_rule.sgml
+++ b/doc/src/sgml/ref/create_rule.sgml
@@ -136,7 +136,11 @@ CREATE [ OR REPLACE ] RULE <replaceable class="parameter">name</replaceable> AS
<para>
The event is one of <literal>SELECT</literal>,
<literal>INSERT</literal>, <literal>UPDATE</literal>, or
- <literal>DELETE</literal>.
+ <literal>DELETE</literal>. Note that an
+ <command>INSERT</command> containing an <literal>ON
+ CONFLICT</literal> clause cannot be used on tables that have
+ either <literal>INSERT</literal> or <literal>UPDATE</literal>
+ rules. Consider using an updatable view instead.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index be7ebd5f54..fac7e1ec5e 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -717,7 +717,9 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
<literal>EXCLUDE</>, and
<literal>REFERENCES</> (foreign key) constraints accept this
clause. <literal>NOT NULL</> and <literal>CHECK</> constraints are not
- deferrable.
+ deferrable. Note that deferrable constraints cannot be used as
+ conflict arbitrators in an <command>INSERT</command> statement that
+ includes an <literal>ON CONFLICT DO UPDATE</> clause.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/create_tablespace.sgml b/doc/src/sgml/ref/create_tablespace.sgml
index 9072d077cd..5756c3e080 100644
--- a/doc/src/sgml/ref/create_tablespace.sgml
+++ b/doc/src/sgml/ref/create_tablespace.sgml
@@ -22,7 +22,7 @@ PostgreSQL documentation
<refsynopsisdiv>
<synopsis>
CREATE TABLESPACE <replaceable class="parameter">tablespace_name</replaceable>
- [ OWNER <replaceable class="parameter">user_name</replaceable> ]
+ [ OWNER { <replaceable>new_owner</replaceable> | CURRENT_USER | SESSION_USER } ]
LOCATION '<replaceable class="parameter">directory</replaceable>'
[ WITH ( <replaceable class="PARAMETER">tablespace_option</replaceable> = <replaceable class="PARAMETER">value</replaceable> [, ... ] ) ]
</synopsis>
diff --git a/doc/src/sgml/ref/create_transform.sgml b/doc/src/sgml/ref/create_transform.sgml
new file mode 100644
index 0000000000..d321dad7a5
--- /dev/null
+++ b/doc/src/sgml/ref/create_transform.sgml
@@ -0,0 +1,207 @@
+<!-- doc/src/sgml/ref/create_transform.sgml -->
+
+<refentry id="SQL-CREATETRANSFORM">
+ <indexterm zone="sql-createtransform">
+ <primary>CREATE TRANSFORM</primary>
+ </indexterm>
+
+ <refmeta>
+ <refentrytitle>CREATE TRANSFORM</refentrytitle>
+ <manvolnum>7</manvolnum>
+ <refmiscinfo>SQL - Language Statements</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>CREATE TRANSFORM</refname>
+ <refpurpose>define a new transform</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+<synopsis>
+CREATE [ OR REPLACE ] TRANSFORM FOR <replaceable>type_name</replaceable> LANGUAGE <replaceable>lang_name</replaceable> (
+ FROM SQL WITH FUNCTION <replaceable>from_sql_function_name</replaceable> (<replaceable>argument_type</replaceable> [, ...]),
+ TO SQL WITH FUNCTION <replaceable>to_sql_function_name</replaceable> (<replaceable>argument_type</replaceable> [, ...])
+);
+</synopsis>
+ </refsynopsisdiv>
+
+ <refsect1 id="sql-createtransform-description">
+ <title>Description</title>
+
+ <para>
+ <command>CREATE TRANSFORM</command> defines a new transform.
+ <command>CREATE OR REPLACE TRANSFORM</command> will either create a new
+ transform, or replace an existing definition.
+ </para>
+
+ <para>
+ A transform specifies how to adapt a data type to a procedural language.
+ For example, when writing a function in PL/Python using
+ the <type>hstore</type> type, PL/Python has no prior knowledge how to
+ present <type>hstore</type> values in the Python environment. Language
+ implementations usually default to using the text representation, but that
+ is inconvenient when, for example, an associative array or a list would be
+ more appropriate.
+ </para>
+
+ <para>
+ A transform specifies two functions:
+ <itemizedlist>
+ <listitem>
+ <para>
+ A <quote>from SQL</quote> function that converts the type from the SQL
+ environment to the language. This function will be invoked on the
+ arguments of a function written in the language.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ A <quote>to SQL</quote> function that converts the type from the
+ language to the SQL environment. This function will be invoked on the
+ return value of a function written in the language.
+ </para>
+ </listitem>
+ </itemizedlist>
+ It is not necessary to provide both of these functions. If one is not
+ specified, the language-specific default behavior will be used if
+ necessary. (To prevent a transformation in a certain direction from
+ happening at all, you could also write a transform function that always
+ errors out.)
+ </para>
+
+ <para>
+ To be able to create a transform, you must own and
+ have <literal>USAGE</literal> privilege on the type, have
+ <literal>USAGE</literal> privilege on the language, and own and
+ have <literal>EXECUTE</literal> privilege on the from-SQL and to-SQL
+ functions, if specified.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>Parameters</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><replaceable>type_name</replaceable></term>
+
+ <listitem>
+ <para>
+ The name of the data type of the transform.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable>lang_name</replaceable></term>
+
+ <listitem>
+ <para>
+ The name of the language of the transform.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable>from_sql_function_name</replaceable>(<replaceable>argument_type</replaceable> [, ...])</term>
+
+ <listitem>
+ <para>
+ The name of the function for converting the type from the SQL
+ environment to the language. It must take one argument of
+ type <type>internal</type> and return type <type>internal</type>. The
+ actual argument will be of the type for the transform, and the function
+ should be coded as if it were. (But it is not allowed to declare an
+ SQL-level function returning <type>internal</type> without at
+ least one argument of type <type>internal</type>.) The actual return
+ value will be something specific to the language implementation.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable>to_sql_function_name</replaceable>(<replaceable>argument_type</replaceable> [, ...])</term>
+
+ <listitem>
+ <para>
+ The name of the function for converting the type from the language to
+ the SQL environment. It must take one argument of type
+ <type>internal</type> and return the type that is the type for the
+ transform. The actual argument value will be something specific to the
+ language implementation.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1 id="sql-createtransform-notes">
+ <title>Notes</title>
+
+ <para>
+ Use <xref linkend="sql-droptransform"> to remove transforms.
+ </para>
+ </refsect1>
+
+ <refsect1 id="sql-createtransform-examples">
+ <title>Examples</title>
+
+ <para>
+ To create a transform for type <type>hstore</type> and language
+ <literal>plpythonu</literal>, first set up the type and the language:
+<programlisting>
+CREATE TYPE hstore ...;
+
+CREATE LANGUAGE plpythonu ...;
+</programlisting>
+ Then create the necessary functions:
+<programlisting>
+CREATE FUNCTION hstore_to_plpython(val internal) RETURNS internal
+LANGUAGE C STRICT IMMUTABLE
+AS ...;
+
+CREATE FUNCTION plpython_to_hstore(val internal) RETURNS hstore
+LANGUAGE C STRICT IMMUTABLE
+AS ...;
+</programlisting>
+ And finally create the transform to connect them all together:
+<programlisting>
+CREATE TRANSFORM FOR hstore LANGUAGE plpythonu (
+ FROM SQL WITH FUNCTION hstore_to_plpython(internal),
+ TO SQL WITH FUNCTION plpython_to_hstore(internal)
+);
+</programlisting>
+ In practice, these commands would be wrapped up in extensions.
+ </para>
+
+ <para>
+ The <filename>contrib</filename> section contains a number of extensions
+ that provide transforms, which can serve as real-world examples.
+ </para>
+ </refsect1>
+
+ <refsect1 id="sql-createtransform-compat">
+ <title>Compatibility</title>
+
+ <para>
+ This form of <command>CREATE TRANSFORM</command> is a
+ <productname>PostgreSQL</productname> extension. There is a <command>CREATE
+ TRANSFORM</command> command in the <acronym>SQL</acronym> standard, but it
+ is for adapting data types to client languages. That usage is not supported
+ by <productname>PostgreSQL</productname>.
+ </para>
+ </refsect1>
+
+ <refsect1 id="sql-createtransform-seealso">
+ <title>See Also</title>
+
+ <para>
+ <xref linkend="sql-createfunction">,
+ <xref linkend="sql-createlanguage">,
+ <xref linkend="sql-createtype">,
+ <xref linkend="sql-droptransform">
+ </para>
+ </refsect1>
+
+</refentry>
diff --git a/doc/src/sgml/ref/create_trigger.sgml b/doc/src/sgml/ref/create_trigger.sgml
index aae0b41cd2..4bde815012 100644
--- a/doc/src/sgml/ref/create_trigger.sgml
+++ b/doc/src/sgml/ref/create_trigger.sgml
@@ -76,7 +76,10 @@ CREATE [ CONSTRAINT ] TRIGGER <replaceable class="PARAMETER">name</replaceable>
executes once for any given operation, regardless of how many rows
it modifies (in particular, an operation that modifies zero rows
will still result in the execution of any applicable <literal>FOR
- EACH STATEMENT</literal> triggers).
+ EACH STATEMENT</literal> triggers). Note that with an
+ <command>INSERT</command> with an <literal>ON CONFLICT DO UPDATE</>
+ clause, both <command>INSERT</command> and
+ <command>UPDATE</command> statement level triggers will be fired.
</para>
<para>
diff --git a/doc/src/sgml/ref/create_view.sgml b/doc/src/sgml/ref/create_view.sgml
index 5dadab1dee..8fa3564021 100644
--- a/doc/src/sgml/ref/create_view.sgml
+++ b/doc/src/sgml/ref/create_view.sgml
@@ -333,7 +333,8 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello;
If the view is automatically updatable the system will convert any
<command>INSERT</>, <command>UPDATE</> or <command>DELETE</> statement
on the view into the corresponding statement on the underlying base
- relation.
+ relation. <command>INSERT</> statements that have an <literal>ON
+ CONFLICT DO UPDATE</> clause are fully supported.
</para>
<para>
@@ -345,8 +346,10 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello;
condition, and thus is no longer visible through the view. Similarly,
an <command>INSERT</> command can potentially insert base-relation rows
that do not satisfy the <literal>WHERE</> condition and thus are not
- visible through the view. The <literal>CHECK OPTION</> may be used to
- prevent <command>INSERT</> and <command>UPDATE</> commands from creating
+ visible through the view (<literal>ON CONFLICT DO UPDATE</> may
+ similarly affect an existing row not visible through the view).
+ The <literal>CHECK OPTION</> may be used to prevent
+ <command>INSERT</> and <command>UPDATE</> commands from creating
such rows that are not visible through the view.
</para>
diff --git a/doc/src/sgml/ref/drop_owned.sgml b/doc/src/sgml/ref/drop_owned.sgml
index 1cd8e60e40..64906efd3d 100644
--- a/doc/src/sgml/ref/drop_owned.sgml
+++ b/doc/src/sgml/ref/drop_owned.sgml
@@ -21,7 +21,7 @@ PostgreSQL documentation
<refsynopsisdiv>
<synopsis>
-DROP OWNED BY <replaceable class="PARAMETER">name</replaceable> [, ...] [ CASCADE | RESTRICT ]
+DROP OWNED BY { <replaceable class="PARAMETER">name</replaceable> | CURRENT_USER | SESSION_USER } [, ...] [ CASCADE | RESTRICT ]
</synopsis>
</refsynopsisdiv>
diff --git a/doc/src/sgml/ref/drop_transform.sgml b/doc/src/sgml/ref/drop_transform.sgml
new file mode 100644
index 0000000000..59ff87cfe4
--- /dev/null
+++ b/doc/src/sgml/ref/drop_transform.sgml
@@ -0,0 +1,123 @@
+<!-- doc/src/sgml/ref/drop_transform.sgml -->
+
+<refentry id="SQL-DROPTRANSFORM">
+ <indexterm zone="sql-droptransform">
+ <primary>DROP TRANSFORM</primary>
+ </indexterm>
+
+ <refmeta>
+ <refentrytitle>DROP TRANSFORM</refentrytitle>
+ <manvolnum>7</manvolnum>
+ <refmiscinfo>SQL - Language Statements</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>DROP TRANSFORM</refname>
+ <refpurpose>remove a transform</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+<synopsis>
+DROP TRANSFORM [ IF EXISTS ] FOR <replaceable>type_name</replaceable> LANGUAGE <replaceable>lang_name</replaceable>
+</synopsis>
+ </refsynopsisdiv>
+
+ <refsect1 id="sql-droptransform-description">
+ <title>Description</title>
+
+ <para>
+ <command>DROP TRANSFORM</command> removes a previously defined transform.
+ </para>
+
+ <para>
+ To be able to drop a transform, you must own the type and the language.
+ These are the same privileges that are required to create a transform.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>Parameters</title>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><literal>IF EXISTS</literal></term>
+ <listitem>
+ <para>
+ Do not throw an error if the transform does not exist. A notice is issued
+ in this case.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable>type_name</replaceable></term>
+
+ <listitem>
+ <para>
+ The name of the data type of the transform.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable>lang_name</replaceable></term>
+
+ <listitem>
+ <para>
+ The name of the language of the transform.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>CASCADE</literal></term>
+ <listitem>
+ <para>
+ Automatically drop objects that depend on the transform.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>RESTRICT</literal></term>
+ <listitem>
+ <para>
+ Refuse to drop the transform if any objects depend on it. This is the
+ default.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1 id="sql-droptransform-examples">
+ <title>Examples</title>
+
+ <para>
+ To drop the transform for type <type>hstore</type> and language
+ <literal>plpythonu</literal>:
+<programlisting>
+DROP TRANSFORM FOR hstore LANGUAGE plpythonu;
+</programlisting></para>
+ </refsect1>
+
+ <refsect1 id="sql-droptransform-compat">
+ <title>Compatibility</title>
+
+ <para>
+ This form of <command>DROP TRANSFORM</command> is a
+ <productname>PostgreSQL</productname> extension. See <xref
+ linkend="sql-createtransform"> for details.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>See Also</title>
+
+ <simplelist type="inline">
+ <member><xref linkend="sql-createtransform"></member>
+ </simplelist>
+ </refsect1>
+
+</refentry>
diff --git a/doc/src/sgml/ref/insert.sgml b/doc/src/sgml/ref/insert.sgml
index a3cccb9f7c..7cd4577f1e 100644
--- a/doc/src/sgml/ref/insert.sgml
+++ b/doc/src/sgml/ref/insert.sgml
@@ -22,9 +22,24 @@ PostgreSQL documentation
<refsynopsisdiv>
<synopsis>
[ WITH [ RECURSIVE ] <replaceable class="parameter">with_query</replaceable> [, ...] ]
-INSERT INTO <replaceable class="PARAMETER">table_name</replaceable> [ ( <replaceable class="PARAMETER">column_name</replaceable> [, ...] ) ]
+INSERT INTO <replaceable class="PARAMETER">table_name</replaceable> [ AS <replaceable class="parameter">alias</replaceable> ] [ ( <replaceable class="PARAMETER">column_name</replaceable> [, ...] ) ]
{ DEFAULT VALUES | VALUES ( { <replaceable class="PARAMETER">expression</replaceable> | DEFAULT } [, ...] ) [, ...] | <replaceable class="PARAMETER">query</replaceable> }
+ [ ON CONFLICT [ <replaceable class="parameter">conflict_target</replaceable> ] <replaceable class="parameter">conflict_action</replaceable> ]
[ RETURNING * | <replaceable class="parameter">output_expression</replaceable> [ [ AS ] <replaceable class="parameter">output_name</replaceable> ] [, ...] ]
+
+<phrase>where <replaceable class="parameter">conflict_target</replaceable> can be one of:</phrase>
+
+ ( { <replaceable class="parameter">column_name_index</replaceable> | ( <replaceable class="parameter">expression_index</replaceable> ) } [ COLLATE <replaceable class="parameter">collation</replaceable> ] [ <replaceable class="parameter">opclass</replaceable> ] [, ...] ) [ WHERE <replaceable class="PARAMETER">index_predicate</replaceable> ]
+ ON CONSTRAINT <replaceable class="PARAMETER">constraint_name</replaceable>
+
+<phrase>and <replaceable class="parameter">conflict_action</replaceable> is one of:</phrase>
+
+ DO NOTHING
+ DO UPDATE SET { <replaceable class="PARAMETER">column_name</replaceable> = { <replaceable class="PARAMETER">expression</replaceable> | DEFAULT } |
+ ( <replaceable class="PARAMETER">column_name</replaceable> [, ...] ) = ( { <replaceable class="PARAMETER">expression</replaceable> | DEFAULT } [, ...] ) |
+ ( <replaceable class="PARAMETER">column_name</replaceable> [, ...] ) = ( <replaceable class="PARAMETER">sub-SELECT</replaceable> )
+ } [, ...]
+ [ WHERE <replaceable class="PARAMETER">condition</replaceable> ]
</synopsis>
</refsynopsisdiv>
@@ -59,19 +74,46 @@ INSERT INTO <replaceable class="PARAMETER">table_name</replaceable> [ ( <replace
</para>
<para>
+ <literal>ON CONFLICT</> can be used to specify an alternative
+ action to raising a unique constraint or exclusion constraint
+ violation error. (See <xref linkend="sql-on-conflict"
+ endterm="sql-on-conflict-title"> below.)
+ </para>
+
+ <para>
The optional <literal>RETURNING</> clause causes <command>INSERT</>
- to compute and return value(s) based on each row actually inserted.
- This is primarily useful for obtaining values that were supplied by
- defaults, such as a serial sequence number. However, any expression
- using the table's columns is allowed. The syntax of the
- <literal>RETURNING</> list is identical to that of the output list
- of <command>SELECT</>.
+ to compute and return value(s) based on each row actually inserted
+ (or updated, if an <literal>ON CONFLICT DO UPDATE</> clause was
+ used). This is primarily useful for obtaining values that were
+ supplied by defaults, such as a serial sequence number. However,
+ any expression using the table's columns is allowed. The syntax of
+ the <literal>RETURNING</> list is identical to that of the output
+ list of <command>SELECT</>. Only rows that were successfully
+ inserted or updated will be returned. For example, if a row was
+ locked but not updated because an <literal>ON CONFLICT DO UPDATE
+ ... WHERE</literal> clause <replaceable
+ class="PARAMETER">condition</replaceable> was not satisfied, the
+ row will not be returned.
</para>
<para>
You must have <literal>INSERT</literal> privilege on a table in
- order to insert into it. If a column list is specified, you only
- need <literal>INSERT</literal> privilege on the listed columns.
+ order to insert into it. If <literal>ON CONFLICT DO UPDATE</> is
+ present the <literal>UPDATE</literal> privilege is also required.
+ </para>
+
+ <para>
+ If a column list is specified, you only need
+ <literal>INSERT</literal> privilege on the listed columns.
+ Similarly, when <literal>ON CONFLICT DO UPDATE</> is specified, you
+ only need <literal>UPDATE</> privilege on the column(s) that are
+ listed to be updated. However, <literal>ON CONFLICT DO UPDATE</>
+ also requires <literal>SELECT</> privilege on any column whose
+ values are read in the <literal>ON CONFLICT DO UPDATE</>
+ expressions or <replaceable>condition</>.
+ </para>
+
+ <para>
Use of the <literal>RETURNING</> clause requires <literal>SELECT</>
privilege on all columns mentioned in <literal>RETURNING</>.
If you use the <replaceable
@@ -115,13 +157,32 @@ INSERT INTO <replaceable class="PARAMETER">table_name</replaceable> [ ( <replace
</varlistentry>
<varlistentry>
+ <term><replaceable class="parameter">alias</replaceable></term>
+ <listitem>
+ <para>
+ A substitute name for the target table. When an alias is provided, it
+ completely hides the actual name of the table. This is particularly
+ useful when <literal>ON CONFLICT DO UPDATE</literal> targets a table
+ named <literal>excluded</literal> as that's also the name of the
+ pseudo-relation containing the proposed row.
+ </para>
+ </listitem>
+ </varlistentry>
+
+
+ <varlistentry>
<term><replaceable class="PARAMETER">column_name</replaceable></term>
<listitem>
<para>
The name of a column in the table named by <replaceable class="PARAMETER">table_name</replaceable>.
The column name can be qualified with a subfield name or array
subscript, if needed. (Inserting into only some fields of a
- composite column leaves the other fields null.)
+ composite column leaves the other fields null.) When
+ referencing a column with <literal>ON CONFLICT DO UPDATE</>, do
+ not include the table's name in the specification of a target
+ column. For example, <literal>INSERT INTO table_name ... ON CONFLICT
+ DO UPDATE SET table_name.col = 1</> is invalid (this follows the general
+ behavior for <command>UPDATE</>).
</para>
</listitem>
</varlistentry>
@@ -171,14 +232,35 @@ INSERT INTO <replaceable class="PARAMETER">table_name</replaceable> [ ( <replace
<listitem>
<para>
An expression to be computed and returned by the <command>INSERT</>
- command after each row is inserted. The expression can use any
- column names of the table named by <replaceable class="PARAMETER">table_name</replaceable>.
+ command after each row is inserted (not updated). The
+ expression can use any column names of the table named by
+ <replaceable class="PARAMETER">table_name</replaceable>.
Write <literal>*</> to return all columns of the inserted row(s).
</para>
</listitem>
</varlistentry>
<varlistentry>
+ <term><literal>conflict_target</literal></term>
+ <listitem>
+ <para>
+ Specify which conflicts <literal>ON CONFLICT</literal> refers to.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>conflict_action</literal></term>
+ <listitem>
+ <para>
+ <literal>DO NOTHING</literal> or <literal>DO UPDATE
+ SET</literal> clause specifying the action to be performed in
+ case of a conflict.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><replaceable class="PARAMETER">output_name</replaceable></term>
<listitem>
<para>
@@ -186,9 +268,226 @@ INSERT INTO <replaceable class="PARAMETER">table_name</replaceable> [ ( <replace
</para>
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term><replaceable class="PARAMETER">column_name_index</replaceable></term>
+ <listitem>
+ <para>
+ The name of a <replaceable
+ class="PARAMETER">table_name</replaceable> column. Part of a
+ unique index inference clause. Follows <command>CREATE
+ INDEX</command> format. <literal>SELECT</> privilege on
+ <replaceable class="PARAMETER">column_name_index</replaceable>
+ is required.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable class="PARAMETER">expression_index</replaceable></term>
+ <listitem>
+ <para>
+ Similar to <replaceable
+ class="PARAMETER">column_name_index</replaceable>, but used to
+ infer expressions on <replaceable
+ class="PARAMETER">table_name</replaceable> columns appearing
+ within index definitions (not simple columns). Part of unique
+ index inference clause. Follows <command>CREATE INDEX</command>
+ format. <literal>SELECT</> privilege on any column appearing
+ within <replaceable
+ class="PARAMETER">expression_index</replaceable> is required.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable class="PARAMETER">collation</replaceable></term>
+ <listitem>
+ <para>
+ When specified, mandates that corresponding <replaceable
+ class="PARAMETER">column_name_index</replaceable> or
+ <replaceable class="PARAMETER">expression_index</replaceable> use a
+ particular collation in order to be matched in the inference clause.
+ Typically this is omitted, as collations usually do not affect whether or
+ not a constraint violation occurs. Follows <command>CREATE
+ INDEX</command> format.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable class="PARAMETER">opclass</replaceable></term>
+ <listitem>
+ <para>
+ When specified, mandates that corresponding <replaceable
+ class="PARAMETER">column_name_index</replaceable> or
+ <replaceable class="PARAMETER">expression_index</replaceable> use
+ a particular operator class in order to be matched by the inference
+ clause. Sometimes this is omitted because the
+ <emphasis>equality</emphasis> semantics are often equivalent across a
+ type's operator classes anyway, or because it's sufficient to trust that
+ the defined unique indexes have the pertinent definition of equality.
+ Follows <command>CREATE INDEX</command> format.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable class="PARAMETER">index_predicate</replaceable></term>
+ <listitem>
+ <para>
+ Used to allow inference of partial unique indexes. Any indexes
+ that satisfy the predicate (which need not actually be partial
+ indexes) can be matched by the rest of the inference clause.
+ Follows <command>CREATE INDEX</command> format.
+ <literal>SELECT</> privilege on any column appearing within
+ <replaceable class="PARAMETER">index_predicate</replaceable> is
+ required.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable class="PARAMETER">constraint_name</replaceable></term>
+ <listitem>
+ <para>
+ Explicitly specifies an arbiter <emphasis>constraint</emphasis>
+ by name, rather than inferring a constraint or index. This is
+ mostly useful for exclusion constraints, which cannot be chosen
+ in the conventional way (with an inference clause).
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><replaceable class="PARAMETER">condition</replaceable></term>
+ <listitem>
+ <para>
+ An expression that returns a value of type <type>boolean</type>. Only
+ rows for which this expression returns <literal>true</literal> will be
+ updated, although all rows will be locked when the
+ <literal>ON CONFLICT DO UPDATE</> action is taken.
+ </para>
+ </listitem>
+ </varlistentry>
</variablelist>
</refsect1>
+ <refsect1 id="sql-on-conflict">
+ <title id="sql-on-conflict-title"><literal>ON CONFLICT</literal> Clause</title>
+ <indexterm zone="SQL-INSERT">
+ <primary>UPSERT</primary>
+ </indexterm>
+ <indexterm zone="SQL-INSERT">
+ <primary>ON CONFLICT</primary>
+ </indexterm>
+ <para>
+ The optional <literal>ON CONFLICT</literal> clause specifies an
+ alternative action to raising a unique violation or exclusion
+ constraint violation error. For each individual row proposed for
+ insertion, either the insertion proceeds, or, if a constraint
+ specified by the <parameter>conflict_target</parameter> is
+ violated, the alternative <parameter>conflict_action</parameter> is
+ taken.
+ </para>
+
+ <para>
+ <parameter>conflict_target</parameter> describes which conflicts
+ are handled by the <literal>ON CONFLICT</literal> clause. Either a
+ <emphasis>unique index inference</emphasis> clause or an explicitly
+ named constraint can be used. For <literal>ON CONFLICT DO
+ NOTHING</literal>, it is optional to specify a
+ <parameter>conflict_target</parameter>; when omitted, conflicts
+ with all usable constraints (and unique indexes) are handled. For
+ <literal>ON CONFLICT DO UPDATE</literal>, a conflict target
+ <emphasis>must</emphasis> be specified.
+
+ Every time an insertion without <literal>ON CONFLICT</literal>
+ would ordinarily raise an error due to violating one of the
+ inferred (or explicitly named) constraints, a conflict (as in
+ <literal>ON CONFLICT</literal>) occurs, and the alternative action,
+ as specified by <parameter>conflict_action</parameter> is taken.
+ This happens on a row-by-row basis.
+ </para>
+
+ <para>
+ A <emphasis>unique index inference</emphasis> clause consists of
+ one or more <replaceable
+ class="PARAMETER">column_name_index</replaceable> columns and/or
+ <replaceable class="PARAMETER">expression_index</replaceable>
+ expressions, and an optional <replaceable class="PARAMETER">
+ index_predicate</replaceable>.
+ </para>
+
+ <para>
+ All the <replaceable class="PARAMETER">table_name</replaceable>
+ unique indexes that, without regard to order, contain exactly the
+ specified columns/expressions and, if specified, whose predicate
+ implies the <replaceable class="PARAMETER">
+ index_predicate</replaceable> are chosen as arbiter indexes. Note
+ that this means an index without a predicate will be used if a
+ non-partial index matching every other criteria happens to be
+ available.
+ </para>
+
+ <para>
+ If no index matches the inference clause (nor is there a constraint
+ explicitly named), an error is raised. Deferred constraints are
+ not supported as arbiters.
+ </para>
+
+ <para>
+ <parameter>conflict_action</parameter> defines the action to be
+ taken in case of conflict. <literal>ON CONFLICT DO
+ NOTHING</literal> simply avoids inserting a row as its alternative
+ action. <literal>ON CONFLICT DO UPDATE</literal> updates the
+ existing row that conflicts with the row proposed for insertion as
+ its alternative action.
+
+ <literal>ON CONFLICT DO UPDATE</literal> guarantees an atomic
+ <command>INSERT</command> or <command>UPDATE</command> outcome - provided
+ there is no independent error, one of those two outcomes is guaranteed,
+ even under high concurrency. This feature is also known as
+ <firstterm>UPSERT</firstterm>.
+
+ Note that exclusion constraints are not supported with
+ <literal>ON CONFLICT DO UPDATE</literal>.
+ </para>
+
+ <para>
+ <literal>ON CONFLICT DO UPDATE</literal> optionally accepts
+ a <literal>WHERE</literal> clause <replaceable>condition</replaceable>.
+ When provided, the statement only proceeds with updating if
+ the <replaceable>condition</replaceable> is satisfied. Otherwise, unlike a
+ conventional <command>UPDATE</command>, the row is still locked for update.
+ Note that the <replaceable>condition</replaceable> is evaluated last, after
+ a conflict has been identified as a candidate to update.
+ </para>
+
+ <para>
+ The <literal>SET</literal> and <literal>WHERE</literal> clauses in
+ <literal>ON CONFLICT DO UPDATE</literal> have access to the existing
+ row, using the table's name, and to the row
+ proposed for insertion, using the <varname>excluded</varname>
+ alias. The <varname>excluded</varname> alias requires
+ <literal>SELECT</> privilege on any column whose values are read.
+
+ Note that the effects of all per-row <literal>BEFORE INSERT</literal>
+ triggers are reflected in <varname>excluded</varname> values, since those
+ effects may have contributed to the row being excluded from insertion.
+ </para>
+
+ <para>
+ <command>INSERT</command> with an <literal>ON CONFLICT DO UPDATE</>
+ clause is a <quote>deterministic</quote> statement. This means
+ that the command will not be allowed to affect any single existing
+ row more than once; a cardinality violation error will be raised
+ when this situation arises. Rows proposed for insertion should not
+ duplicate each other in terms of attributes constrained by the
+ conflict-arbitrating unique index.
+ </para>
+ </refsect1>
+
<refsect1>
<title>Outputs</title>
@@ -198,20 +497,22 @@ INSERT INTO <replaceable class="PARAMETER">table_name</replaceable> [ ( <replace
<screen>
INSERT <replaceable>oid</replaceable> <replaceable class="parameter">count</replaceable>
</screen>
- The <replaceable class="parameter">count</replaceable> is the number
- of rows inserted. If <replaceable class="parameter">count</replaceable>
- is exactly one, and the target table has OIDs, then
- <replaceable class="parameter">oid</replaceable> is the
- <acronym>OID</acronym> assigned to the inserted row. Otherwise
- <replaceable class="parameter">oid</replaceable> is zero.
+ The <replaceable class="parameter">count</replaceable> is the
+ number of rows inserted or updated. If <replaceable
+ class="parameter">count</replaceable> is exactly one, and the
+ target table has OIDs, then <replaceable
+ class="parameter">oid</replaceable> is the <acronym>OID</acronym>
+ assigned to the inserted row. The single row must have been
+ inserted rather than updated. Otherwise <replaceable
+ class="parameter">oid</replaceable> is zero.
</para>
<para>
If the <command>INSERT</> command contains a <literal>RETURNING</>
clause, the result will be similar to that of a <command>SELECT</>
statement containing the columns and values defined in the
- <literal>RETURNING</> list, computed over the row(s) inserted by the
- command.
+ <literal>RETURNING</> list, computed over the row(s) inserted or
+ updated by the command.
</para>
</refsect1>
@@ -311,7 +612,65 @@ WITH upd AS (
RETURNING *
)
INSERT INTO employees_log SELECT *, current_timestamp FROM upd;
-</programlisting></para>
+</programlisting>
+ </para>
+ <para>
+ Insert or update new distributors as appropriate. Assumes a unique
+ index has been defined that constrains values appearing in the
+ <literal>did</literal> column. Note that an <varname>EXCLUDED</>
+ expression is used to reference values originally proposed for
+ insertion:
+<programlisting>
+ INSERT INTO distributors (did, dname)
+ VALUES (5, 'Gizmo transglobal'), (6, 'Associated Computing, inc')
+ ON CONFLICT (did) DO UPDATE SET dname = EXCLUDED.dname;
+</programlisting>
+ </para>
+ <para>
+ Insert a distributor, or do nothing for rows proposed for insertion
+ when an existing, excluded row (a row with a matching constrained
+ column or columns after before-row insert triggers fire) exists.
+ Example assumes a unique index has been defined that constrains
+ values appearing in the <literal>did</literal> column:
+<programlisting>
+ INSERT INTO distributors (did, dname) VALUES (7, 'Redline GmbH')
+ ON CONFLICT (did) DO NOTHING;
+</programlisting>
+ </para>
+ <para>
+ Insert or update new distributors as appropriate. Example assumes
+ a unique index has been defined that constrains values appearing in
+ the <literal>did</literal> column. <literal>WHERE</> clause is
+ used to limit the rows actually updated (any existing row not
+ updated will still be locked, though):
+<programlisting>
+ -- Don't update existing distributors based in a certain ZIP code
+ INSERT INTO distributors AS d (did, dname) VALUES (8, 'Anvil Distribution')
+ ON CONFLICT (did) DO UPDATE
+ SET dname = EXCLUDED.dname || ' (formerly ' || d.dname || ')'
+ WHERE d.zipcode != '21201';
+
+ -- Name a constraint directly in the statement (uses associated
+ -- index to arbitrate taking the DO NOTHING action)
+ INSERT INTO distributors (did, dname) VALUES (9, 'Antwerp Design')
+ ON CONFLICT ON CONSTRAINT distributors_pkey DO NOTHING;
+</programlisting>
+ </para>
+ <para>
+ Insert new distributor if possible; otherwise
+ <literal>DO NOTHING</literal>. Example assumes a unique index has been
+ defined that constrains values appearing in the
+ <literal>did</literal> column on a subset of rows where the
+ <literal>is_active</literal> boolean column evaluates to
+ <literal>true</literal>:
+<programlisting>
+ -- This statement could infer a partial unique index on "did"
+ -- with a predicate of "WHERE is_active", but it could also
+ -- just use a regular unique constraint on "did"
+ INSERT INTO distributors (did, dname) VALUES (10, 'Conrad International')
+ ON CONFLICT (did) WHERE is_active DO NOTHING;
+</programlisting>
+ </para>
</refsect1>
<refsect1>
@@ -321,7 +680,8 @@ INSERT INTO employees_log SELECT *, current_timestamp FROM upd;
<command>INSERT</command> conforms to the SQL standard, except that
the <literal>RETURNING</> clause is a
<productname>PostgreSQL</productname> extension, as is the ability
- to use <literal>WITH</> with <command>INSERT</>.
+ to use <literal>WITH</> with <command>INSERT</>, and the ability to
+ specify an alternative action with <literal>ON CONFLICT</>.
Also, the case in
which a column name list is omitted, but not all the columns are
filled from the <literal>VALUES</> clause or <replaceable>query</>,
diff --git a/doc/src/sgml/ref/lock.sgml b/doc/src/sgml/ref/lock.sgml
index 913afe76dd..b946eab303 100644
--- a/doc/src/sgml/ref/lock.sgml
+++ b/doc/src/sgml/ref/lock.sgml
@@ -161,9 +161,11 @@ LOCK [ TABLE ] [ ONLY ] <replaceable class="PARAMETER">name</replaceable> [ * ]
<para>
<literal>LOCK TABLE ... IN ACCESS SHARE MODE</> requires <literal>SELECT</>
- privileges on the target table. All other forms of <command>LOCK</>
- require table-level <literal>UPDATE</>, <literal>DELETE</>, or
- <literal>TRUNCATE</> privileges.
+ privileges on the target table. <literal>LOCK TABLE ... IN ROW EXCLUSIVE
+ MODE</> requires <literal>INSERT</>, <literal>UPDATE</>, <literal>DELETE</>,
+ or <literal>TRUNCATE</> privileges on the target table. All other forms of
+ <command>LOCK</> require table-level <literal>UPDATE</>, <literal>DELETE</>,
+ or <literal>TRUNCATE</> privileges.
</para>
<para>
diff --git a/doc/src/sgml/ref/pg_basebackup.sgml b/doc/src/sgml/ref/pg_basebackup.sgml
index 642fccf325..07d3a5a1dc 100644
--- a/doc/src/sgml/ref/pg_basebackup.sgml
+++ b/doc/src/sgml/ref/pg_basebackup.sgml
@@ -588,10 +588,22 @@ PostgreSQL documentation
</para>
<para>
+ When tar format mode is used, it is the user's responsibility to unpack each
+ tar file before starting postgres. If there are additional tablespaces, the
+ tar files for them need to be unpacked in the correct locations. In this
+ case the symbolic links for those tablespaces will be created by <productname>PostgreSQL</>
+ according to the contents of the <filename>tablespace_map</> file that is
+ included in the <filename>base.tar</> file.
+ </para>
+
+ <para>
<application>pg_basebackup</application> works with servers of the same
or an older major version, down to 9.1. However, WAL streaming mode (-X
- stream) only works with server version 9.3 and later.
+ stream) only works with server version 9.3 and later, and tar format mode
+ (--format=tar) of the current version only works with server version 9.5
+ or later.
</para>
+
</refsect1>
<refsect1>
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index fcf5f77a6d..272af3eb0a 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -454,8 +454,8 @@ PostgreSQL documentation
<term><option>--database=<replaceable>dbname</replaceable></option></term>
<listitem>
<para>
- Specifies the name of the database to connect to to dump global
- objects and discover what other databases should be dumped. If
+ Specifies the name of the database to connect to for dumping global
+ objects and discovering what other databases should be dumped. If
not specified, the <literal>postgres</literal> database will be used,
and if that does not exist, <literal>template1</literal> will be used.
</para>
diff --git a/doc/src/sgml/pg_xlogdump.sgml b/doc/src/sgml/ref/pg_xlogdump.sgml
index d9f4a6a499..d9f4a6a499 100644
--- a/doc/src/sgml/pg_xlogdump.sgml
+++ b/doc/src/sgml/ref/pg_xlogdump.sgml
diff --git a/doc/src/sgml/pgtestfsync.sgml b/doc/src/sgml/ref/pgtestfsync.sgml
index c4b4014b1a..5dcabe4b77 100644
--- a/doc/src/sgml/pgtestfsync.sgml
+++ b/doc/src/sgml/ref/pgtestfsync.sgml
@@ -1,4 +1,4 @@
-<!-- doc/src/sgml/pgtestfsync.sgml -->
+<!-- doc/src/sgml/ref/pgtestfsync.sgml -->
<refentry id="pgtestfsync">
<indexterm zone="pgtestfsync">
@@ -104,14 +104,6 @@
</refsect1>
<refsect1>
- <title>Author</title>
-
- <para>
- Bruce Momjian <email>[email protected]</email>
- </para>
- </refsect1>
-
- <refsect1>
<title>See Also</title>
<simplelist type="inline">
diff --git a/doc/src/sgml/pgtesttiming.sgml b/doc/src/sgml/ref/pgtesttiming.sgml
index a6ab9b114b..d5e231fff7 100644
--- a/doc/src/sgml/pgtesttiming.sgml
+++ b/doc/src/sgml/ref/pgtesttiming.sgml
@@ -1,4 +1,4 @@
-<!-- doc/src/sgml/pgtesttiming.sgml -->
+<!-- doc/src/sgml/ref/pgtesttiming.sgml -->
<refentry id="pgtesttiming">
<indexterm zone="pgtesttiming">
@@ -291,14 +291,6 @@ Histogram of timing durations:
</refsect1>
<refsect1>
- <title>Author</title>
-
- <para>
- Ants Aasma <email>[email protected]</email>
- </para>
- </refsect1>
-
- <refsect1>
<title>See Also</title>
<simplelist type="inline">
diff --git a/doc/src/sgml/ref/reassign_owned.sgml b/doc/src/sgml/ref/reassign_owned.sgml
index d7d6abb9d8..382cba337b 100644
--- a/doc/src/sgml/ref/reassign_owned.sgml
+++ b/doc/src/sgml/ref/reassign_owned.sgml
@@ -21,7 +21,8 @@ PostgreSQL documentation
<refsynopsisdiv>
<synopsis>
-REASSIGN OWNED BY <replaceable class="PARAMETER">old_role</replaceable> [, ...] TO <replaceable class="PARAMETER">new_role</replaceable>
+REASSIGN OWNED BY { <replaceable class="PARAMETER">old_role</replaceable> | CURRENT_USER | SESSION_USER } [, ...]
+ TO { <replaceable class="PARAMETER">new_role</replaceable> | CURRENT_USER | SESSION_USER }
</synopsis>
</refsynopsisdiv>
diff --git a/doc/src/sgml/ref/reindex.sgml b/doc/src/sgml/ref/reindex.sgml
index 998340c5db..703b7609cf 100644
--- a/doc/src/sgml/ref/reindex.sgml
+++ b/doc/src/sgml/ref/reindex.sgml
@@ -21,7 +21,7 @@ PostgreSQL documentation
<refsynopsisdiv>
<synopsis>
-REINDEX { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } <replaceable class="PARAMETER">name</replaceable>
+REINDEX [ ( { VERBOSE } [, ...] ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } <replaceable class="PARAMETER">name</replaceable>
</synopsis>
</refsynopsisdiv>
@@ -150,6 +150,15 @@ REINDEX { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } <replaceable class="PARAM
</para>
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term><literal>VERBOSE</literal></term>
+ <listitem>
+ <para>
+ Prints a progress report as each index is reindexed.
+ </para>
+ </listitem>
+ </varlistentry>
</variablelist>
</refsect1>
diff --git a/doc/src/sgml/ref/reindexdb.sgml b/doc/src/sgml/ref/reindexdb.sgml
index b5b449c256..713efc099b 100644
--- a/doc/src/sgml/ref/reindexdb.sgml
+++ b/doc/src/sgml/ref/reindexdb.sgml
@@ -23,14 +23,15 @@ PostgreSQL documentation
<cmdsynopsis>
<command>reindexdb</command>
<arg rep="repeat"><replaceable>connection-option</replaceable></arg>
-
+ <arg rep="repeat"><replaceable>option</replaceable></arg>
+
<arg choice="plain" rep="repeat">
<arg choice="opt">
<group choice="plain">
<arg choice="plain"><option>--schema</option></arg>
<arg choice="plain"><option>-S</option></arg>
</group>
- <replaceable>table</replaceable>
+ <replaceable>schema</replaceable>
</arg>
</arg>
@@ -60,6 +61,8 @@ PostgreSQL documentation
<cmdsynopsis>
<command>reindexdb</command>
<arg rep="repeat"><replaceable>connection-option</replaceable></arg>
+ <arg rep="repeat"><replaceable>option</replaceable></arg>
+
<group choice="plain">
<arg choice="plain"><option>--all</option></arg>
<arg choice="plain"><option>-a</option></arg>
@@ -69,6 +72,8 @@ PostgreSQL documentation
<cmdsynopsis>
<command>reindexdb</command>
<arg rep="repeat"><replaceable>connection-option</replaceable></arg>
+ <arg rep="repeat"><replaceable>option</replaceable></arg>
+
<group choice="plain">
<arg choice="plain"><option>--system</option></arg>
<arg choice="plain"><option>-s</option></arg>
@@ -196,6 +201,16 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
+ <term><option>-v</></term>
+ <term><option>--verbose</></term>
+ <listitem>
+ <para>
+ Print detailed information during processing.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>-V</></term>
<term><option>--version</></term>
<listitem>
diff --git a/doc/src/sgml/ref/select.sgml b/doc/src/sgml/ref/select.sgml
index 2295f63c13..632d7935cb 100644
--- a/doc/src/sgml/ref/select.sgml
+++ b/doc/src/sgml/ref/select.sgml
@@ -37,7 +37,7 @@ SELECT [ ALL | DISTINCT [ ON ( <replaceable class="parameter">expression</replac
[ * | <replaceable class="parameter">expression</replaceable> [ [ AS ] <replaceable class="parameter">output_name</replaceable> ] [, ...] ]
[ FROM <replaceable class="parameter">from_item</replaceable> [, ...] ]
[ WHERE <replaceable class="parameter">condition</replaceable> ]
- [ GROUP BY <replaceable class="parameter">expression</replaceable> [, ...] ]
+ [ GROUP BY <replaceable class="parameter">grouping_element</replaceable> [, ...] ]
[ HAVING <replaceable class="parameter">condition</replaceable> [, ...] ]
[ WINDOW <replaceable class="parameter">window_name</replaceable> AS ( <replaceable class="parameter">window_definition</replaceable> ) [, ...] ]
[ { UNION | INTERSECT | EXCEPT } [ ALL | DISTINCT ] <replaceable class="parameter">select</replaceable> ]
@@ -49,7 +49,7 @@ SELECT [ ALL | DISTINCT [ ON ( <replaceable class="parameter">expression</replac
<phrase>where <replaceable class="parameter">from_item</replaceable> can be one of:</phrase>
- [ ONLY ] <replaceable class="parameter">table_name</replaceable> [ * ] [ [ AS ] <replaceable class="parameter">alias</replaceable> [ ( <replaceable class="parameter">column_alias</replaceable> [, ...] ) ] ]
+ [ ONLY ] <replaceable class="parameter">table_name</replaceable> [ * ] [ [ AS ] <replaceable class="parameter">alias</replaceable> [ ( <replaceable class="parameter">column_alias</replaceable> [, ...] ) ] ] [ TABLESAMPLE <replaceable class="parameter">sampling_method</replaceable> ( <replaceable class="parameter">argument</replaceable> [, ...] ) [ REPEATABLE ( <replaceable class="parameter">seed</replaceable> ) ] ]
[ LATERAL ] ( <replaceable class="parameter">select</replaceable> ) [ AS ] <replaceable class="parameter">alias</replaceable> [ ( <replaceable class="parameter">column_alias</replaceable> [, ...] ) ]
<replaceable class="parameter">with_query_name</replaceable> [ [ AS ] <replaceable class="parameter">alias</replaceable> [ ( <replaceable class="parameter">column_alias</replaceable> [, ...] ) ] ]
[ LATERAL ] <replaceable class="parameter">function_name</replaceable> ( [ <replaceable class="parameter">argument</replaceable> [, ...] ] )
@@ -60,6 +60,15 @@ SELECT [ ALL | DISTINCT [ ON ( <replaceable class="parameter">expression</replac
[ WITH ORDINALITY ] [ [ AS ] <replaceable class="parameter">alias</replaceable> [ ( <replaceable class="parameter">column_alias</replaceable> [, ...] ) ] ]
<replaceable class="parameter">from_item</replaceable> [ NATURAL ] <replaceable class="parameter">join_type</replaceable> <replaceable class="parameter">from_item</replaceable> [ ON <replaceable class="parameter">join_condition</replaceable> | USING ( <replaceable class="parameter">join_column</replaceable> [, ...] ) ]
+<phrase>and <replaceable class="parameter">grouping_element</replaceable> can be one of:</phrase>
+
+ ( )
+ <replaceable class="parameter">expression</replaceable>
+ ( <replaceable class="parameter">expression</replaceable> [, ...] )
+ ROLLUP ( { <replaceable class="parameter">expression</replaceable> | ( <replaceable class="parameter">expression</replaceable> [, ...] ) } [, ...] )
+ CUBE ( { <replaceable class="parameter">expression</replaceable> | ( <replaceable class="parameter">expression</replaceable> [, ...] ) } [, ...] )
+ GROUPING SETS ( <replaceable class="parameter">grouping_element</replaceable> [, ...] )
+
<phrase>and <replaceable class="parameter">with_query</replaceable> is:</phrase>
<replaceable class="parameter">with_query_name</replaceable> [ ( <replaceable class="parameter">column_name</replaceable> [, ...] ) ] AS ( <replaceable class="parameter">select</replaceable> | <replaceable class="parameter">values</replaceable> | <replaceable class="parameter">insert</replaceable> | <replaceable class="parameter">update</replaceable> | <replaceable class="parameter">delete</replaceable> )
@@ -317,6 +326,50 @@ TABLE [ ONLY ] <replaceable class="parameter">table_name</replaceable> [ * ]
</varlistentry>
<varlistentry>
+ <term>TABLESAMPLE <replaceable class="parameter">sampling_method</replaceable> ( <replaceable class="parameter">argument</replaceable> [, ...] ) [ REPEATABLE ( <replaceable class="parameter">seed</replaceable> ) ]</term>
+ <listitem>
+ <para>
+ Table sample clause after
+ <replaceable class="parameter">table_name</replaceable> indicates that
+ a <replaceable class="parameter">sampling_method</replaceable> should
+      be used to retrieve a subset of rows in the table.
+ The <replaceable class="parameter">sampling_method</replaceable> can be
+ any sampling method installed in the database. There are currently two
+ sampling methods available in the standard
+ <productname>PostgreSQL</productname> distribution:
+ <itemizedlist>
+ <listitem>
+ <para><literal>SYSTEM</literal></para>
+ </listitem>
+ <listitem>
+ <para><literal>BERNOULLI</literal></para>
+ </listitem>
+ </itemizedlist>
+      Both of these sampling methods currently accept only a single argument
+ which is the percent (floating point from 0 to 100) of the rows to
+ be returned.
+ The <literal>SYSTEM</literal> sampling method does block level
+ sampling with each block having the same chance of being selected and
+ returns all rows from each selected block.
+      The <literal>BERNOULLI</literal> method scans the whole table and returns
+ individual rows with equal probability. Additional sampling methods
+ may be installed in the database via extensions.
+ </para>
+ <para>
+ The optional parameter <literal>REPEATABLE</literal> uses the seed
+ parameter, which can be a number or expression producing a number, as
+ a random seed for sampling. Note that subsequent commands may return
+      different results even if the same <literal>REPEATABLE</literal> clause was
+ specified. This happens because <acronym>DML</acronym> statements and
+ maintenance operations such as <command>VACUUM</> may affect physical
+ distribution of data. The <function>setseed()</> function will not
+ affect the sampling result when the <literal>REPEATABLE</literal>
+ parameter is used.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><replaceable class="parameter">alias</replaceable></term>
<listitem>
<para>
@@ -621,23 +674,35 @@ WHERE <replaceable class="parameter">condition</replaceable>
<para>
The optional <literal>GROUP BY</literal> clause has the general form
<synopsis>
-GROUP BY <replaceable class="parameter">expression</replaceable> [, ...]
+GROUP BY <replaceable class="parameter">grouping_element</replaceable> [, ...]
</synopsis>
</para>
<para>
<literal>GROUP BY</literal> will condense into a single row all
selected rows that share the same values for the grouped
- expressions. <replaceable
- class="parameter">expression</replaceable> can be an input column
- name, or the name or ordinal number of an output column
- (<command>SELECT</command> list item), or an arbitrary
+ expressions. An <replaceable
+ class="parameter">expression</replaceable> used inside a
+ <replaceable class="parameter">grouping_element</replaceable>
+ can be an input column name, or the name or ordinal number of an
+ output column (<command>SELECT</command> list item), or an arbitrary
expression formed from input-column values. In case of ambiguity,
a <literal>GROUP BY</literal> name will be interpreted as an
input-column name rather than an output column name.
</para>
<para>
+ If any of <literal>GROUPING SETS</>, <literal>ROLLUP</> or
+ <literal>CUBE</> are present as grouping elements, then the
+ <literal>GROUP BY</> clause as a whole defines some number of
+ independent <replaceable>grouping sets</>. The effect of this is
+ equivalent to constructing a <literal>UNION ALL</> between
+ subqueries with the individual grouping sets as their
+ <literal>GROUP BY</> clauses. For further details on the handling
+ of grouping sets see <xref linkend="queries-grouping-sets">.
+ </para>
+
+ <para>
Aggregate functions, if any are used, are computed across all rows
making up each group, producing a separate value for each group.
(If there are aggregate functions but no <literal>GROUP BY</literal>
@@ -1927,5 +1992,20 @@ SELECT distributors.* WHERE distributors.name = 'Westward';
<literal>ROWS FROM( ... )</> is an extension of the SQL standard.
</para>
</refsect2>
+
+ <refsect2>
+ <title><literal>TABLESAMPLE</literal> clause</title>
+
+ <para>
+ The <literal>TABLESAMPLE</> clause is currently accepted only on physical
+ relations and materialized views.
+ </para>
+
+ <para>
+ Additional modules allow you to install custom sampling methods and use
+ them instead of the SQL standard methods.
+ </para>
+ </refsect2>
+
</refsect1>
</refentry>
diff --git a/doc/src/sgml/reference.sgml b/doc/src/sgml/reference.sgml
index fb18d94ea0..03020dfec4 100644
--- a/doc/src/sgml/reference.sgml
+++ b/doc/src/sgml/reference.sgml
@@ -111,6 +111,7 @@
&createTSDictionary;
&createTSParser;
&createTSTemplate;
+ &createTransform;
&createTrigger;
&createType;
&createUser;
@@ -152,6 +153,7 @@
&dropTSDictionary;
&dropTSParser;
&dropTSTemplate;
+ &dropTransform;
&dropTrigger;
&dropType;
&dropUser;
@@ -263,7 +265,10 @@
&pgCtl;
&pgResetxlog;
&pgRewind;
+ &pgtestfsync;
+ &pgtesttiming;
&pgupgrade;
+ &pgxlogdump;
&postgres;
&postmaster;
diff --git a/doc/src/sgml/release-9.0.sgml b/doc/src/sgml/release-9.0.sgml
index 90339a5eae..80cd1c43cd 100644
--- a/doc/src/sgml/release-9.0.sgml
+++ b/doc/src/sgml/release-9.0.sgml
@@ -1,6 +1,544 @@
<!-- doc/src/sgml/release-9.0.sgml -->
<!-- See header comment in release.sgml about typical markup -->
+ <sect1 id="release-9-0-21">
+ <title>Release 9.0.21</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-06-04</simpara>
+ </note>
+
+ <para>
+ This release contains a small number of fixes from 9.0.20.
+ For information about new features in the 9.0 major release, see
+ <xref linkend="release-9-0">.
+ </para>
+
+ <para>
+ The <productname>PostgreSQL</> community will stop releasing updates
+ for the 9.0.X release series in September 2015.
+ Users are encouraged to update to a newer release branch soon.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.0.21</title>
+
+ <para>
+ A dump/restore is not required for those running 9.0.X.
+ </para>
+
+ <para>
+ However, if you are upgrading from a version earlier than 9.0.18,
+ see <xref linkend="release-9-0-18">.
+ </para>
+
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+ <listitem>
+ <para>
+ Avoid failures while <function>fsync</>'ing data directory during
+ crash restart (Abhijit Menon-Sen, Tom Lane)
+ </para>
+
+ <para>
+ In the previous minor releases we added a patch to <function>fsync</>
+ everything in the data directory after a crash. Unfortunately its
+ response to any error condition was to fail, thereby preventing the
+ server from starting up, even when the problem was quite harmless.
+ An example is that an unwritable file in the data directory would
+ prevent restart on some platforms; but it is common to make SSL
+ certificate files unwritable by the server. Revise this behavior so
+ that permissions failures are ignored altogether, and other types of
+ failures are logged but do not prevent continuing.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Remove <application>configure</>'s check prohibiting linking to a
+ threaded <application>libpython</>
+ on <systemitem class="osname">OpenBSD</> (Tom Lane)
+ </para>
+
+ <para>
+ The failure this restriction was meant to prevent seems to not be a
+ problem anymore on current <systemitem class="osname">OpenBSD</>
+ versions.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Allow <application>libpq</> to use TLS protocol versions beyond v1
+ (Noah Misch)
+ </para>
+
+ <para>
+ For a long time, <application>libpq</> was coded so that the only SSL
+ protocol it would allow was TLS v1. Now that newer TLS versions are
+ becoming popular, allow it to negotiate the highest commonly-supported
+ TLS version with the server. (<productname>PostgreSQL</> servers were
+ already capable of such negotiation, so no change is needed on the
+ server side.) This is a back-patch of a change already released in
+ 9.4.0.
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
+ <sect1 id="release-9-0-20">
+ <title>Release 9.0.20</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-05-22</simpara>
+ </note>
+
+ <para>
+ This release contains a variety of fixes from 9.0.19.
+ For information about new features in the 9.0 major release, see
+ <xref linkend="release-9-0">.
+ </para>
+
+ <para>
+ The <productname>PostgreSQL</> community will stop releasing updates
+ for the 9.0.X release series in September 2015.
+ Users are encouraged to update to a newer release branch soon.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.0.20</title>
+
+ <para>
+ A dump/restore is not required for those running 9.0.X.
+ </para>
+
+ <para>
+ However, if you are upgrading from a version earlier than 9.0.18,
+ see <xref linkend="release-9-0-18">.
+ </para>
+
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+ <listitem>
+ <para>
+ Avoid possible crash when client disconnects just before the
+ authentication timeout expires (Benkocs Norbert Attila)
+ </para>
+
+ <para>
+ If the timeout interrupt fired partway through the session shutdown
+ sequence, SSL-related state would be freed twice, typically causing a
+ crash and hence denial of service to other sessions. Experimentation
+ shows that an unauthenticated remote attacker could trigger the bug
+ somewhat consistently, hence treat as security issue.
+ (CVE-2015-3165)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Improve detection of system-call failures (Noah Misch)
+ </para>
+
+ <para>
+ Our replacement implementation of <function>snprintf()</> failed to
+ check for errors reported by the underlying system library calls;
+ the main case that might be missed is out-of-memory situations.
+ In the worst case this might lead to information exposure, due to our
+ code assuming that a buffer had been overwritten when it hadn't been.
+ Also, there were a few places in which security-relevant calls of other
+ system library functions did not check for failure.
+ </para>
+
+ <para>
+ It remains possible that some calls of the <function>*printf()</>
+ family of functions are vulnerable to information disclosure if an
+ out-of-memory error occurs at just the wrong time. We judge the risk
+ to not be large, but will continue analysis in this area.
+ (CVE-2015-3166)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <filename>contrib/pgcrypto</>, uniformly report decryption failures
+ as <quote>Wrong key or corrupt data</> (Noah Misch)
+ </para>
+
+ <para>
+ Previously, some cases of decryption with an incorrect key could report
+ other error message texts. It has been shown that such variance in
+ error reports can aid attackers in recovering keys from other systems.
+ While it's unknown whether <filename>pgcrypto</>'s specific behaviors
+ are likewise exploitable, it seems better to avoid the risk by using a
+ one-size-fits-all message.
+ (CVE-2015-3167)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect checking of deferred exclusion constraints after a HOT
+ update (Tom Lane)
+ </para>
+
+ <para>
+ If a new row that potentially violates a deferred exclusion constraint
+ is HOT-updated (that is, no indexed columns change and the row can be
+ stored back onto the same table page) later in the same transaction,
+ the exclusion constraint would be reported as violated when the check
+ finally occurred, even if the row(s) the new row originally conflicted
+ with had been deleted.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Prevent improper reordering of antijoins (NOT EXISTS joins) versus
+ other outer joins (Tom Lane)
+ </para>
+
+ <para>
+ This oversight in the planner has been observed to cause <quote>could
+ not find RelOptInfo for given relids</> errors, but it seems possible
+ that sometimes an incorrect query plan might get past that consistency
+ check and result in silently-wrong query output.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect matching of subexpressions in outer-join plan nodes
+ (Tom Lane)
+ </para>
+
+ <para>
+ Previously, if textually identical non-strict subexpressions were used
+ both above and below an outer join, the planner might try to re-use
+ the value computed below the join, which would be incorrect because the
+ executor would force the value to NULL in case of an unmatched outer row.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix GEQO planner to cope with failure of its join order heuristic
+ (Tom Lane)
+ </para>
+
+ <para>
+ This oversight has been seen to lead to <quote>failed to join all
+ relations together</> errors in queries involving <literal>LATERAL</>,
+ and that might happen in other cases as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix possible deadlock at startup
+ when <literal>max_prepared_transactions</> is too small
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Don't archive useless preallocated WAL files after a timeline switch
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Avoid <quote>cannot GetMultiXactIdMembers() during recovery</> error
+ (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Recursively <function>fsync()</> the data directory after a crash
+ (Abhijit Menon-Sen, Robert Haas)
+ </para>
+
+ <para>
+ This ensures consistency if another crash occurs shortly later. (The
+ second crash would have to be a system-level crash, not just a database
+ crash, for there to be a problem.)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix autovacuum launcher's possible failure to shut down, if an error
+ occurs after it receives SIGTERM (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Cope with unexpected signals in <function>LockBufferForCleanup()</>
+ (Andres Freund)
+ </para>
+
+ <para>
+ This oversight could result in spurious errors about <quote>multiple
+ backends attempting to wait for pincount 1</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Avoid waiting for WAL flush or synchronous replication during commit of
+ a transaction that was read-only so far as the user is concerned
+ (Andres Freund)
+ </para>
+
+ <para>
+ Previously, a delay could occur at commit in transactions that had
+ written WAL due to HOT page pruning, leading to undesirable effects
+ such as sessions getting stuck at startup if all synchronous replicas
+ are down. Sessions have also been observed to get stuck in catchup
+ interrupt processing when using synchronous replication; this will fix
+ that problem as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix crash when manipulating hash indexes on temporary tables
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix possible failure during hash index bucket split, if other processes
+ are modifying the index concurrently (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Check for interrupts while analyzing index expressions (Jeff Janes)
+ </para>
+
+ <para>
+ <command>ANALYZE</> executes index expressions many times; if there are
+ slow functions in such an expression, it's desirable to be able to
+ cancel the <command>ANALYZE</> before that loop finishes.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Add the name of the target server to object description strings for
+ foreign-server user mappings (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Recommend setting <literal>include_realm</> to 1 when using
+ Kerberos/GSSAPI/SSPI authentication (Stephen Frost)
+ </para>
+
+ <para>
+ Without this, identically-named users from different realms cannot be
+ distinguished. For the moment this is only a documentation change, but
+ it will become the default setting in <productname>PostgreSQL</> 9.5.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Remove code for matching IPv4 <filename>pg_hba.conf</> entries to
+ IPv4-in-IPv6 addresses (Tom Lane)
+ </para>
+
+ <para>
+ This hack was added in 2003 in response to a report that some Linux
+ kernels of the time would report IPv4 connections as having
+ IPv4-in-IPv6 addresses. However, the logic was accidentally broken in
+ 9.0. The lack of any field complaints since then shows that it's not
+ needed anymore. Now we have reports that the broken code causes
+ crashes on some systems, so let's just remove it rather than fix it.
+ (Had we chosen to fix it, that would make for a subtle and potentially
+ security-sensitive change in the effective meaning of
+ IPv4 <filename>pg_hba.conf</> entries, which does not seem like a good
+ thing to do in minor releases.)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ While shutting down service on Windows, periodically send status
+ updates to the Service Control Manager to prevent it from killing the
+ service too soon; and ensure that <application>pg_ctl</> will wait for
+ shutdown (Krystian Bigaj)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Reduce risk of network deadlock when using <application>libpq</>'s
+ non-blocking mode (Heikki Linnakangas)
+ </para>
+
+ <para>
+ When sending large volumes of data, it's important to drain the input
+ buffer every so often, in case the server has sent enough response data
+ to cause it to block on output. (A typical scenario is that the server
+ is sending a stream of NOTICE messages during <literal>COPY FROM
+ STDIN</>.) This worked properly in the normal blocking mode, but not
+ so much in non-blocking mode. We've modified <application>libpq</>
+ to opportunistically drain input when it can, but a full defense
+ against this problem requires application cooperation: the application
+ should watch for socket read-ready as well as write-ready conditions,
+ and be sure to call <function>PQconsumeInput()</> upon read-ready.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix array handling in <application>ecpg</> (Michael Meskes)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <application>psql</> to sanely handle URIs and conninfo strings as
+ the first parameter to <command>\connect</>
+ (David Fetter, Andrew Dunstan, &Aacute;lvaro Herrera)
+ </para>
+
+ <para>
+ This syntax has been accepted (but undocumented) for a long time, but
+ previously some parameters might be taken from the old connection
+ instead of the given string, which was agreed to be undesirable.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Suppress incorrect complaints from <application>psql</> on some
+ platforms that it failed to write <filename>~/.psql_history</> at exit
+ (Tom Lane)
+ </para>
+
+ <para>
+ This misbehavior was caused by a workaround for a bug in very old
+ (pre-2006) versions of <application>libedit</>. We fixed it by
+ removing the workaround, which will cause a similar failure to appear
+ for anyone still using such versions of <application>libedit</>.
+ Recommendation: upgrade that library, or use <application>libreadline</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <application>pg_dump</>'s rule for deciding which casts are
+ system-provided casts that should not be dumped (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix dumping of views that are just <literal>VALUES(...)</> but have
+ column aliases (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, force timeline 1 in the new cluster
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This change prevents upgrade failures caused by bogus complaints about
+ missing WAL history files.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, check for improperly non-connectable
+ databases before proceeding
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, quote directory paths
+ properly in the generated <literal>delete_old_cluster</> script
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, preserve database-level freezing info
+ properly
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This oversight could cause missing-clog-file errors for tables within
+ the <literal>postgres</> and <literal>template1</> databases.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Run <application>pg_upgrade</> and <application>pg_resetxlog</> with
+ restricted privileges on Windows, so that they don't fail when run by
+ an administrator (Muhammad Asif Naeem)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix slow sorting algorithm in <filename>contrib/intarray</> (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix compile failure on Sparc V8 machines (Rob Rowan)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Update time zone data files to <application>tzdata</> release 2015d
+ for DST law changes in Egypt, Mongolia, and Palestine, plus historical
+ changes in Canada and Chile. Also adopt revised zone abbreviations for
+ the America/Adak zone (HST/HDT not HAST/HADT).
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
<sect1 id="release-9-0-19">
<title>Release 9.0.19</title>
diff --git a/doc/src/sgml/release-9.1.sgml b/doc/src/sgml/release-9.1.sgml
index f5849ff8fa..8306cfab03 100644
--- a/doc/src/sgml/release-9.1.sgml
+++ b/doc/src/sgml/release-9.1.sgml
@@ -1,6 +1,620 @@
<!-- doc/src/sgml/release-9.1.sgml -->
<!-- See header comment in release.sgml about typical markup -->
+ <sect1 id="release-9-1-17">
+ <title>Release 9.1.17</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-06-04</simpara>
+ </note>
+
+ <para>
+ This release contains a small number of fixes from 9.1.16.
+ For information about new features in the 9.1 major release, see
+ <xref linkend="release-9-1">.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.1.17</title>
+
+ <para>
+ A dump/restore is not required for those running 9.1.X.
+ </para>
+
+ <para>
+ However, if you are upgrading from a version earlier than 9.1.16,
+ see <xref linkend="release-9-1-16">.
+ </para>
+
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+ <listitem>
+ <para>
+ Avoid failures while <function>fsync</>'ing data directory during
+ crash restart (Abhijit Menon-Sen, Tom Lane)
+ </para>
+
+ <para>
+ In the previous minor releases we added a patch to <function>fsync</>
+ everything in the data directory after a crash. Unfortunately its
+ response to any error condition was to fail, thereby preventing the
+ server from starting up, even when the problem was quite harmless.
+ An example is that an unwritable file in the data directory would
+ prevent restart on some platforms; but it is common to make SSL
+ certificate files unwritable by the server. Revise this behavior so
+ that permissions failures are ignored altogether, and other types of
+ failures are logged but do not prevent continuing.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Remove <application>configure</>'s check prohibiting linking to a
+ threaded <application>libpython</>
+ on <systemitem class="osname">OpenBSD</> (Tom Lane)
+ </para>
+
+ <para>
+ The failure this restriction was meant to prevent seems to not be a
+ problem anymore on current <systemitem class="osname">OpenBSD</>
+ versions.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Allow <application>libpq</> to use TLS protocol versions beyond v1
+ (Noah Misch)
+ </para>
+
+ <para>
+ For a long time, <application>libpq</> was coded so that the only SSL
+ protocol it would allow was TLS v1. Now that newer TLS versions are
+ becoming popular, allow it to negotiate the highest commonly-supported
+ TLS version with the server. (<productname>PostgreSQL</> servers were
+ already capable of such negotiation, so no change is needed on the
+ server side.) This is a back-patch of a change already released in
+ 9.4.0.
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
+ <sect1 id="release-9-1-16">
+ <title>Release 9.1.16</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-05-22</simpara>
+ </note>
+
+ <para>
+ This release contains a variety of fixes from 9.1.15.
+ For information about new features in the 9.1 major release, see
+ <xref linkend="release-9-1">.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.1.16</title>
+
+ <para>
+ A dump/restore is not required for those running 9.1.X.
+ </para>
+
+ <para>
+ However, if you use <filename>contrib/citext</>'s
+ <function>regexp_matches()</> functions, see the changelog entry below
+ about that.
+ </para>
+
+ <para>
+ Also, if you are upgrading from a version earlier than 9.1.14,
+ see <xref linkend="release-9-1-14">.
+ </para>
+
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+ <listitem>
+ <para>
+ Avoid possible crash when client disconnects just before the
+ authentication timeout expires (Benkocs Norbert Attila)
+ </para>
+
+ <para>
+ If the timeout interrupt fired partway through the session shutdown
+ sequence, SSL-related state would be freed twice, typically causing a
+ crash and hence denial of service to other sessions. Experimentation
+ shows that an unauthenticated remote attacker could trigger the bug
+ somewhat consistently, hence treat as security issue.
+ (CVE-2015-3165)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Improve detection of system-call failures (Noah Misch)
+ </para>
+
+ <para>
+ Our replacement implementation of <function>snprintf()</> failed to
+ check for errors reported by the underlying system library calls;
+ the main case that might be missed is out-of-memory situations.
+ In the worst case this might lead to information exposure, due to our
+ code assuming that a buffer had been overwritten when it hadn't been.
+ Also, there were a few places in which security-relevant calls of other
+ system library functions did not check for failure.
+ </para>
+
+ <para>
+ It remains possible that some calls of the <function>*printf()</>
+ family of functions are vulnerable to information disclosure if an
+ out-of-memory error occurs at just the wrong time. We judge the risk
+ to not be large, but will continue analysis in this area.
+ (CVE-2015-3166)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <filename>contrib/pgcrypto</>, uniformly report decryption failures
+ as <quote>Wrong key or corrupt data</> (Noah Misch)
+ </para>
+
+ <para>
+ Previously, some cases of decryption with an incorrect key could report
+ other error message texts. It has been shown that such variance in
+ error reports can aid attackers in recovering keys from other systems.
+ While it's unknown whether <filename>pgcrypto</>'s specific behaviors
+ are likewise exploitable, it seems better to avoid the risk by using a
+ one-size-fits-all message.
+ (CVE-2015-3167)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect declaration of <filename>contrib/citext</>'s
+ <function>regexp_matches()</> functions (Tom Lane)
+ </para>
+
+ <para>
+ These functions should return <type>setof text[]</>, like the core
+ functions they are wrappers for; but they were incorrectly declared as
+ returning just <type>text[]</>. This mistake had two results: first,
+ if there was no match you got a scalar null result, whereas what you
+ should get is an empty set (zero rows). Second, the <literal>g</> flag
+ was effectively ignored, since you would get only one result array even
+ if there were multiple matches.
+ </para>
+
+ <para>
+ While the latter behavior is clearly a bug, there might be applications
+ depending on the former behavior; therefore the function declarations
+ will not be changed by default until <productname>PostgreSQL</> 9.5.
+ In pre-9.5 branches, the old behavior exists in version 1.0 of
+ the <literal>citext</> extension, while we have provided corrected
+ declarations in version 1.1 (which is <emphasis>not</> installed by
+ default). To adopt the fix in pre-9.5 branches, execute
+ <literal>ALTER EXTENSION citext UPDATE TO '1.1'</> in each database in
+ which <literal>citext</> is installed. (You can also <quote>update</>
+ back to 1.0 if you need to undo that.) Be aware that either update
+ direction will require dropping and recreating any views or rules that
+ use <filename>citext</>'s <function>regexp_matches()</> functions.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect checking of deferred exclusion constraints after a HOT
+ update (Tom Lane)
+ </para>
+
+ <para>
+ If a new row that potentially violates a deferred exclusion constraint
+ is HOT-updated (that is, no indexed columns change and the row can be
+ stored back onto the same table page) later in the same transaction,
+ the exclusion constraint would be reported as violated when the check
+ finally occurred, even if the row(s) the new row originally conflicted
+ with had been deleted.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Prevent improper reordering of antijoins (NOT EXISTS joins) versus
+ other outer joins (Tom Lane)
+ </para>
+
+ <para>
+ This oversight in the planner has been observed to cause <quote>could
+ not find RelOptInfo for given relids</> errors, but it seems possible
+ that sometimes an incorrect query plan might get past that consistency
+ check and result in silently-wrong query output.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect matching of subexpressions in outer-join plan nodes
+ (Tom Lane)
+ </para>
+
+ <para>
+ Previously, if textually identical non-strict subexpressions were used
+ both above and below an outer join, the planner might try to re-use
+ the value computed below the join, which would be incorrect because the
+ executor would force the value to NULL in case of an unmatched outer row.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix GEQO planner to cope with failure of its join order heuristic
+ (Tom Lane)
+ </para>
+
+ <para>
+ This oversight has been seen to lead to <quote>failed to join all
+ relations together</> errors in queries involving <literal>LATERAL</>,
+ and that might happen in other cases as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix possible deadlock at startup
+ when <literal>max_prepared_transactions</> is too small
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Don't archive useless preallocated WAL files after a timeline switch
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Avoid <quote>cannot GetMultiXactIdMembers() during recovery</> error
+ (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Recursively <function>fsync()</> the data directory after a crash
+ (Abhijit Menon-Sen, Robert Haas)
+ </para>
+
+ <para>
+ This ensures consistency if another crash occurs shortly later. (The
+ second crash would have to be a system-level crash, not just a database
+ crash, for there to be a problem.)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix autovacuum launcher's possible failure to shut down, if an error
+ occurs after it receives SIGTERM (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Cope with unexpected signals in <function>LockBufferForCleanup()</>
+ (Andres Freund)
+ </para>
+
+ <para>
+ This oversight could result in spurious errors about <quote>multiple
+ backends attempting to wait for pincount 1</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Avoid waiting for WAL flush or synchronous replication during commit of
+ a transaction that was read-only so far as the user is concerned
+ (Andres Freund)
+ </para>
+
+ <para>
+ Previously, a delay could occur at commit in transactions that had
+ written WAL due to HOT page pruning, leading to undesirable effects
+ such as sessions getting stuck at startup if all synchronous replicas
+ are down. Sessions have also been observed to get stuck in catchup
+ interrupt processing when using synchronous replication; this will fix
+ that problem as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix crash when manipulating hash indexes on temporary tables
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix possible failure during hash index bucket split, if other processes
+ are modifying the index concurrently (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Check for interrupts while analyzing index expressions (Jeff Janes)
+ </para>
+
+ <para>
+ <command>ANALYZE</> executes index expressions many times; if there are
+ slow functions in such an expression, it's desirable to be able to
+ cancel the <command>ANALYZE</> before that loop finishes.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Ensure <structfield>tableoid</> of a foreign table is reported
+ correctly when a <literal>READ COMMITTED</> recheck occurs after
+ locking rows in <command>SELECT FOR UPDATE</>, <command>UPDATE</>,
+ or <command>DELETE</> (Etsuro Fujita)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Add the name of the target server to object description strings for
+ foreign-server user mappings (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Recommend setting <literal>include_realm</> to 1 when using
+ Kerberos/GSSAPI/SSPI authentication (Stephen Frost)
+ </para>
+
+ <para>
+ Without this, identically-named users from different realms cannot be
+ distinguished. For the moment this is only a documentation change, but
+ it will become the default setting in <productname>PostgreSQL</> 9.5.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Remove code for matching IPv4 <filename>pg_hba.conf</> entries to
+ IPv4-in-IPv6 addresses (Tom Lane)
+ </para>
+
+ <para>
+ This hack was added in 2003 in response to a report that some Linux
+ kernels of the time would report IPv4 connections as having
+ IPv4-in-IPv6 addresses. However, the logic was accidentally broken in
+ 9.0. The lack of any field complaints since then shows that it's not
+ needed anymore. Now we have reports that the broken code causes
+ crashes on some systems, so let's just remove it rather than fix it.
+ (Had we chosen to fix it, that would make for a subtle and potentially
+ security-sensitive change in the effective meaning of
+ IPv4 <filename>pg_hba.conf</> entries, which does not seem like a good
+ thing to do in minor releases.)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Report WAL flush, not insert, position in <literal>IDENTIFY_SYSTEM</>
+ replication command (Heikki Linnakangas)
+ </para>
+
+ <para>
+ This avoids a possible startup failure
+ in <application>pg_receivexlog</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ While shutting down service on Windows, periodically send status
+ updates to the Service Control Manager to prevent it from killing the
+ service too soon; and ensure that <application>pg_ctl</> will wait for
+ shutdown (Krystian Bigaj)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Reduce risk of network deadlock when using <application>libpq</>'s
+ non-blocking mode (Heikki Linnakangas)
+ </para>
+
+ <para>
+ When sending large volumes of data, it's important to drain the input
+ buffer every so often, in case the server has sent enough response data
+ to cause it to block on output. (A typical scenario is that the server
+ is sending a stream of NOTICE messages during <literal>COPY FROM
+ STDIN</>.) This worked properly in the normal blocking mode, but not
+ so much in non-blocking mode. We've modified <application>libpq</>
+ to opportunistically drain input when it can, but a full defense
+ against this problem requires application cooperation: the application
+ should watch for socket read-ready as well as write-ready conditions,
+ and be sure to call <function>PQconsumeInput()</> upon read-ready.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix array handling in <application>ecpg</> (Michael Meskes)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <application>psql</> to sanely handle URIs and conninfo strings as
+ the first parameter to <command>\connect</>
+ (David Fetter, Andrew Dunstan, &Aacute;lvaro Herrera)
+ </para>
+
+ <para>
+ This syntax has been accepted (but undocumented) for a long time, but
+ previously some parameters might be taken from the old connection
+ instead of the given string, which was agreed to be undesirable.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Suppress incorrect complaints from <application>psql</> on some
+ platforms that it failed to write <filename>~/.psql_history</> at exit
+ (Tom Lane)
+ </para>
+
+ <para>
+ This misbehavior was caused by a workaround for a bug in very old
+ (pre-2006) versions of <application>libedit</>. We fixed it by
+ removing the workaround, which will cause a similar failure to appear
+ for anyone still using such versions of <application>libedit</>.
+ Recommendation: upgrade that library, or use <application>libreadline</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <application>pg_dump</>'s rule for deciding which casts are
+ system-provided casts that should not be dumped (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_dump</>, fix failure to honor <literal>-Z</>
+ compression level option together with <literal>-Fd</>
+ (Michael Paquier)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Make <application>pg_dump</> consider foreign key relationships
+ between extension configuration tables while choosing dump order
+ (Gilles Darold, Michael Paquier, Stephen Frost)
+ </para>
+
+ <para>
+ This oversight could result in producing dumps that fail to reload
+ because foreign key constraints are transiently violated.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix dumping of views that are just <literal>VALUES(...)</> but have
+ column aliases (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, force timeline 1 in the new cluster
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This change prevents upgrade failures caused by bogus complaints about
+ missing WAL history files.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, check for improperly non-connectable
+ databases before proceeding
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, quote directory paths
+ properly in the generated <literal>delete_old_cluster</> script
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, preserve database-level freezing info
+ properly
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This oversight could cause missing-clog-file errors for tables within
+ the <literal>postgres</> and <literal>template1</> databases.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Run <application>pg_upgrade</> and <application>pg_resetxlog</> with
+ restricted privileges on Windows, so that they don't fail when run by
+ an administrator (Muhammad Asif Naeem)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Improve handling of <function>readdir()</> failures when scanning
+ directories in <application>initdb</> and <application>pg_basebackup</>
+ (Marco Nenciarini)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix slow sorting algorithm in <filename>contrib/intarray</> (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix compile failure on Sparc V8 machines (Rob Rowan)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Update time zone data files to <application>tzdata</> release 2015d
+ for DST law changes in Egypt, Mongolia, and Palestine, plus historical
+ changes in Canada and Chile. Also adopt revised zone abbreviations for
+ the America/Adak zone (HST/HDT not HAST/HADT).
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
<sect1 id="release-9-1-15">
<title>Release 9.1.15</title>
diff --git a/doc/src/sgml/release-9.2.sgml b/doc/src/sgml/release-9.2.sgml
index 7bdbd89ae9..ef4ce98e53 100644
--- a/doc/src/sgml/release-9.2.sgml
+++ b/doc/src/sgml/release-9.2.sgml
@@ -1,6 +1,683 @@
<!-- doc/src/sgml/release-9.2.sgml -->
<!-- See header comment in release.sgml about typical markup -->
+ <sect1 id="release-9-2-12">
+ <title>Release 9.2.12</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-06-04</simpara>
+ </note>
+
+ <para>
+ This release contains a small number of fixes from 9.2.11.
+ For information about new features in the 9.2 major release, see
+ <xref linkend="release-9-2">.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.2.12</title>
+
+ <para>
+ A dump/restore is not required for those running 9.2.X.
+ </para>
+
+ <para>
+ However, if you are upgrading from a version earlier than 9.2.11,
+ see <xref linkend="release-9-2-11">.
+ </para>
+
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+ <listitem>
+ <para>
+ Avoid failures while <function>fsync</>'ing data directory during
+ crash restart (Abhijit Menon-Sen, Tom Lane)
+ </para>
+
+ <para>
+ In the previous minor releases we added a patch to <function>fsync</>
+ everything in the data directory after a crash. Unfortunately its
+ response to any error condition was to fail, thereby preventing the
+ server from starting up, even when the problem was quite harmless.
+ An example is that an unwritable file in the data directory would
+ prevent restart on some platforms; but it is common to make SSL
+ certificate files unwritable by the server. Revise this behavior so
+ that permissions failures are ignored altogether, and other types of
+ failures are logged but do not prevent continuing.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <function>pg_get_functiondef()</> to show
+ functions' <literal>LEAKPROOF</> property, if set (Jeevan Chalke)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Remove <application>configure</>'s check prohibiting linking to a
+ threaded <application>libpython</>
+ on <systemitem class="osname">OpenBSD</> (Tom Lane)
+ </para>
+
+ <para>
+ The failure this restriction was meant to prevent seems to not be a
+ problem anymore on current <systemitem class="osname">OpenBSD</>
+ versions.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Allow <application>libpq</> to use TLS protocol versions beyond v1
+ (Noah Misch)
+ </para>
+
+ <para>
+ For a long time, <application>libpq</> was coded so that the only SSL
+ protocol it would allow was TLS v1. Now that newer TLS versions are
+ becoming popular, allow it to negotiate the highest commonly-supported
+ TLS version with the server. (<productname>PostgreSQL</> servers were
+ already capable of such negotiation, so no change is needed on the
+ server side.) This is a back-patch of a change already released in
+ 9.4.0.
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
+ <sect1 id="release-9-2-11">
+ <title>Release 9.2.11</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-05-22</simpara>
+ </note>
+
+ <para>
+ This release contains a variety of fixes from 9.2.10.
+ For information about new features in the 9.2 major release, see
+ <xref linkend="release-9-2">.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.2.11</title>
+
+ <para>
+ A dump/restore is not required for those running 9.2.X.
+ </para>
+
+ <para>
+ However, if you use <filename>contrib/citext</>'s
+ <function>regexp_matches()</> functions, see the changelog entry below
+ about that.
+ </para>
+
+ <para>
+ Also, if you are upgrading from a version earlier than 9.2.10,
+ see <xref linkend="release-9-2-10">.
+ </para>
+
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+ <listitem>
+ <para>
+ Avoid possible crash when client disconnects just before the
+ authentication timeout expires (Benkocs Norbert Attila)
+ </para>
+
+ <para>
+ If the timeout interrupt fired partway through the session shutdown
+ sequence, SSL-related state would be freed twice, typically causing a
+ crash and hence denial of service to other sessions. Experimentation
+ shows that an unauthenticated remote attacker could trigger the bug
+ somewhat consistently, hence treat as security issue.
+ (CVE-2015-3165)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Improve detection of system-call failures (Noah Misch)
+ </para>
+
+ <para>
+ Our replacement implementation of <function>snprintf()</> failed to
+ check for errors reported by the underlying system library calls;
+ the main case that might be missed is out-of-memory situations.
+ In the worst case this might lead to information exposure, due to our
+ code assuming that a buffer had been overwritten when it hadn't been.
+ Also, there were a few places in which security-relevant calls of other
+ system library functions did not check for failure.
+ </para>
+
+ <para>
+ It remains possible that some calls of the <function>*printf()</>
+ family of functions are vulnerable to information disclosure if an
+ out-of-memory error occurs at just the wrong time. We judge the risk
+ to not be large, but will continue analysis in this area.
+ (CVE-2015-3166)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <filename>contrib/pgcrypto</>, uniformly report decryption failures
+ as <quote>Wrong key or corrupt data</> (Noah Misch)
+ </para>
+
+ <para>
+ Previously, some cases of decryption with an incorrect key could report
+ other error message texts. It has been shown that such variance in
+ error reports can aid attackers in recovering keys from other systems.
+ While it's unknown whether <filename>pgcrypto</>'s specific behaviors
+ are likewise exploitable, it seems better to avoid the risk by using a
+ one-size-fits-all message.
+ (CVE-2015-3167)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect declaration of <filename>contrib/citext</>'s
+ <function>regexp_matches()</> functions (Tom Lane)
+ </para>
+
+ <para>
+ These functions should return <type>setof text[]</>, like the core
+ functions they are wrappers for; but they were incorrectly declared as
+ returning just <type>text[]</>. This mistake had two results: first,
+ if there was no match you got a scalar null result, whereas what you
+ should get is an empty set (zero rows). Second, the <literal>g</> flag
+ was effectively ignored, since you would get only one result array even
+ if there were multiple matches.
+ </para>
+
+ <para>
+ While the latter behavior is clearly a bug, there might be applications
+ depending on the former behavior; therefore the function declarations
+ will not be changed by default until <productname>PostgreSQL</> 9.5.
+ In pre-9.5 branches, the old behavior exists in version 1.0 of
+ the <literal>citext</> extension, while we have provided corrected
+ declarations in version 1.1 (which is <emphasis>not</> installed by
+ default). To adopt the fix in pre-9.5 branches, execute
+ <literal>ALTER EXTENSION citext UPDATE TO '1.1'</> in each database in
+ which <literal>citext</> is installed. (You can also <quote>update</>
+ back to 1.0 if you need to undo that.) Be aware that either update
+ direction will require dropping and recreating any views or rules that
+ use <filename>citext</>'s <function>regexp_matches()</> functions.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect checking of deferred exclusion constraints after a HOT
+ update (Tom Lane)
+ </para>
+
+ <para>
+ If a new row that potentially violates a deferred exclusion constraint
+ is HOT-updated (that is, no indexed columns change and the row can be
+ stored back onto the same table page) later in the same transaction,
+ the exclusion constraint would be reported as violated when the check
+ finally occurred, even if the row(s) the new row originally conflicted
+ with had been deleted.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix planning of star-schema-style queries (Tom Lane)
+ </para>
+
+ <para>
+ Sometimes, efficient scanning of a large table requires that index
+ parameters be provided from more than one other table (commonly,
+ dimension tables whose keys are needed to index a large fact table).
+ The planner should be able to find such plans, but an overly
+ restrictive search heuristic prevented it.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Prevent improper reordering of antijoins (NOT EXISTS joins) versus
+ other outer joins (Tom Lane)
+ </para>
+
+ <para>
+ This oversight in the planner has been observed to cause <quote>could
+ not find RelOptInfo for given relids</> errors, but it seems possible
+ that sometimes an incorrect query plan might get past that consistency
+ check and result in silently-wrong query output.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect matching of subexpressions in outer-join plan nodes
+ (Tom Lane)
+ </para>
+
+ <para>
+ Previously, if textually identical non-strict subexpressions were used
+ both above and below an outer join, the planner might try to re-use
+ the value computed below the join, which would be incorrect because the
+ executor would force the value to NULL in case of an unmatched outer row.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix GEQO planner to cope with failure of its join order heuristic
+ (Tom Lane)
+ </para>
+
+ <para>
+ This oversight has been seen to lead to <quote>failed to join all
+ relations together</> errors in queries involving <literal>LATERAL</>,
+ and that might happen in other cases as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix possible deadlock at startup
+ when <literal>max_prepared_transactions</> is too small
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Don't archive useless preallocated WAL files after a timeline switch
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+<!--
+Author: Alvaro Herrera <[email protected]>
+Branch: REL9_2_STABLE [97ff2a564] 2015-05-18 17:44:21 -0300
+Branch: REL9_1_STABLE [2360eea3b] 2015-05-18 17:44:21 -0300
+Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300
+-->
+
+ <listitem>
+ <para>
+ Avoid <quote>cannot GetMultiXactIdMembers() during recovery</> error
+ (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Recursively <function>fsync()</> the data directory after a crash
+ (Abhijit Menon-Sen, Robert Haas)
+ </para>
+
+ <para>
+ This ensures consistency if another crash occurs shortly later. (The
+ second crash would have to be a system-level crash, not just a database
+ crash, for there to be a problem.)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix autovacuum launcher's possible failure to shut down, if an error
+ occurs after it receives SIGTERM (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Cope with unexpected signals in <function>LockBufferForCleanup()</>
+ (Andres Freund)
+ </para>
+
+ <para>
+ This oversight could result in spurious errors about <quote>multiple
+ backends attempting to wait for pincount 1</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix crash when doing <literal>COPY IN</> to a table with check
+ constraints that contain whole-row references (Tom Lane)
+ </para>
+
+ <para>
+ The known failure case only crashes in 9.4 and up, but there is very
+ similar code in 9.3 and 9.2, so back-patch those branches as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Avoid waiting for WAL flush or synchronous replication during commit of
+ a transaction that was read-only so far as the user is concerned
+ (Andres Freund)
+ </para>
+
+ <para>
+ Previously, a delay could occur at commit in transactions that had
+ written WAL due to HOT page pruning, leading to undesirable effects
+ such as sessions getting stuck at startup if all synchronous replicas
+ are down. Sessions have also been observed to get stuck in catchup
+ interrupt processing when using synchronous replication; this will fix
+ that problem as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix crash when manipulating hash indexes on temporary tables
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix possible failure during hash index bucket split, if other processes
+ are modifying the index concurrently (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Check for interrupts while analyzing index expressions (Jeff Janes)
+ </para>
+
+ <para>
+ <command>ANALYZE</> executes index expressions many times; if there are
+ slow functions in such an expression, it's desirable to be able to
+ cancel the <command>ANALYZE</> before that loop finishes.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Ensure <structfield>tableoid</> of a foreign table is reported
+ correctly when a <literal>READ COMMITTED</> recheck occurs after
+ locking rows in <command>SELECT FOR UPDATE</>, <command>UPDATE</>,
+ or <command>DELETE</> (Etsuro Fujita)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Add the name of the target server to object description strings for
+ foreign-server user mappings (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Recommend setting <literal>include_realm</> to 1 when using
+ Kerberos/GSSAPI/SSPI authentication (Stephen Frost)
+ </para>
+
+ <para>
+ Without this, identically-named users from different realms cannot be
+ distinguished. For the moment this is only a documentation change, but
+ it will become the default setting in <productname>PostgreSQL</> 9.5.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Remove code for matching IPv4 <filename>pg_hba.conf</> entries to
+ IPv4-in-IPv6 addresses (Tom Lane)
+ </para>
+
+ <para>
+ This hack was added in 2003 in response to a report that some Linux
+ kernels of the time would report IPv4 connections as having
+ IPv4-in-IPv6 addresses. However, the logic was accidentally broken in
+ 9.0. The lack of any field complaints since then shows that it's not
+ needed anymore. Now we have reports that the broken code causes
+ crashes on some systems, so let's just remove it rather than fix it.
+ (Had we chosen to fix it, that would make for a subtle and potentially
+ security-sensitive change in the effective meaning of
+ IPv4 <filename>pg_hba.conf</> entries, which does not seem like a good
+ thing to do in minor releases.)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Report WAL flush, not insert, position in <literal>IDENTIFY_SYSTEM</>
+ replication command (Heikki Linnakangas)
+ </para>
+
+ <para>
+ This avoids a possible startup failure
+ in <application>pg_receivexlog</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ While shutting down service on Windows, periodically send status
+ updates to the Service Control Manager to prevent it from killing the
+ service too soon; and ensure that <application>pg_ctl</> will wait for
+ shutdown (Krystian Bigaj)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Reduce risk of network deadlock when using <application>libpq</>'s
+ non-blocking mode (Heikki Linnakangas)
+ </para>
+
+ <para>
+ When sending large volumes of data, it's important to drain the input
+ buffer every so often, in case the server has sent enough response data
+ to cause it to block on output. (A typical scenario is that the server
+ is sending a stream of NOTICE messages during <literal>COPY FROM
+ STDIN</>.) This worked properly in the normal blocking mode, but not
+ so much in non-blocking mode. We've modified <application>libpq</>
+ to opportunistically drain input when it can, but a full defense
+ against this problem requires application cooperation: the application
+ should watch for socket read-ready as well as write-ready conditions,
+ and be sure to call <function>PQconsumeInput()</> upon read-ready.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>libpq</>, fix misparsing of empty values in URI
+ connection strings (Thomas Fanghaenel)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix array handling in <application>ecpg</> (Michael Meskes)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <application>psql</> to sanely handle URIs and conninfo strings as
+ the first parameter to <command>\connect</>
+ (David Fetter, Andrew Dunstan, &Aacute;lvaro Herrera)
+ </para>
+
+ <para>
+ This syntax has been accepted (but undocumented) for a long time, but
+ previously some parameters might be taken from the old connection
+ instead of the given string, which was agreed to be undesirable.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Suppress incorrect complaints from <application>psql</> on some
+ platforms that it failed to write <filename>~/.psql_history</> at exit
+ (Tom Lane)
+ </para>
+
+ <para>
+ This misbehavior was caused by a workaround for a bug in very old
+ (pre-2006) versions of <application>libedit</>. We fixed it by
+ removing the workaround, which will cause a similar failure to appear
+ for anyone still using such versions of <application>libedit</>.
+ Recommendation: upgrade that library, or use <application>libreadline</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <application>pg_dump</>'s rule for deciding which casts are
+ system-provided casts that should not be dumped (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_dump</>, fix failure to honor <literal>-Z</>
+ compression level option together with <literal>-Fd</>
+ (Michael Paquier)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Make <application>pg_dump</> consider foreign key relationships
+ between extension configuration tables while choosing dump order
+ (Gilles Darold, Michael Paquier, Stephen Frost)
+ </para>
+
+ <para>
+ This oversight could result in producing dumps that fail to reload
+ because foreign key constraints are transiently violated.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix dumping of views that are just <literal>VALUES(...)</> but have
+ column aliases (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, force timeline 1 in the new cluster
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This change prevents upgrade failures caused by bogus complaints about
+ missing WAL history files.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, check for improperly non-connectable
+ databases before proceeding
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, quote directory paths
+ properly in the generated <literal>delete_old_cluster</> script
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, preserve database-level freezing info
+ properly
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This oversight could cause missing-clog-file errors for tables within
+ the <literal>postgres</> and <literal>template1</> databases.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Run <application>pg_upgrade</> and <application>pg_resetxlog</> with
+ restricted privileges on Windows, so that they don't fail when run by
+ an administrator (Muhammad Asif Naeem)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Improve handling of <function>readdir()</> failures when scanning
+ directories in <application>initdb</> and <application>pg_basebackup</>
+ (Marco Nenciarini)
+ </para>
+ </listitem>
+
+<!--
+Author: Andres Freund <[email protected]>
+Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100
+-->
+
+ <listitem>
+ <para>
+ Fix failure in <application>pg_receivexlog</> (Andres Freund)
+ </para>
+
+ <para>
+ A patch merge mistake in 9.2.10 led to <quote>could not create archive
+ status file</> errors.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix slow sorting algorithm in <filename>contrib/intarray</> (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix compile failure on Sparc V8 machines (Rob Rowan)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Update time zone data files to <application>tzdata</> release 2015d
+ for DST law changes in Egypt, Mongolia, and Palestine, plus historical
+ changes in Canada and Chile. Also adopt revised zone abbreviations for
+ the America/Adak zone (HST/HDT not HAST/HADT).
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
<sect1 id="release-9-2-10">
<title>Release 9.2.10</title>
diff --git a/doc/src/sgml/release-9.3.sgml b/doc/src/sgml/release-9.3.sgml
index b4fa3845d3..8f1bc7e147 100644
--- a/doc/src/sgml/release-9.3.sgml
+++ b/doc/src/sgml/release-9.3.sgml
@@ -1,6 +1,704 @@
<!-- doc/src/sgml/release-9.3.sgml -->
<!-- See header comment in release.sgml about typical markup -->
+ <sect1 id="release-9-3-8">
+ <title>Release 9.3.8</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-06-04</simpara>
+ </note>
+
+ <para>
+ This release contains a small number of fixes from 9.3.7.
+ For information about new features in the 9.3 major release, see
+ <xref linkend="release-9-3">.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.3.8</title>
+
+ <para>
+ A dump/restore is not required for those running 9.3.X.
+ </para>
+
+ <para>
+ However, if you are upgrading from a version earlier than 9.3.7,
+ see <xref linkend="release-9-3-7">.
+ </para>
+
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+ <listitem>
+ <para>
+ Avoid failures while <function>fsync</>'ing data directory during
+ crash restart (Abhijit Menon-Sen, Tom Lane)
+ </para>
+
+ <para>
+ In the previous minor releases we added a patch to <function>fsync</>
+ everything in the data directory after a crash. Unfortunately its
+ response to any error condition was to fail, thereby preventing the
+ server from starting up, even when the problem was quite harmless.
+ An example is that an unwritable file in the data directory would
+ prevent restart on some platforms; but it is common to make SSL
+ certificate files unwritable by the server. Revise this behavior so
+ that permissions failures are ignored altogether, and other types of
+ failures are logged but do not prevent continuing.
+ </para>
+
+ <para>
+ Also apply the same rules in <literal>initdb --sync-only</>.
+ This case is less critical but it should act similarly.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <function>pg_get_functiondef()</> to show
+ functions' <literal>LEAKPROOF</> property, if set (Jeevan Chalke)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Remove <application>configure</>'s check prohibiting linking to a
+ threaded <application>libpython</>
+ on <systemitem class="osname">OpenBSD</> (Tom Lane)
+ </para>
+
+ <para>
+ The failure this restriction was meant to prevent seems to not be a
+ problem anymore on current <systemitem class="osname">OpenBSD</>
+ versions.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: REL9_3_STABLE [c6b7b9a9c] 2015-05-21 20:41:55 -0400
+Branch: REL9_2_STABLE [b78fbfe65] 2015-05-21 20:41:55 -0400
+Branch: REL9_1_STABLE [2c2c5f0e0] 2015-05-21 20:41:55 -0400
+Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400
+-->
+
+ <listitem>
+ <para>
+ Allow <application>libpq</> to use TLS protocol versions beyond v1
+ (Noah Misch)
+ </para>
+
+ <para>
+ For a long time, <application>libpq</> was coded so that the only SSL
+ protocol it would allow was TLS v1. Now that newer TLS versions are
+ becoming popular, allow it to negotiate the highest commonly-supported
+ TLS version with the server. (<productname>PostgreSQL</> servers were
+ already capable of such negotiation, so no change is needed on the
+ server side.) This is a back-patch of a change already released in
+ 9.4.0.
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
+ <sect1 id="release-9-3-7">
+ <title>Release 9.3.7</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-05-22</simpara>
+ </note>
+
+ <para>
+ This release contains a variety of fixes from 9.3.6.
+ For information about new features in the 9.3 major release, see
+ <xref linkend="release-9-3">.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.3.7</title>
+
+ <para>
+ A dump/restore is not required for those running 9.3.X.
+ </para>
+
+ <para>
+ However, if you use <filename>contrib/citext</>'s
+ <function>regexp_matches()</> functions, see the changelog entry below
+ about that.
+ </para>
+
+ <para>
+ Also, if you are upgrading from a version earlier than 9.3.6,
+ see <xref linkend="release-9-3-6">.
+ </para>
+
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+ <listitem>
+ <para>
+ Avoid possible crash when client disconnects just before the
+ authentication timeout expires (Benkocs Norbert Attila)
+ </para>
+
+ <para>
+ If the timeout interrupt fired partway through the session shutdown
+ sequence, SSL-related state would be freed twice, typically causing a
+ crash and hence denial of service to other sessions. Experimentation
+ shows that an unauthenticated remote attacker could trigger the bug
+ somewhat consistently, hence treat as security issue.
+ (CVE-2015-3165)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Improve detection of system-call failures (Noah Misch)
+ </para>
+
+ <para>
+ Our replacement implementation of <function>snprintf()</> failed to
+ check for errors reported by the underlying system library calls;
+ the main case that might be missed is out-of-memory situations.
+ In the worst case this might lead to information exposure, due to our
+ code assuming that a buffer had been overwritten when it hadn't been.
+ Also, there were a few places in which security-relevant calls of other
+ system library functions did not check for failure.
+ </para>
+
+ <para>
+ It remains possible that some calls of the <function>*printf()</>
+ family of functions are vulnerable to information disclosure if an
+ out-of-memory error occurs at just the wrong time. We judge the risk
+ to not be large, but will continue analysis in this area.
+ (CVE-2015-3166)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <filename>contrib/pgcrypto</>, uniformly report decryption failures
+ as <quote>Wrong key or corrupt data</> (Noah Misch)
+ </para>
+
+ <para>
+ Previously, some cases of decryption with an incorrect key could report
+ other error message texts. It has been shown that such variance in
+ error reports can aid attackers in recovering keys from other systems.
+ While it's unknown whether <filename>pgcrypto</>'s specific behaviors
+ are likewise exploitable, it seems better to avoid the risk by using a
+ one-size-fits-all message.
+ (CVE-2015-3167)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Protect against wraparound of multixact member IDs
+ (&Aacute;lvaro Herrera, Robert Haas, Thomas Munro)
+ </para>
+
+ <para>
+ Under certain usage patterns, the existing defenses against this might
+ be insufficient, allowing <filename>pg_multixact/members</> files to be
+ removed too early, resulting in data loss.
+ The fix for this includes modifying the server to fail transactions
+ that would result in overwriting old multixact member ID data, and
+ improving autovacuum to ensure it will act proactively to prevent
+ multixact member ID wraparound, as it does for transaction ID
+ wraparound.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect declaration of <filename>contrib/citext</>'s
+ <function>regexp_matches()</> functions (Tom Lane)
+ </para>
+
+ <para>
+ These functions should return <type>setof text[]</>, like the core
+ functions they are wrappers for; but they were incorrectly declared as
+ returning just <type>text[]</>. This mistake had two results: first,
+ if there was no match you got a scalar null result, whereas what you
+ should get is an empty set (zero rows). Second, the <literal>g</> flag
+ was effectively ignored, since you would get only one result array even
+ if there were multiple matches.
+ </para>
+
+ <para>
+ While the latter behavior is clearly a bug, there might be applications
+ depending on the former behavior; therefore the function declarations
+ will not be changed by default until <productname>PostgreSQL</> 9.5.
+ In pre-9.5 branches, the old behavior exists in version 1.0 of
+ the <literal>citext</> extension, while we have provided corrected
+ declarations in version 1.1 (which is <emphasis>not</> installed by
+ default). To adopt the fix in pre-9.5 branches, execute
+ <literal>ALTER EXTENSION citext UPDATE TO '1.1'</> in each database in
+ which <literal>citext</> is installed. (You can also <quote>update</>
+ back to 1.0 if you need to undo that.) Be aware that either update
+ direction will require dropping and recreating any views or rules that
+ use <filename>citext</>'s <function>regexp_matches()</> functions.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect checking of deferred exclusion constraints after a HOT
+ update (Tom Lane)
+ </para>
+
+ <para>
+ If a new row that potentially violates a deferred exclusion constraint
+ is HOT-updated (that is, no indexed columns change and the row can be
+ stored back onto the same table page) later in the same transaction,
+ the exclusion constraint would be reported as violated when the check
+ finally occurred, even if the row(s) the new row originally conflicted
+ with had been deleted.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix planning of star-schema-style queries (Tom Lane)
+ </para>
+
+ <para>
+ Sometimes, efficient scanning of a large table requires that index
+ parameters be provided from more than one other table (commonly,
+ dimension tables whose keys are needed to index a large fact table).
+ The planner should be able to find such plans, but an overly
+ restrictive search heuristic prevented it.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Prevent improper reordering of antijoins (NOT EXISTS joins) versus
+ other outer joins (Tom Lane)
+ </para>
+
+ <para>
+ This oversight in the planner has been observed to cause <quote>could
+ not find RelOptInfo for given relids</> errors, but it seems possible
+ that sometimes an incorrect query plan might get past that consistency
+ check and result in silently-wrong query output.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix incorrect matching of subexpressions in outer-join plan nodes
+ (Tom Lane)
+ </para>
+
+ <para>
+ Previously, if textually identical non-strict subexpressions were used
+ both above and below an outer join, the planner might try to re-use
+ the value computed below the join, which would be incorrect because the
+ executor would force the value to NULL in case of an unmatched outer row.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix GEQO planner to cope with failure of its join order heuristic
+ (Tom Lane)
+ </para>
+
+ <para>
+ This oversight has been seen to lead to <quote>failed to join all
+ relations together</> errors in queries involving <literal>LATERAL</>,
+ and that might happen in other cases as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix possible deadlock at startup
+ when <literal>max_prepared_transactions</> is too small
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Don't archive useless preallocated WAL files after a timeline switch
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Recursively <function>fsync()</> the data directory after a crash
+ (Abhijit Menon-Sen, Robert Haas)
+ </para>
+
+ <para>
+ This ensures consistency if another crash occurs shortly later. (The
+ second crash would have to be a system-level crash, not just a database
+ crash, for there to be a problem.)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix autovacuum launcher's possible failure to shut down, if an error
+ occurs after it receives SIGTERM (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Cope with unexpected signals in <function>LockBufferForCleanup()</>
+ (Andres Freund)
+ </para>
+
+ <para>
+ This oversight could result in spurious errors about <quote>multiple
+ backends attempting to wait for pincount 1</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix crash when doing <literal>COPY IN</> to a table with check
+ constraints that contain whole-row references (Tom Lane)
+ </para>
+
+ <para>
+ The known failure case only crashes in 9.4 and up, but there is very
+ similar code in 9.3 and 9.2, so back-patch those branches as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Avoid waiting for WAL flush or synchronous replication during commit of
+ a transaction that was read-only so far as the user is concerned
+ (Andres Freund)
+ </para>
+
+ <para>
+ Previously, a delay could occur at commit in transactions that had
+ written WAL due to HOT page pruning, leading to undesirable effects
+ such as sessions getting stuck at startup if all synchronous replicas
+ are down. Sessions have also been observed to get stuck in catchup
+ interrupt processing when using synchronous replication; this will fix
+ that problem as well.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix crash when manipulating hash indexes on temporary tables
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix possible failure during hash index bucket split, if other processes
+ are modifying the index concurrently (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Check for interrupts while analyzing index expressions (Jeff Janes)
+ </para>
+
+ <para>
+ <command>ANALYZE</> executes index expressions many times; if there are
+ slow functions in such an expression, it's desirable to be able to
+ cancel the <command>ANALYZE</> before that loop finishes.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Ensure <structfield>tableoid</> of a foreign table is reported
+ correctly when a <literal>READ COMMITTED</> recheck occurs after
+ locking rows in <command>SELECT FOR UPDATE</>, <command>UPDATE</>,
+ or <command>DELETE</> (Etsuro Fujita)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Add the name of the target server to object description strings for
+ foreign-server user mappings (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Include the schema name in object identity strings for conversions
+ (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Recommend setting <literal>include_realm</> to 1 when using
+ Kerberos/GSSAPI/SSPI authentication (Stephen Frost)
+ </para>
+
+ <para>
+ Without this, identically-named users from different realms cannot be
+ distinguished. For the moment this is only a documentation change, but
+ it will become the default setting in <productname>PostgreSQL</> 9.5.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Remove code for matching IPv4 <filename>pg_hba.conf</> entries to
+ IPv4-in-IPv6 addresses (Tom Lane)
+ </para>
+
+ <para>
+ This hack was added in 2003 in response to a report that some Linux
+ kernels of the time would report IPv4 connections as having
+ IPv4-in-IPv6 addresses. However, the logic was accidentally broken in
+ 9.0. The lack of any field complaints since then shows that it's not
+ needed anymore. Now we have reports that the broken code causes
+ crashes on some systems, so let's just remove it rather than fix it.
+ (Had we chosen to fix it, that would make for a subtle and potentially
+ security-sensitive change in the effective meaning of
+ IPv4 <filename>pg_hba.conf</> entries, which does not seem like a good
+ thing to do in minor releases.)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Report WAL flush, not insert, position in <literal>IDENTIFY_SYSTEM</>
+ replication command (Heikki Linnakangas)
+ </para>
+
+ <para>
+ This avoids a possible startup failure
+ in <application>pg_receivexlog</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ While shutting down service on Windows, periodically send status
+ updates to the Service Control Manager to prevent it from killing the
+ service too soon; and ensure that <application>pg_ctl</> will wait for
+ shutdown (Krystian Bigaj)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Reduce risk of network deadlock when using <application>libpq</>'s
+ non-blocking mode (Heikki Linnakangas)
+ </para>
+
+ <para>
+ When sending large volumes of data, it's important to drain the input
+ buffer every so often, in case the server has sent enough response data
+ to cause it to block on output. (A typical scenario is that the server
+ is sending a stream of NOTICE messages during <literal>COPY FROM
+ STDIN</>.) This worked properly in the normal blocking mode, but not
+ so much in non-blocking mode. We've modified <application>libpq</>
+ to opportunistically drain input when it can, but a full defense
+ against this problem requires application cooperation: the application
+ should watch for socket read-ready as well as write-ready conditions,
+ and be sure to call <function>PQconsumeInput()</> upon read-ready.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>libpq</>, fix misparsing of empty values in URI
+ connection strings (Thomas Fanghaenel)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix array handling in <application>ecpg</> (Michael Meskes)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <application>psql</> to sanely handle URIs and conninfo strings as
+ the first parameter to <command>\connect</>
+ (David Fetter, Andrew Dunstan, &Aacute;lvaro Herrera)
+ </para>
+
+ <para>
+ This syntax has been accepted (but undocumented) for a long time, but
+ previously some parameters might be taken from the old connection
+ instead of the given string, which was agreed to be undesirable.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Suppress incorrect complaints from <application>psql</> on some
+ platforms that it failed to write <filename>~/.psql_history</> at exit
+ (Tom Lane)
+ </para>
+
+ <para>
+ This misbehavior was caused by a workaround for a bug in very old
+ (pre-2006) versions of <application>libedit</>. We fixed it by
+ removing the workaround, which will cause a similar failure to appear
+ for anyone still using such versions of <application>libedit</>.
+ Recommendation: upgrade that library, or use <application>libreadline</>.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix <application>pg_dump</>'s rule for deciding which casts are
+ system-provided casts that should not be dumped (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_dump</>, fix failure to honor <literal>-Z</>
+ compression level option together with <literal>-Fd</>
+ (Michael Paquier)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Make <application>pg_dump</> consider foreign key relationships
+ between extension configuration tables while choosing dump order
+ (Gilles Darold, Michael Paquier, Stephen Frost)
+ </para>
+
+ <para>
+ This oversight could result in producing dumps that fail to reload
+ because foreign key constraints are transiently violated.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Avoid possible <application>pg_dump</> failure when concurrent sessions
+ are creating and dropping temporary functions (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix dumping of views that are just <literal>VALUES(...)</> but have
+ column aliases (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, force timeline 1 in the new cluster
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This change prevents upgrade failures caused by bogus complaints about
+ missing WAL history files.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, check for improperly non-connectable
+ databases before proceeding
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, quote directory paths
+ properly in the generated <literal>delete_old_cluster</> script
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, preserve database-level freezing info
+ properly
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This oversight could cause missing-clog-file errors for tables within
+ the <literal>postgres</> and <literal>template1</> databases.
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Run <application>pg_upgrade</> and <application>pg_resetxlog</> with
+ restricted privileges on Windows, so that they don't fail when run by
+ an administrator (Muhammad Asif Naeem)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Improve handling of <function>readdir()</> failures when scanning
+ directories in <application>initdb</> and <application>pg_basebackup</>
+ (Marco Nenciarini)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix slow sorting algorithm in <filename>contrib/intarray</> (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Fix compile failure on Sparc V8 machines (Rob Rowan)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Silence some build warnings on OS X (Tom Lane)
+ </para>
+ </listitem>
+
+ <listitem>
+ <para>
+ Update time zone data files to <application>tzdata</> release 2015d
+ for DST law changes in Egypt, Mongolia, and Palestine, plus historical
+ changes in Canada and Chile. Also adopt revised zone abbreviations for
+ the America/Adak zone (HST/HDT not HAST/HADT).
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
<sect1 id="release-9-3-6">
<title>Release 9.3.6</title>
diff --git a/doc/src/sgml/release-9.4.sgml b/doc/src/sgml/release-9.4.sgml
index 3e15bb6cad..a96cd8dce2 100644
--- a/doc/src/sgml/release-9.4.sgml
+++ b/doc/src/sgml/release-9.4.sgml
@@ -1,6 +1,1430 @@
<!-- doc/src/sgml/release-9.4.sgml -->
<!-- See header comment in release.sgml about typical markup -->
+ <sect1 id="release-9-4-3">
+ <title>Release 9.4.3</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-06-04</simpara>
+ </note>
+
+ <para>
+ This release contains a small number of fixes from 9.4.2.
+ For information about new features in the 9.4 major release, see
+ <xref linkend="release-9-4">.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.4.3</title>
+
+ <para>
+ A dump/restore is not required for those running 9.4.X.
+ </para>
+
+ <para>
+ However, if you are upgrading from a version earlier than 9.4.2,
+ see <xref linkend="release-9-4-2">.
+ </para>
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [d8179b001] 2015-05-28 17:33:03 -0400
+Branch: REL9_4_STABLE [a3ae3db43] 2015-05-28 17:33:03 -0400
+Branch: REL9_3_STABLE [81f3d3b7c] 2015-05-28 17:33:03 -0400
+Branch: REL9_2_STABLE [aa8377e64] 2015-05-28 17:33:03 -0400
+Branch: REL9_1_STABLE [cb867853a] 2015-05-28 17:33:03 -0400
+Branch: REL9_0_STABLE [e4a04e5a4] 2015-05-28 17:33:03 -0400
+Author: Tom Lane <[email protected]>
+Branch: master [c07d8c963] 2015-05-29 13:05:16 -0400
+Branch: REL9_4_STABLE [dbde225b1] 2015-05-29 13:05:16 -0400
+Branch: REL9_3_STABLE [52fc94844] 2015-05-29 13:05:16 -0400
+Author: Tom Lane <[email protected]>
+Branch: master [57e1138bc] 2015-05-29 15:11:36 -0400
+Branch: REL9_4_STABLE [70a4519b8] 2015-05-29 15:11:36 -0400
+Branch: REL9_3_STABLE [35dd1b51f] 2015-05-29 15:11:36 -0400
+Branch: REL9_2_STABLE [77642a819] 2015-05-29 15:11:36 -0400
+Branch: REL9_1_STABLE [9e79ca326] 2015-05-29 15:11:36 -0400
+Branch: REL9_0_STABLE [bf2f20c08] 2015-05-29 15:11:36 -0400
+Author: Tom Lane <[email protected]>
+Branch: master [1943c000b] 2015-05-29 17:02:58 -0400
+Branch: REL9_4_STABLE [99f50dd72] 2015-05-29 17:02:58 -0400
+Branch: REL9_3_STABLE [c2b68b1f7] 2015-05-29 17:02:58 -0400
+-->
+
+ <listitem>
+ <para>
+ Avoid failures while <function>fsync</>'ing data directory during
+ crash restart (Abhijit Menon-Sen, Tom Lane)
+ </para>
+
+ <para>
+ In the previous minor releases we added a patch to <function>fsync</>
+ everything in the data directory after a crash. Unfortunately its
+ response to any error condition was to fail, thereby preventing the
+ server from starting up, even when the problem was quite harmless.
+ An example is that an unwritable file in the data directory would
+ prevent restart on some platforms; but it is common to make SSL
+ certificate files unwritable by the server. Revise this behavior so
+ that permissions failures are ignored altogether, and other types of
+ failures are logged but do not prevent continuing.
+ </para>
+
+ <para>
+ Also apply the same rules in <literal>initdb --sync-only</>.
+ This case is less critical but it should act similarly.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [f46edf479] 2015-05-28 11:24:37 -0400
+Branch: REL9_4_STABLE [d4a9f5519] 2015-05-28 11:24:37 -0400
+Branch: REL9_3_STABLE [27bae8d96] 2015-05-28 11:24:37 -0400
+Branch: REL9_2_STABLE [f3c67aad4] 2015-05-28 11:24:37 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix <function>pg_get_functiondef()</> to show
+ functions' <literal>LEAKPROOF</> property, if set (Jeevan Chalke)
+ </para>
+ </listitem>
+
+<!--
+Author: Andrew Dunstan <[email protected]>
+Branch: master [5302760a5] 2015-05-22 10:21:41 -0400
+Branch: REL9_4_STABLE [9b74f32cd] 2015-05-22 10:31:29 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix <function>pushJsonbValue()</> to unpack <type>jbvBinary</>
+ objects (Andrew Dunstan)
+ </para>
+
+ <para>
+ This change does not affect any behavior in the core code as of 9.4,
+ but it avoids a corner case for possible third-party callers.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [86832eb89] 2015-05-26 22:14:59 -0400
+Branch: REL9_4_STABLE [79f0f7cab] 2015-05-26 22:14:59 -0400
+Branch: REL9_3_STABLE [9e980e7d0] 2015-05-26 22:14:59 -0400
+Branch: REL9_2_STABLE [1b1457120] 2015-05-26 22:14:59 -0400
+Branch: REL9_1_STABLE [5585cc707] 2015-05-26 22:14:59 -0400
+Branch: REL9_0_STABLE [b06649b7f] 2015-05-26 22:15:00 -0400
+-->
+
+ <listitem>
+ <para>
+ Remove <application>configure</>'s check prohibiting linking to a
+ threaded <application>libpython</>
+ on <systemitem class="osname">OpenBSD</> (Tom Lane)
+ </para>
+
+ <para>
+ The failure this restriction was meant to prevent seems to not be a
+ problem anymore on current <systemitem class="osname">OpenBSD</>
+ versions.
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
+ <sect1 id="release-9-4-2">
+ <title>Release 9.4.2</title>
+
+ <note>
+ <title>Release Date</title>
+ <simpara>2015-05-22</simpara>
+ </note>
+
+ <para>
+ This release contains a variety of fixes from 9.4.1.
+ For information about new features in the 9.4 major release, see
+ <xref linkend="release-9-4">.
+ </para>
+
+ <sect2>
+ <title>Migration to Version 9.4.2</title>
+
+ <para>
+ A dump/restore is not required for those running 9.4.X.
+ </para>
+
+ <para>
+ However, if you use <filename>contrib/citext</>'s
+ <function>regexp_matches()</> functions, see the changelog entry below
+ about that.
+ </para>
+
+ <para>
+ Also, if you are upgrading from a version earlier than 9.4.1,
+ see <xref linkend="release-9-4-1">.
+ </para>
+ </sect2>
+
+ <sect2>
+ <title>Changes</title>
+
+ <itemizedlist>
+
+<!--
+Author: Noah Misch <[email protected]>
+Branch: master [b0ce38503] 2015-05-18 10:02:31 -0400
+Branch: REL9_4_STABLE [7a0d48ac7] 2015-05-18 10:02:35 -0400
+Branch: REL9_3_STABLE [f4c12b415] 2015-05-18 10:02:36 -0400
+Branch: REL9_2_STABLE [439ff9b6b] 2015-05-18 10:02:37 -0400
+Branch: REL9_1_STABLE [6675ab595] 2015-05-18 10:02:38 -0400
+Branch: REL9_0_STABLE [648e41a6e] 2015-05-18 10:02:38 -0400
+-->
+
+ <listitem>
+ <para>
+ Avoid possible crash when client disconnects just before the
+ authentication timeout expires (Benkocs Norbert Attila)
+ </para>
+
+ <para>
+ If the timeout interrupt fired partway through the session shutdown
+ sequence, SSL-related state would be freed twice, typically causing a
+ crash and hence denial of service to other sessions. Experimentation
+ shows that an unauthenticated remote attacker could trigger the bug
+ somewhat consistently, hence treat as security issue.
+ (CVE-2015-3165)
+ </para>
+ </listitem>
+
+<!--
+Author: Noah Misch <[email protected]>
+Branch: master [cac18a76b] 2015-05-18 10:02:31 -0400
+Branch: REL9_4_STABLE [f7c4fe7d9] 2015-05-18 10:02:35 -0400
+Branch: REL9_3_STABLE [d5abbd114] 2015-05-18 10:02:36 -0400
+Branch: REL9_2_STABLE [1e6652aea] 2015-05-18 10:02:37 -0400
+Branch: REL9_1_STABLE [b544dcdad] 2015-05-18 10:02:38 -0400
+Branch: REL9_0_STABLE [19f7adc01] 2015-05-18 10:02:38 -0400
+Author: Noah Misch <[email protected]>
+Branch: master [16304a013] 2015-05-18 10:02:31 -0400
+Branch: REL9_4_STABLE [2e3bd0665] 2015-05-18 10:02:35 -0400
+Branch: REL9_3_STABLE [34d21e770] 2015-05-18 10:02:36 -0400
+Branch: REL9_2_STABLE [82b7393eb] 2015-05-18 10:02:37 -0400
+Branch: REL9_1_STABLE [e58f042d9] 2015-05-18 10:02:38 -0400
+Branch: REL9_0_STABLE [b08c7aff7] 2015-05-18 10:02:38 -0400
+Author: Noah Misch <[email protected]>
+Branch: master [fd97bd411] 2015-05-18 10:02:31 -0400
+Branch: REL9_4_STABLE [ca325941d] 2015-05-18 10:02:35 -0400
+Branch: REL9_3_STABLE [c669915fd] 2015-05-18 10:02:37 -0400
+Branch: REL9_2_STABLE [01272d95a] 2015-05-18 10:02:37 -0400
+Branch: REL9_1_STABLE [2cb9f2cab] 2015-05-18 10:02:38 -0400
+Branch: REL9_0_STABLE [9b5e831e3] 2015-05-18 10:02:38 -0400
+Author: Tom Lane <[email protected]>
+Branch: master [0c071936e] 2015-05-19 18:19:38 -0400
+Branch: REL9_4_STABLE [2eb2fcd56] 2015-05-19 18:16:19 -0400
+Branch: REL9_3_STABLE [13341276e] 2015-05-19 18:16:58 -0400
+Branch: REL9_2_STABLE [221f7a949] 2015-05-19 18:17:42 -0400
+Branch: REL9_1_STABLE [0510cff6e] 2015-05-19 18:18:16 -0400
+Branch: REL9_0_STABLE [cf893530a] 2015-05-19 18:18:56 -0400
+-->
+
+ <listitem>
+ <para>
+ Improve detection of system-call failures (Noah Misch)
+ </para>
+
+ <para>
+ Our replacement implementation of <function>snprintf()</> failed to
+ check for errors reported by the underlying system library calls;
+ the main case that might be missed is out-of-memory situations.
+ In the worst case this might lead to information exposure, due to our
+ code assuming that a buffer had been overwritten when it hadn't been.
+ Also, there were a few places in which security-relevant calls of other
+ system library functions did not check for failure.
+ </para>
+
+ <para>
+ It remains possible that some calls of the <function>*printf()</>
+ family of functions are vulnerable to information disclosure if an
+ out-of-memory error occurs at just the wrong time. We judge the risk
+ to not be large, but will continue analysis in this area.
+ (CVE-2015-3166)
+ </para>
+ </listitem>
+
+<!--
+Author: Noah Misch <[email protected]>
+Branch: master [85270ac7a] 2015-05-18 10:02:31 -0400
+Branch: REL9_4_STABLE [fba1fb4ef] 2015-05-18 10:02:35 -0400
+Branch: REL9_3_STABLE [7b758b7d6] 2015-05-18 10:02:37 -0400
+Branch: REL9_2_STABLE [0ba200431] 2015-05-18 10:02:37 -0400
+Branch: REL9_1_STABLE [e5981aebd] 2015-05-18 10:02:38 -0400
+Branch: REL9_0_STABLE [b84e5c017] 2015-05-18 10:02:39 -0400
+-->
+
+ <listitem>
+ <para>
+ In <filename>contrib/pgcrypto</>, uniformly report decryption failures
+ as <quote>Wrong key or corrupt data</> (Noah Misch)
+ </para>
+
+ <para>
+ Previously, some cases of decryption with an incorrect key could report
+ other error message texts. It has been shown that such variance in
+ error reports can aid attackers in recovering keys from other systems.
+ While it's unknown whether <filename>pgcrypto</>'s specific behaviors
+ are likewise exploitable, it seems better to avoid the risk by using a
+ one-size-fits-all message.
+ (CVE-2015-3167)
+ </para>
+ </listitem>
+
+<!--
+Author: Alvaro Herrera <[email protected]>
+Branch: master [b69bf30b9] 2015-04-28 11:32:53 -0300
+Branch: REL9_4_STABLE [942542cbb] 2015-04-28 11:32:53 -0300
+Branch: REL9_3_STABLE [e2eda4b11] 2015-04-28 11:32:53 -0300
+Author: Alvaro Herrera <[email protected]>
+Branch: master [669c7d20e] 2015-04-30 13:55:06 -0300
+Branch: REL9_4_STABLE [7140e11d8] 2015-04-30 13:55:06 -0300
+Branch: REL9_3_STABLE [e60581fdf] 2015-04-30 13:55:06 -0300
+Author: Robert Haas <[email protected]>
+Branch: master [7be47c56a] 2015-05-07 11:19:31 -0400
+Branch: REL9_4_STABLE [32c50af4c] 2015-05-07 11:13:55 -0400
+Branch: REL9_3_STABLE [83fbd9b59] 2015-05-07 11:16:41 -0400
+Author: Robert Haas <[email protected]>
+Branch: master [312747c22] 2015-05-10 21:34:26 -0400
+Branch: REL9_4_STABLE [7b3f0f8b8] 2015-05-10 21:47:28 -0400
+Branch: REL9_3_STABLE [24aa77ec9] 2015-05-10 21:47:41 -0400
+Author: Robert Haas <[email protected]>
+Branch: master [f6a6c46d7] 2015-05-10 22:21:20 -0400
+Branch: REL9_4_STABLE [ded891916] 2015-05-10 22:45:27 -0400
+Branch: REL9_3_STABLE [5bbac7ec1] 2015-05-10 22:45:42 -0400
+Author: Robert Haas <[email protected]>
+Branch: master [53bb309d2] 2015-05-08 12:53:00 -0400
+Branch: REL9_4_STABLE [3ecab37d9] 2015-05-08 12:53:30 -0400
+Branch: REL9_3_STABLE [596fb5aa7] 2015-05-08 12:55:14 -0400
+Author: Robert Haas <[email protected]>
+Branch: master [04e6d3b87] 2015-05-11 10:51:14 -0400
+Branch: REL9_4_STABLE [8ec1a3a54] 2015-05-11 10:56:19 -0400
+Branch: REL9_3_STABLE [543fbecee] 2015-05-11 10:56:32 -0400
+Author: Robert Haas <[email protected]>
+Branch: master [b4d4ce1d5] 2015-05-11 12:15:50 -0400
+Branch: REL9_4_STABLE [ea70595a3] 2015-05-11 12:16:35 -0400
+Branch: REL9_3_STABLE [ddebd2119] 2015-05-11 12:16:51 -0400
+-->
+
+ <listitem>
+ <para>
+ Protect against wraparound of multixact member IDs
+ (&Aacute;lvaro Herrera, Robert Haas, Thomas Munro)
+ </para>
+
+ <para>
+ Under certain usage patterns, the existing defenses against this might
+ be insufficient, allowing <filename>pg_multixact/members</> files to be
+ removed too early, resulting in data loss.
+ The fix for this includes modifying the server to fail transactions
+ that would result in overwriting old multixact member ID data, and
+ improving autovacuum to ensure it will act proactively to prevent
+ multixact member ID wraparound, as it does for transaction ID
+ wraparound.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [b22527f29] 2015-05-05 15:51:22 -0400
+Branch: REL9_4_STABLE [b1ec45994] 2015-05-05 15:50:53 -0400
+Branch: REL9_3_STABLE [ffac9f65d] 2015-05-05 15:50:53 -0400
+Branch: REL9_2_STABLE [d4070d10c] 2015-05-05 15:50:53 -0400
+Branch: REL9_1_STABLE [801e250a8] 2015-05-05 15:50:53 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix incorrect declaration of <filename>contrib/citext</>'s
+ <function>regexp_matches()</> functions (Tom Lane)
+ </para>
+
+ <para>
+ These functions should return <type>setof text[]</>, like the core
+ functions they are wrappers for; but they were incorrectly declared as
+ returning just <type>text[]</>. This mistake had two results: first,
+ if there was no match you got a scalar null result, whereas what you
+ should get is an empty set (zero rows). Second, the <literal>g</> flag
+ was effectively ignored, since you would get only one result array even
+ if there were multiple matches.
+ </para>
+
+ <para>
+ While the latter behavior is clearly a bug, there might be applications
+ depending on the former behavior; therefore the function declarations
+ will not be changed by default until <productname>PostgreSQL</> 9.5.
+ In pre-9.5 branches, the old behavior exists in version 1.0 of
+ the <literal>citext</> extension, while we have provided corrected
+ declarations in version 1.1 (which is <emphasis>not</> installed by
+ default). To adopt the fix in pre-9.5 branches, execute
+ <literal>ALTER EXTENSION citext UPDATE TO '1.1'</> in each database in
+ which <literal>citext</> is installed. (You can also <quote>update</>
+ back to 1.0 if you need to undo that.) Be aware that either update
+ direction will require dropping and recreating any views or rules that
+ use <filename>citext</>'s <function>regexp_matches()</> functions.
+ </para>
+ </listitem>
+
+<!--
+Author: Andrew Dunstan <[email protected]>
+Branch: master [bda76c1c8] 2015-02-26 12:25:21 -0500
+Branch: REL9_4_STABLE [79afe6e66] 2015-02-26 12:34:43 -0500
+-->
+
+ <listitem>
+ <para>
+ Render infinite dates and timestamps as <literal>infinity</> when
+ converting to <type>json</>, rather than throwing an error
+ (Andrew Dunstan)
+ </para>
+ </listitem>
+
+<!--
+Author: Andrew Dunstan <[email protected]>
+Branch: master [3c000fd9a] 2015-05-04 12:38:58 -0400
+Branch: REL9_4_STABLE [997066f44] 2015-05-04 12:43:16 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix <type>json</>/<type>jsonb</>'s <function>populate_record()</>
+ and <function>to_record()</> functions to handle empty input properly
+ (Andrew Dunstan)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [20781765f] 2015-05-11 12:25:43 -0400
+Branch: REL9_4_STABLE [4d3d9719d] 2015-05-11 12:25:44 -0400
+Branch: REL9_3_STABLE [7d09fdf82] 2015-05-11 12:25:45 -0400
+Branch: REL9_2_STABLE [46f9acd3e] 2015-05-11 12:25:28 -0400
+Branch: REL9_1_STABLE [dd75518d5] 2015-05-11 12:25:28 -0400
+Branch: REL9_0_STABLE [b93c8eaf8] 2015-05-11 12:25:28 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix incorrect checking of deferred exclusion constraints after a HOT
+ update (Tom Lane)
+ </para>
+
+ <para>
+ If a new row that potentially violates a deferred exclusion constraint
+ is HOT-updated (that is, no indexed columns change and the row can be
+ stored back onto the same table page) later in the same transaction,
+ the exclusion constraint would be reported as violated when the check
+ finally occurred, even if the row(s) the new row originally conflicted
+ with had been deleted.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [a4820434c] 2015-05-03 11:30:24 -0400
+Branch: REL9_4_STABLE [79edb2981] 2015-05-03 11:30:24 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix behavior when changing foreign key constraint deferrability status
+ with <literal>ALTER TABLE ... ALTER CONSTRAINT</> (Tom Lane)
+ </para>
+
+ <para>
+ Operations later in the same session or concurrent sessions might not
+ honor the status change promptly.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [b514a7460] 2015-02-28 12:43:04 -0500
+Branch: REL9_4_STABLE [fdacbf9e8] 2015-02-28 12:43:04 -0500
+Branch: REL9_3_STABLE [1b558782b] 2015-02-28 12:43:04 -0500
+Branch: REL9_2_STABLE [6f419958a] 2015-02-28 12:43:04 -0500
+-->
+
+ <listitem>
+ <para>
+ Fix planning of star-schema-style queries (Tom Lane)
+ </para>
+
+ <para>
+ Sometimes, efficient scanning of a large table requires that index
+ parameters be provided from more than one other table (commonly,
+ dimension tables whose keys are needed to index a large fact table).
+ The planner should be able to find such plans, but an overly
+ restrictive search heuristic prevented it.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [3cf868601] 2015-04-25 16:44:27 -0400
+Branch: REL9_4_STABLE [5f3d1909c] 2015-04-25 16:44:27 -0400
+Branch: REL9_3_STABLE [3e47d0b2a] 2015-04-25 16:44:27 -0400
+Branch: REL9_2_STABLE [950f80dd5] 2015-04-25 16:44:27 -0400
+Branch: REL9_1_STABLE [2e38198f6] 2015-04-25 16:44:27 -0400
+Branch: REL9_0_STABLE [985da346e] 2015-04-25 16:44:27 -0400
+-->
+
+ <listitem>
+ <para>
+ Prevent improper reordering of antijoins (NOT EXISTS joins) versus
+ other outer joins (Tom Lane)
+ </para>
+
+ <para>
+ This oversight in the planner has been observed to cause <quote>could
+ not find RelOptInfo for given relids</> errors, but it seems possible
+ that sometimes an incorrect query plan might get past that consistency
+ check and result in silently-wrong query output.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [ca6805338] 2015-04-04 19:55:15 -0400
+Branch: REL9_4_STABLE [1d71d36ff] 2015-04-04 19:55:15 -0400
+Branch: REL9_3_STABLE [e105df208] 2015-04-04 19:55:15 -0400
+Branch: REL9_2_STABLE [b7d493bf7] 2015-04-04 19:55:15 -0400
+Branch: REL9_1_STABLE [3b5d67102] 2015-04-04 19:55:15 -0400
+Branch: REL9_0_STABLE [da8819194] 2015-04-04 19:55:15 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix incorrect matching of subexpressions in outer-join plan nodes
+ (Tom Lane)
+ </para>
+
+ <para>
+ Previously, if textually identical non-strict subexpressions were used
+ both above and below an outer join, the planner might try to re-use
+ the value computed below the join, which would be incorrect because the
+ executor would force the value to NULL in case of an unmatched outer row.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [1a179f36f] 2015-02-10 20:37:19 -0500
+Branch: REL9_4_STABLE [433c79d2c] 2015-02-10 20:37:22 -0500
+Branch: REL9_3_STABLE [672abc402] 2015-02-10 20:37:24 -0500
+Branch: REL9_2_STABLE [0d083103f] 2015-02-10 20:37:26 -0500
+Branch: REL9_1_STABLE [52579d507] 2015-02-10 20:37:29 -0500
+Branch: REL9_0_STABLE [72bbca27e] 2015-02-10 20:37:31 -0500
+-->
+
+ <listitem>
+ <para>
+ Fix GEQO planner to cope with failure of its join order heuristic
+ (Tom Lane)
+ </para>
+
+ <para>
+ This oversight has been seen to lead to <quote>failed to join all
+ relations together</> errors in queries involving <literal>LATERAL</>,
+ and that might happen in other cases as well.
+ </para>
+ </listitem>
+
+<!--
+Author: Stephen Frost <[email protected]>
+Branch: master [6f9bd50ea] 2015-02-25 21:36:29 -0500
+Branch: REL9_4_STABLE [f16270ade] 2015-02-25 21:36:40 -0500
+-->
+
+ <listitem>
+ <para>
+ Ensure that row locking occurs properly when the target of
+ an <command>UPDATE</> or <command>DELETE</> is a security-barrier view
+ (Stephen Frost)
+ </para>
+ </listitem>
+
+<!--
+Author: Andres Freund <[email protected]>
+Branch: master [dfbaed459] 2015-04-28 00:17:43 +0200
+Branch: REL9_4_STABLE [fd3dfc236] 2015-04-28 00:18:04 +0200
+-->
+
+ <listitem>
+ <para>
+ Use a file opened for read/write when syncing replication slot data
+ during database startup (Andres Freund)
+ </para>
+
+ <para>
+ On some platforms, the previous coding could result in errors like
+ <quote>could not fsync file "pg_replslot/...": Bad file descriptor</>.
+ </para>
+ </listitem>
+
+<!--
+Author: Heikki Linnakangas <[email protected]>
+Branch: master [2c47fe16a] 2015-04-23 21:39:35 +0300
+Branch: REL9_4_STABLE [438a062d5] 2015-04-23 21:35:10 +0300
+Branch: REL9_3_STABLE [f73ebd766] 2015-04-23 21:36:24 +0300
+Branch: REL9_2_STABLE [d3f5d2892] 2015-04-23 21:36:50 +0300
+Branch: REL9_1_STABLE [e8528a8f5] 2015-04-23 21:36:59 +0300
+Branch: REL9_0_STABLE [223a94680] 2015-04-23 21:37:09 +0300
+-->
+
+ <listitem>
+ <para>
+ Fix possible deadlock at startup
+ when <literal>max_prepared_transactions</> is too small
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+<!--
+Author: Heikki Linnakangas <[email protected]>
+Branch: master [b2a5545bd] 2015-04-13 16:53:49 +0300
+Branch: REL9_4_STABLE [d72792d02] 2015-04-13 17:22:21 +0300
+Branch: REL9_3_STABLE [a800267e4] 2015-04-13 17:22:35 +0300
+Branch: REL9_2_STABLE [cc2939f44] 2015-04-13 17:26:59 +0300
+Branch: REL9_1_STABLE [ad2925e20] 2015-04-13 17:26:49 +0300
+Branch: REL9_0_STABLE [5b6938186] 2015-04-13 17:26:35 +0300
+-->
+
+ <listitem>
+ <para>
+ Don't archive useless preallocated WAL files after a timeline switch
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+<!--
+Author: Robert Haas <[email protected]>
+Branch: master [2ce439f33] 2015-05-04 14:13:53 -0400
+Branch: REL9_4_STABLE [d8ac77ab1] 2015-05-04 14:19:32 -0400
+Branch: REL9_3_STABLE [14de825de] 2015-05-04 12:27:55 -0400
+Branch: REL9_2_STABLE [2bc339716] 2015-05-04 12:41:53 -0400
+Branch: REL9_1_STABLE [4b71d28d5] 2015-05-04 12:47:11 -0400
+Branch: REL9_0_STABLE [30ba0d0c2] 2015-05-04 14:04:53 -0400
+Author: Robert Haas <[email protected]>
+Branch: master [456ff0863] 2015-05-05 09:29:49 -0400
+Branch: REL9_4_STABLE [603fe0181] 2015-05-05 09:16:39 -0400
+Branch: REL9_3_STABLE [6fd666954] 2015-05-05 09:19:39 -0400
+Branch: REL9_2_STABLE [53e1498c6] 2015-05-05 09:22:51 -0400
+Branch: REL9_1_STABLE [6ee1a7738] 2015-05-05 09:25:51 -0400
+Branch: REL9_0_STABLE [262fbcb9d] 2015-05-05 09:30:07 -0400
+-->
+
+ <listitem>
+ <para>
+ Recursively <function>fsync()</> the data directory after a crash
+ (Abhijit Menon-Sen, Robert Haas)
+ </para>
+
+ <para>
+ This ensures consistency if another crash occurs shortly later. (The
+ second crash would have to be a system-level crash, not just a database
+ crash, for there to be a problem.)
+ </para>
+ </listitem>
+
+<!--
+Author: Alvaro Herrera <[email protected]>
+Branch: master [5df64f298] 2015-04-08 13:19:49 -0300
+Branch: REL9_4_STABLE [ec01c1c0a] 2015-04-08 13:19:49 -0300
+Branch: REL9_3_STABLE [0d6c9e061] 2015-04-08 13:19:49 -0300
+Branch: REL9_2_STABLE [37dc228e8] 2015-04-08 13:19:49 -0300
+Branch: REL9_1_STABLE [cf5d3f274] 2015-04-08 13:19:49 -0300
+Branch: REL9_0_STABLE [595bc97b5] 2015-04-08 13:19:49 -0300
+-->
+
+ <listitem>
+ <para>
+ Fix autovacuum launcher's possible failure to shut down, if an error
+ occurs after it receives SIGTERM (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [bc49d9324] 2015-04-03 00:07:29 -0400
+Branch: REL9_4_STABLE [ee0d06c0b] 2015-04-03 00:07:29 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix failure to handle invalidation messages for system catalogs
+ early in session startup (Tom Lane)
+ </para>
+
+ <para>
+ This oversight could result in failures in sessions that start
+ concurrently with a <command>VACUUM FULL</> on a system catalog.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [701dcc983] 2015-03-30 13:05:27 -0400
+Branch: REL9_4_STABLE [2897e069c] 2015-03-30 13:05:35 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix crash in <function>BackendIdGetTransactionIds()</> when trying
+ to get status for a backend process that just exited (Tom Lane)
+ </para>
+ </listitem>
+
+<!--
+Author: Andres Freund <[email protected]>
+Branch: master [bc208a5a2] 2015-02-23 16:14:14 +0100
+Branch: REL9_4_STABLE [89629f289] 2015-02-23 16:14:14 +0100
+Branch: REL9_3_STABLE [a6ddff812] 2015-02-23 16:14:15 +0100
+Branch: REL9_2_STABLE [c76e6dd7a] 2015-02-23 16:14:15 +0100
+Branch: REL9_1_STABLE [25576bee2] 2015-02-23 16:14:15 +0100
+Branch: REL9_0_STABLE [87b7fcc87] 2015-02-23 16:14:16 +0100
+-->
+
+ <listitem>
+ <para>
+ Cope with unexpected signals in <function>LockBufferForCleanup()</>
+ (Andres Freund)
+ </para>
+
+ <para>
+ This oversight could result in spurious errors about <quote>multiple
+ backends attempting to wait for pincount 1</>.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [08361cea2] 2015-02-15 23:26:45 -0500
+Branch: REL9_4_STABLE [1bf32972e] 2015-02-15 23:26:45 -0500
+Branch: REL9_3_STABLE [4662ba5a2] 2015-02-15 23:26:46 -0500
+Branch: REL9_2_STABLE [effcaa4c2] 2015-02-15 23:26:46 -0500
+-->
+
+ <listitem>
+ <para>
+ Fix crash when doing <literal>COPY IN</> to a table with check
+ constraints that contain whole-row references (Tom Lane)
+ </para>
+
+ <para>
+ The known failure case only crashes in 9.4 and up, but there is very
+ similar code in 9.3 and 9.2, so back-patch those branches as well.
+ </para>
+ </listitem>
+
+<!--
+Author: Andres Freund <[email protected]>
+Branch: master [fd6a3f3ad] 2015-02-26 12:50:07 +0100
+Branch: REL9_4_STABLE [d72115112] 2015-02-26 12:50:07 +0100
+Branch: REL9_3_STABLE [abce8dc7d] 2015-02-26 12:50:07 +0100
+Branch: REL9_2_STABLE [d67076529] 2015-02-26 12:50:07 +0100
+Branch: REL9_1_STABLE [5c8dabecd] 2015-02-26 12:50:08 +0100
+Branch: REL9_0_STABLE [82e0d6eb5] 2015-02-26 12:50:08 +0100
+-->
+
+ <listitem>
+ <para>
+ Avoid waiting for WAL flush or synchronous replication during commit of
+ a transaction that was read-only so far as the user is concerned
+ (Andres Freund)
+ </para>
+
+ <para>
+ Previously, a delay could occur at commit in transactions that had
+ written WAL due to HOT page pruning, leading to undesirable effects
+ such as sessions getting stuck at startup if all synchronous replicas
+ are down. Sessions have also been observed to get stuck in catchup
+ interrupt processing when using synchronous replication; this will fix
+ that problem as well.
+ </para>
+ </listitem>
+
+<!--
+Author: Andres Freund <[email protected]>
+Branch: master [87cec51d3] 2015-03-23 16:51:11 +0100
+Branch: REL9_4_STABLE [16be9737c] 2015-03-23 16:52:17 +0100
+-->
+
+ <listitem>
+ <para>
+ Avoid busy-waiting with short <literal>recovery_min_apply_delay</>
+ values (Andres Freund)
+ </para>
+ </listitem>
+
+<!--
+Author: Heikki Linnakangas <[email protected]>
+Branch: REL9_4_STABLE [462a2f1f0] 2015-05-13 09:53:50 +0300
+Branch: REL9_3_STABLE [96b676cc6] 2015-05-13 09:54:06 +0300
+Branch: REL9_2_STABLE [1a99d392c] 2015-05-13 10:06:52 +0300
+Branch: REL9_1_STABLE [f6c4a8690] 2015-05-13 10:06:47 +0300
+Branch: REL9_0_STABLE [bd1cfde70] 2015-05-13 10:06:38 +0300
+-->
+
+ <listitem>
+ <para>
+ Fix crash when manipulating hash indexes on temporary tables
+ (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [ed9cc2b5d] 2015-03-30 16:40:05 -0400
+Branch: REL9_4_STABLE [a6a8bf5cd] 2015-03-30 16:40:05 -0400
+Branch: REL9_3_STABLE [246bbf65c] 2015-03-30 16:40:05 -0400
+Branch: REL9_2_STABLE [f155466fe] 2015-03-30 16:40:05 -0400
+Branch: REL9_1_STABLE [46bfe44e8] 2015-03-30 16:40:05 -0400
+Branch: REL9_0_STABLE [8f3c57721] 2015-03-30 16:40:05 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix possible failure during hash index bucket split, if other processes
+ are modifying the index concurrently (Tom Lane)
+ </para>
+ </listitem>
+
+<!--
+Author: Heikki Linnakangas <[email protected]>
+Branch: master [26d2c5dc8] 2015-03-12 15:34:32 +0100
+Branch: REL9_4_STABLE [d81072026] 2015-03-12 15:40:07 +0100
+-->
+
+ <listitem>
+ <para>
+ Fix memory leaks in GIN index vacuum (Heikki Linnakangas)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [e4cbfd673] 2015-03-29 15:04:09 -0400
+Branch: REL9_4_STABLE [f444de5e3] 2015-03-29 15:04:18 -0400
+Branch: REL9_3_STABLE [995a664c8] 2015-03-29 15:04:24 -0400
+Branch: REL9_2_STABLE [d12afe114] 2015-03-29 15:04:28 -0400
+Branch: REL9_1_STABLE [ab02d35e0] 2015-03-29 15:04:33 -0400
+Branch: REL9_0_STABLE [152c94632] 2015-03-29 15:04:38 -0400
+-->
+
+ <listitem>
+ <para>
+ Check for interrupts while analyzing index expressions (Jeff Janes)
+ </para>
+
+ <para>
+ <command>ANALYZE</> executes index expressions many times; if there are
+ slow functions in such an expression, it's desirable to be able to
+ cancel the <command>ANALYZE</> before that loop finishes.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [443fd0540] 2015-03-12 13:39:09 -0400
+Branch: REL9_4_STABLE [32269be59] 2015-03-12 13:39:10 -0400
+Branch: REL9_3_STABLE [5bdf3cf5a] 2015-03-12 13:38:49 -0400
+Branch: REL9_2_STABLE [590fc5d96] 2015-03-12 13:38:49 -0400
+Branch: REL9_1_STABLE [4a4fd2b0c] 2015-03-12 13:38:49 -0400
+-->
+
+ <listitem>
+ <para>
+ Ensure <structfield>tableoid</> of a foreign table is reported
+ correctly when a <literal>READ COMMITTED</> recheck occurs after
+ locking rows in <command>SELECT FOR UPDATE</>, <command>UPDATE</>,
+ or <command>DELETE</> (Etsuro Fujita)
+ </para>
+ </listitem>
+
+<!--
+Author: Alvaro Herrera <[email protected]>
+Branch: master [cf34e373f] 2015-03-05 18:03:16 -0300
+Branch: REL9_4_STABLE [749977634] 2015-03-05 18:03:16 -0300
+Branch: REL9_3_STABLE [5cf400003] 2015-03-05 18:03:16 -0300
+Branch: REL9_2_STABLE [e166e6441] 2015-03-05 18:03:16 -0300
+Branch: REL9_1_STABLE [8167ef8e2] 2015-03-05 18:03:16 -0300
+Branch: REL9_0_STABLE [71b8e8e6c] 2015-03-05 18:03:16 -0300
+-->
+
+ <listitem>
+ <para>
+ Add the name of the target server to object description strings for
+ foreign-server user mappings (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+<!--
+Author: Alvaro Herrera <[email protected]>
+Branch: master [0d906798f] 2015-02-18 14:28:11 -0300
+Branch: REL9_4_STABLE [66463a3cf] 2015-02-18 14:28:12 -0300
+Branch: REL9_3_STABLE [a196e67f9] 2015-02-18 14:28:12 -0300
+-->
+
+ <listitem>
+ <para>
+ Include the schema name in object identity strings for conversions
+ (&Aacute;lvaro Herrera)
+ </para>
+ </listitem>
+
+<!--
+Author: Stephen Frost <[email protected]>
+Branch: REL9_4_STABLE [c106f397d] 2015-05-08 19:39:52 -0400
+Branch: REL9_3_STABLE [3de791ee7] 2015-05-08 19:40:06 -0400
+Branch: REL9_2_STABLE [21cb21de2] 2015-05-08 19:40:09 -0400
+Branch: REL9_1_STABLE [edfef090a] 2015-05-08 19:40:11 -0400
+Branch: REL9_0_STABLE [c981e5999] 2015-05-08 19:40:15 -0400
+-->
+
+ <listitem>
+ <para>
+ Recommend setting <literal>include_realm</> to 1 when using
+ Kerberos/GSSAPI/SSPI authentication (Stephen Frost)
+ </para>
+
+ <para>
+ Without this, identically-named users from different realms cannot be
+ distinguished. For the moment this is only a documentation change, but
+ it will become the default setting in <productname>PostgreSQL</> 9.5.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [cb66f495f] 2015-02-16 16:18:31 -0500
+Branch: REL9_4_STABLE [23291a796] 2015-02-16 16:17:59 -0500
+Branch: REL9_3_STABLE [7bc6e5954] 2015-02-16 16:18:04 -0500
+Branch: REL9_2_STABLE [3913b897d] 2015-02-16 16:18:08 -0500
+Branch: REL9_1_STABLE [2df854f84] 2015-02-16 16:18:12 -0500
+Branch: REL9_0_STABLE [c99ef9aff] 2015-02-16 16:18:17 -0500
+Author: Tom Lane <[email protected]>
+Branch: master [2e105def0] 2015-02-17 12:49:18 -0500
+Branch: REL9_4_STABLE [a271c9260] 2015-02-17 12:49:18 -0500
+Branch: REL9_3_STABLE [4ea2d2ddb] 2015-02-17 12:49:18 -0500
+Branch: REL9_2_STABLE [d068609b9] 2015-02-17 12:49:44 -0500
+Branch: REL9_1_STABLE [64e045838] 2015-02-17 12:49:46 -0500
+Branch: REL9_0_STABLE [e48ce4f33] 2015-02-17 12:49:18 -0500
+-->
+
+ <listitem>
+ <para>
+ Remove code for matching IPv4 <filename>pg_hba.conf</> entries to
+ IPv4-in-IPv6 addresses (Tom Lane)
+ </para>
+
+ <para>
+ This hack was added in 2003 in response to a report that some Linux
+ kernels of the time would report IPv4 connections as having
+ IPv4-in-IPv6 addresses. However, the logic was accidentally broken in
+ 9.0. The lack of any field complaints since then shows that it's not
+ needed anymore. Now we have reports that the broken code causes
+ crashes on some systems, so let's just remove it rather than fix it.
+ (Had we chosen to fix it, that would make for a subtle and potentially
+ security-sensitive change in the effective meaning of
+ IPv4 <filename>pg_hba.conf</> entries, which does not seem like a good
+ thing to do in minor releases.)
+ </para>
+ </listitem>
+
+<!--
+Author: Robert Haas <[email protected]>
+Branch: master [bf740ce9e] 2015-03-19 11:04:09 -0400
+Branch: REL9_4_STABLE [76d07a2a0] 2015-03-19 11:08:54 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix status reporting for terminated background workers that were never
+ actually started (Robert Haas)
+ </para>
+ </listitem>
+
+<!--
+Author: Robert Haas <[email protected]>
+Branch: master [b3a5e76e1] 2015-04-02 14:38:06 -0400
+Branch: REL9_4_STABLE [a1f4ade01] 2015-04-02 14:39:18 -0400
+-->
+
+ <listitem>
+ <para>
+ After a database crash, don't restart background workers that are
+ marked <literal>BGW_NEVER_RESTART</> (Amit Khandekar)
+ </para>
+ </listitem>
+
+<!--
+Author: Heikki Linnakangas <[email protected]>
+Branch: master [ff16b40f8] 2015-02-06 11:26:50 +0200
+Branch: REL9_4_STABLE [3bc4c6942] 2015-02-06 11:27:12 +0200
+Branch: REL9_3_STABLE [5f0ba4abb] 2015-02-06 11:32:16 +0200
+Branch: REL9_2_STABLE [2af568c6b] 2015-02-06 11:32:37 +0200
+Branch: REL9_1_STABLE [0d36d9f2b] 2015-02-06 11:32:42 +0200
+-->
+
+ <listitem>
+ <para>
+ Report WAL flush, not insert, position in <literal>IDENTIFY_SYSTEM</>
+ replication command (Heikki Linnakangas)
+ </para>
+
+ <para>
+ This avoids a possible startup failure
+ in <application>pg_receivexlog</>.
+ </para>
+ </listitem>
+
+<!--
+Author: Magnus Hagander <[email protected]>
+Branch: master [1a241d22a] 2015-05-07 15:04:13 +0200
+Branch: REL9_4_STABLE [43ed06816] 2015-05-07 15:09:21 +0200
+Branch: REL9_3_STABLE [ba3caee84] 2015-05-07 15:09:32 +0200
+Branch: REL9_2_STABLE [447e16581] 2015-05-07 15:09:42 +0200
+Branch: REL9_1_STABLE [b9ded1529] 2015-05-07 15:09:53 +0200
+Branch: REL9_0_STABLE [78ce2dc8e] 2015-05-07 15:10:01 +0200
+-->
+
+ <listitem>
+ <para>
+ While shutting down service on Windows, periodically send status
+ updates to the Service Control Manager to prevent it from killing the
+ service too soon; and ensure that <application>pg_ctl</> will wait for
+ shutdown (Krystian Bigaj)
+ </para>
+ </listitem>
+
+<!--
+Author: Heikki Linnakangas <[email protected]>
+Branch: master [2a3f6e368] 2015-02-23 13:34:21 +0200
+Branch: REL9_4_STABLE [0214a61e0] 2015-02-23 13:32:39 +0200
+Branch: REL9_3_STABLE [cdf813c59] 2015-02-23 13:32:42 +0200
+Branch: REL9_2_STABLE [22c9c8a7e] 2015-02-23 13:32:46 +0200
+Branch: REL9_1_STABLE [7052abbb6] 2015-02-23 13:32:50 +0200
+Branch: REL9_0_STABLE [8878eaaa8] 2015-02-23 13:32:53 +0200
+-->
+
+ <listitem>
+ <para>
+ Reduce risk of network deadlock when using <application>libpq</>'s
+ non-blocking mode (Heikki Linnakangas)
+ </para>
+
+ <para>
+ When sending large volumes of data, it's important to drain the input
+ buffer every so often, in case the server has sent enough response data
+ to cause it to block on output. (A typical scenario is that the server
+ is sending a stream of NOTICE messages during <literal>COPY FROM
+ STDIN</>.) This worked properly in the normal blocking mode, but not
+ so much in non-blocking mode. We've modified <application>libpq</>
+ to opportunistically drain input when it can, but a full defense
+ against this problem requires application cooperation: the application
+ should watch for socket read-ready as well as write-ready conditions,
+ and be sure to call <function>PQconsumeInput()</> upon read-ready.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [b26e20814] 2015-02-21 12:59:54 -0500
+Branch: REL9_4_STABLE [9c15a778a] 2015-02-21 12:59:35 -0500
+Branch: REL9_3_STABLE [f389b6e0a] 2015-02-21 12:59:39 -0500
+Branch: REL9_2_STABLE [83c3115dd] 2015-02-21 12:59:43 -0500
+-->
+
+ <listitem>
+ <para>
+ In <application>libpq</>, fix misparsing of empty values in URI
+ connection strings (Thomas Fanghaenel)
+ </para>
+ </listitem>
+
+<!--
+Author: Michael Meskes <[email protected]>
+Branch: master [1f393fc92] 2015-02-10 12:04:10 +0100
+Branch: REL9_4_STABLE [66c4ea8cb] 2015-02-11 10:57:02 +0100
+Branch: REL9_3_STABLE [1a321fea7] 2015-02-11 11:13:11 +0100
+Branch: REL9_2_STABLE [9be9ac425] 2015-02-11 11:14:14 +0100
+Branch: REL9_1_STABLE [32e633195] 2015-02-11 11:27:21 +0100
+Branch: REL9_0_STABLE [ce2fcc58e] 2015-02-11 11:30:11 +0100
+-->
+
+ <listitem>
+ <para>
+ Fix array handling in <application>ecpg</> (Michael Meskes)
+ </para>
+ </listitem>
+
+<!--
+Author: Alvaro Herrera <[email protected]>
+Branch: master [fcef16172] 2015-04-01 20:00:07 -0300
+Branch: REL9_4_STABLE [a44e54cf4] 2015-04-01 20:00:07 -0300
+Branch: REL9_3_STABLE [f4540cae1] 2015-04-01 20:00:07 -0300
+Branch: REL9_2_STABLE [d4bacdcb9] 2015-04-01 20:00:07 -0300
+Branch: REL9_1_STABLE [276591bc4] 2015-04-01 20:00:07 -0300
+Branch: REL9_0_STABLE [557fcfae3] 2015-04-01 20:00:07 -0300
+-->
+
+ <listitem>
+ <para>
+ Fix <application>psql</> to sanely handle URIs and conninfo strings as
+ the first parameter to <command>\connect</>
+ (David Fetter, Andrew Dunstan, &Aacute;lvaro Herrera)
+ </para>
+
+ <para>
+ This syntax has been accepted (but undocumented) for a long time, but
+ previously some parameters might be taken from the old connection
+ instead of the given string, which was agreed to be undesirable.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [df9ebf1ee] 2015-03-14 13:43:00 -0400
+Branch: REL9_4_STABLE [f50b5c7d0] 2015-03-14 13:43:08 -0400
+Branch: REL9_3_STABLE [2cb76fa6f] 2015-03-14 13:43:13 -0400
+Branch: REL9_2_STABLE [309ff2ad0] 2015-03-14 13:43:17 -0400
+Branch: REL9_1_STABLE [043fe5c5a] 2015-03-14 13:43:21 -0400
+Branch: REL9_0_STABLE [396ef6fd8] 2015-03-14 13:43:26 -0400
+-->
+
+ <listitem>
+ <para>
+ Suppress incorrect complaints from <application>psql</> on some
+ platforms that it failed to write <filename>~/.psql_history</> at exit
+ (Tom Lane)
+ </para>
+
+ <para>
+ This misbehavior was caused by a workaround for a bug in very old
+ (pre-2006) versions of <application>libedit</>. We fixed it by
+ removing the workaround, which will cause a similar failure to appear
+ for anyone still using such versions of <application>libedit</>.
+ Recommendation: upgrade that library, or use <application>libreadline</>.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [9feefedf9] 2015-02-10 22:38:15 -0500
+Branch: REL9_4_STABLE [a592e5883] 2015-02-10 22:38:17 -0500
+Branch: REL9_3_STABLE [a4e871caa] 2015-02-10 22:38:20 -0500
+Branch: REL9_2_STABLE [2593c7039] 2015-02-10 22:38:22 -0500
+Branch: REL9_1_STABLE [14794f9b8] 2015-02-10 22:38:26 -0500
+Branch: REL9_0_STABLE [8e70f3c40] 2015-02-10 22:38:29 -0500
+-->
+
+ <listitem>
+ <para>
+ Fix <application>pg_dump</>'s rule for deciding which casts are
+ system-provided casts that should not be dumped (Tom Lane)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [0e7e355f2] 2015-02-18 11:43:00 -0500
+Branch: REL9_4_STABLE [a75dfb73e] 2015-02-18 11:43:00 -0500
+Branch: REL9_3_STABLE [a7ad5cf0c] 2015-02-18 11:43:00 -0500
+Branch: REL9_2_STABLE [c86f8f361] 2015-02-18 11:43:00 -0500
+Branch: REL9_1_STABLE [b0d53b2e3] 2015-02-18 11:43:00 -0500
+-->
+
+ <listitem>
+ <para>
+ In <application>pg_dump</>, fix failure to honor <literal>-Z</>
+ compression level option together with <literal>-Fd</>
+ (Michael Paquier)
+ </para>
+ </listitem>
+
+<!--
+Author: Stephen Frost <[email protected]>
+Branch: master [ebd092bc2] 2015-03-02 14:12:21 -0500
+Branch: REL9_4_STABLE [c05fa3433] 2015-03-02 14:12:28 -0500
+Branch: REL9_3_STABLE [43d81f16a] 2015-03-02 14:12:33 -0500
+Branch: REL9_2_STABLE [d13bbfabb] 2015-03-02 14:12:38 -0500
+Branch: REL9_1_STABLE [dcb467b8e] 2015-03-02 14:12:43 -0500
+-->
+
+ <listitem>
+ <para>
+ Make <application>pg_dump</> consider foreign key relationships
+ between extension configuration tables while choosing dump order
+ (Gilles Darold, Michael Paquier, Stephen Frost)
+ </para>
+
+ <para>
+ This oversight could result in producing dumps that fail to reload
+ because foreign key constraints are transiently violated.
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [e3bfe6d84] 2015-03-06 13:27:46 -0500
+Branch: REL9_4_STABLE [629f8613f] 2015-03-06 13:27:46 -0500
+Branch: REL9_3_STABLE [d645273cf] 2015-03-06 13:27:46 -0500
+-->
+
+ <listitem>
+ <para>
+ Avoid possible <application>pg_dump</> failure when concurrent sessions
+ are creating and dropping temporary functions (Tom Lane)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [e9f1c01b7] 2015-02-25 12:01:12 -0500
+Branch: REL9_4_STABLE [2164a0de2] 2015-02-25 12:01:12 -0500
+Branch: REL9_3_STABLE [f864fe074] 2015-02-25 12:01:12 -0500
+Branch: REL9_2_STABLE [be8801e9c] 2015-02-25 12:01:12 -0500
+Branch: REL9_1_STABLE [f7b41902a] 2015-02-25 12:01:12 -0500
+Branch: REL9_0_STABLE [7a501bcbf] 2015-02-25 12:01:12 -0500
+-->
+
+ <listitem>
+ <para>
+ Fix dumping of views that are just <literal>VALUES(...)</> but have
+ column aliases (Tom Lane)
+ </para>
+ </listitem>
+
+<!--
+Author: Bruce Momjian <[email protected]>
+Branch: master [b2f95c34f] 2015-05-01 13:03:23 -0400
+Branch: REL9_4_STABLE [70fac4844] 2015-05-01 13:03:23 -0400
+-->
+
+ <listitem>
+ <para>
+ Ensure that a view's replication identity is correctly set
+ to <literal>nothing</> during dump/restore (Marko Tiikkaja)
+ </para>
+
+ <para>
+ Previously, if the view was involved in a circular dependency,
+ it might wind up with an incorrect replication identity property.
+ </para>
+ </listitem>
+
+<!--
+Author: Bruce Momjian <[email protected]>
+Branch: master [4c5e06004] 2015-05-16 00:40:18 -0400
+Branch: REL9_4_STABLE [387a3e46c] 2015-05-16 00:40:18 -0400
+Branch: REL9_3_STABLE [bffbeec0c] 2015-05-16 00:40:18 -0400
+Branch: REL9_2_STABLE [affc04d16] 2015-05-16 00:40:18 -0400
+Branch: REL9_1_STABLE [acd75b264] 2015-05-16 00:40:18 -0400
+Branch: REL9_0_STABLE [df161c94e] 2015-05-16 00:40:18 -0400
+Author: Bruce Momjian <[email protected]>
+Branch: REL9_4_STABLE [5f6539635] 2015-05-16 15:16:28 -0400
+Branch: REL9_3_STABLE [4e9935979] 2015-05-16 15:16:28 -0400
+-->
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, force timeline 1 in the new cluster
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This change prevents upgrade failures caused by bogus complaints about
+ missing WAL history files.
+ </para>
+ </listitem>
+
+<!--
+Author: Bruce Momjian <[email protected]>
+Branch: master [fb694d959] 2015-05-16 00:10:03 -0400
+Branch: REL9_4_STABLE [31f5d3f35] 2015-05-16 00:10:03 -0400
+Branch: REL9_3_STABLE [4cfba5369] 2015-05-16 00:10:03 -0400
+Branch: REL9_2_STABLE [2a55e7134] 2015-05-16 00:10:03 -0400
+Branch: REL9_1_STABLE [321db7123] 2015-05-16 00:10:03 -0400
+Branch: REL9_0_STABLE [2194aa92b] 2015-05-16 00:10:03 -0400
+-->
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, check for improperly non-connectable
+ databases before proceeding
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+<!--
+Author: Bruce Momjian <[email protected]>
+Branch: master [056764b10] 2015-02-11 22:06:04 -0500
+Branch: REL9_4_STABLE [5eef3c61e] 2015-02-11 22:06:04 -0500
+Branch: REL9_3_STABLE [9ecd51da7] 2015-02-11 22:06:04 -0500
+Branch: REL9_2_STABLE [66f5217f5] 2015-02-11 22:06:04 -0500
+Branch: REL9_1_STABLE [08aaae40e] 2015-02-11 22:06:04 -0500
+Branch: REL9_0_STABLE [4ae178f60] 2015-02-11 22:06:04 -0500
+-->
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, quote directory paths
+ properly in the generated <literal>delete_old_cluster</> script
+ (Bruce Momjian)
+ </para>
+ </listitem>
+
+<!--
+Author: Bruce Momjian <[email protected]>
+Branch: master [866f3017a] 2015-02-11 21:02:44 -0500
+Branch: REL9_4_STABLE [c7bc5be11] 2015-02-11 21:02:36 -0500
+Branch: REL9_3_STABLE [e20523f8f] 2015-02-11 21:02:28 -0500
+Branch: REL9_2_STABLE [d99cf27b7] 2015-02-11 21:02:12 -0500
+Branch: REL9_1_STABLE [55179b03e] 2015-02-11 21:02:07 -0500
+Branch: REL9_0_STABLE [85dac37ee] 2015-02-11 21:02:06 -0500
+-->
+
+ <listitem>
+ <para>
+ In <application>pg_upgrade</>, preserve database-level freezing info
+ properly
+ (Bruce Momjian)
+ </para>
+
+ <para>
+ This oversight could cause missing-clog-file errors for tables within
+ the <literal>postgres</> and <literal>template1</> databases.
+ </para>
+ </listitem>
+
+<!--
+Author: Andrew Dunstan <[email protected]>
+Branch: master [fa1e5afa8] 2015-03-30 17:07:52 -0400
+Branch: REL9_4_STABLE [2366761bf] 2015-03-30 17:16:57 -0400
+Branch: REL9_3_STABLE [0904eb3e1] 2015-03-30 17:17:17 -0400
+Branch: REL9_2_STABLE [948566313] 2015-03-30 17:17:39 -0400
+Branch: REL9_1_STABLE [22b3f5b26] 2015-03-30 17:17:54 -0400
+Branch: REL9_0_STABLE [bf22a8e58] 2015-03-30 17:18:10 -0400
+-->
+
+ <listitem>
+ <para>
+ Run <application>pg_upgrade</> and <application>pg_resetxlog</> with
+ restricted privileges on Windows, so that they don't fail when run by
+ an administrator (Muhammad Asif Naeem)
+ </para>
+ </listitem>
+
+<!--
+Author: Robert Haas <[email protected]>
+Branch: master [5d6c2405f] 2015-02-17 10:19:30 -0500
+Branch: REL9_4_STABLE [5e49c98e0] 2015-02-17 10:50:49 -0500
+Branch: REL9_3_STABLE [9a90ec9cf] 2015-02-17 10:54:29 -0500
+Author: Robert Haas <[email protected]>
+Branch: REL9_2_STABLE [319406c2a] 2015-02-17 11:02:46 -0500
+Branch: REL9_1_STABLE [d7d294f59] 2015-02-17 11:08:40 -0500
+-->
+
+ <listitem>
+ <para>
+ Improve handling of <function>readdir()</> failures when scanning
+ directories in <application>initdb</> and <application>pg_basebackup</>
+ (Marco Nenciarini)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [8d1f23900] 2015-03-15 23:22:03 -0400
+Branch: REL9_4_STABLE [904e8b627] 2015-03-15 23:22:03 -0400
+Branch: REL9_3_STABLE [83587a075] 2015-03-15 23:22:03 -0400
+Branch: REL9_2_STABLE [8582ae7aa] 2015-03-15 23:22:03 -0400
+Branch: REL9_1_STABLE [9288645b5] 2015-03-15 23:22:03 -0400
+Branch: REL9_0_STABLE [40b0c10b7] 2015-03-15 23:22:03 -0400
+-->
+
+ <listitem>
+ <para>
+ Fix slow sorting algorithm in <filename>contrib/intarray</> (Tom Lane)
+ </para>
+ </listitem>
+
+<!--
+Author: Heikki Linnakangas <[email protected]>
+Branch: master [33e879c4e] 2015-02-13 23:56:25 +0200
+Branch: REL9_4_STABLE [56a23a83f] 2015-02-13 23:56:57 +0200
+Branch: REL9_3_STABLE [6ef5d894a] 2015-02-13 23:57:05 +0200
+Branch: REL9_2_STABLE [a0d84da1d] 2015-02-13 23:57:25 +0200
+Branch: REL9_1_STABLE [ebdc2e1e2] 2015-02-13 23:57:28 +0200
+Branch: REL9_0_STABLE [61165fae0] 2015-02-13 23:57:35 +0200
+-->
+
+ <listitem>
+ <para>
+ Fix compile failure on Sparc V8 machines (Rob Rowan)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [91f4a5a97] 2015-03-14 14:08:45 -0400
+Branch: REL9_4_STABLE [c415c13b7] 2015-03-14 14:08:45 -0400
+Author: Tom Lane <[email protected]>
+Branch: master [73b416b2e] 2015-04-05 13:01:59 -0400
+Branch: REL9_4_STABLE [8972a152c] 2015-04-05 13:01:55 -0400
+Branch: REL9_3_STABLE [6347bdb31] 2015-04-05 13:01:55 -0400
+-->
+
+ <listitem>
+ <para>
+ Silence some build warnings on OS X (Tom Lane)
+ </para>
+ </listitem>
+
+<!--
+Author: Tom Lane <[email protected]>
+Branch: master [9d366c1f3] 2015-05-15 19:35:29 -0400
+Branch: REL9_4_STABLE [d0ddcf62e] 2015-05-15 19:35:51 -0400
+Branch: REL9_3_STABLE [4fd69e412] 2015-05-15 19:35:58 -0400
+Branch: REL9_2_STABLE [2a63434f0] 2015-05-15 19:36:06 -0400
+Branch: REL9_1_STABLE [436f35609] 2015-05-15 19:36:13 -0400
+Branch: REL9_0_STABLE [3c3749a3b] 2015-05-15 19:36:20 -0400
+-->
+
+ <listitem>
+ <para>
+ Update time zone data files to <application>tzdata</> release 2015d
+ for DST law changes in Egypt, Mongolia, and Palestine, plus historical
+ changes in Canada and Chile. Also adopt revised zone abbreviations for
+ the America/Adak zone (HST/HDT not HAST/HADT).
+ </para>
+ </listitem>
+
+ </itemizedlist>
+
+ </sect2>
+ </sect1>
+
<sect1 id="release-9-4-1">
<title>Release 9.4.1</title>
diff --git a/doc/src/sgml/replication-origins.sgml b/doc/src/sgml/replication-origins.sgml
new file mode 100644
index 0000000000..40fcc6d3d0
--- /dev/null
+++ b/doc/src/sgml/replication-origins.sgml
@@ -0,0 +1,93 @@
+<!-- doc/src/sgml/replication-origins.sgml -->
+<chapter id="replication-origins">
+ <title>Replication Progress Tracking</title>
+ <indexterm zone="replication-origins">
+ <primary>Replication Progress Tracking</primary>
+ </indexterm>
+ <indexterm zone="replication-origins">
+ <primary>Replication Origins</primary>
+ </indexterm>
+
+ <para>
+ Replication origins are intended to make it easier to implement
+ logical replication solutions on top
+ of <xref linkend="logicaldecoding">. They provide a solution to two
+ common problems:
+ <itemizedlist>
+ <listitem><para>How to safely keep track of replication progress</para></listitem>
+ <listitem><para>How to change replication behavior, based on the
+ origin of a row; e.g. to avoid loops in bi-directional replication
+ setups</para></listitem>
+ </itemizedlist>
+ </para>
+
+ <para>
+ Replication origins consist of a name and an <type>oid</type>. The name,
+ which is what should be used to refer to the origin across systems, is
+ free-form <type>text</type>. It should be used in a way that makes conflicts
+ between replication origins created by different replication solutions
+ unlikely; e.g. by prefixing the replication solution's name to it.
+ The <type>oid</type> is used only to avoid having to store the long version
+ in situations where space efficiency is important. It should never be shared
+ between systems.
+ </para>
+
+ <para>
+ Replication origins can be created using the
+ <link linkend="pg-replication-origin-create"><function>pg_replication_origin_create()</function></link>;
+ dropped using
+ <link linkend="pg-replication-origin-drop"><function>pg_replication_origin_drop()</function></link>;
+ and seen in the
+ <link linkend="catalog-pg-replication-origin"><structname>pg_replication_origin</structname></link>
+ catalog.
+ </para>
+
+ <para>
+ When replicating from one system to another (independent of the fact that
+ those two might be in the same cluster, or even same database) one
+ nontrivial part of building a replication solution is to keep track of
+ replay progress in a safe manner. When the applying process, or the whole
+ cluster, dies, it needs to be possible to find out up to where data has
+ successfully been replicated. Naive solutions to this like updating a row in
+ a table for every replayed transaction have problems like runtime overhead
+ and bloat.
+ </para>
+
+ <para>
+ Using the replication origin infrastructure a session can be
+ marked as replaying from a remote node (using the
+ <link linkend="pg-replication-origin-session-setup"><function>pg_replication_origin_session_setup()</function></link>
+ function). Additionally the <acronym>LSN</acronym> and commit
+ timestamp of every source transaction can be configured on a per
+ transaction basis using
+ <link linkend="pg-replication-origin-xact-setup"><function>pg_replication_origin_xact_setup()</function></link>.
+ If that's done replication progress will persist in a crash safe
+ manner. Replay progress for all replication origins can be seen in the
+ <link linkend="catalog-pg-replication-origin-status">
+ <structname>pg_replication_origin_status</structname>
+ </link> view. An individual origin's progress, e.g. when resuming
+ replication, can be acquired using
+ <link linkend="pg-replication-origin-progress"><function>pg_replication_origin_progress()</function></link>
+ for any origin or
+ <link linkend="pg-replication-origin-session-progress"><function>pg_replication_origin_session_progress()</function></link>
+ for the origin configured in the current session.
+ </para>
+
+ <para>
+ In more complex replication topologies than replication from exactly one
+ system to one other, another problem can be that it is hard to avoid
+ replicating replayed rows again. That can lead both to cycles in the
+ replication and inefficiencies. Replication origins provide an optional
+ mechanism to recognize and prevent that. When configured using the functions
+ referenced in the previous paragraph, every change and transaction passed to
+ output plugin callbacks (see <xref linkend="logicaldecoding-output-plugin">)
+ generated by the session is tagged with the replication origin of the
+ generating session. This allows them to be treated differently in the output
+ plugin, e.g. ignoring all but locally originating rows. Additionally
+ the <link linkend="logicaldecoding-output-plugin-filter-origin">
+ <function>filter_by_origin_cb</function></link> callback can be used
+ to filter the logical decoding change stream based on the
+ source. While less flexible, filtering via that callback is
+ considerably more efficient.
+ </para>
+</chapter>
diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml
index 973db7435b..cb5c8fccae 100644
--- a/doc/src/sgml/rules.sgml
+++ b/doc/src/sgml/rules.sgml
@@ -2136,7 +2136,7 @@ SELECT * FROM phone_number WHERE tricky(person, phone);
When it is necessary for a view to provide row level security, the
<literal>security_barrier</literal> attribute should be applied to
the view. This prevents maliciously-chosen functions and operators from
- being invoked on rows until after the view has done its work. For
+ being passed values from rows until after the view has done its work. For
example, if the view shown above had been created like this, it would
be secure:
<programlisting>
@@ -2157,9 +2157,12 @@ CREATE VIEW phone_number WITH (security_barrier) AS
operators. The query planner can safely allow such functions to be evaluated
at any point in the query execution process, since invoking them on rows
invisible to the user will not leak any information about the unseen rows.
- In contrast, a function that might throw an error depending on the values
- received as arguments (such as one that throws an error in the event of
- overflow or division by zero) are not leak-proof, and could provide
+ Further, functions which do not take arguments or which are not passed any
+ arguments from the security barrier view do not have to be marked as
+ <literal>LEAKPROOF</literal> to be pushed down, as they never receive data
+ from the view. In contrast, a function that might throw an error depending
+ on the values received as arguments (such as one that throws an error in the
+ event of overflow or division by zero) is not leak-proof, and could provide
significant information about the unseen rows if applied before the security
view's row filters.
</para>
diff --git a/doc/src/sgml/storage.sgml b/doc/src/sgml/storage.sgml
index d8c52875d8..e5b7b4b68d 100644
--- a/doc/src/sgml/storage.sgml
+++ b/doc/src/sgml/storage.sgml
@@ -503,8 +503,9 @@ comparison table, in which all the HTML pages were cut down to 7 kB to fit.
<acronym>TOAST</> pointers can point to data that is not on disk, but is
elsewhere in the memory of the current server process. Such pointers
obviously cannot be long-lived, but they are nonetheless useful. There
-is currently just one sub-case:
-pointers to <firstterm>indirect</> data.
+are currently two sub-cases:
+pointers to <firstterm>indirect</> data and
+pointers to <firstterm>expanded</> data.
</para>
<para>
@@ -519,6 +520,43 @@ and there is no infrastructure to help with this.
</para>
<para>
+Expanded <acronym>TOAST</> pointers are useful for complex data types
+whose on-disk representation is not especially suited for computational
+purposes. As an example, the standard varlena representation of a
+<productname>PostgreSQL</> array includes dimensionality information, a
+nulls bitmap if there are any null elements, then the values of all the
+elements in order. When the element type itself is variable-length, the
+only way to find the <replaceable>N</>'th element is to scan through all the
+preceding elements. This representation is appropriate for on-disk storage
+because of its compactness, but for computations with the array it's much
+nicer to have an <quote>expanded</> or <quote>deconstructed</>
+representation in which all the element starting locations have been
+identified. The <acronym>TOAST</> pointer mechanism supports this need by
+allowing a pass-by-reference Datum to point to either a standard varlena
+value (the on-disk representation) or a <acronym>TOAST</> pointer that
+points to an expanded representation somewhere in memory. The details of
+this expanded representation are up to the data type, though it must have
+a standard header and meet the other API requirements given
+in <filename>src/include/utils/expandeddatum.h</>. C-level functions
+working with the data type can choose to handle either representation.
+Functions that do not know about the expanded representation, but simply
+apply <function>PG_DETOAST_DATUM</> to their inputs, will automatically
+receive the traditional varlena representation; so support for an expanded
+representation can be introduced incrementally, one function at a time.
+</para>
+
+<para>
+<acronym>TOAST</> pointers to expanded values are further broken down
+into <firstterm>read-write</> and <firstterm>read-only</> pointers.
+The pointed-to representation is the same either way, but a function that
+receives a read-write pointer is allowed to modify the referenced value
+in-place, whereas one that receives a read-only pointer must not; it must
+first create a copy if it wants to make a modified version of the value.
+This distinction and some associated conventions make it possible to avoid
+unnecessary copying of expanded values during query execution.
+</para>
+
+<para>
For all types of in-memory <acronym>TOAST</> pointer, the <acronym>TOAST</>
management code ensures that no such pointer datum can accidentally get
stored on disk. In-memory <acronym>TOAST</> pointers are automatically
diff --git a/doc/src/sgml/stylesheet-fo.xsl b/doc/src/sgml/stylesheet-fo.xsl
index ad26a5abbe..434e69d8e3 100644
--- a/doc/src/sgml/stylesheet-fo.xsl
+++ b/doc/src/sgml/stylesheet-fo.xsl
@@ -24,4 +24,11 @@
<xsl:call-template name="inline.monoseq"/>
</xsl:template>
+<!-- bug fix from <https://sourceforge.net/p/docbook/bugs/1360/#831b> -->
+
+<xsl:template match="varlistentry/term" mode="xref-to">
+ <xsl:param name="verbose" select="1"/>
+ <xsl:apply-templates mode="no.anchor.mode"/>
+</xsl:template>
+
</xsl:stylesheet>
diff --git a/doc/src/sgml/tablesample-method.sgml b/doc/src/sgml/tablesample-method.sgml
new file mode 100644
index 0000000000..48eb7fe84e
--- /dev/null
+++ b/doc/src/sgml/tablesample-method.sgml
@@ -0,0 +1,139 @@
+<!-- doc/src/sgml/tablesample-method.sgml -->
+
+<chapter id="tablesample-method">
+ <title>Writing A TABLESAMPLE Sampling Method</title>
+
+ <indexterm zone="tablesample-method">
+ <primary>tablesample method</primary>
+ </indexterm>
+
+ <para>
+ The <command>TABLESAMPLE</command> clause implementation in
+ <productname>PostgreSQL</> supports creating custom sampling methods.
+ These methods control what sample of the table will be returned when the
+ <command>TABLESAMPLE</command> clause is used.
+ </para>
+
+ <sect1 id="tablesample-method-functions">
+ <title>Tablesample Method Functions</title>
+
+ <para>
+ The tablesample method must provide the following set of functions:
+ </para>
+
+ <para>
+<programlisting>
+void
+tsm_init (TableSampleDesc *desc,
+ uint32 seed, ...);
+</programlisting>
+ Initialize the tablesample scan. The function is called at the beginning
+ of each relation scan.
+ </para>
+ <para>
+ Note that the first two parameters are required but you can specify
+ additional parameters which then will be used by the <command>TABLESAMPLE</>
+ clause to determine the required user input in the query itself.
+ This means that if your function specifies an additional float4 parameter
+ named percent, the user will have to call the tablesample method with an
+ expression which evaluates (or can be coerced) to float4.
+ For example this definition:
+<programlisting>
+tsm_init (TableSampleDesc *desc,
+ uint32 seed, float4 pct);
+</programlisting>
+will lead to an SQL call like this:
+<programlisting>
+... TABLESAMPLE yourmethod(0.5) ...
+</programlisting>
+ </para>
+
+ <para>
+<programlisting>
+BlockNumber
+tsm_nextblock (TableSampleDesc *desc);
+</programlisting>
+ Returns the block number of the next page to be scanned. InvalidBlockNumber
+ should be returned if the sampling has reached end of the relation.
+ </para>
+
+ <para>
+<programlisting>
+OffsetNumber
+tsm_nexttuple (TableSampleDesc *desc, BlockNumber blockno,
+ OffsetNumber maxoffset);
+</programlisting>
+ Returns the next tuple offset for the current page. InvalidOffsetNumber should
+ be returned if the sampling has reached end of the page.
+ </para>
+
+ <para>
+<programlisting>
+void
+tsm_end (TableSampleDesc *desc);
+</programlisting>
+ The scan has finished, cleanup any left over state.
+ </para>
+
+ <para>
+<programlisting>
+void
+tsm_reset (TableSampleDesc *desc);
+</programlisting>
+ The scan needs to rescan the relation again, reset any tablesample method
+ state.
+ </para>
+
+ <para>
+<programlisting>
+void
+tsm_cost (PlannerInfo *root, Path *path, RelOptInfo *baserel,
+ List *args, BlockNumber *pages, double *tuples);
+</programlisting>
+ This function is used by the optimizer to decide the best plan and is also used
+ for output of <command>EXPLAIN</>.
+ </para>
+
+ <para>
+ There is one more function which the tablesample method can implement in order
+ to gain more fine-grained control over sampling. This function is optional:
+ </para>
+
+ <para>
+<programlisting>
+bool
+tsm_examinetuple (TableSampleDesc *desc, BlockNumber blockno,
+ HeapTuple tuple, bool visible);
+</programlisting>
+ Function that enables the sampling method to examine contents of the tuple
+ (for example to collect some internal statistics). The return value of this
+ function is used to determine if the tuple should be returned to client.
+ Note that this function will receive even invisible tuples but it is not
+ allowed to return true for such a tuple (if it does,
+ <productname>PostgreSQL</> will raise an error).
+ </para>
+
+ <para>
+ As you can see, most of the tablesample method interfaces get the
+ <structname>TableSampleDesc</> as a first parameter. This structure holds the
+ state of the current scan and also provides storage for the tablesample
+ method's state. It is defined as follows:
+<programlisting>
+typedef struct TableSampleDesc {
+ HeapScanDesc heapScan;
+ TupleDesc tupDesc;
+
+ void *tsmdata;
+} TableSampleDesc;
+</programlisting>
+ Where <structfield>heapScan</> is the descriptor of the physical table scan.
+ It's possible to get table size info from it. The <structfield>tupDesc</>
+ represents the tuple descriptor of the tuples returned by the scan and passed
+ to the <function>tsm_examinetuple()</> interface. The <structfield>tsmdata</>
+ can be used by tablesample method itself to store any state info it might
+ need during the scan. If used by the method, it should be <function>pfree</>d
+ in the <function>tsm_end()</> function.
+ </para>
+ </sect1>
+
+</chapter>
diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml
index 0bc7e7b41c..b1c669fb91 100644
--- a/doc/src/sgml/textsearch.sgml
+++ b/doc/src/sgml/textsearch.sgml
@@ -481,7 +481,7 @@ LIMIT 10;
linkend="textsearch-indexes">) to speed up text searches:
<programlisting>
-CREATE INDEX pgweb_idx ON pgweb USING gin(to_tsvector('english', body));
+CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', body));
</programlisting>
Notice that the 2-argument version of <function>to_tsvector</function> is
@@ -511,7 +511,7 @@ CREATE INDEX pgweb_idx ON pgweb USING gin(to_tsvector('english', body));
configuration name is specified by another column, e.g.:
<programlisting>
-CREATE INDEX pgweb_idx ON pgweb USING gin(to_tsvector(config_name, body));
+CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector(config_name, body));
</programlisting>
where <literal>config_name</> is a column in the <literal>pgweb</>
@@ -527,7 +527,7 @@ CREATE INDEX pgweb_idx ON pgweb USING gin(to_tsvector(config_name, body));
Indexes can even concatenate columns:
<programlisting>
-CREATE INDEX pgweb_idx ON pgweb USING gin(to_tsvector('english', title || ' ' || body));
+CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', title || ' ' || body));
</programlisting>
</para>
@@ -547,7 +547,7 @@ UPDATE pgweb SET textsearchable_index_col =
Then we create a <acronym>GIN</acronym> index to speed up the search:
<programlisting>
-CREATE INDEX textsearch_idx ON pgweb USING gin(textsearchable_index_col);
+CREATE INDEX textsearch_idx ON pgweb USING GIN (textsearchable_index_col);
</programlisting>
Now we are ready to perform a fast full text search:
@@ -3217,7 +3217,7 @@ SELECT plainto_tsquery('supernovae stars');
<tertiary>text search</tertiary>
</indexterm>
- <literal>CREATE INDEX <replaceable>name</replaceable> ON <replaceable>table</replaceable> USING gist(<replaceable>column</replaceable>);</literal>
+ <literal>CREATE INDEX <replaceable>name</replaceable> ON <replaceable>table</replaceable> USING GIST (<replaceable>column</replaceable>);</literal>
</term>
<listitem>
@@ -3238,7 +3238,7 @@ SELECT plainto_tsquery('supernovae stars');
<tertiary>text search</tertiary>
</indexterm>
- <literal>CREATE INDEX <replaceable>name</replaceable> ON <replaceable>table</replaceable> USING gin(<replaceable>column</replaceable>);</literal>
+ <literal>CREATE INDEX <replaceable>name</replaceable> ON <replaceable>table</replaceable> USING GIN (<replaceable>column</replaceable>);</literal>
</term>
<listitem>
diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml
index f94aea174a..52f28bca7a 100644
--- a/doc/src/sgml/trigger.sgml
+++ b/doc/src/sgml/trigger.sgml
@@ -40,14 +40,17 @@
On tables and foreign tables, triggers can be defined to execute either
before or after any <command>INSERT</command>, <command>UPDATE</command>,
or <command>DELETE</command> operation, either once per modified row,
- or once per <acronym>SQL</acronym> statement.
- <command>UPDATE</command> triggers can moreover be set to fire only if
- certain columns are mentioned in the <literal>SET</literal> clause of the
- <command>UPDATE</command> statement.
- Triggers can also fire for <command>TRUNCATE</command> statements.
- If a trigger event occurs, the trigger's function is called at the
- appropriate time to handle the event. Foreign tables do not support the
- TRUNCATE statement at all.
+ or once per <acronym>SQL</acronym> statement. If an
+ <command>INSERT</command> contains an <literal>ON CONFLICT DO UPDATE</>
+ clause, it is possible that the effects of a BEFORE insert trigger and
+ a BEFORE update trigger can both be applied together, if a reference to
+ an <varname>EXCLUDED</> column appears. <command>UPDATE</command>
+ triggers can moreover be set to fire only if certain columns are
+ mentioned in the <literal>SET</literal> clause of the
+ <command>UPDATE</command> statement. Triggers can also fire for
+ <command>TRUNCATE</command> statements. If a trigger event occurs,
+ the trigger's function is called at the appropriate time to handle the
+ event. Foreign tables do not support the TRUNCATE statement at all.
</para>
<para>
@@ -119,6 +122,35 @@
</para>
<para>
+ If an <command>INSERT</command> contains an <literal>ON CONFLICT
+ DO UPDATE</> clause, it is possible that the effects of all
+ row-level <literal>BEFORE</> <command>INSERT</command> triggers
+ and all row-level BEFORE <command>UPDATE</command> triggers can
+ both be applied in a way that is apparent from the final state of
+ the updated row, if an <varname>EXCLUDED</> column is referenced.
+ There need not be an <varname>EXCLUDED</> column reference for
+ both sets of BEFORE row-level triggers to execute, though. The
+ possibility of surprising outcomes should be considered when there
+ are both <literal>BEFORE</> <command>INSERT</command> and
+ <literal>BEFORE</> <command>UPDATE</command> row-level triggers
+ that both affect a row being inserted/updated (this can be
+ problematic even if the modifications are more or less equivalent, if
+ they're not also idempotent). Note that statement-level
+ <command>UPDATE</command> triggers are executed when <literal>ON
+ CONFLICT DO UPDATE</> is specified, regardless of whether or not
+ any rows were affected by the <command>UPDATE</command> (and
+ regardless of whether the alternative <command>UPDATE</command>
+ path was ever taken). An <command>INSERT</command> with an
+ <literal>ON CONFLICT DO UPDATE</> clause will execute
+ statement-level <literal>BEFORE</> <command>INSERT</command>
+ triggers first, then statement-level <literal>BEFORE</>
+ <command>UPDATE</command> triggers, followed by statement-level
+ <literal>AFTER</> <command>UPDATE</command> triggers and finally
+ statement-level <literal>AFTER</> <command>INSERT</command>
+ triggers.
+ </para>
+
+ <para>
Trigger functions invoked by per-statement triggers should always
return <symbol>NULL</symbol>. Trigger functions invoked by per-row
triggers can return a table row (a value of
@@ -677,11 +709,7 @@ CREATE TABLE ttest (
#include "commands/trigger.h" /* ... triggers ... */
#include "utils/rel.h" /* ... and relations */
-#ifdef PG_MODULE_MAGIC
PG_MODULE_MAGIC;
-#endif
-
-extern Datum trigf(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(trigf);
diff --git a/doc/src/sgml/tsm-system-rows.sgml b/doc/src/sgml/tsm-system-rows.sgml
new file mode 100644
index 0000000000..0c2f1779c9
--- /dev/null
+++ b/doc/src/sgml/tsm-system-rows.sgml
@@ -0,0 +1,50 @@
+<!-- doc/src/sgml/tsm-system-rows.sgml -->
+
+<sect1 id="tsm-system-rows" xreflabel="tsm_system_rows">
+ <title>tsm_system_rows</title>
+
+ <indexterm zone="tsm-system-rows">
+ <primary>tsm_system_rows</primary>
+ </indexterm>
+
+ <para>
+ The <filename>tsm_system_rows</> module provides the tablesample method
+ <literal>SYSTEM_ROWS</literal>, which can be used inside the
+ <command>TABLESAMPLE</command> clause of a <command>SELECT</command>.
+ </para>
+
+ <para>
+ This tablesample method uses a linear probing algorithm to read a sample
+ of a table and uses the actual number of rows as the limit (unlike the
+ <literal>SYSTEM</literal> tablesample method, which limits by a percentage
+ of a table).
+ </para>
+
+ <sect2>
+ <title>Examples</title>
+
+ <para>
+ Here is an example of selecting a sample of a table with
+ <literal>SYSTEM_ROWS</>. First install the extension:
+ </para>
+
+<programlisting>
+CREATE EXTENSION tsm_system_rows;
+</programlisting>
+
+ <para>
+ Then you can use it in a <command>SELECT</command> command the same way as
+ other tablesample methods:
+
+<programlisting>
+SELECT * FROM my_table TABLESAMPLE SYSTEM_ROWS(100);
+</programlisting>
+ </para>
+
+ <para>
+ The above command will return a sample of 100 rows from the table
+ <literal>my_table</> (fewer if the table does not have 100 visible rows).
+ </para>
+ </sect2>
+
+</sect1>
diff --git a/doc/src/sgml/tsm-system-time.sgml b/doc/src/sgml/tsm-system-time.sgml
new file mode 100644
index 0000000000..2343ab16d4
--- /dev/null
+++ b/doc/src/sgml/tsm-system-time.sgml
@@ -0,0 +1,51 @@
+<!-- doc/src/sgml/tsm-system-time.sgml -->
+
+<sect1 id="tsm-system-time" xreflabel="tsm_system_time">
+ <title>tsm_system_time</title>
+
+ <indexterm zone="tsm-system-time">
+ <primary>tsm_system_time</primary>
+ </indexterm>
+
+ <para>
+ The <filename>tsm_system_time</> module provides the tablesample method
+ <literal>SYSTEM_TIME</literal>, which can be used inside the
+ <command>TABLESAMPLE</command> clause of a <command>SELECT</command>.
+ </para>
+
+ <para>
+ This tablesample method uses a linear probing algorithm to read a sample
+ of a table and uses time in milliseconds as the limit (unlike the
+ <literal>SYSTEM</literal> tablesample method, which limits by a percentage
+ of a table). This gives you some control over the length of execution
+ of your query.
+ </para>
+
+ <sect2>
+ <title>Examples</title>
+
+ <para>
+ Here is an example of selecting a sample of a table with
+ <literal>SYSTEM_TIME</>. First install the extension:
+ </para>
+
+<programlisting>
+CREATE EXTENSION tsm_system_time;
+</programlisting>
+
+ <para>
+ Then you can use it in a <command>SELECT</command> command the same way as
+ other tablesample methods:
+
+<programlisting>
+SELECT * FROM my_table TABLESAMPLE SYSTEM_TIME(1000);
+</programlisting>
+ </para>
+
+ <para>
+ The above command will return as large a sample of <literal>my_table</> as it
+ can read in 1 second (or less if it reads the whole table faster).
+ </para>
+ </sect2>
+
+</sect1>
diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml
index 9de7ccc258..9c15950ccd 100644
--- a/doc/src/sgml/xfunc.sgml
+++ b/doc/src/sgml/xfunc.sgml
@@ -1955,6 +1955,11 @@ memcpy(destination->data, buffer, 40);
<entry><filename>utils/nabstime.h</filename></entry>
</row>
<row>
+ <entry><type>bigint</type> (<type>int8</type>)</entry>
+ <entry><type>int64</type></entry>
+ <entry><filename>postgres.h</filename></entry>
+ </row>
+ <row>
<entry><type>boolean</type></entry>
<entry><type>bool</type></entry>
<entry><filename>postgres.h</filename> (maybe compiler built-in)</entry>