author     Pavan Deolasee  2015-07-28 06:43:48 +0000
committer  Pavan Deolasee  2015-09-14 05:38:25 +0000
commit     ff1010e0aa3b94815ed71657c8c3d0bba7aa42ae (patch)
tree       5f86d7feae9469aa77d3db2c9b85de7bf9f4a389
parent     57d93c30dc8f1b0deea9e6686a0dfa89854a44a3 (diff)
Remove doc-xc directory and supporting infrastructure to build XC docs
We now change the documentation in place and hence don't need this additional copy of the docs. The doc changes have already been rebased onto the original 9.5 docs in the previous commit.
-rw-r--r--  GNUmakefile.in | 18
-rw-r--r--  doc-xc/KNOWN_BUGS | 3
-rw-r--r--  doc-xc/MISSING_FEATURES | 3
-rw-r--r--  doc-xc/Makefile | 16
-rw-r--r--  doc-xc/README.mb.big5 | 326
-rw-r--r--  doc-xc/README.mb.jp | 814
-rw-r--r--  doc-xc/TODO | 3
-rw-r--r--  doc-xc/bug.template | 53
-rw-r--r--  doc-xc/src/Makefile | 8
-rw-r--r--  doc-xc/src/sgml/.gitignore | 33
-rw-r--r--  doc-xc/src/sgml/Makefile | 421
-rw-r--r--  doc-xc/src/sgml/Makefile.xc.wk | 407
-rw-r--r--  doc-xc/src/sgml/README.links | 45
-rw-r--r--  doc-xc/src/sgml/acronyms.sgmlin | 832
-rw-r--r--  doc-xc/src/sgml/add-node.sgmlin | 273
-rw-r--r--  doc-xc/src/sgml/adminpack.sgmlin | 47
-rw-r--r--  doc-xc/src/sgml/advanced.sgmlin | 929
-rw-r--r--  doc-xc/src/sgml/arch-dev.sgmlin | 1630
-rw-r--r--  doc-xc/src/sgml/array.sgmlin | 749
-rw-r--r--  doc-xc/src/sgml/auth-delay.sgmlin | 67
-rw-r--r--  doc-xc/src/sgml/auto-explain.sgmlin | 239
-rw-r--r--  doc-xc/src/sgml/backup.sgmlin | 1487
-rw-r--r--  doc-xc/src/sgml/biblio.sgmlin | 594
-rw-r--r--  doc-xc/src/sgml/bki.sgmlin | 371
-rw-r--r--  doc-xc/src/sgml/btree-gin.sgmlin | 59
-rw-r--r--  doc-xc/src/sgml/btree-gist.sgmlin | 131
-rw-r--r--  doc-xc/src/sgml/catalogs.sgmlin | 9076
-rw-r--r--  doc-xc/src/sgml/charset.sgmlin | 1716
-rw-r--r--  doc-xc/src/sgml/chkpass.sgmlin | 97
-rw-r--r--  doc-xc/src/sgml/citext.sgmlin | 313
-rw-r--r--  doc-xc/src/sgml/client-auth.sgmlin | 1938
-rw-r--r--  doc-xc/src/sgml/common.sgmlin | 14
-rw-r--r--  doc-xc/src/sgml/config.sgmlin | 7544
-rw-r--r--  doc-xc/src/sgml/contacts.sgmlin | 26
-rw-r--r--  doc-xc/src/sgml/contrib-spi.sgmlin | 237
-rw-r--r--  doc-xc/src/sgml/contrib.sgmlin | 152
-rw-r--r--  doc-xc/src/sgml/cube.sgmlin | 432
-rw-r--r--  doc-xc/src/sgml/datatype.sgmlin | 5067
-rw-r--r--  doc-xc/src/sgml/datetime.sgmlin | 633
-rw-r--r--  doc-xc/src/sgml/dblink.sgmlin | 2717
-rw-r--r--  doc-xc/src/sgml/ddl.sgmlin | 3837
-rw-r--r--  doc-xc/src/sgml/dfunc.sgmlin | 368
-rw-r--r--  doc-xc/src/sgml/dict-int.sgmlin | 94
-rw-r--r--  doc-xc/src/sgml/dict-xsyn.sgmlin | 158
-rw-r--r--  doc-xc/src/sgml/diskusage.sgmlin | 147
-rw-r--r--  doc-xc/src/sgml/dml.sgmlin | 278
-rw-r--r--  doc-xc/src/sgml/docguide.sgmlin | 1482
-rw-r--r--  doc-xc/src/sgml/dummy-seclabel.sgmlin | 74
-rw-r--r--  doc-xc/src/sgml/earthdistance.sgmlin | 201
-rw-r--r--  doc-xc/src/sgml/ecpg.sgmlin | 9736
-rw-r--r--  doc-xc/src/sgml/errcodes.sgmlin | 79
-rw-r--r--  doc-xc/src/sgml/extend.sgmlin | 1265
-rw-r--r--  doc-xc/src/sgml/external-projects.sgmlin | 284
-rw-r--r--  doc-xc/src/sgml/fdwhandler.sgmlin | 212
-rw-r--r--  doc-xc/src/sgml/features-supported.sgmlin | 2078
-rw-r--r--  doc-xc/src/sgml/features-unsupported.sgmlin | 1826
-rw-r--r--  doc-xc/src/sgml/features.sgmlin | 164
-rw-r--r--  doc-xc/src/sgml/file-fdw.sgmlin | 138
-rw-r--r--  doc-xc/src/sgml/filelist.sgmlin | 226
-rwxr-xr-x  doc-xc/src/sgml/fixrtf | 46
-rw-r--r--  doc-xc/src/sgml/func.sgmlin | 16285
-rw-r--r--  doc-xc/src/sgml/fuzzystrmatch.sgmlin | 220
-rw-r--r--  doc-xc/src/sgml/generate-errcodes-table.pl | 56
-rw-r--r--  doc-xc/src/sgml/generate_history.pl | 58
-rw-r--r--  doc-xc/src/sgml/geqo.sgmlin | 387
-rw-r--r--  doc-xc/src/sgml/gin.sgmlin | 560
-rw-r--r--  doc-xc/src/sgml/gist.sgmlin | 762
-rw-r--r--  doc-xc/src/sgml/high-availability.sgmlin | 2183
-rw-r--r--  doc-xc/src/sgml/history.sgmlin | 340
-rw-r--r--  doc-xc/src/sgml/hstore.sgmlin | 625
-rw-r--r--  doc-xc/src/sgml/indexam.sgmlin | 1133
-rw-r--r--  doc-xc/src/sgml/indices.sgmlin | 1323
-rw-r--r--  doc-xc/src/sgml/info.sgmlin | 145
-rw-r--r--  doc-xc/src/sgml/information_schema.sgmlin | 7490
-rw-r--r--  doc-xc/src/sgml/install-windows.sgmlin | 578
-rw-r--r--  doc-xc/src/sgml/installation.sgmlin | 4740
-rw-r--r--  doc-xc/src/sgml/intagg.sgmlin | 127
-rw-r--r--  doc-xc/src/sgml/intarray.sgmlin | 348
-rw-r--r--  doc-xc/src/sgml/intro.sgmlin | 821
-rw-r--r--  doc-xc/src/sgml/isn.sgmlin | 394
-rw-r--r--  doc-xc/src/sgml/jadetex.cfg | 30
-rw-r--r--  doc-xc/src/sgml/keywords.sgmlin | 6201
-rw-r--r--  doc-xc/src/sgml/legal.sgmlin | 456
-rw-r--r--  doc-xc/src/sgml/libpq.sgmlin | 8250
-rw-r--r--  doc-xc/src/sgml/lo.sgmlin | 146
-rw-r--r--  doc-xc/src/sgml/lobj.sgmlin | 757
-rw-r--r--  doc-xc/src/sgml/ltree.sgmlin | 700
-rw-r--r--  doc-xc/src/sgml/maintenance.sgmlin | 1129
-rw-r--r--  doc-xc/src/sgml/manage-ag.sgmlin | 613
-rw-r--r--  doc-xc/src/sgml/mk_feature_tables.pl | 58
-rw-r--r--  doc-xc/src/sgml/monitoring.sgmlin | 2067
-rw-r--r--  doc-xc/src/sgml/mvcc.sgmlin | 1872
-rw-r--r--  doc-xc/src/sgml/nls.sgmlin | 570
-rw-r--r--  doc-xc/src/sgml/notation.sgmlin | 114
-rw-r--r--  doc-xc/src/sgml/oid2name.sgmlin | 310
-rw-r--r--  doc-xc/src/sgml/pageinspect.sgmlin | 224
-rw-r--r--  doc-xc/src/sgml/passwordcheck.sgmlin | 64
-rw-r--r--  doc-xc/src/sgml/perform.sgmlin | 1382
-rw-r--r--  doc-xc/src/sgml/pgarchivecleanup.sgmlin | 187
-rw-r--r--  doc-xc/src/sgml/pgbench.sgmlin | 832
-rw-r--r--  doc-xc/src/sgml/pgbuffercache.sgmlin | 199
-rw-r--r--  doc-xc/src/sgml/pgcrypto.sgmlin | 1320
-rw-r--r--  doc-xc/src/sgml/pgfreespacemap.sgmlin | 146
-rw-r--r--  doc-xc/src/sgml/pgnotice.sgmlin | 16
-rw-r--r--  doc-xc/src/sgml/pgonly.sgmlin | 14
-rw-r--r--  doc-xc/src/sgml/pgrowlocks.sgmlin | 161
-rw-r--r--  doc-xc/src/sgml/pgstandby.sgmlin | 403
-rw-r--r--  doc-xc/src/sgml/pgstatstatements.sgmlin | 368
-rw-r--r--  doc-xc/src/sgml/pgstattuple.sgmlin | 295
-rw-r--r--  doc-xc/src/sgml/pgtestfsync.sgmlin | 74
-rw-r--r--  doc-xc/src/sgml/pgtrgm.sgmlin | 297
-rw-r--r--  doc-xc/src/sgml/pgupgrade.sgmlin | 577
-rw-r--r--  doc-xc/src/sgml/pgxc_ctl-ref.sgmlin | 1911
-rw-r--r--  doc-xc/src/sgml/pgxcclean.sgmlin | 211
-rw-r--r--  doc-xc/src/sgml/pgxcddl.sgmlin | 189
-rw-r--r--  doc-xc/src/sgml/pgxcmonitor.sgmlin | 165
-rw-r--r--  doc-xc/src/sgml/planstats.sgmlin | 454
-rw-r--r--  doc-xc/src/sgml/plhandler.sgmlin | 238
-rw-r--r--  doc-xc/src/sgml/plperl.sgmlin | 1425
-rw-r--r--  doc-xc/src/sgml/plpgsql.sgmlin | 5358
-rw-r--r--  doc-xc/src/sgml/plpython.sgmlin | 1287
-rw-r--r--  doc-xc/src/sgml/pltcl.sgmlin | 937
-rw-r--r--  doc-xc/src/sgml/postgres.sgmlin | 425
-rw-r--r--  doc-xc/src/sgml/problems.sgmlin | 603
-rw-r--r--  doc-xc/src/sgml/protocol.sgmlin | 4821
-rw-r--r--  doc-xc/src/sgml/queries.sgmlin | 2019
-rw-r--r--  doc-xc/src/sgml/query.sgmlin | 1036
-rw-r--r--  doc-xc/src/sgml/recovery-config.sgmlin | 493
-rw-r--r--  doc-xc/src/sgml/ref/abort.sgmlin | 111
-rw-r--r--  doc-xc/src/sgml/ref/allfiles.sgmlin | 243
-rw-r--r--  doc-xc/src/sgml/ref/alter_aggregate.sgmlin | 151
-rw-r--r--  doc-xc/src/sgml/ref/alter_collation.sgmlin | 128
-rw-r--r--  doc-xc/src/sgml/ref/alter_conversion.sgmlin | 130
-rw-r--r--  doc-xc/src/sgml/ref/alter_database.sgmlin | 252
-rw-r--r--  doc-xc/src/sgml/ref/alter_default_privileges.sgmlin | 214
-rw-r--r--  doc-xc/src/sgml/ref/alter_domain.sgmlin | 279
-rw-r--r--  doc-xc/src/sgml/ref/alter_extension.sgmlin | 291
-rw-r--r--  doc-xc/src/sgml/ref/alter_foreign_data_wrapper.sgmlin | 187
-rw-r--r--  doc-xc/src/sgml/ref/alter_foreign_table.sgmlin | 315
-rw-r--r--  doc-xc/src/sgml/ref/alter_function.sgmlin | 330
-rw-r--r--  doc-xc/src/sgml/ref/alter_group.sgmlin | 132
-rw-r--r--  doc-xc/src/sgml/ref/alter_index.sgmlin | 226
-rw-r--r--  doc-xc/src/sgml/ref/alter_language.sgmlin | 93
-rw-r--r--  doc-xc/src/sgml/ref/alter_large_object.sgmlin | 100
-rw-r--r--  doc-xc/src/sgml/ref/alter_node.sgmlin | 330
-rw-r--r--  doc-xc/src/sgml/ref/alter_opclass.sgmlin | 121
-rw-r--r--  doc-xc/src/sgml/ref/alter_operator.sgmlin | 131
-rw-r--r--  doc-xc/src/sgml/ref/alter_opfamily.sgmlin | 358
-rw-r--r--  doc-xc/src/sgml/ref/alter_role.sgmlin | 332
-rw-r--r--  doc-xc/src/sgml/ref/alter_schema.sgmlin | 102
-rw-r--r--  doc-xc/src/sgml/ref/alter_sequence.sgmlin | 312
-rw-r--r--  doc-xc/src/sgml/ref/alter_server.sgmlin | 144
-rw-r--r--  doc-xc/src/sgml/ref/alter_table.sgmlin | 1531
-rw-r--r--  doc-xc/src/sgml/ref/alter_tablespace.sgmlin | 137
-rw-r--r--  doc-xc/src/sgml/ref/alter_trigger.sgmlin | 133
-rw-r--r--  doc-xc/src/sgml/ref/alter_tsconfig.sgmlin | 191
-rw-r--r--  doc-xc/src/sgml/ref/alter_tsdictionary.sgmlin | 172
-rw-r--r--  doc-xc/src/sgml/ref/alter_tsparser.sgmlin | 95
-rw-r--r--  doc-xc/src/sgml/ref/alter_tstemplate.sgmlin | 93
-rw-r--r--  doc-xc/src/sgml/ref/alter_type.sgmlin | 339
-rw-r--r--  doc-xc/src/sgml/ref/alter_user.sgmlin | 84
-rw-r--r--  doc-xc/src/sgml/ref/alter_user_mapping.sgmlin | 145
-rw-r--r--  doc-xc/src/sgml/ref/alter_view.sgmlin | 157
-rw-r--r--  doc-xc/src/sgml/ref/analyze.sgmlin | 212
-rw-r--r--  doc-xc/src/sgml/ref/begin.sgmlin | 178
-rw-r--r--  doc-xc/src/sgml/ref/checkpoint.sgmlin | 80
-rw-r--r--  doc-xc/src/sgml/ref/clean_connection.sgmlin | 144
-rw-r--r--  doc-xc/src/sgml/ref/close.sgmlin | 135
-rw-r--r--  doc-xc/src/sgml/ref/cluster.sgmlin | 260
-rw-r--r--  doc-xc/src/sgml/ref/clusterdb.sgmlin | 311
-rw-r--r--  doc-xc/src/sgml/ref/comment.sgmlin | 309
-rw-r--r--  doc-xc/src/sgml/ref/commit.sgmlin | 97
-rw-r--r--  doc-xc/src/sgml/ref/commit_prepared.sgmlin | 126
-rw-r--r--  doc-xc/src/sgml/ref/copy.sgmlin | 1006
-rw-r--r--  doc-xc/src/sgml/ref/create_aggregate.sgmlin | 675
-rw-r--r--  doc-xc/src/sgml/ref/create_barrier.sgmlin | 113
-rw-r--r--  doc-xc/src/sgml/ref/create_cast.sgmlin | 458
-rw-r--r--  doc-xc/src/sgml/ref/create_collation.sgmlin | 177
-rw-r--r--  doc-xc/src/sgml/ref/create_conversion.sgmlin | 182
-rw-r--r--  doc-xc/src/sgml/ref/create_database.sgmlin | 340
-rw-r--r--  doc-xc/src/sgml/ref/create_domain.sgmlin | 230
-rw-r--r--  doc-xc/src/sgml/ref/create_extension.sgmlin | 205
-rw-r--r--  doc-xc/src/sgml/ref/create_foreign_data_wrapper.sgmlin | 211
-rw-r--r--  doc-xc/src/sgml/ref/create_foreign_table.sgmlin | 197
-rw-r--r--  doc-xc/src/sgml/ref/create_function.sgmlin | 837
-rw-r--r--  doc-xc/src/sgml/ref/create_group.sgmlin | 72
-rw-r--r--  doc-xc/src/sgml/ref/create_index.sgmlin | 775
-rw-r--r--  doc-xc/src/sgml/ref/create_language.sgmlin | 388
-rw-r--r--  doc-xc/src/sgml/ref/create_node.sgmlin | 393
-rw-r--r--  doc-xc/src/sgml/ref/create_nodegroup.sgmlin | 196
-rw-r--r--  doc-xc/src/sgml/ref/create_opclass.sgmlin | 332
-rw-r--r--  doc-xc/src/sgml/ref/create_operator.sgmlin | 310
-rw-r--r--  doc-xc/src/sgml/ref/create_opfamily.sgmlin | 128
-rw-r--r--  doc-xc/src/sgml/ref/create_role.sgmlin | 529
-rw-r--r--  doc-xc/src/sgml/ref/create_rule.sgmlin | 310
-rw-r--r--  doc-xc/src/sgml/ref/create_schema.sgmlin | 218
-rw-r--r--  doc-xc/src/sgml/ref/create_sequence.sgmlin | 366
-rw-r--r--  doc-xc/src/sgml/ref/create_server.sgmlin | 182
-rw-r--r--  doc-xc/src/sgml/ref/create_table.sgmlin | 2117
-rw-r--r--  doc-xc/src/sgml/ref/create_table_as.sgmlin | 613
-rw-r--r--  doc-xc/src/sgml/ref/create_tablespace.sgmlin | 196
-rw-r--r--  doc-xc/src/sgml/ref/create_trigger.sgmlin | 583
-rw-r--r--  doc-xc/src/sgml/ref/create_tsconfig.sgmlin | 128
-rw-r--r--  doc-xc/src/sgml/ref/create_tsdictionary.sgmlin | 143
-rw-r--r--  doc-xc/src/sgml/ref/create_tsparser.sgmlin | 155
-rw-r--r--  doc-xc/src/sgml/ref/create_tstemplate.sgmlin | 128
-rw-r--r--  doc-xc/src/sgml/ref/create_type.sgmlin | 887
-rw-r--r--  doc-xc/src/sgml/ref/create_user.sgmlin | 88
-rw-r--r--  doc-xc/src/sgml/ref/create_user_mapping.sgmlin | 140
-rw-r--r--  doc-xc/src/sgml/ref/create_view.sgmlin | 311
-rw-r--r--  doc-xc/src/sgml/ref/createdb.sgmlin | 412
-rw-r--r--  doc-xc/src/sgml/ref/createlang.sgmlin | 322
-rw-r--r--  doc-xc/src/sgml/ref/createuser.sgmlin | 488
-rw-r--r--  doc-xc/src/sgml/ref/deallocate.sgmlin | 99
-rw-r--r--  doc-xc/src/sgml/ref/declare.sgmlin | 411
-rw-r--r--  doc-xc/src/sgml/ref/delete.sgmlin | 361
-rw-r--r--  doc-xc/src/sgml/ref/discard.sgmlin | 111
-rw-r--r--  doc-xc/src/sgml/ref/do.sgmlin | 130
-rw-r--r--  doc-xc/src/sgml/ref/drop_aggregate.sgmlin | 126
-rw-r--r--  doc-xc/src/sgml/ref/drop_cast.sgmlin | 117
-rw-r--r--  doc-xc/src/sgml/ref/drop_collation.sgmlin | 110
-rw-r--r--  doc-xc/src/sgml/ref/drop_conversion.sgmlin | 115
-rw-r--r--  doc-xc/src/sgml/ref/drop_database.sgmlin | 123
-rw-r--r--  doc-xc/src/sgml/ref/drop_domain.sgmlin | 125
-rw-r--r--  doc-xc/src/sgml/ref/drop_extension.sgmlin | 121
-rw-r--r--  doc-xc/src/sgml/ref/drop_foreign_data_wrapper.sgmlin | 133
-rw-r--r--  doc-xc/src/sgml/ref/drop_foreign_table.sgmlin | 112
-rw-r--r--  doc-xc/src/sgml/ref/drop_function.sgmlin | 159
-rw-r--r--  doc-xc/src/sgml/ref/drop_group.sgmlin | 55
-rw-r--r--  doc-xc/src/sgml/ref/drop_index.sgmlin | 122
-rw-r--r--  doc-xc/src/sgml/ref/drop_language.sgmlin | 128
-rw-r--r--  doc-xc/src/sgml/ref/drop_node.sgmlin | 168
-rw-r--r--  doc-xc/src/sgml/ref/drop_nodegroup.sgmlin | 162
-rw-r--r--  doc-xc/src/sgml/ref/drop_opclass.sgmlin | 149
-rw-r--r--  doc-xc/src/sgml/ref/drop_operator.sgmlin | 149
-rw-r--r--  doc-xc/src/sgml/ref/drop_opfamily.sgmlin | 138
-rw-r--r--  doc-xc/src/sgml/ref/drop_owned.sgmlin | 128
-rw-r--r--  doc-xc/src/sgml/ref/drop_role.sgmlin | 141
-rw-r--r--  doc-xc/src/sgml/ref/drop_rule.sgmlin | 121
-rw-r--r--  doc-xc/src/sgml/ref/drop_schema.sgmlin | 133
-rw-r--r--  doc-xc/src/sgml/ref/drop_sequence.sgmlin | 116
-rw-r--r--  doc-xc/src/sgml/ref/drop_server.sgmlin | 133
-rw-r--r--  doc-xc/src/sgml/ref/drop_table.sgmlin | 139
-rw-r--r--  doc-xc/src/sgml/ref/drop_tablespace.sgmlin | 134
-rw-r--r--  doc-xc/src/sgml/ref/drop_trigger.sgmlin | 145
-rw-r--r--  doc-xc/src/sgml/ref/drop_tsconfig.sgmlin | 121
-rw-r--r--  doc-xc/src/sgml/ref/drop_tsdictionary.sgmlin | 120
-rw-r--r--  doc-xc/src/sgml/ref/drop_tsparser.sgmlin | 118
-rw-r--r--  doc-xc/src/sgml/ref/drop_tstemplate.sgmlin | 117
-rw-r--r--  doc-xc/src/sgml/ref/drop_type.sgmlin | 133
-rw-r--r--  doc-xc/src/sgml/ref/drop_user.sgmlin | 65
-rw-r--r--  doc-xc/src/sgml/ref/drop_user_mapping.sgmlin | 131
-rw-r--r--  doc-xc/src/sgml/ref/drop_view.sgmlin | 125
-rw-r--r--  doc-xc/src/sgml/ref/dropdb.sgmlin | 272
-rw-r--r--  doc-xc/src/sgml/ref/droplang.sgmlin | 320
-rw-r--r--  doc-xc/src/sgml/ref/dropuser.sgmlin | 313
-rw-r--r--  doc-xc/src/sgml/ref/ecpg-ref.sgmlin | 265
-rw-r--r--  doc-xc/src/sgml/ref/end.sgmlin | 101
-rw-r--r--  doc-xc/src/sgml/ref/execute.sgmlin | 122
-rw-r--r--  doc-xc/src/sgml/ref/execute_direct.sgmlin | 121
-rw-r--r--  doc-xc/src/sgml/ref/explain.sgmlin | 468
-rw-r--r--  doc-xc/src/sgml/ref/fetch.sgmlin | 583
-rw-r--r--  doc-xc/src/sgml/ref/grant.sgmlin | 672
-rw-r--r--  doc-xc/src/sgml/ref/gtm.sgmlin | 420
-rw-r--r--  doc-xc/src/sgml/ref/gtm_ctl.sgmlin | 241
-rw-r--r--  doc-xc/src/sgml/ref/gtm_proxy.sgmlin | 360
-rw-r--r--  doc-xc/src/sgml/ref/initdb.sgmlin | 443
-rw-r--r--  doc-xc/src/sgml/ref/initgtm.sgmlin | 405
-rw-r--r--  doc-xc/src/sgml/ref/insert.sgmlin | 328
-rw-r--r--  doc-xc/src/sgml/ref/listen.sgmlin | 154
-rw-r--r--  doc-xc/src/sgml/ref/load.sgmlin | 103
-rw-r--r--  doc-xc/src/sgml/ref/lock.sgmlin | 249
-rw-r--r--  doc-xc/src/sgml/ref/move.sgmlin | 126
-rw-r--r--  doc-xc/src/sgml/ref/notify.sgmlin | 247
-rw-r--r--  doc-xc/src/sgml/ref/pause_cluster.sgmlin | 70
-rw-r--r--  doc-xc/src/sgml/ref/pg_basebackup.sgmlin | 437
-rw-r--r--  doc-xc/src/sgml/ref/pg_config-ref.sgmlin | 458
-rw-r--r--  doc-xc/src/sgml/ref/pg_controldata.sgmlin | 92
-rw-r--r--  doc-xc/src/sgml/ref/pg_ctl-ref.sgml.jp | 596
-rw-r--r--  doc-xc/src/sgml/ref/pg_ctl-ref.sgml.org | 597
-rw-r--r--  doc-xc/src/sgml/ref/pg_ctl-ref.sgmlin | 663
-rw-r--r--  doc-xc/src/sgml/ref/pg_dump.sgmlin | 1108
-rw-r--r--  doc-xc/src/sgml/ref/pg_dumpall.sgmlin | 617
-rw-r--r--  doc-xc/src/sgml/ref/pg_resetxlog.sgmlin | 226
-rw-r--r--  doc-xc/src/sgml/ref/pg_restore.sgmlin | 838
-rw-r--r--  doc-xc/src/sgml/ref/pgxc_clean-ref.sgmlin | 226
-rw-r--r--  doc-xc/src/sgml/ref/pgxc_ddl.sgmlin | 313
-rw-r--r--  doc-xc/src/sgml/ref/postgres-ref.sgmlin | 1014
-rw-r--r--  doc-xc/src/sgml/ref/postmaster.sgmlin | 54
-rw-r--r--  doc-xc/src/sgml/ref/prepare.sgmlin | 205
-rw-r--r--  doc-xc/src/sgml/ref/prepare_transaction.sgmlin | 201
-rw-r--r--  doc-xc/src/sgml/ref/psql-ref.sgmlin | 3604
-rw-r--r--  doc-xc/src/sgml/ref/reassign_owned.sgmlin | 132
-rw-r--r--  doc-xc/src/sgml/ref/reindex.sgmlin | 295
-rw-r--r--  doc-xc/src/sgml/ref/reindexdb.sgmlin | 382
-rw-r--r--  doc-xc/src/sgml/ref/release_savepoint.sgmlin | 149
-rw-r--r--  doc-xc/src/sgml/ref/reset.sgmlin | 124
-rw-r--r--  doc-xc/src/sgml/ref/revoke.sgmlin | 298
-rw-r--r--  doc-xc/src/sgml/ref/rollback.sgmlin | 99
-rw-r--r--  doc-xc/src/sgml/ref/rollback_prepared.sgmlin | 132
-rw-r--r--  doc-xc/src/sgml/ref/rollback_to.sgmlin | 177
-rw-r--r--  doc-xc/src/sgml/ref/savepoint.sgmlin | 155
-rw-r--r--  doc-xc/src/sgml/ref/security_label.sgmlin | 206
-rw-r--r--  doc-xc/src/sgml/ref/select.sgmlin | 1707
-rw-r--r--  doc-xc/src/sgml/ref/select_into.sgmlin | 176
-rw-r--r--  doc-xc/src/sgml/ref/set.sgmlin | 347
-rw-r--r--  doc-xc/src/sgml/ref/set_constraints.sgmlin | 125
-rw-r--r--  doc-xc/src/sgml/ref/set_role.sgmlin | 150
-rw-r--r--  doc-xc/src/sgml/ref/set_session_auth.sgmlin | 128
-rw-r--r--  doc-xc/src/sgml/ref/set_transaction.sgmlin | 215
-rw-r--r--  doc-xc/src/sgml/ref/show.sgmlin | 227
-rw-r--r--  doc-xc/src/sgml/ref/start_transaction.sgmlin | 115
-rw-r--r--  doc-xc/src/sgml/ref/truncate.sgmlin | 233
-rw-r--r--  doc-xc/src/sgml/ref/unlisten.sgmlin | 152
-rw-r--r--  doc-xc/src/sgml/ref/unpause_cluster.sgmlin | 55
-rw-r--r--  doc-xc/src/sgml/ref/update.sgmlin | 461
-rw-r--r--  doc-xc/src/sgml/ref/vacuum.sgmlin | 319
-rw-r--r--  doc-xc/src/sgml/ref/vacuumdb.sgmlin | 430
-rw-r--r--  doc-xc/src/sgml/ref/values.sgmlin | 267
-rw-r--r--  doc-xc/src/sgml/reference.sgmlin | 360
-rw-r--r--  doc-xc/src/sgml/regress.sgmlin | 704
-rw-r--r--  doc-xc/src/sgml/release-7.4.sgmlin | 4622
-rw-r--r--  doc-xc/src/sgml/release-8.0.sgmlin | 5421
-rw-r--r--  doc-xc/src/sgml/release-8.1.sgmlin | 5444
-rw-r--r--  doc-xc/src/sgml/release-8.2.sgmlin | 6410
-rw-r--r--  doc-xc/src/sgml/release-8.3.sgmlin | 6433
-rw-r--r--  doc-xc/src/sgml/release-8.4.sgmlin | 5959
-rw-r--r--  doc-xc/src/sgml/release-9.0.sgmlin | 4156
-rw-r--r--  doc-xc/src/sgml/release-9.1.sgmlin | 2811
-rw-r--r--  doc-xc/src/sgml/release-old.sgmlin | 6657
-rw-r--r--  doc-xc/src/sgml/release-xc-1.0.sgmlin | 1214
-rw-r--r--  doc-xc/src/sgml/release-xl-9.2.sgmlin | 527
-rw-r--r--  doc-xc/src/sgml/release.sgmlin | 127
-rw-r--r--  doc-xc/src/sgml/remove-node.sgmlin | 160
-rw-r--r--  doc-xc/src/sgml/rowtypes.sgmlin | 332
-rw-r--r--  doc-xc/src/sgml/rules.sgmlin | 2212
-rw-r--r--  doc-xc/src/sgml/runtime.sgmlin | 3897
-rw-r--r--  doc-xc/src/sgml/seg.sgmlin | 404
-rw-r--r--  doc-xc/src/sgml/sepgsql.sgmlin | 552
-rw-r--r--  doc-xc/src/sgml/sourcerepo.sgmlin | 268
-rw-r--r--  doc-xc/src/sgml/sources.sgmlin | 792
-rw-r--r--  doc-xc/src/sgml/spi.sgmlin | 4107
-rw-r--r--  doc-xc/src/sgml/sql.sgmlin | 2195
-rw-r--r--  doc-xc/src/sgml/sslinfo.sgmlin | 213
-rw-r--r--  doc-xc/src/sgml/standalone-install.sgmlin | 42
-rw-r--r--  doc-xc/src/sgml/start.sgmlin | 891
-rw-r--r--  doc-xc/src/sgml/storage.sgmlin | 882
-rw-r--r--  doc-xc/src/sgml/stylesheet-common.xsl | 88
-rw-r--r--  doc-xc/src/sgml/stylesheet-fo.xsl | 9
-rw-r--r--  doc-xc/src/sgml/stylesheet-hh.xsl | 35
-rw-r--r--  doc-xc/src/sgml/stylesheet-man.xsl | 203
-rw-r--r--  doc-xc/src/sgml/stylesheet.css | 96
-rw-r--r--  doc-xc/src/sgml/stylesheet.dsl | 871
-rw-r--r--  doc-xc/src/sgml/stylesheet.xsl | 35
-rw-r--r--  doc-xc/src/sgml/syntax.sgmlin | 2670
-rw-r--r--  doc-xc/src/sgml/tablefunc.sgmlin | 822
-rw-r--r--  doc-xc/src/sgml/test-parser.sgmlin | 93
-rw-r--r--  doc-xc/src/sgml/textsearch.sgmlin | 3962
-rw-r--r--  doc-xc/src/sgml/trigger.sgmlin | 915
-rw-r--r--  doc-xc/src/sgml/tsearch2.sgmlin | 207
-rw-r--r--  doc-xc/src/sgml/typeconv.sgmlin | 1116
-rw-r--r--  doc-xc/src/sgml/unaccent.sgmlin | 170
-rw-r--r--  doc-xc/src/sgml/user-manag.sgmlin | 497
-rw-r--r--  doc-xc/src/sgml/uuid-ossp.sgmlin | 178
-rw-r--r--  doc-xc/src/sgml/vacuumlo.sgmlin | 158
-rw-r--r--  doc-xc/src/sgml/version.sgmlin | 2
-rw-r--r--  doc-xc/src/sgml/wal.sgmlin | 905
-rw-r--r--  doc-xc/src/sgml/xaggr.sgmlin | 439
-rw-r--r--  doc-xc/src/sgml/xc-constraint.sgmlin | 14
-rw-r--r--  doc-xc/src/sgml/xconly.sgmlin | 7
-rw-r--r--  doc-xc/src/sgml/xfunc.sgmlin | 3610
-rw-r--r--  doc-xc/src/sgml/xindex.sgmlin | 1132
-rw-r--r--  doc-xc/src/sgml/xlonly.sgmlin | 7
-rw-r--r--  doc-xc/src/sgml/xml2.sgmlin | 491
-rw-r--r--  doc-xc/src/sgml/xoper.sgmlin | 532
-rw-r--r--  doc-xc/src/sgml/xplang.sgmlin | 274
-rw-r--r--  doc-xc/src/sgml/xtypes.sgmlin | 312
-rw-r--r--  src/Makefile | 1
-rw-r--r--  src/bin/psql/Makefile | 7
-rw-r--r--  src/pgxc/Makefile | 17
-rw-r--r--  src/pgxc/tools/Makefile | 18
-rw-r--r--  src/pgxc/tools/makesgml/.gitignore | 1
-rw-r--r--  src/pgxc/tools/makesgml/Makefile | 37
-rw-r--r--  src/pgxc/tools/makesgml/README | 63
-rw-r--r--  src/pgxc/tools/makesgml/makesgml.c | 379
385 files changed, 10 insertions(+), 296261 deletions(-)
diff --git a/GNUmakefile.in b/GNUmakefile.in
index 8bbd1cb83d..15fba9fce0 100644
--- a/GNUmakefile.in
+++ b/GNUmakefile.in
@@ -14,9 +14,9 @@ all:
+@echo "All of PostgreSQL successfully made. Ready to install."
docs:
- $(MAKE) -C doc-xc all
+ $(MAKE) -C doc all
-$(call recurse,world,doc-xc src config contrib,all)
+$(call recurse,world,doc src config contrib,all)
world:
+@echo "PostgreSQL, contrib, and documentation successfully made. Ready to install."
@@ -24,28 +24,28 @@ world:
world-contrib-recurse: world-src-recurse
html man:
- $(MAKE) -C doc-xc $@
+ $(MAKE) -C doc $@
install:
+@echo "PostgreSQL installation complete."
install-docs:
- $(MAKE) -C doc-xc install
+ $(MAKE) -C doc install
-$(call recurse,install-world,doc-xc src config contrib,install)
+$(call recurse,install-world,doc src config contrib,install)
install-world:
+@echo "PostgreSQL, contrib, and documentation installation complete."
# build src/ before contrib/
install-world-contrib-recurse: install-world-src-recurse
-$(call recurse,installdirs uninstall coverage init-po update-po,doc-xc src config)
+$(call recurse,installdirs uninstall coverage init-po update-po,doc src config)
-$(call recurse,distprep,doc-xc src config contrib)
+$(call recurse,distprep,doc src config contrib)
# clean, distclean, etc should apply to contrib too, even though
# it's not built by default
-$(call recurse,clean,doc-xc contrib src config)
+$(call recurse,clean,doc contrib src config)
clean:
rm -rf tmp_install/
# Garbage from autoconf:
@@ -54,7 +54,7 @@ clean:
# Important: distclean `src' last, otherwise Makefile.global
# will be gone too soon.
distclean maintainer-clean:
- $(MAKE) -C doc-xc $@
+ $(MAKE) -C doc $@
$(MAKE) -C contrib $@
$(MAKE) -C config $@
$(MAKE) -C src $@
diff --git a/doc-xc/KNOWN_BUGS b/doc-xc/KNOWN_BUGS
deleted file mode 100644
index 44dd8812b7..0000000000
--- a/doc-xc/KNOWN_BUGS
+++ /dev/null
@@ -1,3 +0,0 @@
-PostgreSQL has a single combined bugs, missing features, and todo list
-simply called TODO, in this directory. A current copy is always
-available on our web site.
diff --git a/doc-xc/MISSING_FEATURES b/doc-xc/MISSING_FEATURES
deleted file mode 100644
index 44dd8812b7..0000000000
--- a/doc-xc/MISSING_FEATURES
+++ /dev/null
@@ -1,3 +0,0 @@
-PostgreSQL has a single combined bugs, missing features, and todo list
-simply called TODO, in this directory. A current copy is always
-available on our web site.
diff --git a/doc-xc/Makefile b/doc-xc/Makefile
deleted file mode 100644
index 2e5e09ef88..0000000000
--- a/doc-xc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-#----------------------------------------------------------------------------
-#
-# PostgreSQL documentation top-level makefile
-#
-# Copyright (c) 1994, Regents of the University of California
-#
-# doc/Makefile
-#
-#----------------------------------------------------------------------------
-
-subdir = doc
-top_builddir = ..
-include $(top_builddir)/src/Makefile.global
-
-all distprep html man install installdirs uninstall clean distclean maintainer-clean maintainer-check:
- $(MAKE) -C src $@
diff --git a/doc-xc/README.mb.big5 b/doc-xc/README.mb.big5
deleted file mode 100644
index 529af95591..0000000000
--- a/doc-xc/README.mb.big5
+++ /dev/null
@@ -1,326 +0,0 @@
-PostgreSQL 7.0.1 multi-byte (MB) support README May 20 2000
-
- Tatsuo Ishii
- http://www.sra.co.jp/people/t-ishii/PostgreSQL/
-
-[Translator's note (Big5 text unrecoverable): 1. Thanks to Tatsuo Ishii!
- 2. The translation is not guaranteed; please report errors to [email protected]]
-
-
-0. Introduction
-
-MB support is intended to let PostgreSQL handle multi-byte characters,
-for example EUC (Extended Unix Code), Unicode and Mule internal code
-(the multilingual encoding). With MB support you can use multi-byte
-characters in regular expressions (regexp), LIKE and some other
-functions. The default encoding system is decided by initdb(1) when
-PostgreSQL is installed, and it can be overridden by createdb(1) or by
-the SQL command that creates a database, so you can have databases
-with different encoding systems.
-
-MB support also solves some problems concerning 8-bit single-byte
-character sets (including ISO-8859-1). (I do not claim that every such
-problem has been solved; I only confirmed that the regression test
-runs successfully and that some French characters can be used under
-MB. If you find any problem while using 8-bit characters, please let
-me know.)
-
-1. How to use
-
-Before compiling PostgreSQL, run configure with the multibyte option:
-
- % ./configure --enable-multibyte[=encoding_system]
-
-where the encoding system can be one of the following:
-
- SQL_ASCII ASCII
- EUC_JP Japanese EUC
- EUC_CN Chinese EUC
- EUC_KR Korean EUC
- EUC_TW Taiwan EUC
- UNICODE Unicode(UTF-8)
- MULE_INTERNAL Mule internal
- LATIN1 ISO 8859-1 English and some European languages
- LATIN2 ISO 8859-2 English and some European languages
- LATIN3 ISO 8859-3 English and some European languages
- LATIN4 ISO 8859-4 English and some European languages
- LATIN5 ISO 8859-5 English and some European languages
- KOI8 KOI8-R
- WIN Windows CP1251
- ALT Windows CP866
-
-For example:
-
- % ./configure --enable-multibyte=EUC_JP
-
-If the encoding system is omitted, the default is SQL_ASCII.
-
-2. How to set the encoding
-
-The initdb command defines the default encoding of a PostgreSQL
-installation. For example:
-
- % initdb -E EUC_JP
-
-sets the default encoding to EUC_JP (Extended Unix Code for Japanese).
-You can write "--encoding" instead of "-E" if you prefer the longer
-option string. If neither -E nor --encoding is given, the default is
-whatever was chosen at compile time.
-
-You can create databases with different encodings:
-
- % createdb -E EUC_KR korean
-
-This command creates a database named "korean" that uses the EUC_KR
-encoding. Another way to accomplish this is with an SQL command:
-
- CREATE DATABASE korean WITH ENCODING = 'EUC_KR';
-
-The pg_database system catalog has an "encoding" column that records
-the encoding of each database. You can list the databases and their
-encodings with psql -l or with the \l command inside psql:
-
-$ psql -l
- List of databases
- Database | Owner | Encoding
----------------+---------+---------------
- euc_cn | t-ishii | EUC_CN
- euc_jp | t-ishii | EUC_JP
- euc_kr | t-ishii | EUC_KR
- euc_tw | t-ishii | EUC_TW
- mule_internal | t-ishii | MULE_INTERNAL
- regression | t-ishii | SQL_ASCII
- template1 | t-ishii | EUC_JP
- test | t-ishii | EUC_JP
- unicode | t-ishii | UNICODE
-(9 rows)
-
-3. Automatic encoding translation between frontend and backend
-
-[Translator's note: the "frontend" generally means a client program --
-the psql interpreter, a C program using libpq, a Perl program, or an
-application connecting through ODBC; the "backend" is the PostgreSQL
-database server program.]
-
-PostgreSQL supports automatic encoding translation between frontend
-and backend for certain encoding pairs: [Translator's note: the
-translation happens when the encodings declared for the frontend and
-backend differ; as long as PostgreSQL supports translation between
-those two encodings, it converts the data before sending it.]
-
- encoding of backend available encoding of frontend
- --------------------------------------------------------------------
- EUC_JP EUC_JP, SJIS
-
- EUC_TW EUC_TW, BIG5
-
- LATIN2 LATIN2, WIN1250
-
- LATIN5 LATIN5, WIN, ALT
-
- MULE_INTERNAL EUC_JP, SJIS, EUC_KR, EUC_CN,
- EUC_TW, BIG5, LATIN1 to LATIN5,
- WIN, ALT, WIN1250
-
-Before starting the automatic encoding translation, you must tell
-PostgreSQL which encoding you want to use on the frontend. There are
-several ways to accomplish this:
-
-o using the \encoding command in the psql interpreter
-
-The \encoding command lets you switch the frontend encoding on the
-fly; for example, to change the frontend encoding to SJIS, type:
-
- \encoding SJIS
-
-o using libpq functions [Translator's note: libpq is the C API
- library for the PostgreSQL database]
-
-psql's \encoding command actually just calls the PQsetClientEncoding()
-function to do its job.
-
- int PQsetClientEncoding(PGconn *conn, const char *encoding)
-
-Here conn is a connection to the backend, and encoding is the encoding
-you want to use. The function returns 0 if the encoding was set
-successfully, and -1 on failure. The encoding currently in use for a
-connection can be queried with:
-
- int PQclientEncoding(const PGconn *conn)
-
-Note that this function returns the encoding id (an integer), not the
-encoding name string (such as "EUC_JP"). To get the encoding name for
-an encoding id, call:
-
-char *pg_encoding_to_char(int encoding_id)
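
A minimal round trip of the three libpq calls described above: set the client encoding, read its id back, and map the id to a name. This is an editor's sketch, not part of the original README; the connection string "dbname=postgres" and the choice of EUC_JP are illustrative assumptions, and it presumes a libpq version whose header exposes pg_encoding_to_char() with the prototype quoted above.

    /* client_encoding.c -- exercise the libpq calls described above.
     * Build (paths may vary): cc client_encoding.c -lpq
     */
    #include <stdio.h>
    #include <libpq-fe.h>

    int
    main(void)
    {
        /* connection parameters are illustrative assumptions */
        PGconn *conn = PQconnectdb("dbname=postgres");

        if (PQstatus(conn) != CONNECTION_OK)
        {
            fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
            PQfinish(conn);
            return 1;
        }

        /* returns 0 on success, -1 on failure, as stated above */
        if (PQsetClientEncoding(conn, "EUC_JP") != 0)
            fprintf(stderr, "cannot set encoding: %s", PQerrorMessage(conn));

        /* PQclientEncoding() yields a numeric encoding id ... */
        int enc = PQclientEncoding(conn);

        /* ... which pg_encoding_to_char() maps back to a name */
        printf("client encoding: %s (id %d)\n",
               pg_encoding_to_char(enc), enc);

        PQfinish(conn);
        return 0;
    }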
-
-o using the PGCLIENTENCODING environment variable
-
-If the environment variable PGCLIENTENCODING is set on the frontend,
-the backend performs the automatic encoding translation accordingly.
-
-[Translator's note: PostgreSQL 7.0.0 through 7.0.3 have a bug -- they
-do not recognize this environment variable.]
-
-o using the SET CLIENT_ENCODING TO SQL command
-
-The frontend encoding can be set with the following SQL command:
-
- SET CLIENT_ENCODING TO 'encoding';
-
-You can also use the SQL92 syntax "SET NAMES" for the same purpose:
-
- SET NAMES 'encoding';
-
-To query the current frontend encoding, use this SQL command:
-
- SHOW CLIENT_ENCODING;
-
-To return to the default encoding, use this SQL command:
-
- RESET CLIENT_ENCODING;
-
-[Translator's note: inside the psql interpreter, it is better not to
- use this method; use \encoding instead.]
-
-4. About Unicode
-
-Automatic translation between Unicode and other encodings will
-probably be implemented in 7.1 or later.
-
-5. What happens if the translation is not possible?
-
-Suppose you chose EUC_JP for the backend and LATIN1 for the frontend
-(some Japanese characters cannot be translated into LATIN1). In such a
-case, a character that cannot be represented in the LATIN1 character
-set is transformed into a hexadecimal representation, for example:
-
- (bdae)
-
-6. References
-
-These are good sources to start learning about various kinds of
-encoding systems.
-
-ftp://ftp.ora.com/pub/examples/nutshell/ujip/doc/cjk.inf
- Detailed explanations of EUC_JP, EUC_CN, EUC_KR, EUC_TW
- appear in section 3.2.
-
-Unicode: http://www.unicode.org/
- The homepage of UNICODE.
-
- RFC 3629
- UTF-8 is defined here.
-
-7. History
-
-May 20, 2000
- * SJIS UDC (NEC selection IBM kanji) support contributed
- by Eiji Tokuya
- * Changes above will appear in 7.0.1
-
-Mar 22, 2000
- * Add new libpq functions PQsetClientEncoding, PQclientEncoding
- * ./configure --with-mb=EUC_JP
- now deprecated. use
- ./configure --enable-multibyte=EUC_JP
- instead
- * Add SQL_ASCII regression test case
- * Add SJIS User Defined Character (UDC) support
- * All of above will appear in 7.0
-
-July 11, 1999
- * Add support for WIN1250 (Windows Czech) as a client encoding
- (contributed by Pavel Behal)
- * fix some compiler warnings (contributed by Tomoaki Nishiyama)
-
-Mar 23, 1999
- * Add support for KOI8(KOI8-R), WIN(CP1251), ALT(CP866)
- (thanks Oleg Broytmann for testing)
- * Fix problem with MB and locale
-
-Jan 26, 1999
- * Add support for Big5 as a frontend encoding
- (you need to create a database with EUC_TW to use Big5)
- * Add regression test case for EUC_TW
- (contributed by Jonah Kuo <[email protected]>)
-
-Dec 15, 1998
- * Bugs related to SQL_ASCII support fixed
-
-Nov 5, 1998
- * 6.4 release. In this version, pg_database has "encoding"
- column that represents the database encoding
-
-Jul 22, 1998
- * determine encoding at initdb/createdb rather than compile time
- * support for PGCLIENTENCODING when issuing COPY command
- * support for SQL92 syntax "SET NAMES"
- * support for LATIN2-5
- * add UNICODE regression test case
- * new test suite for MB
- * clean up source files
-
-Jun 5, 1998
- * add support for the encoding translation between the backend
- and the frontend
- * new command SET CLIENT_ENCODING etc. added
- * add support for LATIN1 character set
- * enhance 8-bit cleanness
-
-April 21, 1998 some enhancements/fixes
- * character_length(), position(), substring() are now aware of
- multi-byte characters
- * add octet_length()
- * add --with-mb option to configure
- * new regression tests for EUC_KR
- (contributed by "Soonmyung. Hong" <[email protected]>)
- * add some test cases to the EUC_JP regression test
- * fix problem in regress/regress.sh in case of System V
- * fix toupper(), tolower() to handle 8bit chars
-
-Mar 25, 1998 MB PL2 is incorporated into PostgreSQL 6.3.1
-
-Mar 10, 1998 PL2 released
- * add regression test for EUC_JP, EUC_CN and MULE_INTERNAL
- * add an English document (this file)
- * fix problems concerning 8-bit single byte characters
-
-Mar 1, 1998 PL1 released
-
-Appendix:
-
-[Here is a good document from Pavel Behal explaining how to use
-WIN1250 on Windows/ODBC. Please note that Installation step 1)
-is not necessary in 6.5.1 -- Tatsuo]
-
-Version: 0.91 for PgSQL 6.5
-Author: Pavel Behal
-Revised by: Tatsuo Ishii
-Licence: The Same as PostgreSQL
-
-Sorry for my English and C code, I'm not a native speaker :-)
-
-!!!!!!!!!!!!!!!!!!!!!!!!! NO WARRANTY !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-Installation:
-------------
-1) Change three affected files in source directories
- (I don't have time to create proper patch diffs, I don't know how)
-2) Compile with locale enabled and multibyte set to LATIN2
-3) Set up your installation properly; do not forget to create the locale
- variables in your profile (environment). Ex. (may not be exactly true):
- LC_ALL=cs_CZ.ISO8859-2
- LC_COLLATE=cs_CZ.ISO8859-2
- LC_CTYPE=cs_CZ.ISO8859-2
- LC_MONETARY=cs_CZ.ISO8859-2
- LC_NUMERIC=cs_CZ.ISO8859-2
- LC_TIME=cs_CZ.ISO8859-2
-4) You have to start the postmaster with locales set!
-5) Try it with the Czech language; it has to sort correctly
-6) Install the ODBC driver for PgSQL into your M$ Windows
-7) Set up your data source properly. Include this line in your ODBC
- configuration dialog in the field "Connect Settings:" :
- SET CLIENT_ENCODING = 'WIN1250';
-8) Now try it again, but in Windows with ODBC.
-
-Description:
-------------
-- Depends on proper system locales, tested with RH6.0 and Slackware 3.6,
- with the cs_CZ.iso8859-2 locale
-- Never try to set the server's multibyte database encoding to WIN1250;
- always use LATIN2 instead. There is no WIN1250 locale in Unix
-- The WIN1250 encoding is usable only for M$ Windows ODBC clients. The
- characters are re-coded on the fly, to be displayed and stored back properly
-
-Important:
-----------
-- it reorders your sort order depending on your LC_... setting, so don't be
- confused by the regression tests; they don't use locales
-- "ch" is correctly sorted only in some newer locales (Ex. RH6.0)
-- you have to insert money as '162,50' (with a comma, in apostrophes!)
-- not tested properly
diff --git a/doc-xc/README.mb.jp b/doc-xc/README.mb.jp
deleted file mode 100644
index 7cafb2426e..0000000000
--- a/doc-xc/README.mb.jp
+++ /dev/null
@@ -1,814 +0,0 @@
-PostgreSQL 7.3 multi-byte (MB) support README, written 2002/10/21
-
- Tatsuo Ishii
-
-■ Introduction
-
- Multi-byte support in PostgreSQL has the following features:
-
- 1. A multi-byte encoding -- each country's EUC (Japanese, Chinese
-    and so on), Unicode, mule internal code, ISO-8859-* -- can be
-    selected at database creation time; the data is stored in the
-    database in that encoding.
- 2. Multi-byte characters can be used in table names.
- 3. Multi-byte characters can be used in column names.
- 4. Multi-byte characters can be used in the data itself.
- 5. Regular-expression searches work on multi-byte characters.
- 6. LIKE searches work on multi-byte characters.
- 7. String functions such as character_length(), position() and
-    substring() are multi-byte aware.
- 8. When the frontend encoding differs from the backend encoding,
-    the conversion between them is performed automatically.
- 9. User-defined encoding conversions can be created.
-
- The encodings that multi-byte support can handle are listed below:
-
- SQL_ASCII ASCII
- EUC_JP Japanese EUC
- EUC_CN Chinese EUC based on GB; code set 2 is
-  SS2 + a 2-byte code = a 3-byte representation.
- EUC_KR Korean EUC.
- JOHAB Hangul-based Korean EUC.
- EUC_TW Taiwan EUC; code set 2 is
-  SS2 + plane number + a 2-byte code = a 4-byte representation.
- UNICODE UTF-8; only the UCS-2 range is supported,
-  i.e. up to 0xffff.
- MULE_INTERNAL mule internal code; however, Type N
-  variable-length characters are not supported.
- LATIN1 through LATIN10
- ISO_8859_1 through 16
- Cyrillic KOI8(KOI8-R), WIN(CP1251) and ALT(CP866) are
-  supported; of course ISO 8859-5 can also be used --
-  in that case, specify it as "LATIN5".
- WIN1256 Arabic Windows encoding.
- TCVN Vietnamese; "ABC" and "VSCII" are also usable.
- WIN874 Thai.
-
- On the frontend side the following additional encodings can be used:
-
- SJIS Shift JIS (mostly compatible with MS932)
- BIG5 Chinese, used in Taiwan and Hong Kong; has
-  compatibility with EUC_TW.
- GBK Windows-936
- UHC Windows-949
- WIN1250 Windows-1250
- GB18030 GB18030
-
-■ Encodings that can be used for Japanese
-
- As a rule of thumb: if you use only English and Japanese, choose
- EUC_JP (likewise, EUC_CN if you use only Chinese, and so on); if you
- want other languages as well, choose UNICODE or MULE_INTERNAL.
-
- Note: choosing MULE_INTERNAL is convenient because it can handle
- many character sets, but in regular expressions a range that spans
- more than one character set (for example [a-範] or [abc範囲]) cannot
- be used. Using different character sets in separate ranges is fine
- (for example [abc][範-囲]). Also note that an expression such as
- [^a] means "anything other than 'a' within the character set that
- 'a' belongs to" (US-ASCII here); it by no means stands for every
- character other than "a", such as kanji or hiragana.
-
-■ Installation
-
- From PostgreSQL 7.3 on, multi-byte support is always enabled
- regardless of configure options, so no special multi-byte option
- needs to be given at configure time.
-
-■ Specifying the encoding at initdb/createdb/CREATE DATABASE
-
- initdb accepts the following options to specify the encoding:
-
- -E encoding
- --encoding=encoding
-
- The encoding specified here becomes the encoding used whenever the
- encoding is omitted in later createdb/CREATE DATABASE calls. If the
- -E or --encoding option is omitted, SQL_ASCII is adopted as the
- encoding, so if Japanese is to be the default, always specify the
- encoding explicitly, as in
-
- -E EUC_JP
-
- or
-
- --encoding=EUC_JP
-
- Note that from PostgreSQL 7.3 on, locale support is always enabled,
- but for languages such as Japanese it brings no benefit at all; on
- the contrary it can cause trouble, such as indexes not being used
- for LIKE and regular-expression searches. It is therefore
- recommended to disable it. To disable locale support, specify the
-
- --no-locale
-
- option.
-
- createdb accepts the following options to specify the encoding:
-
- -E encoding
- --encoding=encoding
-
- CREATE DATABASE specifies the encoding as follows:
-
- CREATE DATABASE dbname WITH ENCODING = 'encoding';
-
- When LOCATION is specified at the same time, it looks like this:
-
- CREATE DATABASE dbname WITH LOCATION = 'path' ENCODING = 'encoding';
-
- If the encoding is omitted in createdb/CREATE DATABASE, the encoding
- given to initdb is adopted, because the template database created by
- initdb (template1) passes its encoding attribute on.
-
- The encoding of a database can be seen with psql -l or psql's \l:
-
-$ psql -l
- List of databases
- Database | Owner | Encoding
----------------+---------+---------------
- euc_cn | t-ishii | EUC_CN
- euc_jp | t-ishii | EUC_JP
- euc_kr | t-ishii | EUC_KR
- euc_tw | t-ishii | EUC_TW
- mule_internal | t-ishii | MULE_INTERNAL
- regression | t-ishii | SQL_ASCII
- template1 | t-ishii | EUC_JP
- test | t-ishii | EUC_JP
- unicode | t-ishii | UNICODE
-(9 rows)
-
-■ About character data types
-
- In 7.2, the n in CHAR(n) and VARCHAR(n) means the number of
- characters. Note that this differs from 7.1 and earlier, where n
- meant the number of bytes.
-
- An example:
-
- In 7.2, CHAR(1) can store "あ", but in 7.1 and earlier it cannot,
- because storing "あ" requires at least 2 bytes. Conversely, "a"
- consumes only 1 byte, so it fits into CHAR(1) in 7.1 as well.
-
- Also note that in 7.2, unlike 7.1 and earlier, a string longer than
- n characters stored into CHAR(n) is not silently truncated to n
- characters but raises an error. This is because string handling was
- changed to follow the SQL standard, regardless of multi-byte support.
-
-■ Automatic encoding conversion between frontend and backend
-
- In principle the encodings of the backend (database) and of a
- frontend such as psql should match, but for several encodings a
- different one may be used on the frontend; in that case the backend
- converts the encoding automatically.
-
- backend encoding allowed frontend encodings
- ----------------------------------------------------------------
- EUC_JP EUC_JP, SJIS, UNICODE
-
- EUC_TW EUC_TW, BIG5, UNICODE
-
- EUC_CN EUC_CN, UNICODE
-
- EUC_KR EUC_KR, UNICODE
-
- JOHAB JOHAB, UNICODE
-
- LATIN1,3,4 LATIN1,3,4, UNICODE
-
- LATIN2 LATIN2, WIN1250, UNICODE
-
- LATIN5 LATIN5, WIN, ALT, UNICODE
-
- LATIN6,7,8,9,10 LATIN6,7,8,9,10, UNICODE
-
- ISO_8859_5,6,7,8 ISO_8859_5,6,7,8, UNICODE
-
- WIN1256 WIN1256, UNICODE
-
- TCVN TCVN, UNICODE
-
- WIN874 WIN874, UNICODE
-
- MULE_INTERNAL EUC_JP, SJIS, EUC_KR, EUC_CN,
- EUC_TW, BIG5, LATIN1 to 5,
- WIN, ALT, WIN1250
-
- UNICODE EUC_JP, SJIS, EUC_KR, UHC,
- EUC_CN, GBK, EUC_TW, BIG5,
- LATIN1 to 10, ISO_8859_5 to 8,
- WIN, ALT, WIN1250, WIN1256,
- TCVN, WIN874, JOHAB
- ----------------------------------------------------------------
-
- When the frontend and backend encodings differ, you must tell the
- backend about it. There are several ways to do so:
-
-o using psql's \encoding command
-
- In psql, the \encoding command can be used to switch the frontend
- encoding dynamically. Example:
-
- \encoding SJIS
-
-o using the libpq function PQsetClientEncoding
-
- Since 7.0, the new libpq function PQsetClientEncoding is available:
-
- PQsetClientEncoding(PGconn *conn, const char *encoding)
-
- With this function the encoding can be switched per connection. The
- current encoding is queried with:
-
- int PQclientEncoding(const PGconn *conn)
-
-o setting it in postgresql.conf
-
- To specify the default frontend encoding, set the client_encoding
- parameter in postgresql.conf. Example:
-
- client_encoding = SJIS
-
-o using the PGCLIENTENCODING environment variable
-
- (1) setting the environment variable when starting the postmaster
-
- Setting the environment variable PGCLIENTENCODING has the same
- effect as specifying the encoding in postgresql.conf. However, this
- feature only remains for historical reasons, and it is recommended
- not to rely on it in the future. Example:
-
- export PGCLIENTENCODING=SJIS postmaster -S
-
- (2) to set the encoding per client/frontend
-
- Set the PGCLIENTENCODING environment variable before starting that
- frontend (for example psql).
-
-o using the SET CLIENT_ENCODING command
-
- The SET CLIENT_ENCODING SQL command changes the frontend encoding
- dynamically. Example:
-
- SET CLIENT_ENCODING TO SJIS;
-
-■ Checking the currently set frontend encoding
-
- The currently set frontend encoding can be inspected with
-
- show client_encoding;
-
- (it is displayed in lower case).
-
-■ Returning to the default encoding
-
- The SQL command:
-
- RESET CLIENT_ENCODING;
-
- restores the default frontend encoding setting. If client_encoding
- in postgresql.conf or the PGCLIENTENCODING environment variable was
- set when the postmaster was started, the default is that encoding;
- otherwise it is the same as the database encoding.
-
-■ Explicit encoding conversion
-
- In 7.2, explicit encoding conversion is possible with the convert
- function:
-
- convert(string text, [src_encoding name,] dest_encoding name)
-
- Here src_encoding is the encoding name of text; if omitted, it is
- assumed to be the same as the database encoding. dest_encoding is
- the name of the encoding to convert to.
-
- An example:
-
- SELECT convert(text, EUC_JP) FROM unicode_tbl;
-
- returns the text column of the Unicode table unicode_tbl converted
- to EUC_JP.
-
- In 7.3, the SQL-standard CONVERT function can be used as well. The
- SQL-standard CONVERT is almost identical in function to PostgreSQL's
- CONVERT, but the call syntax differs:
-
- SELECT convert(text using euc_jp_to_utf8) FROM unicode_tbl;
-
- The argument after "using" is a conversion name; this example
- specifies the conversion from EUC_JP to UTF-8. For the predefined
- conversions, see the table "Built-in Conversions" under "String
- Functions and Operators" in the User's Guide.
-
-■ What happens when encoding conversion is impossible
-
- The backend and frontend encodings cannot always be converted into
- each other. As an extreme case, what if the backend is EUC_JP but
- the frontend is EUC_KR? In such a case PostgreSQL converts codes it
- cannot translate into a hexadecimal representation, for example
- "(bdae)". Note that this hexadecimal representation is the code in
- mule internal code, because the conversion does not go directly
- frontend <--> backend but passes through the internal
- representation, mule internal code.
-
- The only exception is conversion between Unicode and the other
- encodings: a NOTICE message is printed and the unconvertible
- characters are ignored.
-
-■ Default conversions
-
- A default conversion is the special conversion used for the
- automatic encoding conversion between backend and frontend. For
- each combination of {schema, source encoding, destination encoding}
- exactly one default conversion exists. The built-in conversions
- explained above are defined in the pg_catalog schema, so they can
- always be used regardless of the schema search path.
-
- Conversely, by creating a default conversion in a schema other than
- pg_catalog, you can freely choose your own default conversion. For
- example, for conversions involving SJIS it is possible to create a
- conversion corresponding to the JIS-standard Shift JIS instead of
- the MS932-compatible conversion that PostgreSQL provides.
-
-■ Creating user-defined conversions
-
- From PostgreSQL 7.3 on, user-defined conversions can be created.
- Conversions are defined with the CREATE CONVERSION SQL command:
-
- CREATE [DEFAULT] CONVERSION conversion_name
- FOR source_encoding
- TO dest_encoding FROM funcname
-
- For details, see the reference manual.
-
-■ Support for SJIS user-defined characters
-
- SJIS user-defined characters (UDC) are supported since 7.0. On the
- question of how UDC should be handled, Nakajo-san
- ([email protected]) raised the problem and provided a detailed
- explanation, which is attached at the end of this document for
- reference. The discussion of this problem can also be followed in
- the threads starting at [pgsql-jp 12288] (1999/12/17) and
- [pgsql-jp 12486] (2000/1/5) on the Japanese PostgreSQL mailing list
- (the mail archive is available at
- http://www.sra.co.jp/people/t-ishii/PostgreSQL/).
-
- Here is a short explanation based on those discussions.
-
- To use Japanese with PostgreSQL, the backend encoding must be
- EUC_JP, MULE_INTERNAL or Unicode. MULE_INTERNAL is EUC_JP plus a
- code identifying the character set, so it is essentially the same.
- Unicode <---> SJIS conversion is not supported at this point and is
- ignored here. Therefore only the mutual conversion of EUC_JP and
- SJIS is considered.
-
- Background
-
- Although we speak of EUC_JP as one thing, it actually consists of
- several character sets:
-
- G0: JIS ROMAN (almost the same as ASCII)
- G1: JIS X 0208 (JIS kanji)
- G2: JIS X 0201 (1-byte kana)
- G3: JIS X 0212 (JIS supplementary kanji)
-
- SJIS, on the other hand, basically supports G0, G1 and G2 of these,
- but not G3. SJIS can therefore be regarded as a subset of EUC_JP,
- and in fact PostgreSQL up to 6.5 was implemented on this idea.
-
- However, in the SJIS world of Windows PCs, some character codes not
- defined by the JIS standards above are in use (the UDC area), and
- this part had not been taken into account at all by PostgreSQL so
- far; in fact, converting SJIS containing UDC to EUC_JP produced
- incorrect conversions. So PostgreSQL 7.0 set out to solve this
- problem first.
-
- Also, while there is nothing like an official standard for how UDC
- is used, it turned out that there is an industry agreement, a
- de-facto standard so to speak. It was therefore decided to support
- this as far as possible, too.
-
- The UDC implementation in PostgreSQL 7.0
-
- (1) The user-defined character areas are mapped onto the JIS
- user-defined character areas. SJIS and EUC_JP then correspond 1:1
- (see the C sketch after the quoted explanation below):
-
- - SJIS user-defined character area A (tentative name)
- rows 95-104 <-> Japanese EUC / G1 (JIS X 0208) rows 85-94
-
- - SJIS user-defined character area B (tentative name)
- rows 105-114 <-> Japanese EUC / G3 (JIS X 0212) rows 85-94
-
- (2) IBM extended character area (SJIS rows 115-120)
-
- These are converted to G1 (JIS X 0208) and G3 (JIS X 0212) by a
- conversion table. Note that with this conversion, converting
- SJIS --> EUC_JP and then back EUC_JP --> SJIS may not return the
- original SJIS. Also, in the EUC_JP --> SJIS direction not every
- character can be converted; in that case the character is replaced
- by "〓" as the unconvertible character.
-
- * In the industry agreement the unconvertible character is
- "implementation-dependent", but many systems, Solaris among them,
- adopt "〓" as the unconvertible character. PostgreSQL follows them.
-
- (3) NEC selection of the IBM extended character area (SJIS rows 89-92)
-
- In PostgreSQL 7.0 these are all replaced by the unconvertible
- character "〓".
-
- From PostgreSQL 7.0.1 on, they are first converted into the IBM
- extended character area, and then converted to G1 (JIS X 0208) and
- G3 (JIS X 0212).
-
-Acknowledgments:
-
- o Tokuya-san of Sankyo Unyu Service contributed the patch for the
- NEC selection of IBM kanji.
-
- o Members of the Japanese PostgreSQL mailing list gave advice on
- the various character sets and code systems; thanks go to them
- here. For the SJIS support, the patch by Ichikawa-san of
- Ochanomizu University was used as a reference.
-
- o Nakajo-san ([email protected]) raised the problem of how to
- handle SJIS user-defined characters (UDC) and provided a detailed
- explanation.
-
-■ Mutual conversion between Unicode and the other encodings
-
- Mutual conversion between Unicode and the other encodings is
- possible since PostgreSQL 7.1. Except for a very small part
- (ISO 8859-1), these conversions cannot be done by logic alone, so
- conversion tables are needed. In the PostgreSQL implementation, the
- Unicode conversion tables provided by the Unicode organization are
- used; they are converted into C tables by a Perl program (a
- modified version of the program attached to lv version 4.3.6 by
- NARITA Tomio). The conversion tables provided by the Unicode
- organization may not be redistributed, so they are not included in
- the PostgreSQL source code. The conversion tables used are listed
- below:
-
- encoding conversion table
- ============================================================
- ISO 8859-1 none
- ISO 8859-2 8859-2.TXT
- ISO 8859-3 8859-3.TXT
- ISO 8859-4 8859-4.TXT
- ISO 8859-5 8859-5.TXT
- ISO 8859-6 8859-6.TXT
- ISO 8859-7 8859-7.TXT
- ISO 8859-8 8859-8.TXT
- ISO 8859-9 8859-9.TXT
- ISO 8859-10 8859-10.TXT
- ISO 8859-13 8859-13.TXT
- ISO 8859-14 8859-14.TXT
- ISO 8859-15 8859-15.TXT
- ISO 8859-16 8859-16.TXT
- EUC_JP JIS0201.TXT, JIS0208.TXT, JIS0212.TXT,
- CP932.TXT, sjis.map
- SJIS CP932.TXT
- EUC_CN GB2312.TXT
- GBK CP936.TXT
- EUC_KR KSX1001.TXT
- UHC CP949.TXT
- JOHAB JOHAB.TXT
- EUC_TW CNS11643.TXT
- Big5 BIG5.TXT
- WIN1256 CP1256.TXT
- TCVN CP1258.TXT
- WIN874 CP874.TXT
- ============================================================
-
-Acknowledgments:
-
- o Tokuya-san of Sankyo Unyu Service contributed a conversion table
- for SJIS generated from CP932.TXT. This made it possible to
- support the IBM extended character area (SJIS rows 115-120) and
- the NEC selection of the IBM extended area (SJIS rows 89-92).
-
-
-Reference 1:
-
- This is the WIN1250 support contributed by Pavel Behal. A document
- that is helpful for using it in a Windows environment comes with
- it, so it is attached here.
-
- -------------------------------------------------------------------
-Version: 0.91 for PgSQL 6.5
-Author: Pavel Behal
-Revised by: Tatsuo Ishii
-Licence: The Same as PostgreSQL
-
-Sorry for my English and C code, I'm not a native speaker :-)
-
-!!!!!!!!!!!!!!!!!!!!!!!!! NO WARRANTY !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-Installation:
-------------
-1) Change three affected files in source directories
- (I don't have time to create proper patch diffs, I don't know how)
- [In PostgreSQL 6.5.1 this step is not necessary -- Ishii]
-2) Compile with locale enabled and multibyte set to LATIN2
-3) Set up your installation properly; do not forget to create the locale
- variables in your profile (environment). Ex. (may not be exactly true):
- LC_ALL=cs_CZ.ISO8859-2
- LC_COLLATE=cs_CZ.ISO8859-2
- LC_CTYPE=cs_CZ.ISO8859-2
- LC_MONETARY=cs_CZ.ISO8859-2
- LC_NUMERIC=cs_CZ.ISO8859-2
- LC_TIME=cs_CZ.ISO8859-2
-4) You have to start the postmaster with locales set!
-5) Try it with the Czech language; it has to sort correctly
-6) Install the ODBC driver for PgSQL into your M$ Windows
-7) Set up your data source properly. Include this line in your ODBC
- configuration dialog in the field "Connect Settings:" :
- SET CLIENT_ENCODING = 'WIN1250';
-8) Now try it again, but in Windows with ODBC.
-
-Description:
-------------
-- Depends on proper system locales, tested with RH6.0 and Slackware 3.6,
- with the cs_CZ.iso8859-2 locale
-- Never try to set the server's multibyte database encoding to WIN1250;
- always use LATIN2 instead. There is no WIN1250 locale in Unix
-- The WIN1250 encoding is usable only for M$ Windows ODBC clients. The
- characters are re-coded on the fly, to be displayed and stored back properly
-
-Important:
-----------
-- it reorders your sort order depending on your LC_... setting, so don't be
- confused by the regression tests; they don't use locales
-- "ch" is correctly sorted only in some newer locales (Ex. RH6.0)
-- you have to insert money as '162,50' (with a comma, in apostrophes!)
-- not tested properly
- -------------------------------------------------------------------
-
-Reference 2: The problem statement and explanation about how to
- handle SJIS user-defined characters (UDC), received from Nakajo-san
- ([email protected]).
-
--------------------------- begin quotation ----------------------------------
----
-1. The SJIS code range
-
- first byte 0x81 - 0x9F, 0xE0 - 0xFC
- second byte 0x40 - 0x7E, 0x80 - 0xFC
-
- The ranges of the so-called "gaiji" (external character) areas:
-
- - X0208 common free area
-
- |--------------------
- | row 85 0xEB40 -
- |...
- |--------------------
- | row 89 0xED40 - ; rows 89-92 are called the
- |... ; "NEC selection of the IBM
- |-------------------- ; extended character area"
- | row 93 0xEF40 -
- | row 94 0xEF9F - 0xEFFC
-
- - user-defined character area
-
- |--------------------
- | row 95 0xF040 - ; rows 95-104:
- |... ; "user-defined character area A" (tentative name)
- |--------------------
- | row 105 0xF540 - ; rows 105-114:
- |... ; "user-defined character area B" (tentative name)
- |--------------------
- | row 115 0xFA40 - ; rows 115-120 are generally
- |... ; called the "IBM extended
- | row 120 ... ; character area"
- |--------------------
-
----
-2. i-mode $BC<Kv$,;H$C$F$$$k?^7AJ8;z%3!<%I$NHO0O(B
-
- 0xF89F - 0xF8FC (112 $B6h(B)
- 0xF940 - 0xF949 (113 $B6h(B)
- 0xF972 - 0xF97E (113 $B6h(B)
- 0xF980 - 0xF990 (113 $B6h(B)
- 0xF9B0 (114 $B6h(B)
-
----
-3. $B0lHLE*$J(B EUC $BF|K\8l%3!<%I$NDj5A(B
-
- G0 : [0x21-0x7E] ; $B$$$o$f$k(B JIS ROMAN
- G1 : [0xA1-0xFE] [0xA1-0xFE] ; JIS X 0208
- G2 : 0x8E [0xA1-0xDF] ; JIS X 0201 $B%+%J(B
- G3 : 0x8F [0xA1-0xFE] [0x21-0x7E] ; JIS X 0212 $BJd=u4A;z(B
-
----
-[The problem]
-
-SJIS rows 95-120 have no corresponding area in JIS X0208, so the
-EUC - SJIS character code conversion for this area may differ from
-vendor to vendor -- this was the point raised by Ishii-san.
-
----
-[Discussion]
-
-As a result of some investigation it turned out that it is not the
-case that there is no standard rule for converting SJIS rows 95-120
-to EUC. For the details please see the reference materials listed
-later; here the rule is explained briefly.
-
- - SJIS user-defined character area A (tentative name)
-   rows 95-104 <-> Japanese EUC / G1 rows 85-94
-
-   For example, SJIS (95, 1) = 0xF040 becomes
-   EUC 0xF5A1.
-
- - SJIS user-defined character area B (tentative name)
-   rows 105-114 <-> Japanese EUC / G3 rows 85-94
-
-   For example, SJIS (105, 1) = 0xF540 becomes
-   EUC 0x8FF5A1.
-
- - IBM extended character area
-   rows 115-120
-
-   If a corresponding character exists in JIS X 0208 (Japanese EUC /
-   G1) or JIS X 0212 (Japanese EUC / G3), map to that character.
-   Otherwise, assign Japanese EUC / G3 rows 83-84 in order, starting
-   from the top of the kuten codes (the conversion-table method).
-
-This specification appears to be based on a report produced in 1996
-by the OSF Japanese Vendor Council, prompted by the fact that the
-widely used SJIS - EUC mappings differed from vendor to vendor,
-which was a problem for interoperability.
-
-The Solaris documentation says that it follows the "TOG Japanese
-Vendor Council recommended EUC/Shift JIS code conversion
-specification", introduced since Solaris 2.6, so it does not seem
-unnatural to me to regard it as a de-facto standard.
-
-Let me add that, at least as of 1996, Oracle and Sybase apparently
-treated the SJIS user-defined/vendor-defined character areas as
-unconvertible characters when converting to EUC.
-
----
-[Reference materials]
-
-// The URLs are long; I hope they don't get broken in the middle...
-
-- "Japanese EUC / Shift JIS code conversion specification and survey of
-  code systems in actual use", 1996, OSF Japan Vendor Council
-  http://www.opengroup.or.jp/jvc/cde/sjis-euc.html
-
-- "Character code conversion rules"
-  Solaris 7, JFP User's Guide
-  http://docs.sun.com/ab2/coll.139.3/JFPUG/@Ab2PageView/11683?Ab2Lang=ja&Ab2Enc=euc-jp
-
-- "Japanese character codes"
-  Solaris 7, JFP User's Guide
-  http://docs.sun.com/ab2/coll.139.3/JFPUG/@Ab2PageView/879;td=5?Ab2Lang=ja&Ab2Enc=euc-jp
-
-  // The mysterious description of "rows 1-20" comes from here.
-
----
--------------------------- End of quotation ---------------------------------
-
-Revision history:
-
-  2002/10/21
-       * Multibyte support is no longer optional; it is now always
-         built in.
-       * Added CREATE CONVERSION/DROP CONVERSION.  Along with this,
-         the encoding conversion functions have become loadable
-         functions, making the backend load module smaller than in
-         7.2.  Also added the SQL-standard CONVERT function.
-       * Several encodings have been added.
-       * The above will appear in 7.3.
-
-  2001/10/01
-       * Added CONVERT.  Added multibyte support to
-         lpad/rpad/trim/btrim/ltrim/rtrim/translate.  Changed
-         char/varchar to define their size in characters, not in
-         bytes.  The above will appear in 7.2.
-
-  2001/2/15
-       * Tokuie-san of Sankyo Unyu Service contributed a conversion
-         table for SJIS generated from CP932.TXT.  Will appear in 7.1.
-
-  2001/1/6
-       * Added mutual conversion between UNICODE and the other
-         encodings.
-       * Will appear in 7.1.
-
-  2000/5/20
-       * Added support for the NEC-selected IBM kanji.  This is a
-         contribution from Tokuie-san of Sankyo Unyu Service.
-       * These will appear in 7.0.1.
-
-  2000/3/22
-       * Added PQsetClientEncoding and PQclientEncoding to the libpq
-         functions, making the encoding changeable per connection.
-       * Support for SJIS user-defined characters (UDC)
-       * Changed ./configure --with-mb=EUC_JP to
-         ./configure --enable-multibyte=EUC_JP
-       * Added a regression test for SQL_ASCII
-       * These will appear in 7.0.
-
-  1999/7/11 Added WIN1250 (Czech for Windows) support.
-       * WIN1250 can now be used as the frontend-side encoding.  In
-         this case the backend-side encoding must be LATIN2 or
-         MULE_INTERNAL.
-         (contributed by Pavel Behal)
-       * Fixed type mismatches in backend/utils/mb/conv.c.
-         (contributed by Tomoaki Nishiyama)
-       * These will appear in 6.5.1.
-
-  1999/3/23 Added Cyrillic support and more (already in 6.5)
-       * KOI8 (KOI8-R), WIN (CP1251) and ALT (CP866) are now supported
-         as encodings.  They can be used as either the frontend or the
-         backend encoding, and mutual conversion between the encodings
-         is possible.  The previously supported ISO 8859-5 can be used
-         in the same way.
-         Cyrillic support was realized at the request of, and with the
-         cooperation of, Oleg Broytmann <[email protected]>.  It also
-         subsumes the functionality of the older RCODE support.
-       * Fixed a bug where case-insensitive regular expression
-         searches did not work correctly when MB and locale were
-         specified at the same time
-
-  1999/1/26 Added Big5 support (already in 6.4.2-patched/6.5)
-       * Big5 can now be used as the frontend-side encoding.  In this
-         case the backend-side encoding must be EUC_TW or
-         MULE_INTERNAL.
-       * Added regression test cases for EUC_TW
-         (contributed by Jonah Kuo <[email protected]>)
-
-  1998/12/16 Revised this document (already in 6.4.2).
-       * Removed the description of setting MB=EUC_JP etc. in
-         Makefile.custom, since that method is not supported in 6.4
-         and later.
-       * Revised the wording: character code -> encoding, client ->
-         frontend, server -> backend.
-
-  1998/12/15 Released a bug-fix patch for 6.4 (already in 6.4.2).
-       * Fixed a bug in SQL_ASCII support
-
-  1998/11/21 Released a bug-fix patch for 6.4 (already in 6.4.2).
-       * Fixed a problem with BINARY CURSOR
-       * Fixed a pg_dumpall bug
-
-  1998/11/5 6.4 release.
-       * The encoding column of pg_database is now added even when MB
-         is not enabled.  Therefore SQL_ASCII, representing the ASCII
-         encoding, was added as a new encoding for when MB is not
-         enabled.  Along with this, the encoding IDs corresponding to
-         the encoding names were renumbered so that SQL_ASCII is 0.
-
-  1998/7/22 Released a patch for the 6.4 beta.
-       * Implemented the ability to set the backend-side encoding in
-         initdb/createdb/create database.  For this, a new column
-         encoding was added to the pg_database system catalog (only
-         when MB is enabled)
-       * copy now honors PGCLIENTENCODING
-       * Support for the SQL92 "SET NAMES" (only when MB is enabled)
-       * Support for LATIN2-5
-       * Added a unicode test case to the regression tests
-       * Added test/mb, a regression test directory dedicated to MB
-       * Substantially reorganized the source file locations.  The MB
-         files are now placed in include/mb and backend/utils/mb
-
-  1998/5/25 Bug fixes (released to the pgsql-jp ML as mb_b3.patch;
-       expected to be merged into the upstream 6.4 snapshot)
-
-  1998/5/18 New features / bug fixes (released to the pgsql-jp ML as
-       mb_b2.patch; expected to be merged into the upstream 6.4
-       snapshot)
-       * Support for the environment variable PGCLIENTENCODING, which
-         specifies the frontend-side encoding.  Currently SJIS, EUC_*,
-         MULE_INTERNAL and LATIN1 can be specified.  The same is also
-         possible with
-               set client_encoding to 'sjis';
-       * Dealt, as far as possible, with the places where passing
-         8-bit characters caused problems
-
-  1998/4/21 New features / bug fixes (released to the pgsql-jp ML as
-       mb_b1.patch; already merged into the upstream 6.4 snapshot)
-       * Multibyte support for character_length(), position() and
-         substring()
-       * Added octet_length() -> requires re-running initdb
-       * Added MB support to the configure options
-         (ex. configure --with-mb=EUC_JP)
-       * Added a regression test for EUC_KR
-         (contributed by "Soonmyung. Hong" <[email protected]>)
-       * Added character_length(), position(), substring() and
-         octet_length() to the EUC_JP regression test
-       * Fixed a SystemV incompatibility in regress.sh
-       * Fixed toupper() and tolower() sometimes crashing when given
-         8-bit characters
-
-  1998/3/25 PostgreSQL 6.3.1 released, with MB PL2 merged in
-
-  1998/3/10 Released PL2
-       * Added regression tests for EUC_JP, EUC_CN and MULE_INTERNAL
-         (the EUC_CN data was contributed by [email protected])
-       * Guarded regexp so that values other than unsigned char are
-         never passed to isalpha() and friends
-       * Added English documentation
-       * Fixed bugs that occurred when MB was not defined
-
-  1998/3/1 Released PL1
-
-That's all.
diff --git a/doc-xc/TODO b/doc-xc/TODO
deleted file mode 100644
index 4b7b3da476..0000000000
--- a/doc-xc/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-The TODO list is now maintained at:
-
- http://wiki.postgresql.org/wiki/Todo
diff --git a/doc-xc/bug.template b/doc-xc/bug.template
deleted file mode 100644
index 392394fed8..0000000000
--- a/doc-xc/bug.template
+++ /dev/null
@@ -1,53 +0,0 @@
-If Postgres-XL failed to compile on your computer or you found a bug,
-please fill out this form and e-mail it to [email protected].
-
-If your bug report has security implications and you'd prefer that it not
-become immediately visible in public archives, don't send it to postgres-xl-bugs.
-Security issues can be reported privately to [email protected].
-
-If you not only found the problem but also solved it and generated a
-patch, then e-mail it to [email protected] instead. Please use the
-command "diff -c" to generate the patch.
-
-You may also enter a bug report at http://sourceforge.net/projects/postgres-xl/
-instead of e-mailing this form.
-
-============================================================================
- POSTGRES-XL BUG REPORT TEMPLATE
-============================================================================
-
-
-Your name :
-Your email address :
-
-
-System Configuration:
----------------------
- Architecture (example: Intel Pentium) :
-
- Operating System (example: Linux 2.4.18) :
-
- Postgres-XL version (example: Postgres-XL 9.2): Postgres-XL 9.2
-
- Compiler used (example: gcc 3.3.5) :
-
-
-Please enter a FULL description of your problem:
-------------------------------------------------
-
-
-
-
-
-Please describe a way to repeat the problem. Please try to provide a
-concise reproducible example, if at all possible:
-----------------------------------------------------------------------
-
-
-
-
-
-If you know how this problem might be fixed, list the solution below:
----------------------------------------------------------------------
-
-
diff --git a/doc-xc/src/Makefile b/doc-xc/src/Makefile
deleted file mode 100644
index b0d4f1f506..0000000000
--- a/doc-xc/src/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# doc/src/Makefile
-
-subdir = doc/src
-top_builddir = ../..
-include $(top_builddir)/src/Makefile.global
-
-all distprep html man install installdirs uninstall clean distclean maintainer-clean maintainer-check:
- $(MAKE) -C sgml $@
diff --git a/doc-xc/src/sgml/.gitignore b/doc-xc/src/sgml/.gitignore
deleted file mode 100644
index e1b84b490f..0000000000
--- a/doc-xc/src/sgml/.gitignore
+++ /dev/null
@@ -1,33 +0,0 @@
-# Stuff shipped in tarballs
-/html/
-/html-stamp
-/man1/
-/man3/
-/man7/
-/man-stamp
-# Other popular build targets
-/HISTORY
-/INSTALL
-/regress_README
-/postgres-US.pdf
-/postgres-A4.pdf
-/postgres.html
-/postgres.txt
-# GENERATED_SGML
-/features-supported.sgml
-/features-unsupported.sgml
-/errcodes-table.sgml
-/version.sgml
-/bookindex.sgml
-/HTML.index
-# Assorted byproducts from building the above
-/postgres.xml
-/HISTORY.html
-/INSTALL.html
-/regress_README.html
-/postgres-US.aux
-/postgres-US.log
-/postgres-US.out
-/postgres-A4.aux
-/postgres-A4.log
-/postgres-A4.out
diff --git a/doc-xc/src/sgml/Makefile b/doc-xc/src/sgml/Makefile
deleted file mode 100644
index 9c69b15f21..0000000000
--- a/doc-xc/src/sgml/Makefile
+++ /dev/null
@@ -1,421 +0,0 @@
-#----------------------------------------------------------------------------
-#
-# PostgreSQL documentation makefile
-#
-# doc/src/sgml/Makefile
-#
-#----------------------------------------------------------------------------
-
-# This makefile is for building and installing the documentation.
-# When a release tarball is created, the documentation files are
-# prepared using the distprep target. In Git-based trees these files
-# don't exist, unless explicitly built, so we skip the installation in
-# that case.
-
-
-# Make "html" the default target, since that is what most people tend
-# to want to use.
-html:
-
-subdir = doc-xc/src/sgml
-top_builddir = ../../..
-include $(top_builddir)/src/Makefile.global
-
-MAKESGMLDIR = $(top_builddir)/src/pgxc/tools/makesgml
-MAKESGML = $(MAKESGMLDIR)/makesgml
-
-
-all: html man
-
-distprep: html distprep-man
-
-
-ifndef COLLATEINDEX
-COLLATEINDEX = $(DOCBOOKSTYLE)/bin/collateindex.pl
-endif
-
-ifndef JADE
-JADE = jade
-endif
-SGMLINCLUDE = -D . -D $(srcdir)
-
-ifndef NSGMLS
-NSGMLS = nsgmls
-endif
-
-ifndef OSX
-OSX = osx
-endif
-
-ifndef XSLTPROC
-XSLTPROC = xsltproc
-endif
-
-VERSION = 9.2
-
-override XSLTPROCFLAGS += --stringparam pg.version '$(VERSION)'
-
-
-GENERATED_SGML = bookindex.sgml version.sgml \
- features-supported.sgml features-unsupported.sgml errcodes-table.sgml
-
-ALLSGMLIN := $(wildcard $(srcdir)/*.sgmlin $(srcdir)/ref/*.sgmlin)
-
-#ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML)
-ALLSGML := $(ALLSGMLIN:%sgmlin=%sgml) $(GENERATED_SGML)
-
-ALLSGMLTOREMOVE := $(ALLSGMLIN:%sgmlin=%sgml)
-
-
-# Sometimes we don't want this one.
-ALMOSTALLSGML := $(filter-out %bookindex.sgml,$(ALLSGML))
-
-ifdef DOCBOOKSTYLE
-CATALOG = -c $(DOCBOOKSTYLE)/catalog
-endif
-
-# Enable some extra warnings
-# -wfully-tagged needed to throw a warning on missing tags
-# for older tool chains, 2007-08-31
-# Note: try "make SPFLAGS=-wxml" to catch a lot of other dubious constructs,
-# in particular < and & that haven't been made into entities. It's far too
-# noisy to turn on by default, unfortunately.
-override SPFLAGS += -wall -wno-unused-param -wno-empty -wfully-tagged
-
-##
-## SGML files
-##
-
-sgml-files: $(ALLSGMLIN:%sgmlin=%sgml)
-
-##
-## Man pages
-##
-
-man distprep-man: man-stamp
-
-man-stamp: stylesheet-man.xsl postgres.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_MAN_FLAGS) $^
- touch $@
-
-
-##
-## HTML
-##
-
-.PHONY: draft
-
-JADE.html.call = $(JADE) $(JADEFLAGS) $(SPFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -t sgml -i output-html
-
-# The draft target creates HTML output in draft mode, without index (for faster build).
-draft: postgres.sgml $(ALMOSTALLSGML) stylesheet.dsl
- $(MKDIR_P) html
- $(JADE.html.call) -V draft-mode $<
- cp $(srcdir)/stylesheet.css html/
-
-html: html-stamp
-
-html-stamp: postgres.sgml $(ALLSGML) stylesheet.dsl
- $(MKDIR_P) html
- $(JADE.html.call) -i include-index $<
- cp $(srcdir)/stylesheet.css html/
- touch $@
-
-# single-page HTML
-postgres.html: postgres.sgml $(ALLSGML) stylesheet.dsl
- $(JADE.html.call) -V nochunks -V rootchunk -V '(define %root-filename% #f)' -V '(define use-output-dir #f)' -i include-index $<
-
-# single-page text
-postgres.txt: postgres.html
- $(LYNX) -force_html -dump -nolist $< > $@
-
-HTML.index: postgres.sgml $(ALMOSTALLSGML) stylesheet.dsl
- @$(MKDIR_P) html
- $(JADE.html.call) -V html-index $<
-
-bookindex.sgml: HTML.index
- LC_ALL=C $(PERL) $(COLLATEINDEX) -f -g -i 'bookindex' -o $@ $<
-
-# Technically, this should depend on Makefile.global, but then
-# version.sgml would need to be rebuilt after every configure run,
-# even in distribution tarballs. So this is cheating a bit, but it
-# will achieve the goal of updating the version number when it
-# changes.
-version.sgml: $(top_srcdir)/configure
- { \
- echo "<!ENTITY version \"$(VERSION)\">"; \
- echo "<!ENTITY majorversion \"$(MAJORVERSION)\">"; \
- } > $@
-
-features-supported.sgml: $(top_srcdir)/src/backend/catalog/sql_feature_packages.txt $(top_srcdir)/src/backend/catalog/sql_features.txt
- $(PERL) $(srcdir)/mk_feature_tables.pl YES $^ > $@
-
-features-unsupported.sgml: $(top_srcdir)/src/backend/catalog/sql_feature_packages.txt $(top_srcdir)/src/backend/catalog/sql_features.txt
- $(PERL) $(srcdir)/mk_feature_tables.pl NO $^ > $@
-
-errcodes-table.sgml: $(top_srcdir)/src/backend/utils/errcodes.txt generate-errcodes-table.pl
- $(PERL) $(srcdir)/generate-errcodes-table.pl $< > $@
-
-##
-## Print
-##
-
-
-# RTF to allow minor editing for hardcopy
-%.rtf: %.sgml $(ALLSGML)
- $(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -t rtf -V rtf-backend -i output-print -i include-index postgres.sgml
-
-# TeX
-# Regular TeX and pdfTeX have slightly differing requirements, so we
-# need to distinguish the path we're taking.
-
-JADE.tex.call = $(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d $(srcdir)/stylesheet.dsl -t tex -V tex-backend -i output-print -i include-index
-
-%-A4.tex-ps: %.sgml $(ALLSGML)
- $(JADE.tex.call) -V texdvi-output -V '%paper-type%'=A4 -o $@ $<
-
-%-US.tex-ps: %.sgml $(ALLSGML)
- $(JADE.tex.call) -V texdvi-output -V '%paper-type%'=USletter -o $@ $<
-
-%-A4.tex-pdf: %.sgml $(ALLSGML)
- $(JADE.tex.call) -V texpdf-output -V '%paper-type%'=A4 -o $@ $<
-
-%-US.tex-pdf: %.sgml $(ALLSGML)
- $(JADE.tex.call) -V texpdf-output -V '%paper-type%'=USletter -o $@ $<
-
-%.dvi: %.tex-ps
- @rm -f $*.aux $*.log
-# multiple runs are necessary to create proper intra-document links
- jadetex $<
- jadetex $<
- jadetex $<
-
-# PostScript from TeX
-postgres.ps:
- $(error Invalid target; use postgres-A4.ps or postgres-US.ps as targets)
-
-%.ps: %.dvi
- dvips -o $@ $<
-
-postgres.pdf:
- $(error Invalid target; use postgres-A4.pdf or postgres-US.pdf as targets)
-
-%.pdf: %.tex-pdf
- @rm -f $*.aux $*.log $*.out
-# multiple runs are necessary to create proper intra-document links
- pdfjadetex $<
- pdfjadetex $<
- pdfjadetex $<
-
-# Cancel built-in suffix rules, interfering with PS building
-.SUFFIXES:
-.SUFFIXES: .sgml .sgmlin
-
-INC_LIST = -I PG -I EN
-#INC_LIST = -I PGXC -I EN
-
-.sgmlin.sgml:
- $(MAKE) -C $(MAKESGMLDIR)
- $(MAKESGML) -i $< -o $@ $(INC_LIST) $(EXC_LIST)
-
-
-# This generates an XML version of the flow-object tree. It's useful
-# for debugging DSSSL code, and possibly to interface to some other
-# tools that can make use of this.
-%.fot: %.sgml $(ALLSGML)
- $(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -t fot -i output-print -i include-index -o $@ $<
-
-
-##
-## Semi-automatic generation of some text files.
-##
-
-JADE.text = $(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -i output-text -t sgml
-LYNX = lynx
-
-INSTALL HISTORY regress_README: % : %.html
- $(PERL) -p -e 's/<H(1|2)$$/<H\1 align=center/g' $< | $(LYNX) -force_html -dump -nolist -stdin > $@
-
-INSTALL.html: standalone-install.sgml installation.sgml version.sgml
- $(JADE.text) -V nochunks standalone-install.sgml installation.sgml > $@
-
-HISTORY.html: generate_history.pl $(wildcard $(srcdir)/release*.sgml)
- $(PERL) $< "$(srcdir)" release.sgml >tempfile_HISTORY.sgml
- $(JADE.text) -V nochunks tempfile_HISTORY.sgml > $@
- rm tempfile_HISTORY.sgml
-
-regress_README.html: regress.sgml
- ( echo '<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook V4.2//EN" ['; \
- echo '<!ENTITY % standalone-ignore "IGNORE">'; \
- echo '<!ENTITY % standalone-include "INCLUDE"> ]>'; \
- cat $< ) >tempfile_regress_README.sgml
- $(JADE.text) -V nochunks tempfile_regress_README.sgml > $@
- rm tempfile_regress_README.sgml
-
-
-##
-## XSLT processing
-##
-
-# For obscure reasons, gmake 3.81 complains about circular dependencies
-# if we try to do "make all" in a VPATH build without the explicit
-# $(srcdir) on the postgres.sgml dependency in this rule. gmake bug?
-postgres.xml: $(srcdir)/postgres.sgml $(ALMOSTALLSGML)
- $(OSX) -D. -x lower $< >postgres.xmltmp
- $(PERL) -p -e 's/\[(amp|copy|egrave|gt|lt|mdash|nbsp|ouml|pi|quot|uuml) *\]/\&\1;/g;' \
- -e '$$_ .= qq{<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">\n} if $$. == 1;' \
- <postgres.xmltmp > $@
- rm postgres.xmltmp
-# ' hello Emacs
-
-xslthtml: stylesheet.xsl postgres.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $^
-
-htmlhelp: stylesheet-hh.xsl postgres.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) $^
-
-%-A4.fo: stylesheet-fo.xsl %.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) --stringparam paper.type A4 -o $@ $^
-
-%-US.fo: stylesheet-fo.xsl %.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) --stringparam paper.type USletter -o $@ $^
-
-
-##
-## Experimental Texinfo targets
-##
-
-DB2X_TEXIXML = db2x_texixml
-DB2X_XSLTPROC = db2x_xsltproc
-MAKEINFO = makeinfo
-
-%.texixml: %.xml
- $(DB2X_XSLTPROC) -s texi -g output-file=$(basename $@) $< -o $@
-
-%.texi: %.texixml
- $(DB2X_TEXIXML) --encoding=iso-8859-1//TRANSLIT $< --to-stdout > $@
-
-%.info: %.texi
- $(MAKEINFO) --enable-encoding --no-split --no-validate $< -o $@
-
-
-##
-## Check
-##
-
-# Quick syntax check without style processing
-check maintainer-check: postgres.sgml $(ALMOSTALLSGML) check-tabs
- $(NSGMLS) $(SPFLAGS) $(SGMLINCLUDE) -s $<
-
-
-##
-## Install
-##
-
-install: install-html
-
-ifneq ($(PORTNAME), sco)
-install: install-man
-endif
-
-installdirs:
- $(MKDIR_P) '$(DESTDIR)$(htmldir)'/html $(addprefix '$(DESTDIR)$(mandir)'/man, 1 3 $(sqlmansectnum))
-
-uninstall:
- rm -f '$(DESTDIR)$(htmldir)/html/'* $(addprefix '$(DESTDIR)$(mandir)'/man, 1/* 3/* $(sqlmansectnum)/*)
-
-
-## Install html
-
-install-html: html installdirs
- cp -R $(call vpathsearch,html) '$(DESTDIR)$(htmldir)'
-
-
-## Install man
-
-install-man: man installdirs
-
-sqlmansect ?= 7
-sqlmansectnum = $(shell expr X'$(sqlmansect)' : X'\([0-9]\)')
-
-# Before we install the man pages, we massage the section numbers to
-# follow the local conventions.
-#
-ifeq ($(sqlmansectnum),7)
-install-man:
- cp -R $(foreach dir,man1 man3 man7,$(call vpathsearch,$(dir))) '$(DESTDIR)$(mandir)'
-
-else # sqlmansectnum != 7
-fix_sqlmansectnum = sed -e '/^\.TH/s/"7"/"$(sqlmansect)"/' \
- -e 's/\\fR(7)/\\fR($(sqlmansectnum))/g' \
- -e '1s/^\.so man7/.so man$(sqlmansectnum)/g;1s/^\(\.so.*\)\.7$$/\1.$(sqlmansect)/g'
-
-man: fixed-man-stamp
-
-fixed-man-stamp: man-stamp
- @$(MKDIR_P) $(addprefix fixedman/,man1 man3 man$(sqlmansectnum))
- for file in $(call vpathsearch,man1)/*.1; do $(fix_sqlmansectnum) $$file >fixedman/man1/`basename $$file` || exit; done
- for file in $(call vpathsearch,man3)/*.3; do $(fix_sqlmansectnum) $$file >fixedman/man3/`basename $$file` || exit; done
- for file in $(call vpathsearch,man7)/*.7; do $(fix_sqlmansectnum) $$file >fixedman/man$(sqlmansectnum)/`basename $$file | sed s/\.7$$/.$(sqlmansect)/` || exit; done
-
-install-man:
- cp -R $(foreach dir,man1 man3 man$(sqlmansectnum),fixedman/$(dir)) '$(DESTDIR)$(mandir)'
-
-clean: clean-man clean-sgml
-
-.PHONY: clean-man
-clean-man:
- rm -rf fixedman/ fixed-man-stamp
-
-.PHONY: clean-sgml
-clean-sgml:
- rm -rf $(ALLSGML)
-
-endif # sqlmansectnum != 7
-
-# tabs are harmless, but it is best to avoid them in SGML files
-check-tabs:
- @( ! grep ' ' $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) ) || (echo "Tabs appear in SGML files"; exit 1)
-
-##
-## Clean
-##
-
-# This allows removing some files from the distribution tarballs while
-# keeping the dependencies satisfied.
-.SECONDARY: postgres.xml $(GENERATED_SGML) HTML.index
-.SECONDARY: INSTALL.html HISTORY.html regress_README.html
-.SECONDARY: %-A4.tex-ps %-US.tex-ps %-A4.tex-pdf %-US.tex-pdf
-
-clean:
-# text --- these are shipped, but not in this directory
- rm -f INSTALL HISTORY regress_README
- rm -f INSTALL.html HISTORY.html regress_README.html
-# single-page output
- rm -f postgres.html postgres.txt
-# print
- rm -f *.rtf *.tex-ps *.tex-pdf *.dvi *.aux *.log *.ps *.pdf *.out *.fot
-# index
- rm -f HTML.index $(GENERATED_SGML)
-# XSLT
- rm -f postgres.xml postgres.xmltmp htmlhelp.hhp toc.hhc index.hhk *.fo
-# Texinfo
- rm -f *.texixml *.texi *.info db2texi.refs
-# sgml
- rm -f $(ALLSGMLTOREMOVE)
-
-distclean: clean
-
-maintainer-clean: distclean
-# HTML
- rm -fr html/ html-stamp
-# man
- rm -rf man1/ man3/ man7/ man-stamp
-
-.PHONY: sgmlfiles
-
-INC_LIST = -I XL -I EN
-EXC_LIST = -E PG -E JP
-
-sgmlfiles:
- ./makesgmlfiles $(INC_LIST) $(EXC_LIST)
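
The ".sgmlin.sgml" suffix rule above runs every .sgmlin source through the
makesgml tool, which keeps or drops the <!## TAG> ... <!## end> regions
(visible in the .sgmlin files removed later in this patch) according to the
-I include and -E exclude lists.  Below is a minimal sketch of that
filtering idea in C; the file name, option handling, and details are
assumptions for illustration, not the actual makesgml from
src/pgxc/tools/makesgml (for one thing, -E handling is omitted here, since
a region is dropped simply by not being included):

    /* sgmlfilter.c -- minimal sketch of makesgml-style region filtering.
     * Usage (hypothetical): sgmlfilter -I XL -I EN < in.sgmlin > out.sgml
     * keeps <!## XL> and <!## EN> regions and drops all others. */
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        const char *inc[16];
        int         ninc = 0, i;
        int         in_region = 0;   /* inside a <!## TAG> region? */
        int         keeping = 1;     /* does the current text survive? */
        char        line[4096];

        for (i = 1; i + 1 < argc; i += 2)        /* parse "-I TAG" pairs */
            if (strcmp(argv[i], "-I") == 0 && ninc < 16)
                inc[ninc++] = argv[i + 1];

        while (fgets(line, sizeof(line), stdin))
        {
            char tag[64];

            if (sscanf(line, "<!## %63[^>]>", tag) == 1)
            {
                if (strcmp(tag, "end") == 0)     /* region closes */
                {
                    in_region = 0;
                    keeping = 1;
                }
                else                             /* region opens */
                {
                    in_region = 1;
                    keeping = 0;
                    for (i = 0; i < ninc; i++)
                        if (strcmp(tag, inc[i]) == 0)
                            keeping = 1;
                }
                continue;                        /* markers are not emitted */
            }
            if (!in_region || keeping)
                fputs(line, stdout);
        }
        return 0;
    }

With INC_LIST = -I XL -I EN, the <!## XL> blocks in the acronyms.sgmlin
file below survive while the <!## PG> and <!## XC> blocks are dropped,
which is how the same .sgmlin sources produce PostgreSQL-, XC-, or
XL-flavored documentation.
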
diff --git a/doc-xc/src/sgml/Makefile.xc.wk b/doc-xc/src/sgml/Makefile.xc.wk
deleted file mode 100644
index 5bf5fce8b9..0000000000
--- a/doc-xc/src/sgml/Makefile.xc.wk
+++ /dev/null
@@ -1,407 +0,0 @@
-#----------------------------------------------------------------------------
-#
-# PostgreSQL documentation makefile
-#
-# $PostgreSQL: pgsql/doc/src/sgml/Makefile,v 1.148 2010/06/12 21:40:31 tgl Exp $
-#
-#----------------------------------------------------------------------------
-
-# This makefile is for building and installing the documentation.
-# When a release tarball is created, the documentation files are
-# prepared using the distprep target. In Git-based trees these files
-# don't exist, unless explicitly built, so we skip the installation in
-# that case.
-
-
-# Make "html" the default target, since that is what most people tend
-# to want to use.
-html:
-
-subdir = doc/src/sgml
-top_builddir = ../../..
-include $(top_builddir)/src/Makefile.global
-
-
-all: html man
-
-distprep: html distprep-man
-
-
-ifndef COLLATEINDEX
-COLLATEINDEX = $(DOCBOOKSTYLE)/bin/collateindex.pl
-endif
-
-ifndef JADE
-JADE = jade
-endif
-SGMLINCLUDE = -D . -D $(srcdir)
-
-ifndef NSGMLS
-NSGMLS = nsgmls
-endif
-
-ifndef OSX
-OSX = osx
-endif
-
-ifndef XSLTPROC
-XSLTPROC = xsltproc
-endif
-
-override XSLTPROCFLAGS += --stringparam pg.version '$(VERSION)'
-
-
-GENERATED_SGML = bookindex.sgml version.sgml \
- features-supported.sgml features-unsupported.sgml
-
-ALLSGMLIN := $(wildcard $(srcdir)/*.sgmlin $(srcdir)/ref/*.sgmlin)
-
-#ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML)
-ALLSGML := $(ALLSGMLIN:%sgmlin=%sgml) $(GENERATED_SGML)
-
-ALLSGMLTOREMOVE := $(ALLSGMLIN:%sgmlin=%sgml)
-
-
-# Sometimes we don't want this one.
-ALMOSTALLSGML := $(filter-out %bookindex.sgml,$(ALLSGML))
-
-ifdef DOCBOOKSTYLE
-CATALOG = -c $(DOCBOOKSTYLE)/catalog
-endif
-
-# Enable some extra warnings
-# -wfully-tagged needed to throw a warning on missing tags
-# for older tool chains, 2007-08-31
-# Note: try "make SPFLAGS=-wxml" to catch a lot of other dubious constructs,
-# in particular < and & that haven't been made into entities. It's far too
-# noisy to turn on by default, unfortunately.
-override SPFLAGS += -wall -wno-unused-param -wno-empty -wfully-tagged
-
-##
-## Man pages
-##
-
-man distprep-man: man-stamp
-
-man-stamp: stylesheet-man.xsl postgres.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_MAN_FLAGS) $^
- touch $@
-
-
-##
-## HTML
-##
-
-.PHONY: draft
-
-JADE.html.call = $(JADE) $(JADEFLAGS) $(SPFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -t sgml -i output-html
-
-# The draft target creates HTML output in draft mode, without index (for faster build).
-draft: postgres.sgml $(ALMOSTALLSGML) stylesheet.dsl
- $(MKDIR_P) html
- $(JADE.html.call) -V draft-mode $<
- cp $(srcdir)/stylesheet.css html/
-
-html: html-stamp
-
-html-stamp: postgres.sgml $(ALLSGML) stylesheet.dsl
- $(MKDIR_P) html
- $(JADE.html.call) -i include-index $<
- cp $(srcdir)/stylesheet.css html/
- touch $@
-
-# single-page HTML
-postgres.html: postgres.sgml $(ALLSGML) stylesheet.dsl
- $(JADE.html.call) -V nochunks -V rootchunk -V '(define %root-filename% #f)' -V '(define use-output-dir #f)' -i include-index $<
-
-# single-page text
-postgres.txt: postgres.html
- $(LYNX) -force_html -dump -nolist $< > $@
-
-HTML.index: postgres.sgml $(ALMOSTALLSGML) stylesheet.dsl
- @$(MKDIR_P) html
- $(JADE.html.call) -V html-index $<
-
-bookindex.sgml: HTML.index
- LC_ALL=C $(PERL) $(COLLATEINDEX) -f -g -i 'bookindex' -o $@ $<
-
-# Technically, this should depend on Makefile.global, but then
-# version.sgml would need to be rebuilt after every configure run,
-# even in distribution tarballs. So this is cheating a bit, but it
-# will achieve the goal of updating the version number when it
-# changes.
-version.sgml: $(top_srcdir)/configure
- { \
- echo "<!entity version \"$(VERSION)\">"; \
- echo "<!entity majorversion \"$(MAJORVERSION)\">"; \
- } > $@
-
-features-supported.sgml: $(top_srcdir)/src/backend/catalog/sql_feature_packages.txt $(top_srcdir)/src/backend/catalog/sql_features.txt
- $(PERL) $(srcdir)/mk_feature_tables.pl YES $^ > $@
-
-features-unsupported.sgml: $(top_srcdir)/src/backend/catalog/sql_feature_packages.txt $(top_srcdir)/src/backend/catalog/sql_features.txt
- $(PERL) $(srcdir)/mk_feature_tables.pl NO $^ > $@
-
-
-##
-## Print
-##
-
-
-# RTF to allow minor editing for hardcopy
-%.rtf: %.sgml $(ALLSGML)
- $(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -t rtf -V rtf-backend -i output-print -i include-index postgres.sgml
-
-# TeX
-# Regular TeX and pdfTeX have slightly differing requirements, so we
-# need to distinguish the path we're taking.
-
-JADE.tex.call = $(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d $(srcdir)/stylesheet.dsl -t tex -V tex-backend -i output-print -i include-index
-
-%-A4.tex-ps: %.sgml $(ALLSGML)
- $(JADE.tex.call) -V texdvi-output -V '%paper-type%'=A4 -o $@ $<
-
-%-US.tex-ps: %.sgml $(ALLSGML)
- $(JADE.tex.call) -V texdvi-output -V '%paper-type%'=USletter -o $@ $<
-
-%-A4.tex-pdf: %.sgml $(ALLSGML)
- $(JADE.tex.call) -V texpdf-output -V '%paper-type%'=A4 -o $@ $<
-
-%-US.tex-pdf: %.sgml $(ALLSGML)
- $(JADE.tex.call) -V texpdf-output -V '%paper-type%'=USletter -o $@ $<
-
-%.dvi: %.tex-ps
- @rm -f $*.aux $*.log
-# multiple runs are necessary to create proper intra-document links
- jadetex $<
- jadetex $<
- jadetex $<
-
-# PostScript from TeX
-postgres.ps:
- $(error Invalid target; use postgres-A4.ps or postgres-US.ps as targets)
-
-%.ps: %.dvi
- dvips -o $@ $<
-
-postgres.pdf:
- $(error Invalid target; use postgres-A4.pdf or postgres-US.pdf as targets)
-
-%.pdf: %.tex-pdf
- @rm -f $*.aux $*.log $*.out
-# multiple runs are necessary to create proper intra-document links
- pdfjadetex $<
- pdfjadetex $<
- pdfjadetex $<
-
-# Cancel built-in suffix rules, interfering with PS building
-.SUFFIXES:
-.SUFFIXES: .sgml .sgmlin
-
-INC_LIST = -I PG -I EN
-#INC_LIST = -I PGXC -I EN
-
-.sgmlin.sgml:
- makesgml -i $< -o $@ $(INC_LIST) $(EXC_LIST)
-
-
-# This generates an XML version of the flow-object tree. It's useful
-# for debugging DSSSL code, and possibly to interface to some other
-# tools that can make use of this.
-%.fot: %.sgml $(ALLSGML)
- $(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -t fot -i output-print -i include-index -o $@ $<
-
-
-##
-## Semi-automatic generation of some text files.
-##
-
-JADE.text = $(JADE) $(JADEFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -i output-text -t sgml
-LYNX = lynx
-
-INSTALL HISTORY regress_README: % : %.html
- $(PERL) -p -e 's/<H(1|2)$$/<H\1 align=center/g' $< | $(LYNX) -force_html -dump -nolist -stdin > $@
-
-INSTALL.html: standalone-install.sgml installation.sgml version.sgml
- $(JADE.text) -V nochunks standalone-install.sgml installation.sgml > $@
-
-HISTORY.html: generate_history.pl $(wildcard $(srcdir)/release*.sgml)
- $(PERL) $< "$(srcdir)" release.sgml >tempfile_HISTORY.sgml
- $(JADE.text) -V nochunks tempfile_HISTORY.sgml > $@
- rm tempfile_HISTORY.sgml
-
-regress_README.html: regress.sgml
- ( echo '<!doctype chapter PUBLIC "-//OASIS//DTD DocBook V4.2//EN" ['; \
- echo '<!entity % standalone-ignore "IGNORE">'; \
- echo '<!entity % standalone-include "INCLUDE"> ]>'; \
- cat $< ) >tempfile_regress_README.sgml
- $(JADE.text) -V nochunks tempfile_regress_README.sgml > $@
- rm tempfile_regress_README.sgml
-
-
-##
-## XSLT processing
-##
-
-# For obscure reasons, gmake 3.81 complains about circular dependencies
-# if we try to do "make all" in a VPATH build without the explicit
-# $(srcdir) on the postgres.sgml dependency in this rule. gmake bug?
-postgres.xml: $(srcdir)/postgres.sgml $(ALMOSTALLSGML)
- $(OSX) -D. -x lower $< >postgres.xmltmp
- $(PERL) -p -e 's/\[(amp|copy|egrave|gt|lt|mdash|nbsp|ouml|pi|quot|uuml) *\]/\&\1;/g;' \
- -e '$$_ .= qq{<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">\n} if $$. == 1;' \
- <postgres.xmltmp > $@
- rm postgres.xmltmp
-# ' hello Emacs
-
-xslthtml: stylesheet.xsl postgres.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $^
-
-htmlhelp: stylesheet-hh.xsl postgres.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) $^
-
-%-A4.fo: stylesheet-fo.xsl %.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) --stringparam paper.type A4 -o $@ $^
-
-%-US.fo: stylesheet-fo.xsl %.xml
- $(XSLTPROC) $(XSLTPROCFLAGS) --stringparam paper.type USletter -o $@ $^
-
-
-##
-## Experimental Texinfo targets
-##
-
-DB2X_TEXIXML = db2x_texixml
-DB2X_XSLTPROC = db2x_xsltproc
-MAKEINFO = makeinfo
-
-%.texixml: %.xml
- $(DB2X_XSLTPROC) -s texi -g output-file=$(basename $@) $< -o $@
-
-%.texi: %.texixml
- $(DB2X_TEXIXML) --encoding=iso-8859-1//TRANSLIT $< --to-stdout > $@
-
-%.info: %.texi
- $(MAKEINFO) --enable-encoding --no-split --no-validate $< -o $@
-
-
-##
-## Check
-##
-
-# Quick syntax check without style processing
-check: postgres.sgml $(ALMOSTALLSGML) check-tabs
- $(NSGMLS) $(SPFLAGS) $(SGMLINCLUDE) -s $<
-
-
-##
-## Install
-##
-
-install: install-html
-
-ifneq ($(PORTNAME), sco)
-install: install-man
-endif
-
-installdirs:
- $(MKDIR_P) '$(DESTDIR)$(htmldir)'/html $(addprefix '$(DESTDIR)$(mandir)'/man, 1 3 $(sqlmansectnum))
-
-uninstall:
- rm -f '$(DESTDIR)$(htmldir)/html/'* $(addprefix '$(DESTDIR)$(mandir)'/man, 1/* 3/* $(sqlmansectnum)/*)
-
-
-## Install html
-
-install-html: html installdirs
- cp -R $(call vpathsearch,html) '$(DESTDIR)$(htmldir)'
-
-
-## Install man
-
-install-man: man installdirs
-
-sqlmansect ?= 7
-sqlmansectnum = $(shell expr X'$(sqlmansect)' : X'\([0-9]\)')
-
-# Before we install the man pages, we massage the section numbers to
-# follow the local conventions.
-#
-ifeq ($(sqlmansectnum),7)
-install-man:
- cp -R $(foreach dir,man1 man3 man7,$(call vpathsearch,$(dir))) '$(DESTDIR)$(mandir)'
-
-else # sqlmansectnum != 7
-fix_sqlmansectnum = sed -e '/^\.TH/s/"7"/"$(sqlmansect)"/' \
- -e 's/\\fR(7)/\\fR($(sqlmansectnum))/g' \
- -e '1s/^\.so man7/.so man$(sqlmansectnum)/g;1s/^\(\.so.*\)\.7$$/\1.$(sqlmansect)/g'
-
-man: fixed-man-stamp
-
-fixed-man-stamp: man-stamp
- @$(MKDIR_P) $(addprefix fixedman/,man1 man3 man$(sqlmansectnum))
- for file in $(call vpathsearch,man1)/*.1; do $(fix_sqlmansectnum) $$file >fixedman/man1/`basename $$file` || exit; done
- for file in $(call vpathsearch,man3)/*.3; do $(fix_sqlmansectnum) $$file >fixedman/man3/`basename $$file` || exit; done
- for file in $(call vpathsearch,man7)/*.7; do $(fix_sqlmansectnum) $$file >fixedman/man$(sqlmansectnum)/`basename $$file | sed s/\.7$$/.$(sqlmansect)/` || exit; done
-
-install-man:
- cp -R $(foreach dir,man1 man3 man$(sqlmansectnum),fixedman/$(dir)) '$(DESTDIR)$(mandir)'
-
-clean: clean-man clean-sgml
-
-.PHONY: clean-man
-clean-man:
- rm -rf fixedman/ fixed-man-stamp
-
-.PHONY: clean-sgml
-clean-sgml:
- rm -rf $(ALLSGML)
-
-endif # sqlmansectnum != 7
-
-# tabs are harmless, but it is best to avoid them in SGML files
-check-tabs:
- @( ! grep ' ' $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) ) || (echo "Tabs appear in SGML files"; exit 1)
-
-##
-## Clean
-##
-
-# This allows removing some files from the distribution tarballs while
-# keeping the dependencies satisfied.
-.SECONDARY: postgres.xml $(GENERATED_SGML) HTML.index
-.SECONDARY: INSTALL.html HISTORY.html regress_README.html
-.SECONDARY: %-A4.tex-ps %-US.tex-ps %-A4.tex-pdf %-US.tex-pdf
-
-clean:
-# text --- these are shipped, but not in this directory
- rm -f INSTALL HISTORY regress_README
- rm -f INSTALL.html HISTORY.html regress_README.html
-# single-page output
- rm -f postgres.html postgres.txt
-# print
- rm -f *.rtf *.tex-ps *.tex-pdf *.dvi *.aux *.log *.ps *.pdf *.out *.fot
-# index
- rm -f HTML.index $(GENERATED_SGML)
-# XSLT
- rm -f postgres.xml postgres.xmltmp htmlhelp.hhp toc.hhc index.hhk *.fo
-# Texinfo
- rm -f *.texixml *.texi *.info db2texi.refs
-# sgml
- rm -f $(ALLSGMLTOREMOVE)
-
-distclean: clean
-
-maintainer-clean: distclean
-# HTML
- rm -fr html/ html-stamp
-# man
- rm -rf man1/ man3/ man7/ man-stamp
-
-.PHONY: sgmlfiles
-
-INC_LIST = -I XC -I EN
-EXC_LIST = -E PG -E JP
-
-sgmlfiles:
- ./makesgmlfiles $(INC_LIST) $(EXC_LIST)
diff --git a/doc-xc/src/sgml/README.links b/doc-xc/src/sgml/README.links
deleted file mode 100644
index 2668b00d7f..0000000000
--- a/doc-xc/src/sgml/README.links
+++ /dev/null
@@ -1,45 +0,0 @@
-<!-- doc/src/sgml/README.links -->
-
-Linking within SGML documents can be confusing, so here is a summary:
-
-
-Intra-document Linking
-----------------------
-
-<xref>
- use to get chapter/section # from the title of the target
- link, or xreflabel if defined at the target; has no close tag
- http://www.oasis-open.org/docbook/documentation/reference/html/xref.html
-
-<link>
- use to supply text for the link, requires </link>
- http://www.oasis-open.org/docbook/documentation/reference/html/link.html
-
-linkend=
- controls the target of the link/xref, required
-
-endterm=
- for <xref>, allows the text of the link/xref to be taken from a
- different link target title
-
-
-External Linking
-----------------
-
-<ulink>
- like <link>, but uses a URL (not a document target); requires
- </ulink>; if no text is specified, the URL appears as the link
- text
- http://www.oasis-open.org/docbook/documentation/reference/html/ulink.html
-
-url=
- used by <ulink> to specify the URL, required
-
-
-Guidelines
-----------
-
-o If you want to supply text, use <link>, else <xref>
-o Do not use text with <ulink> so the URL appears in printed output
-o Specific nouns like GUC variables, SQL commands, and contrib modules
- usually have xreflabels
diff --git a/doc-xc/src/sgml/acronyms.sgmlin b/doc-xc/src/sgml/acronyms.sgmlin
deleted file mode 100644
index f61d477b35..0000000000
--- a/doc-xc/src/sgml/acronyms.sgmlin
+++ /dev/null
@@ -1,832 +0,0 @@
-<!-- doc/src/sgml/acronyms.sgml -->
-
-<appendix id="acronyms">
- <title>Acronyms</title>
-
- <para>
-<!## PG>
- This is a list of acronyms commonly used in the <productname>PostgreSQL</>
- documentation and in discussions about <productname>PostgreSQL</>.
-<!## end>
-<!## XC>
- This is a list of acronyms commonly used in the <productname>Postgres-XC</>
- documentation and in discussions about <productname>Postgres-XC</>.
-<!## end>
-<!## XL>
- This is a list of acronyms commonly used in the <productname>Postgres-XL</>
- documentation and in discussions about <productname>Postgres-XL</>.
-<!## end>
-
- <variablelist>
-
- <varlistentry>
- <term><acronym>ANSI</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/American_National_Standards_Institute">
- American National Standards Institute</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>API</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/API">Application Programming Interface</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>ASCII</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Ascii">American Standard
- Code for Information Interchange</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>BKI</acronym></term>
- <listitem>
- <para>
- <link linkend="bki">Backend Interface</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>CA</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Certificate_authority">Certificate Authority</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>CIDR</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing">Classless
- Inter-Domain Routing</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>CPAN</acronym></term>
- <listitem>
- <para>
- <ulink url="http://www.cpan.org/">Comprehensive Perl Archive Network</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>CRL</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Certificate_revocation_list">Certificate
- Revocation List</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>CSV</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Comma-separated_values">Comma
- Separated Values</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>CTE</acronym></term>
- <listitem>
- <para>
- <link linkend="queries-with">Common Table Expression</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>CVE</acronym></term>
- <listitem>
- <para>
- <ulink url="http://cve.mitre.org/">Common Vulnerabilities and Exposures</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>DBA</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Database_administrator">Database
- Administrator</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>DBI</acronym></term>
- <listitem>
- <para>
- <ulink url="http://dbi.perl.org/">Database Interface (Perl)</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>DBMS</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Dbms">Database Management
- System</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>DDL</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Data_Definition_Language">Data
- Definition Language</ulink>, SQL commands such as <command>CREATE
- TABLE</>, <command>ALTER USER</>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>DML</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Data_Manipulation_Language">Data
- Manipulation Language</ulink>, SQL commands such as <command>INSERT</>,
- <command>UPDATE</>, <command>DELETE</>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>DST</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Daylight_saving_time">Daylight
- Saving Time</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>ECPG</acronym></term>
- <listitem>
- <para>
-<!## PG>
- <link linkend="ecpg">Embedded C for PostgreSQL</link>
-<!## end>
-<!## XC>
- <link linkend="ecpg">Embedded C for Postgres-XC</link>
-<!## end>
-<!## XL>
- <link linkend="ecpg">Embedded C for Postgres-XL</link>
-<!## end>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>ESQL</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Embedded_SQL">Embedded
- SQL</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>FAQ</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/FAQ">Frequently Asked
- Questions</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>FSM</acronym></term>
- <listitem>
- <para>
- <link linkend="storage-fsm">Free Space Map</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>GEQO</acronym></term>
- <listitem>
- <para>
- <link linkend="geqo">Genetic Query Optimizer</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>GIN</acronym></term>
- <listitem>
- <para>
- <link linkend="GIN">Generalized Inverted Index</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>GiST</acronym></term>
- <listitem>
- <para>
- <link linkend="GiST">Generalized Search Tree</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>Git</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Git_(software)">Git</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>GMT</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/GMT">Greenwich Mean Time</ulink>
- </para>
- </listitem>
- </varlistentry>
-
-<!## XC>
- <varlistentry>
- <term><acronym>GTM</acronym></term>
- <listitem>
- <para>
- Global Transaction Manager
- </para>
- </listitem>
- </varlistentry>
-<!## end>
-<!## XL>
- <varlistentry>
- <term><acronym>GTM</acronym></term>
- <listitem>
- <para>
- Global Transaction Manager
- </para>
- </listitem>
- </varlistentry>
-<!## end>
-
- <varlistentry>
- <term><acronym>GSSAPI</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Generic_Security_Services_Application_Program_Interface">Generic
- Security Services Application Programming Interface</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>GUC</acronym></term>
- <listitem>
- <para>
- <link linkend="config-setting">Grand Unified Configuration</link>,
-<!## PG>
- the <productname>PostgreSQL</> subsystem that handles server configuration
-<!## end>
-<!## XC>
- the <productname>Postgres-XC</> subsystem that handles server configuration
-<!## end>
-<!## XL>
- the <productname>Postgres-XL</> subsystem that handles server configuration
-<!## end>
- </para>
- </listitem>
- </varlistentry>
-
-<!## XC>
- <varlistentry>
- <term><acronym>GXID</acronym></term>
- <listitem>
- <para>
- Global Transaction Identifier
- </para>
- </listitem>
- </varlistentry>
-<!## end>
-<!## XL>
- <varlistentry>
- <term><acronym>GXID</acronym></term>
- <listitem>
- <para>
- Global Transaction Identifier
- </para>
- </listitem>
- </varlistentry>
-<!## end>
-
- <varlistentry>
- <term><acronym>HBA</acronym></term>
- <listitem>
- <para>
- <link linkend="auth-pg-hba-conf">Host-Based Authentication</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>HOT</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://git.postgresql.org/gitweb?p=postgresql.git;a=blob;f=src/backend/access/heap/README.HOT;hb=HEAD">Heap-Only
- Tuples</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>IEC</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/International_Electrotechnical_Commission">International
- Electrotechnical Commission</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>IEEE</acronym></term>
- <listitem>
- <para>
- <ulink url="http://standards.ieee.org/">Institute of Electrical and
- Electronics Engineers</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>IPC</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Inter-process_communication">Inter-Process
- Communication</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>ISO</acronym></term>
- <listitem>
- <para>
- <ulink url="http://www.iso.org/iso/home.htm">International Organization for
- Standardization</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>ISSN</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Issn">International Standard
- Serial Number</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>JDBC</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Java_Database_Connectivity">Java
- Database Connectivity</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>LDAP</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol">Lightweight
- Directory Access Protocol</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>MSVC</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Visual_C++"><productname>Microsoft
- Visual C</productname></ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>MVCC</acronym></term>
- <listitem>
- <para>
- <link linkend="mvcc">Multi-Version Concurrency Control</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>NLS</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Internationalization_and_localization">National
- Language Support</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>ODBC</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Open_Database_Connectivity">Open
- Database Connectivity</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>OID</acronym></term>
- <listitem>
- <para>
- <link linkend="datatype-oid">Object Identifier</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>OLAP</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Olap">Online Analytical
- Processing</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>OLTP</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/OLTP">Online Transaction
- Processing</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>ORDBMS</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/ORDBMS">Object-Relational
- Database Management System</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>PAM</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Pluggable_Authentication_Modules">Pluggable
- Authentication Modules</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>PGSQL</acronym></term>
- <listitem>
- <para>
-<!## PG>
- <link linkend="postgres"><productname>PostgreSQL</></link>
-<!## end>
-<!## XC>
- <link linkend="postgres"><productname>Postgres-XC</></link>
-<!## end>
-<!## XL>
- <link linkend="postgres"><productname>Postgres-XL</></link>
-<!## end>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>PGXS</acronym></term>
- <listitem>
- <para>
-<!## PG>
- <link linkend="extend-pgxs"><productname>PostgreSQL</> Extension System</link>
-<!## end>
-<!## XC>
- <link linkend="extend-pgxs"><productname>Postgres-XC</> Extension System</link>
-<!## end>
-<!## XL>
- <link linkend="extend-pgxs"><productname>Postgres-XL</> Extension System</link>
-<!## end>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>PID</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Process_identifier">Process Identifier</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>PITR</acronym></term>
- <listitem>
- <para>
- <link linkend="continuous-archiving">Point-In-Time
- Recovery</link> (Continuous Archiving)
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>PL</acronym></term>
- <listitem>
- <para>
- <link linkend="server-programming">Procedural Languages (server-side)</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>POSIX</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/POSIX">Portable Operating
- System Interface</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>RDBMS</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Relational_database_management_system">Relational
- Database Management System</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>RFC</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Request_for_Comments">Request For
- Comments</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>SGML</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/SGML">Standard Generalized
- Markup Language</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>SPI</acronym></term>
- <listitem>
- <para>
- <link linkend="spi">Server Programming Interface</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>SQL</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/SQL">Structured Query Language</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>SRF</acronym></term>
- <listitem>
- <para>
- <link linkend="xfunc-c-return-set">Set-Returning Function</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>SSH</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Secure_Shell">Secure
- Shell</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>SSL</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Secure_Sockets_Layer">Secure Sockets Layer</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>SSPI</acronym></term>
- <listitem>
- <para>
- <ulink url="http://msdn.microsoft.com/en-us/library/aa380493%28VS.85%29.aspx">Security
- Support Provider Interface</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>SYSV</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/System_V">Unix System V</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>TCP/IP</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Transmission_Control_Protocol">Transmission
- Control Protocol (TCP) / Internet Protocol (IP)</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>TID</acronym></term>
- <listitem>
- <para>
- <link linkend="datatype-oid">Tuple Identifier</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>TOAST</acronym></term>
- <listitem>
- <para>
- <link linkend="storage-toast">The Oversized-Attribute Storage Technique</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>TPC</acronym></term>
- <listitem>
- <para>
- <ulink url="http://www.tpc.org/">Transaction Processing
- Performance Council</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>URL</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/URL">Uniform Resource
- Locator</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>UTC</acronym></term>
- <listitem>
- <para>
- <ulink
- url="http://en.wikipedia.org/wiki/Coordinated_Universal_Time">Coordinated
- Universal Time</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>UTF</acronym></term>
- <listitem>
- <para>
- <ulink url="http://www.unicode.org/">Unicode Transformation
- Format</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>UTF8</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/Utf8">Eight-Bit Unicode
- Transformation Format</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>UUID</acronym></term>
- <listitem>
- <para>
- <link linkend="datatype-uuid">Universally Unique Identifier</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>WAL</acronym></term>
- <listitem>
- <para>
- <link linkend="wal">Write-Ahead Log</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>XID</acronym></term>
- <listitem>
- <para>
- <link linkend="datatype-oid">Transaction Identifier</link>
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><acronym>XML</acronym></term>
- <listitem>
- <para>
- <ulink url="http://en.wikipedia.org/wiki/XML">Extensible Markup
- Language</ulink>
- </para>
- </listitem>
- </varlistentry>
-
- </variablelist>
- </para>
-
-</appendix>
diff --git a/doc-xc/src/sgml/add-node.sgmlin b/doc-xc/src/sgml/add-node.sgmlin
deleted file mode 100644
index b8bf1816f4..0000000000
--- a/doc-xc/src/sgml/add-node.sgmlin
+++ /dev/null
@@ -1,273 +0,0 @@
-<!-- doc/src/sgml/add-node.sgml -->
-
-<chapter id="add-node">
- <title>Adding a New Node</title>
-
- <indexterm zone="add-node">
- <primary>Add a new node</primary>
- </indexterm>
-
-&xlonly;
-
- <para>
-    This chapter outlines the steps needed to add a new Coordinator or Datanode to a running cluster.
-    Note that an easier way to do this is to use the pgxc_ctl utility.
- </para>
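-
- <para>
-  For reference, pgxc_ctl can carry out the whole procedure described below
-  with a single command. The invocation sketched here is only indicative;
-  the exact subcommand and argument order vary between versions, so check
-  the pgxc_ctl reference before relying on it:
- </para>
- <programlisting>
- pgxc_ctl "add coordinator master coord_3 localhost 5455 6668 /usr/local/pgsql/data_cord3 none none"
- </programlisting>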
-
-
- <sect1 id="add-node-coordinator">
- <title>Adding a New Coordinator</title>
-
- <indexterm zone="add-node-coordinator">
- <primary>Add a new coordinator</primary>
- </indexterm>
-
- <para>
- The following steps should be performed to add a new coordinator to a running cluster:
- </para>
-
- <para>
- <orderedlist>
- <listitem>
- <para>Initialize the new coordinator. The following example initializes a coordinator named coord_3.</para>
- <programlisting>
- /usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data_cord3 --nodename coord_3
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Make the necessary changes in postgresql.conf of the new coordinator;
- in particular, specify the new coordinator name and pooler port.
- </para>
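- <para>
-  For example, the relevant settings might look like this (the port value
-  is illustrative; pick any free port on the host):
- </para>
- <programlisting>
- # postgresql.conf of the new coordinator
- pgxc_node_name = 'coord_3'
- pooler_port = 6668        # must not clash with other nodes on this host
- </programlisting>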
- </listitem>
-
- <listitem>
- <para>
- Connect to any of the existing coordinators and lock the cluster for backup; do not close this session.
- The following example assumes a coordinator is running on port 5432. Make sure the function call returns true.
- The detailed description of the function <function>pgxc_lock_for_backup</> can be found
- in <xref linkend="functions-pgxc-add-new-node">.
- </para>
- <programlisting>
- ./psql postgres -p 5432
- select pgxc_lock_for_backup();
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Connect to any of the existing coordinators and take a backup of the database.
- Please note that only the schema (i.e., no data) is to be dumped.
- Also note the use of <option>--include-nodes</>, so that each <command>CREATE TABLE</> contains the <command>TO NODE</> clause.
- Similarly, <option>--dump-nodes</> ensures that the dump contains the existing nodes and node groups.
- </para>
- <programlisting>
- ./pg_dumpall -p 5432 -s --include-nodes --dump-nodes --file=/some/valid/path/some_file_name.sql
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Start the new coordinator in restore mode by specifying <option>--restoremode</>.
- The following example starts the new coordinator on port 5455.
- </para>
- <programlisting>
- ./postgres --restoremode -D ../data_cord3 -p 5455
- </programlisting>
- <para>
- Alternatively, you can use <literal>pg_ctl</literal> with the <option>-Z restoremode</option> option.
- </para>
- <programlisting>
- ./pg_ctl start -Z restoremode -D ../data_cord3 -p 5455
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Restore the backup (taken in step 4) by connecting to the new coordinator directly.
- </para>
- <programlisting>
- ./psql -d postgres -f /some/valid/path/some_file_name.sql -p 5455
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Quit the new coordinator.
- </para>
- </listitem>
-
- <listitem>
- <para>
- Start the new coordinator, this time specifying <option>--coordinator</>.
- The following example starts the new coordinator on port 5455.
- </para>
- <programlisting>
- ./postgres --coordinator -D ../data_cord3 -p 5455
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Create the new coordinator on the rest of the coordinators and reload the configuration.
- The following example creates coord_3, with host localhost and port 5455.
- </para>
- <programlisting>
- CREATE NODE COORD_3 WITH (HOST = 'localhost', type = 'coordinator', PORT = 5455);
- SELECT pgxc_pool_reload();
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Quit the session opened in step 3; this will unlock the cluster. The new coordinator is now ready.
- </para>
- </listitem>
-
- </orderedlist>
- </para>
-
- </sect1>
-
- <sect1 id="add-node-datanode">
- <title>Adding a New Datanode</title>
-
- <indexterm zone="add-node-datanode">
- <primary>Add a new Datanode</primary>
- </indexterm>
-
- <para>
- The following steps should be performed to add a new Datanode to a running cluster:
- </para>
-
- <para>
- <orderedlist>
-
- <listitem>
- <para>
- Initialize the new datanode. The following example initializes a new datanode named data_node_3.
- </para>
- <programlisting>
- /usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data3 --nodename data_node_3
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Make the necessary changes in postgresql.conf of the new datanode; in particular, specify the new datanode name.
- </para>
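- <para>
-  For example (a minimal sketch):
- </para>
- <programlisting>
- # postgresql.conf of the new datanode
- pgxc_node_name = 'data_node_3'
- </programlisting>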
- </listitem>
-
- <listitem>
- <para>
- Connect to any of the existing coordinators and lock the cluster for backup; do not close this session.
- The following example assumes a coordinator is running on port 5432. Make sure the function call returns true.
- The detailed description of the function <function>pgxc_lock_for_backup</> can be found
- in <xref linkend="functions-pgxc-add-new-node">.
- </para>
- <programlisting>
- ./psql postgres -p 5432
- select pgxc_lock_for_backup();
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Connect to any of the existing datanodes and take a backup of the database.
- Please note that only the schema (i.e., no data) is to be dumped.
- The following example assumes that a datanode is running on port 15432.
- </para>
- <programlisting>
- ./pg_dumpall -p 15432 -s --file=/some/valid/path/some_file_name.sql
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Start the new datanode in restore mode by specifying <option>--restoremode</>.
- The following example starts the new datanode on port 35432.
- </para>
- <programlisting>
- ./postgres --restoremode -D ../data3 -p 35432
- </programlisting>
- <para>
- Alternatively, you can use <literal>pg_ctl</literal> with the <option>-Z restoremode</option> option.
- </para>
- <programlisting>
- ./pg_ctl start -Z restoremode -D ../data3 -p 35432
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Restore the backup (taken in step 4) by connecting to the new datanode directly.
- </para>
- <programlisting>
- ./psql -d postgres -f /some/valid/path/some_file_name.sql -p 35432
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Quit the new datanode.
- </para>
- </listitem>
-
- <listitem>
- <para>
- Start the new datanode, this time specifying <option>--datanode</>.
- The following example starts the new datanode on port 35432.
- </para>
- <programlisting>
- ./postgres --datanode -D ../data3 -p 35432
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Create the new datanode on all the coordinators and reload configuration.
- The following example creates data_node_3, with host localhost and port 35432.
- </para>
- <programlisting>
- CREATE NODE DATA_NODE_3 WITH (HOST = 'localhost', type = 'datanode', PORT = 35432);
- SELECT pgxc_pool_reload();
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Create the new datanode on all the other datanodes as well and reload the configuration.
- The following example creates data_node_3, with host localhost and port 35432.
- </para>
- <programlisting>
- EXECUTE DIRECT ON (DATA_NODE_1) 'CREATE NODE DATA_NODE_3 WITH (HOST = ''localhost'', type = ''datanode'', PORT = 35432)';
- EXECUTE DIRECT ON (DATA_NODE_2) 'CREATE NODE DATA_NODE_3 WITH (HOST = ''localhost'', type = ''datanode'', PORT = 35432)';
- EXECUTE DIRECT ON (DATA_NODE_3) 'ALTER NODE DATA_NODE_3 WITH (HOST = ''localhost'', type = ''datanode'', PORT = 35432)';
- EXECUTE DIRECT ON (DATA_NODE_1) 'SELECT pgxc_pool_reload()';
- EXECUTE DIRECT ON (DATA_NODE_2) 'SELECT pgxc_pool_reload()';
- EXECUTE DIRECT ON (DATA_NODE_3) 'SELECT pgxc_pool_reload()';
- </programlisting>
- </listitem>
-
- <listitem>
- <para>
- Quit the session opened in step 3; this will unlock the cluster.
- </para>
- </listitem>
-
- <listitem>
- <para>
-  The new datanode is now ready. Redistribute existing data by using
-  <command>ALTER TABLE <replaceable>my_table</replaceable> ADD NODE (DATA_NODE_3)</>.
- </para>
- </listitem>
-
- </orderedlist>
- </para>
-
- </sect1>
-
-</chapter>
diff --git a/doc-xc/src/sgml/adminpack.sgmlin b/doc-xc/src/sgml/adminpack.sgmlin
deleted file mode 100644
index fef740a815..0000000000
--- a/doc-xc/src/sgml/adminpack.sgmlin
+++ /dev/null
@@ -1,47 +0,0 @@
-<!-- doc/src/sgml/adminpack.sgml -->
-
-<sect1 id="adminpack" xreflabel="adminpack">
- <title>adminpack</title>
-
- <indexterm zone="adminpack">
- <primary>adminpack</primary>
- </indexterm>
-&common;
- <para>
- <filename>adminpack</> provides a number of support functions which
- <application>pgAdmin</> and other administration and management tools can
- use to provide additional functionality, such as remote management
- of server log files.
- </para>
-
- <sect2>
- <title>Functions Implemented</title>
-
-&common;
- <para>
- The functions implemented by <filename>adminpack</> can only be run by a
- superuser. Here's a list of these functions:
-
-<programlisting>
-int8 pg_catalog.pg_file_write(fname text, data text, append bool)
-bool pg_catalog.pg_file_rename(oldname text, newname text, archivename text)
-bool pg_catalog.pg_file_rename(oldname text, newname text)
-bool pg_catalog.pg_file_unlink(fname text)
-setof record pg_catalog.pg_logdir_ls()
-
-/* Renaming of existing backend functions for pgAdmin compatibility */
-text pg_catalog.pg_file_read(fname text, offset bigint, len bigint)
-bigint pg_catalog.pg_file_length(text)
-int4 pg_catalog.pg_logfile_rotate()
-</programlisting>
- </para>
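-
- <para>
-  As a brief, illustrative sketch (relative paths are resolved within the
-  cluster's data directory):
- </para>
-
-<programlisting>
-SELECT pg_catalog.pg_file_write('hello.txt', 'hello', false);
-SELECT pg_catalog.pg_file_length('hello.txt');
-SELECT pg_catalog.pg_file_unlink('hello.txt');
-</programlisting>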
-
- <note>
- <para>
- Functions of this module run only on the Coordinator you are connected to.
- </para>
- </note>
-
- </sect2>
-
-</sect1>
diff --git a/doc-xc/src/sgml/advanced.sgmlin b/doc-xc/src/sgml/advanced.sgmlin
deleted file mode 100644
index daae1dc8b2..0000000000
--- a/doc-xc/src/sgml/advanced.sgmlin
+++ /dev/null
@@ -1,929 +0,0 @@
-<!-- doc/src/sgml/advanced.sgml -->
-
- <chapter id="tutorial-advanced">
- <title>Advanced Features</title>
-
- <sect1 id="tutorial-advanced-intro">
- <title>Introduction</title>
-
-&common;
- <para>
-<!## PG>
- In the previous chapter we have covered the basics of using
- <acronym>SQL</acronym> to store and access your data in
- <productname>PostgreSQL</productname>. We will now discuss some
- more advanced features of <acronym>SQL</acronym> that simplify
- management and prevent loss or corruption of your data. Finally,
- we will look at some <productname>PostgreSQL</productname>
- extensions.
-<!## end>
-<!## XC>
- In the previous chapter we have covered the basics of using
- <acronym>SQL</acronym> to store and access your data in
- <productname>Postgres-XC</productname>. We will now discuss some
- more advanced features of <acronym>SQL</acronym> that simplify
- management and prevent loss or corruption of your data. Finally,
- we will look at some <productname>Postgres-XC</productname>
- extensions.
-<!## end>
-<!## XL>
- In the previous chapter we have covered the basics of using
- <acronym>SQL</acronym> to store and access your data in
- <productname>Postgres-XL</productname>. We will now discuss some
- more advanced features of <acronym>SQL</acronym> that simplify
- management and prevent loss or corruption of your data. Finally,
- we will look at some <productname>Postgres-XL</productname>
- extensions.
-<!## end>
- </para>
-
- <para>
- This chapter will on occasion refer to examples found in <xref
- linkend="tutorial-sql"> to change or improve them, so it will be
- useful to have read that chapter. Some examples from
- this chapter can also be found in
- <filename>advanced.sql</filename> in the tutorial directory. This
- file also contains some sample data to load, which is not
- repeated here. (Refer to <xref linkend="tutorial-sql-intro"> for
- how to use the file.)
- </para>
- </sect1>
-
-
- <sect1 id="tutorial-views">
- <title>Views</title>
-
- <indexterm zone="tutorial-views">
- <primary>view</primary>
- </indexterm>
-&common;
- <para>
- Refer back to the queries in <xref linkend="tutorial-join">.
- Suppose the combined listing of weather records and city location
- is of particular interest to your application, but you do not want
- to type the query each time you need it. You can create a
- <firstterm>view</firstterm> over the query, which gives a name to
- the query that you can refer to like an ordinary table:
-
-<programlisting>
-CREATE VIEW myview AS
- SELECT city, temp_lo, temp_hi, prcp, date, location
- FROM weather, cities
- WHERE city = name;
-
-SELECT * FROM myview;
-</programlisting>
- </para>
-
- <para>
- Making liberal use of views is a key aspect of good SQL database
- design. Views allow you to encapsulate the details of the
- structure of your tables, which might change as your application
- evolves, behind consistent interfaces.
- </para>
-
- <para>
- Views can be used in almost any place a real table can be used.
- Building views upon other views is not uncommon.
- </para>
- </sect1>
-
-
- <sect1 id="tutorial-fk">
- <title>Foreign Keys</title>
-
- <indexterm zone="tutorial-fk">
- <primary>foreign key</primary>
- </indexterm>
-
- <indexterm zone="tutorial-fk">
- <primary>referential integrity</primary>
- </indexterm>
-&common;
- <para>
- Recall the <classname>weather</classname> and
- <classname>cities</classname> tables from <xref
- linkend="tutorial-sql">. Consider the following problem: You
- want to make sure that no one can insert rows in the
- <classname>weather</classname> table that do not have a matching
- entry in the <classname>cities</classname> table. This is called
- maintaining the <firstterm>referential integrity</firstterm> of
- your data. In simplistic database systems this would be
- implemented (if at all) by first looking at the
- <classname>cities</classname> table to check if a matching record
- exists, and then inserting or rejecting the new
- <classname>weather</classname> records. This approach has a
- number of problems and is very inconvenient, so
-<!## PG>
- <productname>PostgreSQL</productname> can do this for you.
-<!## end>
-<!## XC>
- <productname>Postgres-XC</productname> can do this for you.
-<!## end>
-<!## XL>
- <productname>Postgres-XL</productname> can do this for you.
-<!## end>
- </para>
-
- <para>
- The new declaration of the tables would look like this:
-
-<programlisting>
-CREATE TABLE cities (
- city varchar(80) primary key,
- location point
-);
-
-CREATE TABLE weather (
- city varchar(80) references cities(city),
- temp_lo int,
- temp_hi int,
- prcp real,
- date date
-);
-</programlisting>
-
- Now try inserting an invalid record:
-
-<programlisting>
-INSERT INTO weather VALUES ('Berkeley', 45, 53, 0.0, '1994-11-28');
-</programlisting>
-
-<screen>
-ERROR: insert or update on table "weather" violates foreign key constraint "weather_city_fkey"
-DETAIL: Key (city)=(Berkeley) is not present in table "cities".
-</screen>
- </para>
-
- <para>
- The behavior of foreign keys can be finely tuned to your
- application. We will not go beyond this simple example in this
- tutorial, but just refer you to <xref linkend="ddl">
- for more information. Making correct use of
- foreign keys will definitely improve the quality of your database
- applications, so you are strongly encouraged to learn about them.
- </para>
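-
- <para>
-  As one small taste of what is possible, a referencing column can declare
-  what should happen when the referenced row is deleted. A minimal sketch:
- </para>
-
-<programlisting>
-CREATE TABLE weather (
-        city            varchar(80) references cities(city) ON DELETE CASCADE,
-        temp_lo         int,
-        temp_hi         int,
-        prcp            real,
-        date            date
-);
-</programlisting>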
-
-<!## XC>
-&xconly;
- <para>
- Please note that, for distributed tables, primary key and foreign key
- constraints are allowed only on columns that serve as the distribution
- key. By default, <productname>Postgres-XC</>
- distributes each row of a table based upon the value of the first
- column of the table. You can choose any column as the basis of
- table distribution, or you can keep copies of a table on all the
- Datanodes.
- </para>
- <para>
- Please refer to <xref linkend="sql-select"> for details.
- </para>
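-
- <para>
-  As an illustration only (a minimal sketch; see <command>CREATE TABLE</>
-  for the exact syntax):
- </para>
-
-<programlisting>
-CREATE TABLE weather (
-    city    varchar(80),
-    temp_lo int
-) DISTRIBUTE BY HASH (city);      -- rows are spread across Datanodes by city
-
-CREATE TABLE cities (
-    city     varchar(80) primary key,
-    location point
-) DISTRIBUTE BY REPLICATION;      -- every Datanode keeps a full copy
-</programlisting>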
-
- </sect1>
-
-
- <sect1 id="tutorial-transactions">
- <title>Transactions</title>
-
- <indexterm zone="tutorial-transactions">
- <primary>transaction</primary>
- </indexterm>
-
-&common;
- <para>
- <firstterm>Transactions</> are a fundamental concept of all database
- systems. The essential point of a transaction is that it bundles
- multiple steps into a single, all-or-nothing operation. The intermediate
- states between the steps are not visible to other concurrent transactions,
- and if some failure occurs that prevents the transaction from completing,
- then none of the steps affect the database at all.
- </para>
-
- <para>
- For example, consider a bank database that contains balances for various
- customer accounts, as well as total deposit balances for branches.
- Suppose that we want to record a payment of $100.00 from Alice's account
- to Bob's account. Simplifying outrageously, the SQL commands for this
- might look like:
-
-<programlisting>
-UPDATE accounts SET balance = balance - 100.00
- WHERE name = 'Alice';
-UPDATE branches SET balance = balance - 100.00
- WHERE name = (SELECT branch_name FROM accounts WHERE name = 'Alice');
-UPDATE accounts SET balance = balance + 100.00
- WHERE name = 'Bob';
-UPDATE branches SET balance = balance + 100.00
- WHERE name = (SELECT branch_name FROM accounts WHERE name = 'Bob');
-</programlisting>
- </para>
-
- <para>
- The details of these commands are not important here; the important
- point is that there are several separate updates involved to accomplish
- this rather simple operation. Our bank's officers will want to be
- assured that either all these updates happen, or none of them happen.
- It would certainly not do for a system failure to result in Bob
- receiving $100.00 that was not debited from Alice. Nor would Alice long
- remain a happy customer if she was debited without Bob being credited.
- We need a guarantee that if something goes wrong partway through the
- operation, none of the steps executed so far will take effect. Grouping
- the updates into a <firstterm>transaction</> gives us this guarantee.
- A transaction is said to be <firstterm>atomic</>: from the point of
- view of other transactions, it either happens completely or not at all.
- </para>
-
- <para>
- We also want a
- guarantee that once a transaction is completed and acknowledged by
- the database system, it has indeed been permanently recorded
- and won't be lost even if a crash ensues shortly thereafter.
- For example, if we are recording a cash withdrawal by Bob,
- we do not want any chance that the debit to his account will
- disappear in a crash just after he walks out the bank door.
- A transactional database guarantees that all the updates made by
- a transaction are logged in permanent storage (i.e., on disk) before
- the transaction is reported complete.
- </para>
-
- <para>
- Another important property of transactional databases is closely
- related to the notion of atomic updates: when multiple transactions
- are running concurrently, each one should not be able to see the
- incomplete changes made by others. For example, if one transaction
- is busy totalling all the branch balances, it would not do for it
- to include the debit from Alice's branch but not the credit to
- Bob's branch, nor vice versa. So transactions must be all-or-nothing
- not only in terms of their permanent effect on the database, but
- also in terms of their visibility as they happen. The updates made
- so far by an open transaction are invisible to other transactions
- until the transaction completes, whereupon all the updates become
- visible simultaneously.
- </para>
-
-<!## end>
-<!## XL>
-&xlonly;
- <para>
- Please note that, for distributed tables, primary key and foreign key
- constraints are allowed only on columns that serve as the distribution
- key. By default, <productname>Postgres-XL</>
- distributes each row of a table based upon the value of the first
- column of the table. You can choose any column as the basis of
- table distribution, or you can keep copies of a table on all the
- Datanodes by specifying that it should be distributed by replication.
- </para>
- <para>
- Please refer to <xref linkend="sql-select"> for details.
- </para>
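-
- <para>
-  As an illustration only (a minimal sketch; see <command>CREATE TABLE</>
-  for the exact syntax):
- </para>
-
-<programlisting>
-CREATE TABLE weather (
-    city    varchar(80),
-    temp_lo int
-) DISTRIBUTE BY HASH (city);      -- rows are spread across Datanodes by city
-
-CREATE TABLE cities (
-    city     varchar(80) primary key,
-    location point
-) DISTRIBUTE BY REPLICATION;      -- every Datanode keeps a full copy
-</programlisting>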
-
- </sect1>
-
-
- <sect1 id="tutorial-transactions">
- <title>Transactions</title>
-
- <indexterm zone="tutorial-transactions">
- <primary>transaction</primary>
- </indexterm>
-
-&common;
- <para>
- <firstterm>Transactions</> are a fundamental concept of all database
- systems. The essential point of a transaction is that it bundles
- multiple steps into a single, all-or-nothing operation. The intermediate
- states between the steps are not visible to other concurrent transactions,
- and if some failure occurs that prevents the transaction from completing,
- then none of the steps affect the database at all.
- </para>
-
- <para>
- For example, consider a bank database that contains balances for various
- customer accounts, as well as total deposit balances for branches.
- Suppose that we want to record a payment of $100.00 from Alice's account
- to Bob's account. Simplifying outrageously, the SQL commands for this
- might look like:
-
-<programlisting>
-UPDATE accounts SET balance = balance - 100.00
- WHERE name = 'Alice';
-UPDATE branches SET balance = balance - 100.00
- WHERE name = (SELECT branch_name FROM accounts WHERE name = 'Alice');
-UPDATE accounts SET balance = balance + 100.00
- WHERE name = 'Bob';
-UPDATE branches SET balance = balance + 100.00
- WHERE name = (SELECT branch_name FROM accounts WHERE name = 'Bob');
-</programlisting>
- </para>
-
- <para>
- The details of these commands are not important here; the important
- point is that there are several separate updates involved to accomplish
- this rather simple operation. Our bank's officers will want to be
- assured that either all these updates happen, or none of them happen.
- It would certainly not do for a system failure to result in Bob
- receiving $100.00 that was not debited from Alice. Nor would Alice long
- remain a happy customer if she was debited without Bob being credited.
- We need a guarantee that if something goes wrong partway through the
- operation, none of the steps executed so far will take effect. Grouping
- the updates into a <firstterm>transaction</> gives us this guarantee.
- A transaction is said to be <firstterm>atomic</>: from the point of
- view of other transactions, it either happens completely or not at all.
- </para>
-
- <para>
- We also want a
- guarantee that once a transaction is completed and acknowledged by
- the database system, it has indeed been permanently recorded
- and won't be lost even if a crash ensues shortly thereafter.
- For example, if we are recording a cash withdrawal by Bob,
- we do not want any chance that the debit to his account will
- disappear in a crash just after he walks out the bank door.
- A transactional database guarantees that all the updates made by
- a transaction are logged in permanent storage (i.e., on disk) before
- the transaction is reported complete.
- </para>
-
- <para>
- Another important property of transactional databases is closely
- related to the notion of atomic updates: when multiple transactions
- are running concurrently, each one should not be able to see the
- incomplete changes made by others. For example, if one transaction
- is busy totalling all the branch balances, it would not do for it
- to include the debit from Alice's branch but not the credit to
- Bob's branch, nor vice versa. So transactions must be all-or-nothing
- not only in terms of their permanent effect on the database, but
- also in terms of their visibility as they happen. The updates made
- so far by an open transaction are invisible to other transactions
- until the transaction completes, whereupon all the updates become
- visible simultaneously.
- </para>
-<!## end>
-
- <para>
-<!## PG>
- In <productname>PostgreSQL</>, a transaction is set up by surrounding
- the SQL commands of the transaction with
- <command>BEGIN</> and <command>COMMIT</> commands. So our banking
- transaction would actually look like:
-<!## end>
-<!## XC>
- In <productname>Postgres-XC</>, a transaction is set up by surrounding
- the SQL commands of the transaction with
- <command>BEGIN</> and <command>COMMIT</> commands. So our banking
- transaction would actually look like:
-<!## end>
-<!## XL>
- In <productname>Postgres-XL</>, a transaction is set up by surrounding
- the SQL commands of the transaction with
- <command>BEGIN</> and <command>COMMIT</> commands. So our banking
- transaction would actually look like:
-<!## end>
-
-<programlisting>
-BEGIN;
-UPDATE accounts SET balance = balance - 100.00
- WHERE name = 'Alice';
--- etc etc
-COMMIT;
-</programlisting>
- </para>
-
- <para>
- If, partway through the transaction, we decide we do not want to
- commit (perhaps we just noticed that Alice's balance went negative),
- we can issue the command <command>ROLLBACK</> instead of
- <command>COMMIT</>, and all our updates so far will be canceled.
- </para>
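-
- <para>
-  For example, building on the banking transaction above:
- </para>
-
-<programlisting>
-BEGIN;
-UPDATE accounts SET balance = balance - 100.00
-    WHERE name = 'Alice';
--- Alice's balance went negative; abandon all changes
-ROLLBACK;
-</programlisting>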
-
- <para>
-<!## PG>
- <productname>PostgreSQL</> actually treats every SQL statement as being
-<!## end>
-<!## XC>
- <productname>Postgres-XC</> actually treats every SQL statement as being
-<!## end>
-<!## XL>
- <productname>Postgres-XL</> actually treats every SQL statement as being
-<!## end>
- executed within a transaction. If you do not issue a <command>BEGIN</>
- command,
- then each individual statement has an implicit <command>BEGIN</> and
- (if successful) <command>COMMIT</> wrapped around it. A group of
- statements surrounded by <command>BEGIN</> and <command>COMMIT</>
- is sometimes called a <firstterm>transaction block</>.
- </para>
-
- <note>
- <para>
- Some client libraries issue <command>BEGIN</> and <command>COMMIT</>
- commands automatically, so that you might get the effect of transaction
- blocks without asking. Check the documentation for the interface
- you are using.
- </para>
- </note>
-
-<!## PG>
-<!-- NOTE:
- XC does not support savepoint yet.
--->
- <para>
- It's possible to control the statements in a transaction in a more
- granular fashion through the use of <firstterm>savepoints</>. Savepoints
- allow you to selectively discard parts of the transaction, while
- committing the rest. After defining a savepoint with
- <command>SAVEPOINT</>, you can if needed roll back to the savepoint
- with <command>ROLLBACK TO</>. All the transaction's database changes
- between defining the savepoint and rolling back to it are discarded, but
- changes earlier than the savepoint are kept.
- </para>
-
- <para>
- After rolling back to a savepoint, it continues to be defined, so you can
- roll back to it several times. Conversely, if you are sure you won't need
- to roll back to a particular savepoint again, it can be released, so the
- system can free some resources. Keep in mind that either releasing or
- rolling back to a savepoint
- will automatically release all savepoints that were defined after it.
- </para>
-<!## end>
-
- <para>
- All this is happening within the transaction block, so none of it
- is visible to other database sessions. When and if you commit the
- transaction block, the committed actions become visible as a unit
- to other sessions, while the rolled-back actions never become visible
- at all.
- </para>
-
-<!## PG>
-<!-- NOTE:
- Again, XC does not support savepoint yet.
--->
-
- <para>
- Remembering the bank database, suppose we debit $100.00 from Alice's
- account, and credit Bob's account, only to find later that we should
- have credited Wally's account. We could do it using savepoints like
- this:
-
-<programlisting>
-BEGIN;
-UPDATE accounts SET balance = balance - 100.00
- WHERE name = 'Alice';
-SAVEPOINT my_savepoint;
-UPDATE accounts SET balance = balance + 100.00
- WHERE name = 'Bob';
--- oops ... forget that and use Wally's account
-ROLLBACK TO my_savepoint;
-UPDATE accounts SET balance = balance + 100.00
- WHERE name = 'Wally';
-COMMIT;
-</programlisting>
- </para>
-
- <para>
- This example is, of course, oversimplified, but there's a lot of control
- possible in a transaction block through the use of savepoints.
- Moreover, <command>ROLLBACK TO</> is the only way to regain control of a
- transaction block that was put in aborted state by the
- system due to an error, short of rolling it back completely and starting
- again.
- </para>
-
-<!## end>
-
- </sect1>
-
-<!-- Window Functions are not supported yet -->
-
- <sect1 id="tutorial-window">
- <title>Window Functions</title>
-
- <indexterm zone="tutorial-window">
- <primary>window function</primary>
- </indexterm>
-
- <para>
- A <firstterm>window function</> performs a calculation across a set of
- table rows that are somehow related to the current row. This is comparable
- to the type of calculation that can be done with an aggregate function.
- But unlike regular aggregate functions, use of a window function does not
- cause rows to become grouped into a single output row &mdash; the
- rows retain their separate identities. Behind the scenes, the window
- function is able to access more than just the current row of the query
- result.
- </para>
-
- <para>
- Here is an example that shows how to compare each employee's salary
- with the average salary in his or her department:
-
-<programlisting>
-SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM empsalary;
-</programlisting>
-
-<screen>
- depname | empno | salary | avg
------------+-------+--------+-----------------------
- develop | 11 | 5200 | 5020.0000000000000000
- develop | 7 | 4200 | 5020.0000000000000000
- develop | 9 | 4500 | 5020.0000000000000000
- develop | 8 | 6000 | 5020.0000000000000000
- develop | 10 | 5200 | 5020.0000000000000000
- personnel | 5 | 3500 | 3700.0000000000000000
- personnel | 2 | 3900 | 3700.0000000000000000
- sales | 3 | 4800 | 4866.6666666666666667
- sales | 1 | 5000 | 4866.6666666666666667
- sales | 4 | 4800 | 4866.6666666666666667
-(10 rows)
-</screen>
-
- The first three output columns come directly from the table
- <structname>empsalary</>, and there is one output row for each row in the
- table. The fourth column represents an average taken across all the table
- rows that have the same <structfield>depname</> value as the current row.
- (This actually is the same function as the regular <function>avg</>
- aggregate function, but the <literal>OVER</> clause causes it to be
- treated as a window function and computed across an appropriate set of
- rows.)
- </para>
-
- <para>
- A window function call always contains an <literal>OVER</> clause
- directly following the window function's name and argument(s). This is what
- syntactically distinguishes it from a regular function or aggregate
- function. The <literal>OVER</> clause determines exactly how the
- rows of the query are split up for processing by the window function.
- The <literal>PARTITION BY</> list within <literal>OVER</> specifies
- dividing the rows into groups, or partitions, that share the same
- values of the <literal>PARTITION BY</> expression(s). For each row,
- the window function is computed across the rows that fall into the
- same partition as the current row.
- </para>
-
- <para>
- You can also control the order in which rows are processed by
- window functions using <literal>ORDER BY</> within <literal>OVER</>.
- (The window <literal>ORDER BY</> does not even have to match the
- order in which the rows are output.) Here is an example:
-
-<programlisting>
-SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY salary DESC) FROM empsalary;
-</programlisting>
-
-<screen>
- depname | empno | salary | rank
------------+-------+--------+------
- develop | 8 | 6000 | 1
- develop | 10 | 5200 | 2
- develop | 11 | 5200 | 2
- develop | 9 | 4500 | 4
- develop | 7 | 4200 | 5
- personnel | 2 | 3900 | 1
- personnel | 5 | 3500 | 2
- sales | 1 | 5000 | 1
- sales | 4 | 4800 | 2
- sales | 3 | 4800 | 2
-(10 rows)
-</screen>
-
- As shown here, the <function>rank</> function produces a numerical rank
- within the current row's partition for each distinct <literal>ORDER BY</>
- value, in the order defined by the <literal>ORDER BY</> clause.
- <function>rank</> needs no explicit parameter, because its behavior
- is entirely determined by the <literal>OVER</> clause.
- </para>
-
- <para>
- The rows considered by a window function are those of the <quote>virtual
- table</> produced by the query's <literal>FROM</> clause as filtered by its
- <literal>WHERE</>, <literal>GROUP BY</>, and <literal>HAVING</> clauses
- if any. For example, a row removed because it does not meet the
- <literal>WHERE</> condition is not seen by any window function.
- A query can contain multiple window functions that slice up the data
- in different ways by means of different <literal>OVER</> clauses, but
- they all act on the same collection of rows defined by this virtual table.
- </para>
-
- <para>
- We already saw that <literal>ORDER BY</> can be omitted if the ordering
- of rows is not important. It is also possible to omit <literal>PARTITION
- BY</>, in which case there is just one partition containing all the rows.
- </para>
-
- <para>
- There is another important concept associated with window functions:
- for each row, there is a set of rows within its partition called its
- <firstterm>window frame</>. Many (but not all) window functions act only
- on the rows of the window frame, rather than of the whole partition.
- By default, if <literal>ORDER BY</> is supplied then the frame consists of
- all rows from the start of the partition up through the current row, plus
- any following rows that are equal to the current row according to the
- <literal>ORDER BY</> clause. When <literal>ORDER BY</> is omitted the
- default frame consists of all rows in the partition.
- <footnote>
- <para>
- There are options to define the window frame in other ways, but
- this tutorial does not cover them. See
- <xref linkend="syntax-window-functions"> for details.
- </para>
- </footnote>
- Here is an example using <function>sum</>:
- </para>
-
-<programlisting>
-SELECT salary, sum(salary) OVER () FROM empsalary;
-</programlisting>
-
-<screen>
- salary | sum
---------+-------
- 5200 | 47100
- 5000 | 47100
- 3500 | 47100
- 4800 | 47100
- 3900 | 47100
- 4200 | 47100
- 4500 | 47100
- 4800 | 47100
- 6000 | 47100
- 5200 | 47100
-(10 rows)
-</screen>
-
- <para>
- Above, since there is no <literal>ORDER BY</> in the <literal>OVER</>
- clause, the window frame is the same as the partition, which for lack of
- <literal>PARTITION BY</> is the whole table; in other words each sum is
- taken over the whole table and so we get the same result for each output
- row. But if we add an <literal>ORDER BY</> clause, we get very different
- results:
- </para>
-
-<programlisting>
-SELECT salary, sum(salary) OVER (ORDER BY salary) FROM empsalary;
-</programlisting>
-
-<screen>
- salary | sum
---------+-------
- 3500 | 3500
- 3900 | 7400
- 4200 | 11600
- 4500 | 16100
- 4800 | 25700
- 4800 | 25700
- 5000 | 30700
- 5200 | 41100
- 5200 | 41100
- 6000 | 47100
-(10 rows)
-</screen>
-
- <para>
- Here the sum is taken from the first (lowest) salary up through the
- current one, including any duplicates of the current one (notice the
- results for the duplicated salaries).
- </para>
-
- <para>
- Window functions are permitted only in the <literal>SELECT</literal> list
- and the <literal>ORDER BY</> clause of the query. They are forbidden
- elsewhere, such as in <literal>GROUP BY</>, <literal>HAVING</>
- and <literal>WHERE</literal> clauses. This is because they logically
- execute after the processing of those clauses. Also, window functions
- execute after regular aggregate functions. This means it is valid to
- include an aggregate function call in the arguments of a window function,
- but not vice versa.
- </para>
-
- <para>
- If there is a need to filter or group rows after the window calculations
- are performed, you can use a sub-select. For example:
-
-<programlisting>
-SELECT depname, empno, salary, enroll_date
-FROM
- (SELECT depname, empno, salary, enroll_date,
- rank() OVER (PARTITION BY depname ORDER BY salary DESC, empno) AS pos
- FROM empsalary
- ) AS ss
-WHERE pos &lt; 3;
-</programlisting>
-
- The above query only shows the rows from the inner query having
- <literal>rank</> less than 3.
- </para>
-
- <para>
- When a query involves multiple window functions, it is possible to write
- out each one with a separate <literal>OVER</> clause, but this is
- duplicative and error-prone if the same windowing behavior is wanted
- for several functions. Instead, each windowing behavior can be named
- in a <literal>WINDOW</> clause and then referenced in <literal>OVER</>.
- For example:
-
-<programlisting>
-SELECT sum(salary) OVER w, avg(salary) OVER w
- FROM empsalary
- WINDOW w AS (PARTITION BY depname ORDER BY salary DESC);
-</programlisting>
- </para>
-
- <para>
- More details about window functions can be found in
- <xref linkend="syntax-window-functions">,
- <xref linkend="functions-window">,
- <xref linkend="queries-window">, and the
- <xref linkend="sql-select"> reference page.
- </para>
- </sect1>
-
-
- <sect1 id="tutorial-inheritance">
- <title>Inheritance</title>
-
- <indexterm zone="tutorial-inheritance">
- <primary>inheritance</primary>
- </indexterm>
-
-&common;
- <para>
- Inheritance is a concept from object-oriented databases. It opens
- up interesting new possibilities of database design.
- </para>
-
- <para>
- Let's create two tables: A table <classname>cities</classname>
- and a table <classname>capitals</classname>. Naturally, capitals
- are also cities, so you want some way to show the capitals
- implicitly when you list all cities. If you're really clever you
- might invent some scheme like this:
-
-<programlisting>
-CREATE TABLE capitals (
- name text,
- population real,
- altitude int, -- (in ft)
- state char(2)
-);
-
-CREATE TABLE non_capitals (
- name text,
- population real,
- altitude int -- (in ft)
-);
-
-CREATE VIEW cities AS
- SELECT name, population, altitude FROM capitals
- UNION
- SELECT name, population, altitude FROM non_capitals;
-</programlisting>
-
- This works OK as far as querying goes, but it gets ugly when you
- need to update several rows, for one thing.
- </para>
-
- <para>
- A better solution is this:
-
-<programlisting>
-CREATE TABLE cities (
- name text,
- population real,
- altitude int -- (in ft)
-);
-
-CREATE TABLE capitals (
- state char(2)
-) INHERITS (cities);
-</programlisting>
- </para>
-
- <para>
- In this case, a row of <classname>capitals</classname>
- <firstterm>inherits</firstterm> all columns (<structfield>name</>,
- <structfield>population</>, and <structfield>altitude</>) from its
- <firstterm>parent</firstterm>, <classname>cities</classname>. The
- type of the column <structfield>name</structfield> is
-<!## PG>
- <type>text</type>, a native <productname>PostgreSQL</productname>
-<!## end>
-<!## XC>
- <type>text</type>, a native <productname>Postgres-XC</productname>
-<!## end>
-<!## XL>
- <type>text</type>, a native <productname>Postgres-XL</productname>
-<!## end>
- type for variable length character strings. State capitals have
- an extra column, <structfield>state</>, that shows their state. In
-<!## PG>
- <productname>PostgreSQL</productname>, a table can inherit from
-<!## end>
-<!## XC>
- <productname>Postgres-XC</productname>, a table can inherit from
-<!## end>
-<!## XL>
- <productname>Postgres-XL</productname>, a table can inherit from
-<!## end>
- zero or more other tables.
- </para>
-
- <para>
- For example, the following query finds the names of all cities,
- including state capitals, that are located at an altitude
- over 500 feet:
-
-<programlisting>
-SELECT name, altitude
- FROM cities
- WHERE altitude &gt; 500;
-</programlisting>
-
- which returns:
-
-<screen>
- name | altitude
------------+----------
- Las Vegas | 2174
- Mariposa | 1953
- Madison | 845
-(3 rows)
-</screen>
- </para>
-
- <para>
- On the other hand, the following query finds
- all the cities that are not state capitals and
- are situated at an altitude of 500 feet or higher:
-
-<programlisting>
-SELECT name, altitude
- FROM ONLY cities
- WHERE altitude &gt; 500;
-</programlisting>
-
-<screen>
- name | altitude
------------+----------
- Las Vegas | 2174
- Mariposa | 1953
-(2 rows)
-</screen>
- </para>
-
- <para>
- Here the <literal>ONLY</literal> before <literal>cities</literal>
- indicates that the query should be run over only the
- <classname>cities</classname> table, and not tables below
- <classname>cities</classname> in the inheritance hierarchy. Many
- of the commands that we have already discussed &mdash;
- <command>SELECT</command>, <command>UPDATE</command>, and
- <command>DELETE</command> &mdash; support this <literal>ONLY</literal>
- notation.
- </para>
-
- <note>
- <para>
- Although inheritance is frequently useful, it has not been integrated
- with unique constraints or foreign keys, which limits its usefulness.
- See <xref linkend="ddl-inherit"> for more detail.
- </para>
- </note>
- </sect1>
-
-
-
- <sect1 id="tutorial-conclusion">
- <title>Conclusion</title>
-
-&common;
- <para>
-<!## PG>
- <productname>PostgreSQL</productname> has many features not
-<!## end>
-<!## XC>
- <productname>Postgres-XC</productname> has many features not
-<!## end>
-<!## XL>
- <productname>Postgres-XL</productname> has many features not
-<!## end>
- touched upon in this tutorial introduction, which has been
- oriented toward newer users of <acronym>SQL</acronym>. These
- features are discussed in more detail in the remainder of this
- book.
- </para>
-
-<!## PG>
- <para>
- If you feel you need more introductory material, please visit the PostgreSQL
- <ulink url="http://www.postgresql.org">web site</ulink>
- for links to more resources.
- </para>
-<!## end>
- </sect1>
- </chapter>
diff --git a/doc-xc/src/sgml/arch-dev.sgmlin b/doc-xc/src/sgml/arch-dev.sgmlin
deleted file mode 100644
index fe4ef807b8..0000000000
--- a/doc-xc/src/sgml/arch-dev.sgmlin
+++ /dev/null
@@ -1,1630 +0,0 @@
-<!-- doc/src/sgml/arch-dev.sgml -->
-
- <chapter id="overview">
- <title>Overview of PostgreSQL Internals</title>
-
- <note>
- <title>Author</title>
- <para>
- This chapter originated as part of
- <xref linkend="SIM98">, Stefan Simkovics'
- Master's Thesis prepared at Vienna University of Technology under the direction
- of O.Univ.Prof.Dr. Georg Gottlob and Univ.Ass. Mag. Katrin Seyr.
- </para>
- </note>
-
-<!## XC>
- <para>
- This chapter describes the internals
- of <productname>PostgreSQL</productname>,
- from which <productname>Postgres-XC</productname> inherited most of
- its features.
- </para>
-<!## end>
-<!## XL>
- <para>
- This chapter describes the internals
- of <productname>PostgreSQL</productname>,
- from which <productname>Postgres-XL</productname> inherited most of
- its features.
- </para>
-<!## end>
-&common;
-
- <para>
- This chapter gives an overview of the internal structure of the
- backend of <productname>PostgreSQL</productname>. After having
- read the following sections you should have an idea of how a query
- is processed. This chapter does not aim to provide a detailed
- description of the internal operation of
- <productname>PostgreSQL</productname>, as such a document would be
- very extensive. Rather, this chapter is intended to help the reader
- understand the general sequence of operations that occur within the
- backend from the point at which a query is received, to the point
- at which the results are returned to the client.
- </para>
-
- <sect1 id="query-path">
- <title>The Path of a Query</title>
-
-&common;
- <para>
- Here we give a short overview of the stages a query has to pass in
- order to obtain a result.
- </para>
-
- <procedure>
- <step>
- <para>
- A connection from an application program to the <productname>PostgreSQL</productname>
- server has to be established. The application program transmits a
- query to the server and waits to receive the results sent back by the
- server.
- </para>
- </step>
-
- <step>
- <para>
- The <firstterm>parser stage</firstterm> checks the query
- transmitted by the application
- program for correct syntax and creates
- a <firstterm>query tree</firstterm>.
- </para>
- </step>
-
- <step>
- <para>
- The <firstterm>rewrite system</firstterm> takes
- the query tree created by the parser stage and looks for
- any <firstterm>rules</firstterm> (stored in the
- <firstterm>system catalogs</firstterm>) to apply to
- the query tree. It performs the
- transformations given in the <firstterm>rule bodies</firstterm>.
- </para>
-
- <para>
- One application of the rewrite system is in the realization of
- <firstterm>views</firstterm>.
- Whenever a query against a view
- (i.e., a <firstterm>virtual table</firstterm>) is made,
- the rewrite system rewrites the user's query to
- a query that accesses the <firstterm>base tables</firstterm> given in
- the <firstterm>view definition</firstterm> instead.
- </para>
- </step>
-
- <step>
- <para>
- The <firstterm>planner/optimizer</firstterm> takes
- the (rewritten) query tree and creates a
- <firstterm>query plan</firstterm> that will be the input to the
- <firstterm>executor</firstterm>.
- </para>
-
- <para>
- It does so by first creating all possible <firstterm>paths</firstterm>
- leading to the same result. For example if there is an index on a
- relation to be scanned, there are two paths for the
- scan. One possibility is a simple sequential scan and the other
- possibility is to use the index. Next the cost for the execution of
- each path is estimated and the cheapest path is chosen. The cheapest
- path is expanded into a complete plan that the executor can use.
- </para>
- </step>
-
- <step>
- <para>
- The executor recursively steps through
- the <firstterm>plan tree</firstterm> and
- retrieves rows in the way represented by the plan.
- The executor makes use of the
- <firstterm>storage system</firstterm> while scanning
- relations, performs <firstterm>sorts</firstterm> and <firstterm>joins</firstterm>,
- evaluates <firstterm>qualifications</firstterm> and finally hands back the rows derived.
- </para>
- </step>
- </procedure>
-
- <para>
- In the following sections we will cover each of the above listed items
- in more detail to give a better understanding of <productname>PostgreSQL</productname>'s internal
- control and data structures.
- </para>
- </sect1>
-
- <sect1 id="connect-estab">
- <title>How Connections are Established</title>
-
-&common;
- <para>
- <productname>PostgreSQL</productname> is implemented using a
- simple <quote>process per user</> client/server model. In this model
- there is one <firstterm>client process</firstterm> connected to
- exactly one <firstterm>server process</firstterm>. As we do not
- know ahead of time how many connections will be made, we have to
- use a <firstterm>master process</firstterm> that spawns a new
- server process every time a connection is requested. This master
- process is called <literal>postgres</literal> and listens at a
- specified TCP/IP port for incoming connections. Whenever a request
- for a connection is detected the <literal>postgres</literal>
- process spawns a new server process. The server tasks
- communicate with each other using <firstterm>semaphores</firstterm> and
- <firstterm>shared memory</firstterm> to ensure data integrity
- throughout concurrent data access.
- </para>
-
- <para>
- The client process can be any program that understands the
- <productname>PostgreSQL</productname> protocol described in
- <xref linkend="protocol">. Many clients are based on the
- C-language library <application>libpq</>, but several independent
- implementations of the protocol exist, such as the Java
- <application>JDBC</> driver.
- </para>
-
- <para>
- Once a connection is established the client process can send a query
- to the <firstterm>backend</firstterm> (server). The query is transmitted using plain text,
- i.e., there is no parsing done in the <firstterm>frontend</firstterm> (client). The
- server parses the query, creates an <firstterm>execution plan</firstterm>,
- executes the plan and returns the retrieved rows to the client
- by transmitting them over the established connection.
- </para>
- </sect1>
-
- <sect1 id="parser-stage">
- <title>The Parser Stage</title>
-
-&common;
- <para>
- The <firstterm>parser stage</firstterm> consists of two parts:
-
- <itemizedlist>
- <listitem>
- <para>
- The <firstterm>parser</firstterm> defined in
- <filename>gram.y</filename> and <filename>scan.l</filename> is
- built using the Unix tools <application>bison</application>
- and <application>flex</application>.
- </para>
- </listitem>
- <listitem>
- <para>
- The <firstterm>transformation process</firstterm> does
- modifications and augmentations to the data structures returned by the parser.
- </para>
- </listitem>
- </itemizedlist>
- </para>
-
- <sect2>
- <title>Parser</title>
-
-&common;
- <para>
- The parser has to check the query string (which arrives as plain
- ASCII text) for valid syntax. If the syntax is correct a
- <firstterm>parse tree</firstterm> is built up and handed back;
- otherwise an error is returned. The parser and lexer are
- implemented using the well-known Unix tools <application>bison</>
- and <application>flex</>.
- </para>
-
- <para>
- The <firstterm>lexer</firstterm> is defined in the file
- <filename>scan.l</filename> and is responsible
- for recognizing <firstterm>identifiers</firstterm>,
- the <firstterm>SQL key words</firstterm> etc. For
- every key word or identifier that is found, a <firstterm>token</firstterm>
- is generated and handed to the parser.
- </para>
-
- <para>
- The parser is defined in the file <filename>gram.y</filename> and
- consists of a set of <firstterm>grammar rules</firstterm> and
- <firstterm>actions</firstterm> that are executed whenever a rule
- is fired. The code of the actions (which is actually C code) is
- used to build up the parse tree.
- </para>
-
- <para>
- The file <filename>scan.l</filename> is transformed to the C
- source file <filename>scan.c</filename> using the program
- <application>flex</application> and <filename>gram.y</filename> is
- transformed to <filename>gram.c</filename> using
- <application>bison</application>. After these transformations
- have taken place a normal C compiler can be used to create the
- parser. Never make any changes to the generated C files as they
- will be overwritten the next time <application>flex</application>
- or <application>bison</application> is called.
-
- <note>
- <para>
- The mentioned transformations and compilations are normally done
- automatically using the <firstterm>makefiles</firstterm>
- shipped with the <productname>PostgreSQL</productname>
- source distribution.
- </para>
- </note>
- </para>
-
- <para>
- A detailed description of <application>bison</application> or
- the grammar rules given in <filename>gram.y</filename> would be
- beyond the scope of this paper. There are many books and
- documents dealing with <application>flex</application> and
- <application>bison</application>. You should be familiar with
- <application>bison</application> before you start to study the
- grammar given in <filename>gram.y</filename> otherwise you won't
- understand what happens there.
- </para>
-
- </sect2>
-
- <sect2>
- <title>Transformation Process</title>
-
-&common;
- <para>
- The parser stage creates a parse tree using only fixed rules about
- the syntactic structure of SQL. It does not make any lookups in the
- system catalogs, so there is no possibility to understand the detailed
- semantics of the requested operations. After the parser completes,
- the <firstterm>transformation process</firstterm> takes the tree handed
- back by the parser as input and does the semantic interpretation needed
- to understand which tables, functions, and operators are referenced by
- the query. The data structure that is built to represent this
- information is called the <firstterm>query tree</>.
- </para>
-
- <para>
- The reason for separating raw parsing from semantic analysis is that
- system catalog lookups can only be done within a transaction, and we
- do not wish to start a transaction immediately upon receiving a query
- string. The raw parsing stage is sufficient to identify the transaction
- control commands (<command>BEGIN</>, <command>ROLLBACK</>, etc), and
- these can then be correctly executed without any further analysis.
- Once we know that we are dealing with an actual query (such as
- <command>SELECT</> or <command>UPDATE</>), it is okay to
- start a transaction if we're not already in one. Only then can the
- transformation process be invoked.
- </para>
-
- <para>
- The query tree created by the transformation process is structurally
- similar to the raw parse tree in most places, but it has many differences
- in detail. For example, a <structname>FuncCall</> node in the
- parse tree represents something that looks syntactically like a function
- call. This might be transformed to either a <structname>FuncExpr</>
- or <structname>Aggref</> node depending on whether the referenced
- name turns out to be an ordinary function or an aggregate function.
- Also, information about the actual data types of columns and expression
- results is added to the query tree.
- </para>
- </sect2>
- </sect1>
-
- <sect1 id="rule-system">
- <title>The <productname>PostgreSQL</productname> Rule System</title>
-
-&common;
- <para>
- <productname>PostgreSQL</productname> supports a powerful
- <firstterm>rule system</firstterm> for the specification
- of <firstterm>views</firstterm> and ambiguous <firstterm>view updates</firstterm>.
- Originally the <productname>PostgreSQL</productname>
- rule system consisted of two implementations:
-
- <itemizedlist>
- <listitem>
- <para>
- The first one worked using <firstterm>row level</firstterm> processing and was
- implemented deep in the <firstterm>executor</firstterm>. The rule system was
- called whenever an individual row had been accessed. This
- implementation was removed in 1995 when the last official release
- of the <productname>Berkeley Postgres</productname> project was
- transformed into <productname>Postgres95</productname>.
- </para>
- </listitem>
-
- <listitem>
- <para>
- The second implementation of the rule system is a technique
- called <firstterm>query rewriting</firstterm>.
- The <firstterm>rewrite system</firstterm> is a module
- that exists between the <firstterm>parser stage</firstterm> and the
- <firstterm>planner/optimizer</firstterm>. This technique is still implemented.
- </para>
- </listitem>
- </itemizedlist>
- </para>
-
- <para>
- The query rewriter is discussed in some detail in
- <xref linkend="rules">, so there is no need to cover it here.
- We will only point out that both the input and the output of the
- rewriter are query trees, that is, there is no change in the
- representation or level of semantic detail in the trees. Rewriting
- can be thought of as a form of macro expansion.
- </para>
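-
- <para>
-  As a simplified illustration (the view and table here are hypothetical),
-  a query that references a view is expanded into a query over the view's
-  base table before planning:
- </para>
-
-<programlisting>
-CREATE VIEW low_cities AS
-    SELECT name, altitude FROM cities WHERE altitude &lt; 500;
-
-SELECT name FROM low_cities;      -- rewritten, roughly, to:
-                                  -- SELECT name FROM cities WHERE altitude &lt; 500;
-</programlisting>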
-
- </sect1>
-
- <sect1 id="planner-optimizer">
- <title>Planner/Optimizer</title>
-
-&common;
- <para>
- The task of the <firstterm>planner/optimizer</firstterm> is to
- create an optimal execution plan. A given SQL query (and hence, a
- query tree) can be actually executed in a wide variety of
- different ways, each of which will produce the same set of
- results. If it is computationally feasible, the query optimizer
- will examine each of these possible execution plans, ultimately
- selecting the execution plan that is expected to run the fastest.
- </para>
-
- <note>
- <para>
- In some situations, examining each possible way in which a query
- can be executed would take an excessive amount of time and memory
- space. In particular, this occurs when executing queries
- involving large numbers of join operations. In order to determine
- a reasonable (not necessarily optimal) query plan in a reasonable amount
- of time, <productname>PostgreSQL</productname> uses a <firstterm>Genetic
- Query Optimizer</firstterm> (see <xref linkend="geqo">) when the number of joins
- exceeds a threshold (see <xref linkend="guc-geqo-threshold">).
- </para>
- </note>
-
- <para>
- The planner's search procedure actually works with data structures
- called <firstterm>paths</>, which are simply cut-down representations of
- plans containing only as much information as the planner needs to make
- its decisions. After the cheapest path is determined, a full-fledged
- <firstterm>plan tree</> is built to pass to the executor. This represents
- the desired execution plan in sufficient detail for the executor to run it.
- In the rest of this section we'll ignore the distinction between paths
- and plans.
- </para>
-
- <sect2>
- <title>Generating Possible Plans</title>
-
-&common;
- <para>
- The planner/optimizer starts by generating plans for scanning each
- individual relation (table) used in the query. The possible plans
- are determined by the available indexes on each relation.
- There is always the possibility of performing a
- sequential scan on a relation, so a sequential scan plan is always
- created. Assume an index is defined on a
- relation (for example a B-tree index) and a query contains the
- restriction
- <literal>relation.attribute OPR constant</literal>. If
- <literal>relation.attribute</literal> happens to match the key of the B-tree
- index and <literal>OPR</literal> is one of the operators listed in
- the index's <firstterm>operator class</>, another plan is created using
- the B-tree index to scan the relation. If there are further indexes
- present and the restrictions in the query happen to match a key of an
- index, further plans will be considered. Index scan plans are also
- generated for indexes that have a sort ordering that can match the
- query's <literal>ORDER BY</> clause (if any), or a sort ordering that
- might be useful for merge joining (see below).
- </para>
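-
-  <para>
-   For illustration (the table and index names here are hypothetical, and
-   the output is abbreviated), <command>EXPLAIN</command> shows which scan
-   plan the planner finally selected for a simple restriction:
-<programlisting>
-EXPLAIN (COSTS OFF) SELECT * FROM tenk1 WHERE unique1 = 42;
-                 QUERY PLAN
----------------------------------------------
- Index Scan using tenk1_unique1 on tenk1
-   Index Cond: (unique1 = 42)
-</programlisting>
-   Had no suitable index existed, a sequential scan with a filter
-   condition would have been shown instead.
-  </para>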
-
- <para>
- If the query requires joining two or more relations,
- plans for joining relations are considered
- after all feasible plans have been found for scanning single relations.
- The three available join strategies are:
-
- <itemizedlist>
- <listitem>
- <para>
- <firstterm>nested loop join</firstterm>: The right relation is scanned
- once for every row found in the left relation. This strategy
- is easy to implement but can be very time consuming. (However,
- if the right relation can be scanned with an index scan, this can
- be a good strategy. It is possible to use values from the current
- row of the left relation as keys for the index scan of the right.)
- </para>
- </listitem>
-
- <listitem>
- <para>
- <firstterm>merge join</firstterm>: Each relation is sorted on the join
- attributes before the join starts. Then the two relations are
- scanned in parallel, and matching rows are combined to form
- join rows. This kind of join is more
- attractive because each relation has to be scanned only once.
- The required sorting might be achieved either by an explicit sort
- step, or by scanning the relation in the proper order using an
- index on the join key.
- </para>
- </listitem>
-
- <listitem>
- <para>
-     <firstterm>hash join</firstterm>: The right relation is first scanned
- and loaded into a hash table, using its join attributes as hash keys.
- Next the left relation is scanned and the
- appropriate values of every row found are used as hash keys to
- locate the matching rows in the table.
- </para>
- </listitem>
- </itemizedlist>
- </para>
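-
-  <para>
-   As one possible illustration of these strategies (hypothetical table
-   names, abbreviated output), <command>EXPLAIN</command> might show a
-   hash join chosen for an unsorted, unindexed pair of tables:
-<programlisting>
-EXPLAIN (COSTS OFF) SELECT * FROM t1 JOIN t2 ON t1.a = t2.b;
-           QUERY PLAN
----------------------------------
- Hash Join
-   Hash Cond: (t1.a = t2.b)
-   ->  Seq Scan on t1
-   ->  Hash
-         ->  Seq Scan on t2
-</programlisting>
-  </para>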
-
- <para>
- When the query involves more than two relations, the final result
- must be built up by a tree of join steps, each with two inputs.
- The planner examines different possible join sequences to find the
- cheapest one.
- </para>
-
- <para>
- If the query uses fewer than <xref linkend="guc-geqo-threshold">
- relations, a near-exhaustive search is conducted to find the best
- join sequence. The planner preferentially considers joins between any
-   two relations for which there exists a corresponding join clause in the
- <literal>WHERE</literal> qualification (i.e., for
- which a restriction like <literal>where rel1.attr1=rel2.attr2</literal>
- exists). Join pairs with no join clause are considered only when there
- is no other choice, that is, a particular relation has no available
- join clauses to any other relation. All possible plans are generated for
- every join pair considered by the planner, and the one that is
- (estimated to be) the cheapest is chosen.
- </para>
-
- <para>
- When <varname>geqo_threshold</varname> is exceeded, the join
- sequences considered are determined by heuristics, as described
- in <xref linkend="geqo">. Otherwise the process is the same.
- </para>
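-
-  <para>
-   The threshold is an ordinary configuration parameter, so its effect can
-   be inspected and adjusted per session; for example:
-<programlisting>
-SHOW geqo_threshold;        -- 12 by default
-SET geqo_threshold = 14;    -- near-exhaustive search for up to 13 relations
-</programlisting>
-  </para>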
-
- <para>
- The finished plan tree consists of sequential or index scans of
- the base relations, plus nested-loop, merge, or hash join nodes as
- needed, plus any auxiliary steps needed, such as sort nodes or
- aggregate-function calculation nodes. Most of these plan node
- types have the additional ability to do <firstterm>selection</>
- (discarding rows that do not meet a specified Boolean condition)
- and <firstterm>projection</> (computation of a derived column set
- based on given column values, that is, evaluation of scalar
- expressions where needed). One of the responsibilities of the
- planner is to attach selection conditions from the
- <literal>WHERE</literal> clause and computation of required
- output expressions to the most appropriate nodes of the plan
- tree.
- </para>
- </sect2>
- </sect1>
-
- <sect1 id="executor">
- <title>Executor</title>
-
-&common;
- <para>
- The <firstterm>executor</firstterm> takes the plan created by the
- planner/optimizer and recursively processes it to extract the required set
- of rows. This is essentially a demand-pull pipeline mechanism.
- Each time a plan node is called, it must deliver one more row, or
- report that it is done delivering rows.
- </para>
-
- <para>
- To provide a concrete example, assume that the top
- node is a <literal>MergeJoin</literal> node.
- Before any merge can be done two rows have to be fetched (one from
- each subplan). So the executor recursively calls itself to
- process the subplans (it starts with the subplan attached to
- <literal>lefttree</literal>). The new top node (the top node of the left
- subplan) is, let's say, a
- <literal>Sort</literal> node and again recursion is needed to obtain
- an input row. The child node of the <literal>Sort</literal> might
- be a <literal>SeqScan</> node, representing actual reading of a table.
- Execution of this node causes the executor to fetch a row from the
- table and return it up to the calling node. The <literal>Sort</literal>
- node will repeatedly call its child to obtain all the rows to be sorted.
- When the input is exhausted (as indicated by the child node returning
- a NULL instead of a row), the <literal>Sort</literal> code performs
- the sort, and finally is able to return its first output row, namely
- the first one in sorted order. It keeps the remaining rows stored so
- that it can deliver them in sorted order in response to later demands.
- </para>
-
- <para>
- The <literal>MergeJoin</literal> node similarly demands the first row
- from its right subplan. Then it compares the two rows to see if they
- can be joined; if so, it returns a join row to its caller. On the next
- call, or immediately if it cannot join the current pair of inputs,
- it advances to the next row of one table
- or the other (depending on how the comparison came out), and again
- checks for a match. Eventually, one subplan or the other is exhausted,
- and the <literal>MergeJoin</literal> node returns NULL to indicate that
- no more join rows can be formed.
- </para>
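-
-  <para>
-   The plan shape described in this walk-through can be seen directly with
-   <command>EXPLAIN</command>.  In this sketch (hypothetical tables
-   <literal>a</literal> and <literal>b</literal>, output abbreviated),
-   each <literal>Sort</literal> feeds the <literal>MergeJoin</literal>
-   node on demand, exactly as described above:
-<programlisting>
-EXPLAIN (COSTS OFF) SELECT * FROM a JOIN b ON a.x = b.x;
-          QUERY PLAN
---------------------------------
- Merge Join
-   Merge Cond: (a.x = b.x)
-   ->  Sort
-         Sort Key: a.x
-         ->  Seq Scan on a
-   ->  Sort
-         Sort Key: b.x
-         ->  Seq Scan on b
-</programlisting>
-   (Whether the planner actually picks a merge join here depends on the
-   estimated costs; this is just one possible outcome.)
-  </para>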
-
- <para>
- Complex queries can involve many levels of plan nodes, but the general
- approach is the same: each node computes and returns its next output
- row each time it is called. Each node is also responsible for applying
- any selection or projection expressions that were assigned to it by
- the planner.
- </para>
-
- <para>
- The executor mechanism is used to evaluate all four basic SQL query types:
- <command>SELECT</>, <command>INSERT</>, <command>UPDATE</>, and
- <command>DELETE</>. For <command>SELECT</>, the top-level executor
- code only needs to send each row returned by the query plan tree off
- to the client. For <command>INSERT</>, each returned row is inserted
- into the target table specified for the <command>INSERT</>. This is
- done in a special top-level plan node called <literal>ModifyTable</>.
- (A simple
- <command>INSERT ... VALUES</> command creates a trivial plan tree
- consisting of a single <literal>Result</> node, which computes just one
- result row, and <literal>ModifyTable</> above it to perform the insertion.
- But <command>INSERT ... SELECT</> can demand the full power
- of the executor mechanism.) For <command>UPDATE</>, the planner arranges
- that each computed row includes all the updated column values, plus
- the <firstterm>TID</> (tuple ID, or row ID) of the original target row;
- this data is fed into a <literal>ModifyTable</> node, which uses the
- information to create a new updated row and mark the old row deleted.
- For <command>DELETE</>, the only column that is actually returned by the
- plan is the TID, and the <literal>ModifyTable</> node simply uses the TID
- to visit each target row and mark it deleted.
- </para>
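-
-  <para>
-   In <command>EXPLAIN</command> output the <literal>ModifyTable</> node
-   is displayed as <literal>Insert</>, <literal>Update</> or
-   <literal>Delete</>.  A sketch with a hypothetical table:
-<programlisting>
-EXPLAIN (COSTS OFF) UPDATE accounts SET balance = balance + 1 WHERE id = 7;
-                    QUERY PLAN
----------------------------------------------------
- Update on accounts
-   ->  Index Scan using accounts_pkey on accounts
-         Index Cond: (id = 7)
-</programlisting>
-  </para>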
-
- </sect1>
-
- </chapter>
-
-
-<!## XC>
- <chapter id="xc-overview">
- <title>Overview of <productname>Postgres-XC</productname> Internals</title>
-
-&xconly;
- <para>
- This chapter gives an overview of the internal structure
- of <productname>Postgres-XC</productname>.
- </para>
-
- <sect1 id="xc-overview-components">
- <title><productname>Postgres-XC</productname> Components</title>
-&xconly;
- <para>
- As described
- in <xref linkend="intro-whatis">, <productname>Postgres-XC</productname>
- is a database cluster which consists of multiple database servers
- based
- upon <productname>PostgreSQL</productname>. <productname>Postgres-XC</productname>
- provides global transparent transaction management to all the
- database servers involved and provide both read and write
- scalability.
- </para>
-
- <para>
- To achieve these features, <productname>Postgres-XC</productname>
- is composed of three major components as follows:
-
- <variablelist>
- <varlistentry>
- <term>GTM</term>
- <listitem>
- <para>
-      GTM stands for Global Transaction Manager. It provides global
-      transaction IDs and snapshots to each transaction
-      in the <productname>Postgres-XC</productname> database cluster.
-      It also provides several global values, such as sequences and
-      global timestamps.
-     </para>
-     <para>
-      To improve scalability, each server or virtual
-      machine may run a GTM-Proxy. GTM-Proxy groups commands and
-      responses to and from GTM, to reduce the number of interactions
-      and the amount of data that GTM reads and writes.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Coordinator</term>
- <listitem>
- <para>
-      The Coordinator is the entry point
-      to <productname>Postgres-XC</productname> from applications.
-      You can configure more than one Coordinator in the
-      same <productname>Postgres-XC</productname> cluster. With the help
-      of GTM, the Coordinators provide transparent concurrency and integrity of
-      transactions globally. An application can choose any
-      Coordinator to connect to. Every Coordinator provides the
-      same view of the database.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Datanode</term>
- <listitem>
- <para>
-      Datanodes store user data. As described
-      in <xref linkend="whatis-in-short">
-      and <xref linkend="SQL-CREATETABLE">, more than one Datanode
-      can be configured. Each table can be replicated or
-      distributed among the Datanodes. When a table is distributed, you can
-      choose a column as the distribution key, whose value is used to
-      determine on which Datanode each row should be stored (a short
-      example follows this list).
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </para>
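-
-  <para>
-   As a sketch (the table definitions here are hypothetical), the
-   distribution strategy is chosen per table in
-   <command>CREATE TABLE</command>:
-<programlisting>
-CREATE TABLE lineitem (id integer, note text)
-    DISTRIBUTE BY HASH (id);       -- rows spread over Datanodes by hash of id
-CREATE TABLE country (code char(2), name text)
-    DISTRIBUTE BY REPLICATION;     -- full copy kept on every Datanode
-</programlisting>
-  </para>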
- </sect1>
-
- <sect1 id="xc-overview-gtm">
- <title>GTM and Global Transaction Management</title>
-&xconly;
- <sect2 id="xc-overview-gtm-pgreview">
- <title>Review of <productname>PostgreSQL</productname> Transaction Management Internals</title>
-&common;
- <para>
-    In PostgreSQL, each transaction is given a unique ID called a
-    transaction ID (or XID). XIDs are assigned in ascending order, which
-    makes it possible to tell which of two transactions is older.
-    <footnote>
-     <para>
-      More precisely, the XID is a 32-bit integer. When it reaches its
-      maximum value, it wraps around to the lowest value (3, per the
-      current definition). Both PostgreSQL and Postgres-XC have a means
-      to handle this; for simplicity, it is not described in this
-      document.
- </para>
- </footnote>
-    Each tuple carries a set of XIDs that indicate the transactions which
-    created and deleted it. When a transaction tries to read a tuple,
-    <footnote>
-     <para>
-      This description is somewhat simplified. You
-      will find the precise rules in the <filename>tqual.c</filename> file
-      in PostgreSQL's source code.
-     </para>
-    </footnote>
-    it checks these XIDs: if the target tuple was created
-    by a still-active transaction, it has been neither committed nor
-    aborted, and the reading transaction should ignore it. In this way
-    (in practice, this is handled by the tuple-visibility code in the
-    PostgreSQL core), if we give each transaction a unique transaction ID
-    throughout the system and maintain a snapshot of which transactions
-    are active, not just on a single server but across all the servers,
-    we can maintain globally consistent visibility of each tuple even
-    when a server accepts new statements from transactions running on
-    other servers.
- </para>
- <para>
-    This information is stored in the "<varname>xmin</varname>" and
-    "<varname>xmax</varname>" fields of each table row. When
-    we <command>INSERT</command> rows, the <varname>XID</varname> of the
-    inserting transaction is recorded in the xmin field. When we update
-    rows (with an <command>UPDATE</command>
-    or <command>DELETE</command> statement), PostgreSQL does not
-    simply overwrite the old rows. Instead, PostgreSQL
-    "<emphasis>marks</emphasis>" the old rows as
-    "<emphasis>deleted</emphasis>" by writing the updating
-    transaction's <varname>XID</varname> to the xmax field. In the case
-    of <command>UPDATE</command> (just
-    as with <command>INSERT</command>), new rows are created whose xmin
-    field is "<emphasis>marked</emphasis>"
-    with the <varname>XID</varname> of the creating transaction.
- </para>
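-   <para>
-    These system columns can be inspected directly; a sketch with a
-    hypothetical table and made-up XID values:
-<programlisting>
-SELECT xmin, xmax, * FROM accounts WHERE id = 7;
-
- xmin  | xmax | id | balance
--------+------+----+---------
- 12045 |    0 |  7 |     100
-</programlisting>
-    A zero <varname>xmax</varname> means that no transaction has deleted
-    (or updated) this row version yet.
-   </para>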
- <para>
- These "<varname>xmin</varname>" and "<varname>xmax</varname>" are
- used to determine which row is visible to a transaction. To do
- this, PostgreSQL needs a data to indicate what transactions are
- running, which is called the "<emphasis>snapshot</emphasis>".
- </para>
- <para>
-    If the creating transaction is no longer running, the visibility of a
-    row depends on whether the creating transaction was
-    committed or aborted. Suppose a row of a table was created
-    by some transaction and has not been deleted yet. While the creating
-    transaction is running, the row is visible to the transaction
-    which created it, but not to other transactions. If
-    the creating transaction has finished and was committed, the row
-    is visible; if the transaction was aborted, the row is not
-    visible.
- </para>
- <para>
-    Therefore, PostgreSQL needs two kinds of information:
-    which transactions are currently running, and whether each old
-    transaction was committed or aborted.
- </para>
- <para>
-    The former information is obtained as the
-    "<emphasis>snapshot</emphasis>". PostgreSQL maintains the latter
-    information in the "<filename>CLOG</filename>".
- </para>
- <para>
-    PostgreSQL uses all of this information to determine which rows are
-    visible to a given transaction.
- </para>
- </sect2>
-
- <sect2 id="xc-overview-global-mvcc">
- <title>Making Transaction Management Global</title>
-&xconly;
- <para>
-    In Postgres-XC, the following aspects of transaction management
-    and visibility checking were made global:
- </para>
- <itemizedlist>
- <listitem>
- <para>
-      Assigning XIDs globally to transactions (GXID, Global
-      Transaction ID). This is done globally so that every
-      transaction in the system can be identified uniquely.
- </para>
- </listitem>
- <listitem>
- <para>
-      Providing snapshots. GTM collects the status of all transactions
-      (running, committed, aborted, etc.) to provide snapshots globally
-      (global snapshots). Please note that a global snapshot
-      includes the <varname>GXID</varname>s of transactions initiated by other
-      Coordinators or Datanodes. This is needed because an older
-      transaction may visit a new server after a while. In this case,
-      if the <varname>GXID</varname> of such a transaction is not
-      included in the snapshot, the transaction may be regarded as
-      "old enough" and uncommitted rows may be
-      read. If the <varname>GXID</varname> of such a transaction is
-      included in the snapshot from the beginning, such inconsistencies
-      do not take place.
- </para>
- </listitem>
- </itemizedlist>
- <para>
-    To do this, <productname>Postgres-XC</productname> introduced a dedicated component called
-    GTM (Global Transaction Manager). GTM runs on one of the servers
-    and provides a unique, ordered transaction ID to each transaction
-    running on <productname>Postgres-XC</productname> servers. Because this ID is globally unique,
-    we call it a <varname>GXID</varname> (Global Transaction ID).
- </para>
- <para>
-    GTM receives <varname>GXID</varname> requests from transactions
-    and provides <varname>GXID</varname>s. It also keeps track of when each
-    transaction starts and finishes, in order to generate the
-    snapshots used to control tuple visibility. Because a snapshot
-    here is also a global property, it is called a <emphasis>Global
-    Snapshot</emphasis>.
- </para>
- <para>
-    As long as each transaction runs with a <varname>GXID</varname> and a
-    Global Snapshot, it maintains consistent visibility throughout
-    the system, and it is safe to run transactions in parallel on any
-    servers. Conversely, a transaction composed of multiple
-    statements can be executed across multiple servers while maintaining
-    database consistency.
- </para>
- <para>
-    GTM provides a Global Transaction ID to each transaction and keeps
-    track of the status of all transactions, whether
-    running, committed or aborted, to calculate the global snapshots that
-    maintain tuple visibility.
- </para>
- <para>
-    For this purpose, each transaction reports when it starts and
-    ends, as well as when it issues a <command>PREPARE</command>
-    command in the two-phase commit protocol.
- </para>
- <para>
-    Each transaction requests snapshots according to its transaction
-    isolation level, as in PostgreSQL. If the transaction
-    isolation level is "<emphasis>read committed</emphasis>", the
-    transaction will request a snapshot for each statement. If it is
-    "<emphasis>serializable</emphasis>", the transaction will request a
-    snapshot at the beginning of the transaction and reuse it throughout
-    the transaction.
- </para>
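-   <para>
-    The isolation level is requested exactly as in PostgreSQL; for
-    example:
-<programlisting>
-BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
--- each statement here gets its own (global) snapshot
-COMMIT;
-
-BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
--- one (global) snapshot is taken here and reused until commit
-COMMIT;
-</programlisting>
-   </para>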
- </sect2>
-
- <sect2 id="xc-overview-gtm-proxy">
- <title>Improving GTM Performance</title>
-&xconly;
- <para>
-    Because GTM can be regarded as "serializing" all transaction
-    processing, one might think that GTM would be a performance
-    bottleneck.
- </para>
-
- <para>
-    In fact, GTM can limit overall scalability. GTM should not be
-    used in a very slow network environment such as a wide area
-    network; the GTM architecture is intended for a Gigabit
-    local network. We encourage installing Postgres-XC on a local
-    Gigabit network with minimal latency, that is, with as few
-    switches as possible in the connections among GTM, the Coordinators
-    and the Datanodes.
- </para>
-
- <sect3>
- <title>Primitive GTM Implementation</title>
-
- <para>
-    A primitive GTM implementation works as follows:
- </para>
-
- <procedure>
- <step>
- <para>
-      Each Coordinator backend is provided with a GTM client library to
-      obtain GXIDs and snapshots and to report the transaction status.
- </para>
- </step>
-
- <step>
- <para>
-      GTM opens a port to accept connections from each Coordinator and
-      Datanode backend. When GTM accepts a connection, it creates a
-      thread (GTM Thread) to handle the requests from the connected
-      Coordinator backend.
- </para>
- </step>
-
- <step>
- <para>
-      The GTM Thread receives each request, records it and
-      sends the <varname>GXID</varname>, <emphasis>snapshot</emphasis>
-      and other responses to the Coordinator backend.
- </para>
- </step>
-
- <step>
- <para>
-      These steps are repeated until the Coordinator backend requests
-      disconnection.
- </para>
- </step>
- </procedure>
-
- </sect3>
-
- <sect3>
- <title>GTM Proxy Implementation</title>
-
- <para>
-    You may have noticed that each transaction issues
-    requests to GTM quite frequently. We can collect them into a single
-    block of requests in each Coordinator to reduce the amount of
-    interaction; this is the role of the <emphasis>GTM-Proxy</emphasis>.
- </para>
-
- <para>
-    In this configuration, Coordinator and Datanode backends
-    do not connect to GTM directly. Instead, a GTM Proxy sits
-    between GTM and the Coordinator backends to group multiple requests
-    and responses. The GTM Proxy, like the GTM explained in the previous
-    sections, accepts connections from the Coordinator
-    backends. However, it does not create a new thread for each. The
-    following paragraphs explain how the GTM Proxy is initialized and how
-    it handles requests from Coordinator backends.
- </para>
-
- <para>
-    The GTM Proxy, like GTM itself, is initialized as follows:
- </para>
-
- <procedure>
- <step>
- <para>
-      GTM starts up normally, but can now accept connections from
-      GTM proxies.
- </para>
- </step>
-
- <step>
- <para>
-      The GTM Proxy starts up and creates its GTM Proxy Threads. Each
-      GTM Proxy Thread connects to the GTM in advance. The number of
-      GTM Proxy Threads can be specified at startup. A typical
-      number of threads is one or two, which keeps the number of
-      connections between GTM and the Coordinators small.
- </para>
- </step>
-
- <step>
- <para>
-      The Proxy Main Thread waits for connection requests from each
-      backend.
- </para>
- </step>
-
- </procedure>
-
- <para>
-    When a Coordinator backend requests a connection, the Proxy
-    Main Thread assigns a GTM Proxy Thread to handle its
-    requests. Therefore, one GTM Proxy Thread handles multiple
-    Coordinator backends: if a Coordinator has one hundred
-    backends and one GTM Proxy Thread, this thread takes
-    care of all one hundred.
- </para>
-
- <para>
-    The GTM Proxy Thread then scans all the requests from the Coordinator
-    backends. The busier the Coordinator, the more requests the thread
-    is expected to capture in a single scan. The proxy can therefore group
-    many requests into a single block of requests, reducing the
-    number of interactions between GTM and the Coordinator.
- </para>
-
- <para>
-    Furthermore, a single scan may pick up multiple requests for
-    snapshots. Because these requests can be regarded as received at
-    the same time, they can be represented by a single
-    snapshot. This reduces the amount of data which GTM provides.
- </para>
-
- </sect3>
- </sect2>
-
- <sect2 id="xc-overview-Coordinator">
- <title>Coordinator</title>
-&xconly;
- <para>
-    The Coordinator handles SQL statements from applications,
-    determines which Datanodes should be involved, and generates local
-    SQL statements for each Datanode. In the simplest case, where a
-    single Datanode is involved, the Coordinator simply proxies
-    incoming statements to that Datanode. In more complicated cases,
-    for example when the target Datanode cannot be determined,
-    the Coordinator generates local statements for each Datanode and
-    collects the results, materializing them at the Coordinator for
-    further handling. In such cases, the Coordinator will try to
-    optimize the plan by:
- <itemizedlist>
- <listitem>
- <para>
-       Pushing down <command>WHERE</command> clauses to the Datanodes,
- </para>
- </listitem>
- <listitem>
- <para>
-       Pushing down <emphasis>joins</emphasis> to the Datanodes,
- </para>
- </listitem>
- <listitem>
- <para>
-       Pushing down <emphasis>projections</emphasis> (the column list in the <command>SELECT</command> clause),
- </para>
- </listitem>
- <listitem>
- <para>
-       Pushing down the <command>ORDER BY</command> clause, as well as other clauses.
- </para>
- </listitem>
- </itemizedlist>
-
-    If a transaction involves more than one Datanode and/or
-    Coordinator, the Coordinator handles the transaction using the
-    two-phase commit protocol internally.
- </para>
-
- <para>
-    For aggregate
-    functions, <productname>Postgres-XC</productname> introduced a new
-    <firstterm>collection function</firstterm> between the existing
-    transition function and final function. The collection function runs
-    on the Coordinator to collect all the intermediate results from the
-    involved Datanodes. For details, see <xref linkend="xaggr">
-    and <xref linkend="SQL-CREATEAGGREGATE">.
- </para>
-
- <para>
-    When reading replicated tables, the Coordinator can choose
-    any Datanode to read from. The most efficient choice is one
-    running on the same hardware or virtual machine. This is
-    called the <emphasis>preferred Datanode</emphasis> and can be
-    specified by a GUC local to each Coordinator.
- </para>
-
- <para>
-    When writing replicated tables, on the other hand, all
-    the Coordinators first write to the same Datanode, to avoid
-    update conflicts. This is called the <emphasis>primary
-    Datanode</emphasis>.
- </para>
-
- <para>
-    Coordinators also take care of DDL statements. Because DDL
-    statements operate on system catalogs, which are replicated on all
-    the Coordinators and Datanodes, they are proxied to all the
-    Coordinators and Datanodes. To synchronize the catalog updates on
-    all the nodes, the Coordinator handles DDL with the two-phase commit
-    protocol internally.
- </para>
-
- </sect2>
-
- <sect2 id="xc-overview-Datanode">
- <title>Datanode</title>
-&xconly;
- <para>
-    While Coordinators handle cluster-wide SQL statements, Datanodes
-    take care of purely local issues. In this sense, Datanodes are
-    essentially <productname>PostgreSQL</productname> servers, except
-    that transaction management information, as well as other global
-    values, is obtained from GTM.
- </para>
-
- </sect2>
-
-
- <sect2 id="xc-overview-pooler">
- <title>Coordinator And Datanode Connection</title>
-
- <para>
-    The number of connections between Coordinators and Datanodes may
-    increase over time. This can leave connections unused and
-    waste system resources, while repeatedly connecting and disconnecting
-    requires Datanode backend initialization, which increases latency
-    and also wastes system resources.
- </para>
-
- <para>
-    For example, as in the case of GTM, suppose each Coordinator has one
-    hundred connections to applications and there are ten Coordinators
-    and ten Datanodes. After a while, each Coordinator backend may have a
-    connection to every Datanode. This means each Coordinator backend
-    holds ten connections to Datanodes, so each Coordinator holds one
-    thousand (100 x 10) connections to Datanodes.
- </para>
-
- <para>
-    Because each backend consumes considerable resources for locks and
-    other control information, and only a few of these connections
-    are active at a given time, it is not a good idea to hold so many
-    unused connections between Coordinators and Datanodes.
- </para>
-
- <para>
-    To improve this, Postgres-XC is equipped with a connection pooler
-    between Coordinator and Datanode. When a Coordinator backend
-    requires a connection to a Datanode, the pooler looks for an
-    appropriate connection in the pool. If there's an available
-    one, the pooler assigns it to the Coordinator backend. When the
-    connection is no longer needed, the Coordinator backend returns
-    the connection to the pooler. The pooler does not disconnect the
-    connection; it keeps the connection in the pool for later reuse,
-    keeping the Datanode backend running.
- </para>
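-
-   <para>
-    The pooler is configured per Coordinator.  A sketch of the relevant
-    <filename>postgresql.conf</filename> entries (the values shown are
-    only examples):
-<programlisting>
-pooler_port = 6667      # port the connection pooler listens on
-min_pool_size = 1       # lower bound on pooled connections per pool
-max_pool_size = 100     # upper bound on pooled connections per pool
-</programlisting>
-   </para>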
-
- </sect2>
-
- </sect1>
- </chapter>
-<!## end>
-<!## XL>
- <chapter id="xc-overview">
- <title>Overview of <productname>Postgres-XL</productname> Internals</title>
-
-&xlonly;
- <para>
- This chapter gives an overview of the internal structure
- of <productname>Postgres-XL</productname>.
- </para>
-
- <sect1 id="xc-overview-components">
- <title><productname>Postgres-XL</productname> Components</title>
-&xlonly;
- <para>
- As described
- in <xref linkend="intro-whatis">, <productname>Postgres-XL</productname>
- is a database cluster which consists of multiple database servers
- based
- upon <productname>PostgreSQL</productname>. <productname>Postgres-XL</productname>
-    provides global, transparent transaction management to all the
-    database servers involved and provides both read and write
- scalability.
- </para>
-
- <para>
- To achieve these features, <productname>Postgres-XL</productname>
- is composed of three major components as follows:
-
- <variablelist>
- <varlistentry>
- <term>GTM</term>
- <listitem>
- <para>
- GTM stands for Global Transaction Manager. It provides global
- transaction IDs and snapshots for each transaction
- in the <productname>Postgres-XL</productname> database cluster.
-      It also provides several global values such as sequences and
- global timestamps.
- </para>
- <para>
-      To improve scalability, each server or virtual
-      machine may run a GTM-Proxy. GTM-Proxy groups commands and
-      responses to and from GTM, to reduce the number of interactions
-      and the amount of data that GTM reads and writes.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Coordinator</term>
- <listitem>
- <para>
-      The Coordinator is the entry point
-      to <productname>Postgres-XL</productname> from applications.
-      You can configure more than one Coordinator in the
-      same <productname>Postgres-XL</productname> cluster. With the help
-      of GTM, the Coordinators provide transparent concurrency and integrity of
-      transactions globally. Applications can choose any
-      Coordinator to connect to. Every Coordinator provides the
- same view of the database.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Datanode</term>
- <listitem>
- <para>
-      Datanodes store user data. As described
-      in <xref linkend="whatis-in-short">
-      and <xref linkend="SQL-CREATETABLE">, more than one Datanode
-      can be configured. Each table can be replicated or
-      distributed among the Datanodes. When a table is distributed, you can
-      choose a column as the distribution key, whose value is used to
-      determine on which Datanode each row should be stored.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </para>
- </sect1>
-
- <sect1 id="xc-overview-gtm">
- <title>GTM and Global Transaction Management</title>
-&xlonly;
- <sect2 id="xc-overview-gtm-pgreview">
- <title>Review of <productname>PostgreSQL</productname> Transaction Management Internals</title>
-&common;
- <para>
-    In PostgreSQL, each transaction is given a unique ID called a
-    transaction ID (or XID). XIDs are assigned in ascending order, which
-    makes it possible to tell which of two transactions is older.
-    <footnote>
-     <para>
-      More precisely, the XID is a 32-bit integer. When it reaches its
-      maximum value, it wraps around to the lowest value (3, per the
-      current definition). Both PostgreSQL and Postgres-XL have a means
-      to handle this; for simplicity, it is not described in this
-      document.
- </para>
- </footnote>
-    Each tuple carries a set of XIDs that indicate the transactions which
-    created and deleted it. When a transaction tries to read a tuple,
-    <footnote>
-     <para>
-      This description is somewhat simplified. You
-      will find the precise rules in the <filename>tqual.c</filename> file
-      in PostgreSQL's source code.
-     </para>
-    </footnote>
-    it checks these XIDs: if the target tuple was created
-    by a still-active transaction, it has been neither committed nor
-    aborted, and the reading transaction should ignore it. In this way
-    (in practice, this is handled by the tuple-visibility code in the
-    PostgreSQL core), if we give each transaction a unique transaction ID
-    throughout the system and maintain a snapshot of which transactions
-    are active, not just on a single server but across all the servers,
-    we can maintain globally consistent visibility of each tuple even
-    when a server accepts new statements from transactions running on
-    other servers.
- </para>
- <para>
-    This information is stored in the "<varname>xmin</varname>" and
-    "<varname>xmax</varname>" fields of each table row. When
-    we <command>INSERT</command> rows, the <varname>XID</varname> of the
-    inserting transaction is recorded in the xmin field. When we update
-    rows (with an <command>UPDATE</command>
-    or <command>DELETE</command> statement), PostgreSQL does not
-    simply overwrite the old rows. Instead, PostgreSQL
-    "<emphasis>marks</emphasis>" the old rows as
-    "<emphasis>deleted</emphasis>" by writing the updating
-    transaction's <varname>XID</varname> to the xmax field. In the case
-    of <command>UPDATE</command> (just
-    as with <command>INSERT</command>), new rows are created whose xmin
-    field is "<emphasis>marked</emphasis>"
-    with the <varname>XID</varname> of the creating transaction.
- </para>
- <para>
- These "<varname>xmin</varname>" and "<varname>xmax</varname>" are
- used to determine which row is visible to a transaction. To do
- this, PostgreSQL needs a data to indicate what transactions are
- running, which is called the "<emphasis>snapshot</emphasis>".
- </para>
- <para>
-    If the creating transaction is no longer running, the visibility of a
-    row depends on whether the creating transaction was
-    committed or aborted. Suppose a row of a table was created
-    by some transaction and has not been deleted yet. While the creating
-    transaction is running, the row is visible to the transaction
-    which created it, but not to other transactions. If
-    the creating transaction has finished and was committed, the row
-    is visible; if the transaction was aborted, the row is not
-    visible.
- </para>
- <para>
-    Therefore, PostgreSQL needs two kinds of information:
-    which transactions are currently running, and whether each old
-    transaction was committed or aborted.
- </para>
- <para>
-    The former information is obtained as the
-    "<emphasis>snapshot</emphasis>". PostgreSQL maintains the latter
-    information in the "<filename>CLOG</filename>".
- </para>
- <para>
-    PostgreSQL uses all of this information to determine which rows are
-    visible to a given transaction.
- </para>
- </sect2>
-
- <sect2 id="xc-overview-global-mvcc">
- <title>Making Transaction Management Global</title>
-&xlonly;
- <para>
-    In Postgres-XL, the following aspects of transaction management
-    and visibility checking were extracted from the individual nodes
-    and pulled into the GTM:
- </para>
- <itemizedlist>
- <listitem>
- <para>
-      Assigning XIDs globally to transactions (GXID, Global
-      Transaction ID). This is done globally so that every
-      transaction in the system can be identified uniquely.
- </para>
- </listitem>
- <listitem>
- <para>
-      Providing snapshots. GTM collects the status of all transactions
-      (running, committed, aborted, etc.) to provide snapshots globally
-      (global snapshots). Please note that each global snapshot
-      includes the <varname>GXID</varname>s of transactions initiated by other
-      Coordinators or Datanodes. This is needed because an older
-      transaction may visit a new server after a while. In this case,
-      if the <varname>GXID</varname> of such a transaction is not
-      included in the snapshot, the transaction may be regarded as
-      "old enough" and uncommitted rows may be
-      read. If the <varname>GXID</varname> of such a transaction is
-      included in the snapshot from the beginning, such inconsistencies
-      do not take place.
- </para>
- </listitem>
- </itemizedlist>
- <para>
- To do this, <productname>Postgres-XL</productname> introduced a dedicated component called
- GTM (Global Transaction Manager). GTM runs on one of the servers
-    and provides a unique, ordered transaction ID to each transaction
-    running on <productname>Postgres-XL</productname> servers. Because this ID is globally unique,
-    we call it a <varname>GXID</varname> (Global Transaction ID).
- </para>
- <para>
-    GTM receives <varname>GXID</varname> requests from transactions
-    and provides <varname>GXID</varname>s. It also keeps track of when each
-    transaction starts and finishes, in order to generate the
-    snapshots used to control tuple visibility. Because a snapshot
-    here is also a global property, it is called a <emphasis>Global
-    Snapshot</emphasis>.
- </para>
- <para>
-    As long as each transaction runs with a <varname>GXID</varname> and
-    a Global Snapshot, it maintains consistent visibility throughout
-    the system, and it is safe to run transactions in parallel on any
-    servers. Conversely, a transaction composed of multiple
-    statements can be executed across multiple servers while maintaining
-    database consistency.
- </para>
- <para>
-    GTM provides a Global Transaction ID to each transaction and keeps
-    track of the status of all transactions, whether
-    running, committed or aborted, to calculate the global snapshots that
-    maintain tuple visibility.
- </para>
- <para>
-    For this purpose, each transaction reports when it starts and
-    ends, as well as when it issues a <command>PREPARE</command>
-    command in the two-phase commit protocol.
- </para>
- <para>
-    Each transaction requests snapshots according to its transaction
-    isolation level, as in PostgreSQL. If the transaction
-    isolation level is "<emphasis>read committed</emphasis>", the
-    transaction will request a snapshot for each statement. If it is
-    "<emphasis>serializable</emphasis>", the transaction will request a
-    snapshot at the beginning of the transaction and reuse it throughout
-    the transaction.
- </para>
- </sect2>
-
- <sect2 id="xc-overview-gtm-proxy">
- <title>Improving GTM Performance</title>
-&xlonly;
- <para>
-    Because GTM can be regarded as "serializing" all transaction
-    processing, one might think that GTM can be a performance
-    bottleneck.
- </para>
-
- <para>
-    In fact, GTM can limit overall scalability. GTM should not be
-    used in a very slow network environment such as a wide area
-    network; the GTM architecture is intended for a Gigabit
-    local network. It is encouraged to install Postgres-XL on a local
-    Gigabit network with minimal latency, that is, with as few
-    switches as possible in the connections among GTM, the Coordinators
-    and the Datanodes.
-    In addition, consider putting all the components on their own subnet
-    if the systems have multiple network ports.
- </para>
-
- <sect3>
- <title>Primitive GTM Implementation</title>
-
- <para>
-    A primitive GTM implementation works as follows:
- </para>
-
- <procedure>
- <step>
- <para>
-      Each Coordinator backend is provided with a GTM client library to
-      obtain GXIDs and snapshots and to report the transaction status.
- </para>
- </step>
-
- <step>
- <para>
- GTM opens a port to accept connections from each Coordinator and
- Datanode backend. When GTM accepts a connection, it creates a
- thread (GTM Thread) to handle requests to GTM from the connected
- Coordinator backend.
- </para>
- </step>
-
- <step>
- <para>
-      The GTM Thread receives each request, records it and
-      sends the <varname>GXID</varname>, <emphasis>snapshot</emphasis>
-      and other responses to the Coordinator backend.
- </para>
- </step>
-
- <step>
- <para>
-      These steps are repeated until the Coordinator backend requests
-      disconnection.
- </para>
- </step>
- </procedure>
-
- </sect3>
-
- <sect3>
- <title>GTM Proxy Implementation</title>
-
- <para>
-    Each transaction issues
-    requests to GTM frequently. We can collect them into a single
-    block of requests in each Coordinator to reduce the amount of
-    interaction by using a <emphasis>GTM-Proxy</emphasis>.
- </para>
-
- <para>
-    In this configuration, Coordinator and Datanode backends
-    do not connect to GTM directly. Instead, a GTM Proxy sits
-    between GTM and the Coordinator backends to group multiple requests
-    and responses. The GTM Proxy, like the GTM explained in the previous
-    sections, accepts connections from the Coordinator
-    backends. However, it does not create a new thread for each. The
-    following paragraphs explain how the GTM Proxy is initialized and how
-    it handles requests from Coordinator backends.
- </para>
-
- <para>
-    The GTM Proxy, like GTM itself, is initialized as follows:
- </para>
-
- <procedure>
- <step>
- <para>
-      GTM starts up normally, but can now accept connections from
-      GTM proxies.
- </para>
- </step>
-
- <step>
- <para>
-      The GTM Proxy starts up and creates its GTM Proxy Threads. Each
-      GTM Proxy Thread connects to the GTM in advance. The number of
-      GTM Proxy Threads can be specified at startup. A typical
-      number of threads is one or two, which keeps the number of
-      connections between GTM and the Coordinators small.
- </para>
- </step>
-
- <step>
- <para>
-      The Proxy Main Thread waits for connection requests from each
-      backend.
- </para>
- </step>
-
- </procedure>
-
- <para>
-    When a Coordinator backend requests a connection, the Proxy
-    Main Thread assigns a GTM Proxy Thread to handle its
-    requests. Therefore, one GTM Proxy Thread handles multiple
-    Coordinator backends: if a Coordinator has one hundred
-    backends and one GTM Proxy Thread, this thread takes
-    care of all one hundred.
- </para>
-
- <para>
-    The GTM Proxy Thread then scans all the requests from the Coordinator
-    backends. The busier the Coordinator, the more requests the thread
-    is expected to capture in a single scan. The proxy can therefore group
-    many requests into a single block of requests, reducing the
-    number of interactions between GTM and the Coordinator.
- </para>
-
- <para>
-    Furthermore, a single scan may pick up multiple requests for
-    snapshots. Because these requests can be regarded as received at
-    the same time, they can be represented by a single
-    snapshot. This reduces the amount of data which GTM provides.
- </para>
-
- </sect3>
- </sect2>
-
- <sect2 id="xc-overview-Coordinator">
- <title>Coordinator</title>
-&xlonly;
- <para>
-    The Coordinator handles SQL statements from applications,
-    determines which Datanodes should be involved, and generates local
-    SQL statements for each Datanode. In the simplest case, where
-    a single Datanode is involved, the Coordinator simply proxies
-    incoming statements to that Datanode. In more complicated cases,
-    for example when the target Datanode cannot be determined,
-    the Coordinator generates local statements for each Datanode and
-    collects the results, materializing them at the Coordinator for
-    further handling. In such cases, the Coordinator will try to
-    optimize the plan by:
- <itemizedlist>
- <listitem>
- <para>
-       Pushing down <command>WHERE</command> clauses to the Datanodes,
- </para>
- </listitem>
- <listitem>
- <para>
-       Pushing down <emphasis>joins</emphasis> to the Datanodes,
- </para>
- </listitem>
- <listitem>
- <para>
-       Pushing down <emphasis>projections</emphasis> (the column list in the <command>SELECT</command> clause),
- </para>
- </listitem>
- <listitem>
- <para>
-       Pushing down the <command>ORDER BY</command> clause, as well as other clauses.
- </para>
- </listitem>
- </itemizedlist>
-
-    If a transaction involves more than one Datanode and/or
-    Coordinator, the Coordinator handles the transaction using the
-    two-phase commit protocol internally.
- </para>
-
- <para>
-    For aggregate
-    functions, <productname>Postgres-XL</productname> introduced a new
-    <firstterm>collection function</firstterm> between the existing
-    transition function and final function. The collection function runs
-    on the Coordinator to collect all the intermediate results from the
-    involved Datanodes. For details, see <xref linkend="xaggr">
-    and <xref linkend="SQL-CREATEAGGREGATE">.
- </para>
-
- <para>
-    When reading replicated tables, the Coordinator can choose
-    any Datanode to read from. The most efficient choice is one
-    running on the same hardware or virtual machine. This is
-    called the <emphasis>preferred Datanode</emphasis> and can be
-    specified by a GUC local to each Coordinator.
- </para>
-
- <para>
-    When writing replicated tables, on the other hand, all
-    the Coordinators first write to the same Datanode, to avoid
-    update conflicts. This is called the <emphasis>primary
-    Datanode</emphasis>.
- </para>
-
- <para>
-    Coordinators also take care of DDL statements. Because DDL
-    statements operate on system catalogs, which are replicated on all
-    the Coordinators and Datanodes, they are proxied to all the
-    Coordinators and Datanodes. To synchronize the catalog updates on
-    all the nodes, the Coordinator handles DDL with the two-phase commit
-    protocol internally.
- </para>
-
- </sect2>
-
- <sect2 id="xc-overview-Datanode">
- <title>Datanode</title>
-&xlonly;
- <para>
-    While Coordinators handle cluster-wide SQL statements, Datanodes
-    take care of purely local issues. In this sense, Datanodes are
-    essentially <productname>PostgreSQL</productname> servers, except
-    that transaction management information, as well as other global
-    values, is obtained from GTM.
- </para>
-
- </sect2>
-
-
- <sect2 id="xc-overview-pooler">
- <title>Coordinator And Datanode Connection</title>
-
- <para>
-    The number of connections between Coordinators and Datanodes may
-    increase over time. This can leave connections unused and
-    waste system resources, while repeatedly connecting and disconnecting
-    requires Datanode backend initialization, which increases latency
-    and also wastes system resources.
- </para>
-
- <para>
-    For example, as in the case of GTM, suppose each Coordinator has one
-    hundred connections to applications and there are ten Coordinators
-    and ten Datanodes. After a while, each Coordinator backend may have a
-    connection to every Datanode. This means each Coordinator backend
-    holds ten connections to Datanodes, so each Coordinator holds one
-    thousand (100 x 10) connections to Datanodes.
- </para>
-
- <para>
-    Because each backend consumes considerable resources for locks and
-    other control information, and only a few of these connections
-    are active at a given time, it is not a good idea to hold so many
-    unused connections between Coordinators and Datanodes.
- </para>
-
- <para>
-    To improve this, Postgres-XL is equipped with a connection pooler
-    between Coordinator and Datanode. When a Coordinator backend
-    requires a connection to a Datanode, the pooler looks for an
-    appropriate connection in the pool. If there's an available
-    one, the pooler assigns it to the Coordinator backend. When the
-    connection is no longer needed, the Coordinator backend returns
-    the connection to the pooler. The pooler does not disconnect the
-    connection; it keeps the connection in the pool for later reuse,
-    keeping the Datanode backend running.
- </para>
-
- </sect2>
-
- </sect1>
- </chapter>
-<!## end>
-
diff --git a/doc-xc/src/sgml/array.sgmlin b/doc-xc/src/sgml/array.sgmlin
deleted file mode 100644
index 2e7220426e..0000000000
--- a/doc-xc/src/sgml/array.sgmlin
+++ /dev/null
@@ -1,749 +0,0 @@
-<!-- doc/src/sgml/array.sgml -->
-
-<sect1 id="arrays">
- <title>Arrays</title>
-
- <indexterm>
- <primary>array</primary>
- </indexterm>
-&common;
- <para>
-<!## PG>
- <productname>PostgreSQL</productname> allows columns of a table to be
-<!## end>
-<!## XC>
- <productname>Postgres-XC</productname> allows columns of a table to be
-<!## end>
-<!## XL>
- <productname>Postgres-XL</productname> allows columns of a table to be
-<!## end>
- defined as variable-length multidimensional arrays. Arrays of any
- built-in or user-defined base type, enum type, or composite type
- can be created.
- Arrays of domains are not yet supported.
- </para>
-
- <sect2 id="arrays-declaration">
- <title>Declaration of Array Types</title>
-
- <indexterm>
- <primary>array</primary>
- <secondary>declaration</secondary>
- </indexterm>
-&common;
- <para>
- To illustrate the use of array types, we create this table:
-<programlisting>
-CREATE TABLE sal_emp (
- name text,
- pay_by_quarter integer[],
- schedule text[][]
-);
-</programlisting>
- As shown, an array data type is named by appending square brackets
- (<literal>[]</>) to the data type name of the array elements. The
- above command will create a table named
- <structname>sal_emp</structname> with a column of type
- <type>text</type> (<structfield>name</structfield>), a
- one-dimensional array of type <type>integer</type>
- (<structfield>pay_by_quarter</structfield>), which represents the
- employee's salary by quarter, and a two-dimensional array of
- <type>text</type> (<structfield>schedule</structfield>), which
- represents the employee's weekly schedule.
- </para>
-
- <para>
- The syntax for <command>CREATE TABLE</command> allows the exact size of
- arrays to be specified, for example:
-
-<programlisting>
-CREATE TABLE tictactoe (
- squares integer[3][3]
-);
-</programlisting>
-
- However, the current implementation ignores any supplied array size
- limits, i.e., the behavior is the same as for arrays of unspecified
- length.
- </para>
-
- <para>
- The current implementation does not enforce the declared
- number of dimensions either. Arrays of a particular element type are
- all considered to be of the same type, regardless of size or number
- of dimensions. So, declaring the array size or number of dimensions in
- <command>CREATE TABLE</command> is simply documentation; it does not
- affect run-time behavior.
- </para>
-
- <para>
- An alternative syntax, which conforms to the SQL standard by using
- the keyword <literal>ARRAY</>, can be used for one-dimensional arrays.
- <structfield>pay_by_quarter</structfield> could have been defined
- as:
-<programlisting>
- pay_by_quarter integer ARRAY[4],
-</programlisting>
- Or, if no array size is to be specified:
-<programlisting>
- pay_by_quarter integer ARRAY,
-</programlisting>
-<!## PG>
- As before, however, <productname>PostgreSQL</> does not enforce the
-<!## end>
-<!## XC>
- As before, however, <productname>Postgres-XC</> does not enforce the
-<!## end>
-<!## XL>
- As before, however, <productname>Postgres-XL</> does not enforce the
-<!## end>
- size restriction in any case.
- </para>
- </sect2>
-
- <sect2 id="arrays-input">
- <title>Array Value Input</title>
-
- <indexterm>
- <primary>array</primary>
- <secondary>constant</secondary>
- </indexterm>
-&common;
- <para>
- To write an array value as a literal constant, enclose the element
- values within curly braces and separate them by commas. (If you
- know C, this is not unlike the C syntax for initializing
- structures.) You can put double quotes around any element value,
- and must do so if it contains commas or curly braces. (More
- details appear below.) Thus, the general format of an array
- constant is the following:
-<synopsis>
-'{ <replaceable>val1</replaceable> <replaceable>delim</replaceable> <replaceable>val2</replaceable> <replaceable>delim</replaceable> ... }'
-</synopsis>
- where <replaceable>delim</replaceable> is the delimiter character
- for the type, as recorded in its <literal>pg_type</literal> entry.
- Among the standard data types provided in the
-<!## PG>
- <productname>PostgreSQL</productname> distribution, all use a comma
-<!## end>
-<!## XC>
- <productname>Postgres-XC</productname> distribution, all use a comma
-<!## end>
-<!## XL>
- <productname>Postgres-XL</productname> distribution, all use a comma
-<!## end>
- (<literal>,</>), except for type <type>box</> which uses a semicolon
- (<literal>;</>). Each <replaceable>val</replaceable> is
- either a constant of the array element type, or a subarray. An example
- of an array constant is:
-<programlisting>
-'{{1,2,3},{4,5,6},{7,8,9}}'
-</programlisting>
- This constant is a two-dimensional, 3-by-3 array consisting of
- three subarrays of integers.
- </para>
-
- <para>
- To set an element of an array constant to NULL, write <literal>NULL</>
- for the element value. (Any upper- or lower-case variant of
- <literal>NULL</> will do.) If you want an actual string value
- <quote>NULL</>, you must put double quotes around it.
- </para>
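-
- <para>
-  For example (output shown as <application>psql</> would display it):
-<programlisting>
-SELECT '{1,NULL,3}'::int[] AS has_null, '{"NULL"}'::text[] AS string_null;
-
-  has_null  | string_null
-------------+-------------
- {1,NULL,3} | {"NULL"}
-(1 row)
-</programlisting>
- </para>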
-
- <para>
- (These kinds of array constants are actually only a special case of
- the generic type constants discussed in <xref
- linkend="sql-syntax-constants-generic">. The constant is initially
- treated as a string and passed to the array input conversion
- routine. An explicit type specification might be necessary.)
- </para>
-
- <para>
- Now we can show some <command>INSERT</command> statements:
-
-<programlisting>
-INSERT INTO sal_emp
- VALUES ('Bill',
- '{10000, 10000, 10000, 10000}',
- '{{"meeting", "lunch"}, {"training", "presentation"}}');
-
-INSERT INTO sal_emp
- VALUES ('Carol',
- '{20000, 25000, 25000, 25000}',
- '{{"breakfast", "consulting"}, {"meeting", "lunch"}}');
-</programlisting>
- </para>
-
- <para>
- The result of the previous two inserts looks like this:
-
-<programlisting>
-SELECT * FROM sal_emp;
- name | pay_by_quarter | schedule
--------+---------------------------+-------------------------------------------
- Bill | {10000,10000,10000,10000} | {{meeting,lunch},{training,presentation}}
- Carol | {20000,25000,25000,25000} | {{breakfast,consulting},{meeting,lunch}}
-(2 rows)
-</programlisting>
- </para>
-
- <para>
- Multidimensional arrays must have matching extents for each
- dimension. A mismatch causes an error, for example:
-
-<programlisting>
-INSERT INTO sal_emp
- VALUES ('Bill',
- '{10000, 10000, 10000, 10000}',
- '{{"meeting", "lunch"}, {"meeting"}}');
-ERROR: multidimensional arrays must have array expressions with matching dimensions
-</programlisting>
- </para>
-
- <para>
- The <literal>ARRAY</> constructor syntax can also be used:
-<programlisting>
-INSERT INTO sal_emp
- VALUES ('Bill',
- ARRAY[10000, 10000, 10000, 10000],
- ARRAY[['meeting', 'lunch'], ['training', 'presentation']]);
-
-INSERT INTO sal_emp
- VALUES ('Carol',
- ARRAY[20000, 25000, 25000, 25000],
- ARRAY[['breakfast', 'consulting'], ['meeting', 'lunch']]);
-</programlisting>
- Notice that the array elements are ordinary SQL constants or
- expressions; for instance, string literals are single quoted, instead of
- double quoted as they would be in an array literal. The <literal>ARRAY</>
- constructor syntax is discussed in more detail in
- <xref linkend="sql-syntax-array-constructors">.
- </para>
- </sect2>
-
- <sect2 id="arrays-accessing">
- <title>Accessing Arrays</title>
-
- <indexterm>
- <primary>array</primary>
- <secondary>accessing</secondary>
- </indexterm>
-&common;
- <para>
- Now, we can run some queries on the table.
- First, we show how to access a single element of an array.
- This query retrieves the names of the employees whose pay changed in
- the second quarter:
-
-<programlisting>
-SELECT name FROM sal_emp WHERE pay_by_quarter[1] &lt;&gt; pay_by_quarter[2];
-
- name
--------
- Carol
-(1 row)
-</programlisting>
-
- The array subscript numbers are written within square brackets.
-<!## PG>
- By default <productname>PostgreSQL</productname> uses a
-<!## end>
-<!## XC>
- By default <productname>Postgres-XC</productname> uses a
-<!## end>
-<!## XL>
- By default <productname>Postgres-XL</productname> uses a
-<!## end>
- one-based numbering convention for arrays, that is,
- an array of <replaceable>n</> elements starts with <literal>array[1]</literal> and
- ends with <literal>array[<replaceable>n</>]</literal>.
- </para>
-
- <para>
- This query retrieves the third quarter pay of all employees:
-
-<programlisting>
-SELECT pay_by_quarter[3] FROM sal_emp;
-
- pay_by_quarter
-----------------
- 10000
- 25000
-(2 rows)
-</programlisting>
- </para>
-
- <para>
- We can also access arbitrary rectangular slices of an array, or
- subarrays. An array slice is denoted by writing
- <literal><replaceable>lower-bound</replaceable>:<replaceable>upper-bound</replaceable></literal>
- for one or more array dimensions. For example, this query retrieves the first
- item on Bill's schedule for the first two days of the week:
-
-<programlisting>
-SELECT schedule[1:2][1:1] FROM sal_emp WHERE name = 'Bill';
-
- schedule
-------------------------
- {{meeting},{training}}
-(1 row)
-</programlisting>
-
- If any dimension is written as a slice, i.e., contains a colon, then all
- dimensions are treated as slices. Any dimension that has only a single
- number (no colon) is treated as being from 1
- to the number specified. For example, <literal>[2]</> is treated as
- <literal>[1:2]</>, as in this example:
-
-<programlisting>
-SELECT schedule[1:2][2] FROM sal_emp WHERE name = 'Bill';
-
- schedule
--------------------------------------------
- {{meeting,lunch},{training,presentation}}
-(1 row)
-</programlisting>
-
- To avoid confusion with the non-slice case, it's best to use slice syntax
- for all dimensions, e.g., <literal>[1:2][1:1]</>, not <literal>[2][1:1]</>.
- </para>
-
- <para>
- An array subscript expression will return null if either the array itself or
- any of the subscript expressions are null. Also, null is returned if a
- subscript is outside the array bounds (this case does not raise an error).
- For example, if <literal>schedule</>
- currently has the dimensions <literal>[1:3][1:2]</> then referencing
- <literal>schedule[3][3]</> yields NULL. Similarly, an array reference
- with the wrong number of subscripts yields a null rather than an error.
- </para>
-
- <para>
- An array slice expression likewise yields null if the array itself or
- any of the subscript expressions are null. However, in other
- cases such as selecting an array slice that
- is completely outside the current array bounds, a slice expression
- yields an empty (zero-dimensional) array instead of null. (This
- does not match non-slice behavior and is done for historical reasons.)
- If the requested slice partially overlaps the array bounds, then it
- is silently reduced to just the overlapping region instead of
- returning null.
- </para>
-
- <para>
- The current dimensions of any array value can be retrieved with the
- <function>array_dims</function> function:
-
-<programlisting>
-SELECT array_dims(schedule) FROM sal_emp WHERE name = 'Carol';
-
- array_dims
-------------
- [1:2][1:2]
-(1 row)
-</programlisting>
-
- <function>array_dims</function> produces a <type>text</type> result,
- which is convenient for people to read but perhaps inconvenient
- for programs. Dimensions can also be retrieved with
- <function>array_upper</function> and <function>array_lower</function>,
- which return the upper and lower bound of a
- specified array dimension, respectively:
-
-<programlisting>
-SELECT array_upper(schedule, 1) FROM sal_emp WHERE name = 'Carol';
-
- array_upper
--------------
- 2
-(1 row)
-</programlisting>
-
- <function>array_length</function> will return the length of a specified
- array dimension:
-
-<programlisting>
-SELECT array_length(schedule, 1) FROM sal_emp WHERE name = 'Carol';
-
- array_length
---------------
- 2
-(1 row)
-</programlisting>
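-
- and <function>array_lower</function> returns the lower bound of a
- specified array dimension (here the default, 1):
-
-<programlisting>
-SELECT array_lower(schedule, 1) FROM sal_emp WHERE name = 'Carol';
-
- array_lower
--------------
- 1
-(1 row)
-</programlisting>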
- </para>
- </sect2>
-
- <sect2 id="arrays-modifying">
- <title>Modifying Arrays</title>
-
- <indexterm>
- <primary>array</primary>
- <secondary>modifying</secondary>
- </indexterm>
-&common;
- <para>
- An array value can be replaced completely:
-
-<programlisting>
-UPDATE sal_emp SET pay_by_quarter = '{25000,25000,27000,27000}'
- WHERE name = 'Carol';
-</programlisting>
-
- or using the <literal>ARRAY</literal> expression syntax:
-
-<programlisting>
-UPDATE sal_emp SET pay_by_quarter = ARRAY[25000,25000,27000,27000]
- WHERE name = 'Carol';
-</programlisting>
-
- An array can also be updated at a single element:
-
-<programlisting>
-UPDATE sal_emp SET pay_by_quarter[4] = 15000
- WHERE name = 'Bill';
-</programlisting>
-
- or updated in a slice:
-
-<programlisting>
-UPDATE sal_emp SET pay_by_quarter[1:2] = '{27000,27000}'
- WHERE name = 'Carol';
-</programlisting>
-
- </para>
-
- <para>
- A stored array value can be enlarged by assigning to elements not already
- present. Any positions between those previously present and the newly
- assigned elements will be filled with nulls. For example, if array
- <literal>myarray</> currently has four elements, it will have six
- elements after an update that assigns to <literal>myarray[6]</>;
- <literal>myarray[5]</> will contain null.
- Currently, enlargement in this fashion is only allowed for one-dimensional
- arrays, not multidimensional arrays.
- </para>
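-
- <para>
-  As a quick sketch of this behavior (assuming Bill's
-  <literal>pay_by_quarter</> currently holds four elements):
-
-<programlisting>
-UPDATE sal_emp SET pay_by_quarter[6] = 30000
-    WHERE name = 'Bill';
-
-SELECT array_dims(pay_by_quarter), pay_by_quarter[5] IS NULL AS gap_is_null
-    FROM sal_emp WHERE name = 'Bill';
-
- array_dims | gap_is_null
-------------+-------------
- [1:6]      | t
-(1 row)
-</programlisting>
- </para>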
-
- <para>
- Subscripted assignment allows creation of arrays that do not use one-based
- subscripts. For example, one might assign to <literal>myarray[-2:7]</> to
- create an array with subscript values from -2 to 7.
- </para>
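-
- <para>
-  A minimal sketch (using a hypothetical table <literal>arr_demo</> whose
-  integer-array column <literal>a</> holds <literal>{1,2,3}</>):
-
-<programlisting>
-UPDATE arr_demo SET a[-2:7] = '{10,20,30,40,50,60,70,80,90,100}';
-
-SELECT array_dims(a) FROM arr_demo;
-
- array_dims
-------------
- [-2:7]
-(1 row)
-</programlisting>
- </para>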
-
- <para>
- New array values can also be constructed using the concatenation operator,
- <literal>||</literal>:
-<programlisting>
-SELECT ARRAY[1,2] || ARRAY[3,4];
- ?column?
------------
- {1,2,3,4}
-(1 row)
-
-SELECT ARRAY[5,6] || ARRAY[[1,2],[3,4]];
- ?column?
----------------------
- {{5,6},{1,2},{3,4}}
-(1 row)
-</programlisting>
- </para>
-
- <para>
- The concatenation operator allows a single element to be pushed onto the
- beginning or end of a one-dimensional array. It also accepts two
- <replaceable>N</>-dimensional arrays, or an <replaceable>N</>-dimensional
- and an <replaceable>N+1</>-dimensional array.
- </para>
-
- <para>
- When a single element is pushed onto either the beginning or end of a
- one-dimensional array, the result is an array with the same lower bound
- subscript as the array operand. For example:
-<programlisting>
-SELECT array_dims(1 || '[0:1]={2,3}'::int[]);
- array_dims
-------------
- [0:2]
-(1 row)
-
-SELECT array_dims(ARRAY[1,2] || 3);
- array_dims
-------------
- [1:3]
-(1 row)
-</programlisting>
- </para>
-
- <para>
- When two arrays with an equal number of dimensions are concatenated, the
- result retains the lower bound subscript of the left-hand operand's outer
- dimension. The result is an array comprising every element of the left-hand
- operand followed by every element of the right-hand operand. For example:
-<programlisting>
-SELECT array_dims(ARRAY[1,2] || ARRAY[3,4,5]);
- array_dims
-------------
- [1:5]
-(1 row)
-
-SELECT array_dims(ARRAY[[1,2],[3,4]] || ARRAY[[5,6],[7,8],[9,0]]);
- array_dims
-------------
- [1:5][1:2]
-(1 row)
-</programlisting>
- </para>
-
- <para>
- When an <replaceable>N</>-dimensional array is pushed onto the beginning
- or end of an <replaceable>N+1</>-dimensional array, the result is
- analogous to the element-array case above. Each <replaceable>N</>-dimensional
- sub-array is essentially an element of the <replaceable>N+1</>-dimensional
- array's outer dimension. For example:
-<programlisting>
-SELECT array_dims(ARRAY[1,2] || ARRAY[[3,4],[5,6]]);
- array_dims
-------------
- [1:3][1:2]
-(1 row)
-</programlisting>
- </para>
-
- <para>
- An array can also be constructed by using the functions
- <function>array_prepend</function>, <function>array_append</function>,
- or <function>array_cat</function>. The first two only support one-dimensional
- arrays, but <function>array_cat</function> supports multidimensional arrays.
-
- Note that the concatenation operator discussed above is preferred over
- direct use of these functions. In fact, these functions primarily exist for use
- in implementing the concatenation operator. However, they might be directly
- useful in the creation of user-defined aggregates. Some examples:
-
-<programlisting>
-SELECT array_prepend(1, ARRAY[2,3]);
- array_prepend
----------------
- {1,2,3}
-(1 row)
-
-SELECT array_append(ARRAY[1,2], 3);
- array_append
---------------
- {1,2,3}
-(1 row)
-
-SELECT array_cat(ARRAY[1,2], ARRAY[3,4]);
- array_cat
------------
- {1,2,3,4}
-(1 row)
-
-SELECT array_cat(ARRAY[[1,2],[3,4]], ARRAY[5,6]);
- array_cat
----------------------
- {{1,2},{3,4},{5,6}}
-(1 row)
-
-SELECT array_cat(ARRAY[5,6], ARRAY[[1,2],[3,4]]);
- array_cat
----------------------
- {{5,6},{1,2},{3,4}}
-(1 row)
-</programlisting>
- </para>
- </sect2>
-
- <sect2 id="arrays-searching">
- <title>Searching in Arrays</title>
-
- <indexterm>
- <primary>array</primary>
- <secondary>searching</secondary>
- </indexterm>
-&common;
- <para>
- To search for a value in an array, each value must be checked.
- This can be done manually, if you know the size of the array.
- For example:
-
-<programlisting>
-SELECT * FROM sal_emp WHERE pay_by_quarter[1] = 10000 OR
- pay_by_quarter[2] = 10000 OR
- pay_by_quarter[3] = 10000 OR
- pay_by_quarter[4] = 10000;
-</programlisting>
-
- However, this quickly becomes tedious for large arrays, and is not
- helpful if the size of the array is unknown. An alternative method is
- described in <xref linkend="functions-comparisons">. The above
- query could be replaced by:
-
-<programlisting>
-SELECT * FROM sal_emp WHERE 10000 = ANY (pay_by_quarter);
-</programlisting>
-
- In addition, you can find rows where the array has all values
- equal to 10000 with:
-
-<programlisting>
-SELECT * FROM sal_emp WHERE 10000 = ALL (pay_by_quarter);
-</programlisting>
-
- </para>
-
- <para>
- Alternatively, the <function>generate_subscripts</> function can be used.
- For example:
-
-<programlisting>
-SELECT * FROM
- (SELECT pay_by_quarter,
- generate_subscripts(pay_by_quarter, 1) AS s
- FROM sal_emp) AS foo
- WHERE pay_by_quarter[s] = 10000;
-</programlisting>
-
- This function is described in <xref linkend="functions-srf-subscripts">.
- </para>
-
- <tip>
- <para>
- Arrays are not sets; searching for specific array elements
- can be a sign of database misdesign. Consider
- using a separate table with a row for each item that would be an
- array element. This will be easier to search, and is likely to
- scale better for a large number of elements.
- </para>
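-
- <para>
-  A sketch of that alternative design (with hypothetical table and
-  column names) might be:
-
-<programlisting>
-CREATE TABLE sal_emp_pay (
-    name    text,
-    quarter int,
-    amount  integer
-);
-
-SELECT name FROM sal_emp_pay WHERE amount = 10000;
-</programlisting>
- </para>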
- </tip>
- </sect2>
-
- <sect2 id="arrays-io">
- <title>Array Input and Output Syntax</title>
-
- <indexterm>
- <primary>array</primary>
- <secondary>I/O</secondary>
- </indexterm>
-&common;
- <para>
- The external text representation of an array value consists of items that
- are interpreted according to the I/O conversion rules for the array's
- element type, plus decoration that indicates the array structure.
- The decoration consists of curly braces (<literal>{</> and <literal>}</>)
- around the array value plus delimiter characters between adjacent items.
- The delimiter character is usually a comma (<literal>,</>) but can be
- something else: it is determined by the <literal>typdelim</> setting
- for the array's element type. Among the standard data types provided
-<!## PG>
- in the <productname>PostgreSQL</productname> distribution, all use a comma,
-<!## end>
-<!## XC>
- in the <productname>Postgres-XC</productname> distribution, all use a comma,
-<!## end>
-<!## XL>
- in the <productname>Postgres-XL</productname> distribution, all use a comma,
-<!## end>
- except for type <type>box</>, which uses a semicolon (<literal>;</>).
- In a multidimensional array, each dimension (row, plane,
- cube, etc.) gets its own level of curly braces, and delimiters
- must be written between adjacent curly-braced entities of the same level.
- </para>
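-
- <para>
-  For instance, a two-dimensional integer array can be written as:
-
-<programlisting>
-SELECT '{{1,2,3},{4,5,6}}'::int[] AS arr;
-
-        arr
--------------------
- {{1,2,3},{4,5,6}}
-(1 row)
-</programlisting>
- </para>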
-
- <para>
- The array output routine will put double quotes around element values
- if they are empty strings, contain curly braces, delimiter characters,
- double quotes, backslashes, or white space, or match the word
- <literal>NULL</>. Double quotes and backslashes
- embedded in element values will be backslash-escaped. For numeric
- data types it is safe to assume that double quotes will never appear, but
- for textual data types one should be prepared to cope with either the presence
- or absence of quotes.
- </para>
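-
- <para>
-  For example:
-
-<programlisting>
-SELECT ARRAY['hello world', 'NULL', ''] AS arr;
-
-            arr
----------------------------
- {"hello world","NULL",""}
-(1 row)
-</programlisting>
- </para>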
-
- <para>
- By default, the lower bound index value of an array's dimensions is
- set to one. To represent arrays with other lower bounds, the array
- subscript ranges can be specified explicitly before writing the
- array contents.
- This decoration consists of square brackets (<literal>[]</>)
- around each array dimension's lower and upper bounds, with
- a colon (<literal>:</>) delimiter character in between. The
- array dimension decoration is followed by an equal sign (<literal>=</>).
- For example:
-<programlisting>
-SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2
- FROM (SELECT '[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}'::int[] AS f1) AS ss;
-
- e1 | e2
-----+----
- 1 | 6
-(1 row)
-</programlisting>
- The array output routine will include explicit dimensions in its result
- only when there are one or more lower bounds different from one.
- </para>
-
- <para>
- If the value written for an element is <literal>NULL</> (in any case
- variant), the element is taken to be NULL. The presence of any quotes
- or backslashes disables this and allows the literal string value
- <quote>NULL</> to be entered. Also, for backward compatibility with
- pre-8.2 versions of <productname>PostgreSQL</>, the <xref
- linkend="guc-array-nulls"> configuration parameter can be turned
- <literal>off</> to suppress recognition of <literal>NULL</> as a NULL.
- </para>
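-
- <para>
-  For example, in the following array literal the second element is a
-  true null, while the quoted third element is the literal string
-  <quote>NULL</>:
-
-<programlisting>
-SELECT arr[2] IS NULL AS second_is_null, arr[3] AS third
-    FROM (SELECT '{a,NULL,"NULL"}'::text[] AS arr) AS ss;
-
- second_is_null | third
-----------------+-------
- t              | NULL
-(1 row)
-</programlisting>
- </para>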
-
- <para>
- As shown previously, when writing an array value you can use double
- quotes around any individual array element. You <emphasis>must</> do so
- if the element value would otherwise confuse the array-value parser.
- For example, elements containing curly braces, commas (or the data type's
- delimiter character), double quotes, backslashes, or leading or trailing
- whitespace must be double-quoted. Empty strings and strings matching the
- word <literal>NULL</> must be quoted, too. To put a double quote or
- backslash in a quoted array element value, use escape string syntax
- and precede it with a backslash. Alternatively, you can avoid quotes and use
- backslash-escaping to protect all data characters that would otherwise
- be taken as array syntax.
- </para>
-
- <para>
- You can add whitespace before a left brace or after a right
- brace. You can also add whitespace before or after any individual item
- string. In all of these cases the whitespace will be ignored. However,
- whitespace within double-quoted elements, or surrounded on both sides by
- non-whitespace characters of an element, is not ignored.
- </para>
-
- <note>
- <para>
- Remember that what you write in an SQL command will first be interpreted
- as a string literal, and then as an array. This doubles the number of
- backslashes you need. For example, to insert a <type>text</> array
- value containing a backslash and a double quote, you'd need to write:
-<programlisting>
-INSERT ... VALUES (E'{"\\\\","\\""}');
-</programlisting>
- The escape string processor removes one level of backslashes, so that
- what arrives at the array-value parser looks like <literal>{"\\","\""}</>.
- In turn, the strings fed to the <type>text</> data type's input routine
- become <literal>\</> and <literal>"</> respectively. (If we were working
- with a data type whose input routine also treated backslashes specially,
- <type>bytea</> for example, we might need as many as eight backslashes
- in the command to get one backslash into the stored array element.)
- Dollar quoting (see <xref linkend="sql-syntax-dollar-quoting">) can be
- used to avoid the need to double backslashes.
- </para>
- </note>
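-
- <para>
-  For instance, the insertion above could be written with dollar quoting,
-  avoiding the doubled backslashes entirely:
-
-<programlisting>
-INSERT ... VALUES ($${"\\","\""}$$);
-</programlisting>
- </para>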
-
- <tip>
- <para>
- The <literal>ARRAY</> constructor syntax (see
- <xref linkend="sql-syntax-array-constructors">) is often easier to work
- with than the array-literal syntax when writing array values in SQL
- commands. In <literal>ARRAY</>, individual element values are written the
- same way they would be written when not members of an array.
- </para>
- </tip>
- </sect2>
-
-</sect1>
diff --git a/doc-xc/src/sgml/auth-delay.sgmlin b/doc-xc/src/sgml/auth-delay.sgmlin
deleted file mode 100644
index b91a7ecda7..0000000000
--- a/doc-xc/src/sgml/auth-delay.sgmlin
+++ /dev/null
@@ -1,67 +0,0 @@
-<!-- doc/src/sgml/auth-delay.sgml -->
-
-<sect1 id="auth-delay" xreflabel="auth_delay">
- <title>auth_delay</title>
-
- <indexterm zone="auth-delay">
- <primary>auth_delay</primary>
- </indexterm>
-
- <para>
- <filename>auth_delay</filename> causes the server to pause briefly before
- reporting authentication failure, to make brute-force attacks on database
- passwords more difficult. Note that it does nothing to prevent
- denial-of-service attacks, and may even exacerbate them, since processes
- that are waiting before reporting authentication failure will still consume
- connection slots.
- </para>
-
- <para>
- In order to function, this module must be loaded via
- <xref linkend="guc-shared-preload-libraries"> in <filename>postgresql.conf</>.
- </para>
-
- <sect2>
- <title>Configuration Parameters</title>
-
- <variablelist>
- <varlistentry>
- <term>
- <varname>auth_delay.milliseconds</varname> (<type>int</type>)
- </term>
- <indexterm>
- <primary><varname>auth_delay.milliseconds</> configuration parameter</primary>
- </indexterm>
- <listitem>
- <para>
- The number of milliseconds to wait before reporting an authentication
- failure. The default is 0.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
-
- <para>
- In order to set these parameters in your <filename>postgresql.conf</> file,
- you will need to add <literal>auth_delay</> to
- <xref linkend="guc-custom-variable-classes">. Typical usage might be:
- </para>
-
-<programlisting>
-# postgresql.conf
-shared_preload_libraries = 'auth_delay'
-
-custom_variable_classes = 'auth_delay'
-auth_delay.milliseconds = '500'
-</programlisting>
- </sect2>
-
- <sect2>
- <title>Author</title>
-
- <para>
- KaiGai Kohei <email>[email protected]</email>
- </para>
- </sect2>
-
-</sect1>
diff --git a/doc-xc/src/sgml/auto-explain.sgmlin b/doc-xc/src/sgml/auto-explain.sgmlin
deleted file mode 100644
index 0188053118..0000000000
--- a/doc-xc/src/sgml/auto-explain.sgmlin
+++ /dev/null
@@ -1,239 +0,0 @@
-<!-- doc/src/sgml/auto-explain.sgml -->
-
-<sect1 id="auto-explain" xreflabel="auto_explain">
- <title>auto_explain</title>
-
- <indexterm zone="auto-explain">
- <primary>auto_explain</primary>
- </indexterm>
-
-&common;
-
- <para>
- The <filename>auto_explain</filename> module provides a means for
- logging execution plans of slow statements automatically, without
- having to run <xref linkend="sql-explain">
- by hand. This is especially helpful for tracking down un-optimized queries
- in large applications.
- </para>
-
- <para>
- The module provides no SQL-accessible functions. To use it, simply
- load it into the server. You can load it into an individual session:
-
-<programlisting>
-LOAD 'auto_explain';
-</programlisting>
-
- (You must be superuser to do that.) More typical usage is to preload
- it into all sessions by including <literal>auto_explain</> in
- <xref linkend="guc-shared-preload-libraries"> in
- <filename>postgresql.conf</>. Then you can track unexpectedly slow queries
- no matter when they happen. Of course there is a price in overhead for
- that.
- </para>
-
-<!## XC>
-&xconly;
- <para>
- To log plans on Datanodes, you must preload this module in each
- Datanode. This module will log the local plan of each node. For
- example, a Coordinator's log will include the plan for the Coordinator
- only; the corresponding plans on the Datanodes will be found in each
- Datanode's log.
- </para>
-<!## end>
-<!## XL>
- <para>
- To log plans on Datanodes, you must preload this module in each
- Datanode. This module will log local plans of each node. For
- example, a Coordinator log will include the plan for Coordinator only.
- the Corresponding plan in Datanodes will be found in each Datanode's
- log.
- </para>
-<!## end>
-
-
- <sect2>
- <title>Configuration Parameters</title>
-
-&common;
- <para>
- There are several configuration parameters that control the behavior of
- <filename>auto_explain</filename>. Note that the default behavior is
- to do nothing, so you must set at least
- <varname>auto_explain.log_min_duration</varname> if you want any results.
- </para>
-
- <variablelist>
- <varlistentry>
- <term>
- <varname>auto_explain.log_min_duration</varname> (<type>integer</type>)
- </term>
- <indexterm>
- <primary><varname>auto_explain.log_min_duration</> configuration parameter</primary>
- </indexterm>
- <listitem>
- <para>
- <varname>auto_explain.log_min_duration</varname> is the minimum statement
- execution time, in milliseconds, that will cause the statement's plan to
- be logged. Setting this to zero logs all plans. Minus-one (the default)
- disables logging of plans. For example, if you set it to
- <literal>250ms</literal> then all statements that run 250ms or longer
- will be logged. Only superusers can change this setting.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <varname>auto_explain.log_analyze</varname> (<type>boolean</type>)
- </term>
- <indexterm>
- <primary><varname>auto_explain.log_analyze</> configuration parameter</primary>
- </indexterm>
- <listitem>
- <para>
- <varname>auto_explain.log_analyze</varname> causes <command>EXPLAIN ANALYZE</>
- output, rather than just <command>EXPLAIN</> output, to be printed
- when an execution plan is logged. This parameter is off by default.
- Only superusers can change this setting.
- </para>
- <note>
- <para>
- When this parameter is on, per-plan-node timing occurs for all
- statements executed, whether or not they run long enough to actually
- get logged. This can have extremely negative impact on performance.
- </para>
- </note>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <varname>auto_explain.log_verbose</varname> (<type>boolean</type>)
- </term>
- <indexterm>
- <primary><varname>auto_explain.log_verbose</> configuration parameter</primary>
- </indexterm>
- <listitem>
- <para>
- <varname>auto_explain.log_verbose</varname> causes <command>EXPLAIN VERBOSE</>
- output, rather than just <command>EXPLAIN</> output, to be printed
- when an execution plan is logged. This parameter is off by default.
- Only superusers can change this setting.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <varname>auto_explain.log_buffers</varname> (<type>boolean</type>)
- </term>
- <indexterm>
- <primary><varname>auto_explain.log_buffers</> configuration parameter</primary>
- </indexterm>
- <listitem>
- <para>
- <varname>auto_explain.log_buffers</varname> causes <command>EXPLAIN
- (ANALYZE, BUFFERS)</> output, rather than just <command>EXPLAIN</>
- output, to be printed when an execution plan is logged. This parameter is
- off by default. Only superusers can change this setting. This
- parameter has no effect unless <varname>auto_explain.log_analyze</>
- parameter is set.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <varname>auto_explain.log_format</varname> (<type>enum</type>)
- </term>
- <indexterm>
- <primary><varname>auto_explain.log_format</> configuration parameter</primary>
- </indexterm>
- <listitem>
- <para>
- <varname>auto_explain.log_format</varname> selects the
- <command>EXPLAIN</> output format to be used.
- The allowed values are <literal>text</literal>, <literal>xml</literal>,
- <literal>json</literal>, and <literal>yaml</literal>. The default is text.
- Only superusers can change this setting.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <varname>auto_explain.log_nested_statements</varname> (<type>boolean</type>)
- </term>
- <indexterm>
- <primary><varname>auto_explain.log_nested_statements</> configuration parameter</primary>
- </indexterm>
- <listitem>
- <para>
- <varname>auto_explain.log_nested_statements</varname> causes nested
- statements (statements executed inside a function) to be considered
- for logging. When it is off, only top-level query plans are logged. This
- parameter is off by default. Only superusers can change this setting.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
-
- <para>
- In order to set these parameters in your <filename>postgresql.conf</> file,
- you will need to add <literal>auto_explain</> to
- <xref linkend="guc-custom-variable-classes">. Typical usage might be:
- </para>
-
-<programlisting>
-# postgresql.conf
-shared_preload_libraries = 'auto_explain'
-
-custom_variable_classes = 'auto_explain'
-auto_explain.log_min_duration = '3s'
-</programlisting>
- </sect2>
-
- <sect2>
- <title>Example</title>
-
-<programlisting>
-postgres=# LOAD 'auto_explain';
-postgres=# SET auto_explain.log_min_duration = 0;
-postgres=# SELECT count(*)
- FROM pg_class, pg_index
- WHERE oid = indrelid AND indisunique;
-</programlisting>
-
-&common;
- <para>
- This might produce log output such as:
- </para>
-
-<screen><![CDATA[
-LOG: duration: 3.651 ms plan:
- Query Text: SELECT count(*)
- FROM pg_class, pg_index
- WHERE oid = indrelid AND indisunique;
- Aggregate (cost=16.79..16.80 rows=1 width=0) (actual time=3.626..3.627 rows=1 loops=1)
- -> Hash Join (cost=4.17..16.55 rows=92 width=0) (actual time=3.349..3.594 rows=92 loops=1)
- Hash Cond: (pg_class.oid = pg_index.indrelid)
- -> Seq Scan on pg_class (cost=0.00..9.55 rows=255 width=4) (actual time=0.016..0.140 rows=255 loops=1)
- -> Hash (cost=3.02..3.02 rows=92 width=4) (actual time=3.238..3.238 rows=92 loops=1)
- Buckets: 1024 Batches: 1 Memory Usage: 4kB
- -> Seq Scan on pg_index (cost=0.00..3.02 rows=92 width=4) (actual time=0.008..3.187 rows=92 loops=1)
- Filter: indisunique
-]]></screen>
- </sect2>
-
- <sect2>
- <title>Author</title>
-
- <para>
- Takahiro Itagaki <email>[email protected]</email>
- </para>
- </sect2>
-
-</sect1>
diff --git a/doc-xc/src/sgml/backup.sgmlin b/doc-xc/src/sgml/backup.sgmlin
deleted file mode 100644
index 30950986ee..0000000000
--- a/doc-xc/src/sgml/backup.sgmlin
+++ /dev/null
@@ -1,1487 +0,0 @@
-<!-- doc/src/sgml/backup.sgml -->
-
-<chapter id="backup">
- <title>Backup and Restore</title>
-
- <indexterm zone="backup"><primary>backup</></>
-
-&common;
- <para>
- As with everything that contains valuable data, <productname>PostgreSQL</>
- databases should be backed up regularly. While the procedure is
- essentially simple, it is important to have a clear understanding of
- the underlying techniques and assumptions.
- </para>
-
- <para>
- There are three fundamentally different approaches to backing up
- <productname>PostgreSQL</> data:
- <itemizedlist>
- <listitem><para><acronym>SQL</> dump</para></listitem>
- <listitem><para>File system level backup</para></listitem>
- <listitem><para>Continuous archiving</para></listitem>
- </itemizedlist>
- Each has its own strengths and weaknesses; each is discussed in turn
- in the following sections.
- </para>
-
- <sect1 id="backup-dump">
- <title><acronym>SQL</> Dump</title>
-
-&common;
- <para>
- The idea behind this dump method is to generate a text file with SQL
- commands that, when fed back to the server, will recreate the
- database in the same state as it was at the time of the dump.
- <productname>PostgreSQL</> provides the utility program
- <xref linkend="app-pgdump"> for this purpose. The basic usage of this
- command is:
-<synopsis>
-pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable class="parameter">outfile</replaceable>
-</synopsis>
- As you see, <application>pg_dump</> writes its result to the
- standard output. We will see below how this can be useful.
- </para>
-
- <para>
- <application>pg_dump</> is a regular <productname>PostgreSQL</>
- client application (albeit a particularly clever one). This means
- that you can perform this backup procedure from any remote host that has
- access to the database. But remember that <application>pg_dump</>
- does not operate with special permissions. In particular, it must
- have read access to all tables that you want to back up, so in
- practice you almost always have to run it as a database superuser.
- </para>
-
- <para>
- To specify which database server <application>pg_dump</> should
- contact, use the command line options <option>-h
- <replaceable>host</></> and <option>-p <replaceable>port</></>. The
- default host is the local host or whatever your
- <envar>PGHOST</envar> environment variable specifies. Similarly,
- the default port is indicated by the <envar>PGPORT</envar>
- environment variable or, failing that, by the compiled-in default.
- (Conveniently, the server will normally have the same compiled-in
- default.)
- </para>
-
- <para>
- Like any other <productname>PostgreSQL</> client application,
- <application>pg_dump</> will by default connect with the database
- user name that is equal to the current operating system user name. To override
- this, either specify the <option>-U</option> option or set the
- environment variable <envar>PGUSER</envar>. Remember that
- <application>pg_dump</> connections are subject to the normal
- client authentication mechanisms (which are described in <xref
- linkend="client-authentication">).
- </para>
-
- <para>
- An important advantage of <application>pg_dump</> over the other backup
- methods described later is that <application>pg_dump</>'s output can
- generally be re-loaded into newer versions of <productname>PostgreSQL</>,
- whereas file-level backups and continuous archiving are both extremely
- server-version-specific. <application>pg_dump</> is also the only method
- that will work when transferring a database to a different machine
- architecture, such as going from a 32-bit to a 64-bit server.
- </para>
-
- <para>
- Dumps created by <application>pg_dump</> are internally consistent,
- meaning the dump represents a snapshot of the database at the time
- <application>pg_dump</> began running. <application>pg_dump</> does not
- block other operations on the database while it is working.
- (Exceptions are those operations that need to operate with an
- exclusive lock, such as most forms of <command>ALTER TABLE</command>.)
- </para>
-
- <important>
- <para>
- If your database schema relies on OIDs (for instance, as foreign
- keys) you must instruct <application>pg_dump</> to dump the OIDs
- as well. To do this, use the <option>-o</option> command-line
- option.
- </para>
- </important>
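-
- <para>
-  For example (a sketch; <literal>mydb</> is a placeholder database name):
-
-<programlisting>
-pg_dump -o mydb &gt; mydb.sql
-</programlisting>
- </para>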
-
-<!## XC>
-&xconly;
- <important>
- <para>
- In <productname>Postgres-XC</>, <application>pg_dump</>
- and <application>pg_dumpall</> back up all the information stored
- in both Coordinators and Datanodes.
- </para>
- </important>
-<!## end>
-<!## XL>
-&xlonly;
- <important>
- <para>
- In <productname>Postgres-XL</>, <application>pg_dump</>
- and <application>pg_dumpall</> back up all the information stored
- in both Coordinators and Datanodes.
- That is, they dump the entire database just as in regular PostgreSQL
- and produce similar output. The output will additionally contain
- table distribution information.
- </para>
- </important>
-<!## end>
-
-
- <sect2 id="backup-dump-restore">
- <title>Restoring the Dump</title>
-
-&common;
- <para>
- The text files created by <application>pg_dump</> are intended to
- be read in by the <application>psql</application> program. The
- general command form to restore a dump is
-<synopsis>
-psql <replaceable class="parameter">dbname</replaceable> &lt; <replaceable class="parameter">infile</replaceable>
-</synopsis>
- where <replaceable class="parameter">infile</replaceable> is the
- file output by the <application>pg_dump</> command. The database <replaceable
- class="parameter">dbname</replaceable> will not be created by this
- command, so you must create it yourself from <literal>template0</>
- before executing <application>psql</> (e.g., with
- <literal>createdb -T template0 <replaceable
- class="parameter">dbname</></literal>). <application>psql</>
- supports options similar to <application>pg_dump</> for specifying
- the database server to connect to and the user name to use. See
- the <xref linkend="app-psql"> reference page for more information.
- </para>
-
- <para>
- Before restoring an SQL dump, all the users who own objects or were
- granted permissions on objects in the dumped database must already
- exist. If they do not, the restore will fail to recreate the
- objects with the original ownership and/or permissions.
- (Sometimes this is what you want, but usually it is not.)
- </para>
-
- <para>
- By default, the <application>psql</> script will continue to
- execute after an SQL error is encountered. You might wish to run
- <application>psql</application> with
- the <literal>ON_ERROR_STOP</> variable set to alter that
- behavior and have <application>psql</application> exit with an
- exit status of 3 if an SQL error occurs:
-<programlisting>
-psql --set ON_ERROR_STOP=on dbname &lt; infile
-</programlisting>
- Either way, you will only have a partially restored database.
- Alternatively, you can specify that the whole dump should be
- restored as a single transaction, so the restore is either fully
- completed or fully rolled back. This mode can be specified by
- passing the <option>-1</> or <option>--single-transaction</>
- command-line options to <application>psql</>. When using this
- mode, be aware that even a minor error can roll back a
- restore that has already run for many hours. However, that might
- still be preferable to manually cleaning up a complex database
- after a partially restored dump.
- </para>
-
- <para>
- The ability of <application>pg_dump</> and <application>psql</> to
- write to or read from pipes makes it possible to dump a database
- directly from one server to another, for example:
-<programlisting>
-pg_dump -h <replaceable>host1</> <replaceable>dbname</> | psql -h <replaceable>host2</> <replaceable>dbname</>
-</programlisting>
- </para>
-
- <important>
- <para>
- The dumps produced by <application>pg_dump</> are relative to
- <literal>template0</>. This means that any languages, procedures,
- etc. added via <literal>template1</> will also be dumped by
- <application>pg_dump</>. As a result, when restoring, if you are
- using a customized <literal>template1</>, you must create the
- empty database from <literal>template0</>, as in the example
- above.
- </para>
- </important>
-
- <para>
- After restoring a backup, it is wise to run <xref
- linkend="sql-analyze"> on each
- database so the query optimizer has useful statistics;
- see <xref linkend="vacuum-for-statistics">
- and <xref linkend="autovacuum"> for more information.
- For more advice on how to load large amounts of data
- into <productname>PostgreSQL</> efficiently, refer to <xref
- linkend="populate">.
- </para>
- </sect2>
-
- <sect2 id="backup-dump-all">
- <title>Using <application>pg_dumpall</></title>
-
-&common;
- <para>
- <application>pg_dump</> dumps only a single database at a time,
- and it does not dump information about roles or tablespaces
- (because those are cluster-wide rather than per-database).
- To support convenient dumping of the entire contents of a database
- cluster, the <xref linkend="app-pg-dumpall"> program is provided.
- <application>pg_dumpall</> backs up each database in a given
- cluster, and also preserves cluster-wide data such as role and
- tablespace definitions. The basic usage of this command is:
-<synopsis>
-pg_dumpall &gt; <replaceable>outfile</>
-</synopsis>
- The resulting dump can be restored with <application>psql</>:
-<synopsis>
-psql -f <replaceable class="parameter">infile</replaceable> postgres
-</synopsis>
- (Actually, you can specify any existing database name to start from,
- but if you are loading into an empty cluster then <literal>postgres</>
- should usually be used.) It is always necessary to have
- database superuser access when restoring a <application>pg_dumpall</>
- dump, as that is required to restore the role and tablespace information.
- If you use tablespaces, make sure that the tablespace paths in the
- dump are appropriate for the new installation.
- </para>
-
- <para>
- <application>pg_dumpall</> works by emitting commands to re-create
- roles, tablespaces, and empty databases, then invoking
- <application>pg_dump</> for each database. This means that while
- each database will be internally consistent, the snapshots of
- different databases might not be exactly in-sync.
- </para>
- </sect2>
-
- <sect2 id="backup-dump-large">
- <title>Handling Large Databases</title>
-
-&common;
- <para>
- Some operating systems have maximum file size limits that cause
- problems when creating large <application>pg_dump</> output files.
- Fortunately, <application>pg_dump</> can write to the standard
- output, so you can use standard Unix tools to work around this
- potential problem. There are several possible methods:
- </para>
-
- <formalpara>
- <title>Use compressed dumps.</title>
- <para>
- You can use your favorite compression program, for example
- <application>gzip</application>:
-
-<programlisting>
-pg_dump <replaceable class="parameter">dbname</replaceable> | gzip &gt; <replaceable class="parameter">filename</replaceable>.gz
-</programlisting>
-
- Reload with:
-
-<programlisting>
-gunzip -c <replaceable class="parameter">filename</replaceable>.gz | psql <replaceable class="parameter">dbname</replaceable>
-</programlisting>
-
- or:
-
-<programlisting>
-cat <replaceable class="parameter">filename</replaceable>.gz | gunzip | psql <replaceable class="parameter">dbname</replaceable>
-</programlisting>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Use <command>split</>.</title>
- <para>
- The <command>split</command> command
- allows you to split the output into smaller files that are
- acceptable in size to the underlying file system. For example, to
- make chunks of 1 megabyte:
-
-<programlisting>
-pg_dump <replaceable class="parameter">dbname</replaceable> | split -b 1m - <replaceable class="parameter">filename</replaceable>
-</programlisting>
-
- Reload with:
-
-<programlisting>
-cat <replaceable class="parameter">filename</replaceable>* | psql <replaceable class="parameter">dbname</replaceable>
-</programlisting>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Use <application>pg_dump</>'s custom dump format.</title>
- <para>
- If <productname>PostgreSQL</productname> was built on a system with the
- <application>zlib</> compression library installed, the custom dump
- format will compress data as it writes it to the output file. This will
- produce dump file sizes similar to using <command>gzip</command>, but it
- has the added advantage that tables can be restored selectively. The
- following command dumps a database using the custom dump format:
-
-<programlisting>
-pg_dump -Fc <replaceable class="parameter">dbname</replaceable> &gt; <replaceable class="parameter">filename</replaceable>
-</programlisting>
-
- A custom-format dump is not a script for <application>psql</>, but
- instead must be restored with <application>pg_restore</>, for example:
-
-<programlisting>
-pg_restore -d <replaceable class="parameter">dbname</replaceable> <replaceable class="parameter">filename</replaceable>
-</programlisting>
-
- See the <xref linkend="app-pgdump"> and <xref
- linkend="app-pgrestore"> reference pages for details.
- </para>
- </formalpara>
-
- <para>
- For very large databases, you might need to combine <command>split</>
- with one of the other two approaches.
- </para>
-
- </sect2>
- </sect1>
-
- <sect1 id="backup-file">
- <title>File System Level Backup</title>
-
-<!## XC>
-&xconly;
- <para>
- A file system level backup covers only a single Coordinator or Datanode.
- To make a file system level backup, you should back up each
- Coordinator and Datanode manually.
- </para>
-<!## end>
-<!## XL>
-&xlonly;
- <para>
- A file system level backup covers only a single Coordinator or Datanode.
- To make a file system level backup, you should back up each
- Coordinator and Datanode manually.
- </para>
-<!## end>
-
-&common;
- <para>
- An alternative backup strategy is to directly copy the files that
- <productname>PostgreSQL</> uses to store the data in the database;
- <xref linkend="creating-cluster"> explains where these files
- are located. You can use whatever method you prefer
- for doing file system backups; for example:
-
-<programlisting>
-tar -cf backup.tar /usr/local/pgsql/data
-</programlisting>
- </para>
-
- <para>
- There are two restrictions, however, which make this method
- impractical, or at least inferior to the <application>pg_dump</>
- method:
-
- <orderedlist>
- <listitem>
- <para>
- The database server <emphasis>must</> be shut down in order to
- get a usable backup. Half-way measures such as disallowing all
- connections will <emphasis>not</emphasis> work
- (in part because <command>tar</command> and similar tools do not take
- an atomic snapshot of the state of the file system,
- but also because of internal buffering within the server).
- Information about stopping the server can be found in
- <xref linkend="server-shutdown">. Needless to say, you
- also need to shut down the server before restoring the data.
- </para>
- </listitem>
-
- <listitem>
- <para>
- If you have dug into the details of the file system layout of the
- database, you might be tempted to try to back up or restore only certain
- individual tables or databases from their respective files or
- directories. This will <emphasis>not</> work because the
- information contained in these files is not usable without
- the commit log files,
- <filename>pg_clog/*</filename>, which contain the commit status of
- all transactions. A table file is only usable with this
- information. Of course it is also impossible to restore only a
- table and the associated <filename>pg_clog</filename> data
- because that would render all other tables in the database
- cluster useless. So file system backups only work for complete
- backup and restoration of an entire database cluster.
- </para>
- </listitem>
- </orderedlist>
- </para>
-
- <para>
- An alternative file-system backup approach is to make a
- <quote>consistent snapshot</quote> of the data directory, if the
- file system supports that functionality (and you are willing to
- trust that it is implemented correctly). The typical procedure is
- to make a <quote>frozen snapshot</> of the volume containing the
- database, then copy the whole data directory (not just parts, see
- above) from the snapshot to a backup device, then release the frozen
- snapshot. This will work even while the database server is running.
- However, a backup created in this way saves
- the database files in a state as if the database server was not
- properly shut down; therefore, when you start the database server
- on the backed-up data, it will think the previous server instance
- crashed and will replay the WAL log. This is not a problem; just
- be aware of it (and be sure to include the WAL files in your backup).
- You can perform a <command>CHECKPOINT</command> before taking the
- snapshot to reduce recovery time.
- </para>
-
- <para>
- If your database is spread across multiple file systems, there might not
- be any way to obtain exactly-simultaneous frozen snapshots of all
- the volumes. For example, if your data files and WAL log are on different
- disks, or if tablespaces are on different file systems, it might
- not be possible to use snapshot backup because the snapshots
- <emphasis>must</> be simultaneous.
- Read your file system documentation very carefully before trusting
- the consistent-snapshot technique in such situations.
- </para>
-
- <para>
- If simultaneous snapshots are not possible, one option is to shut down
- the database server long enough to establish all the frozen snapshots.
- Another option is to perform a continuous archiving base backup (<xref
- linkend="backup-base-backup">) because such backups are immune to file
- system changes during the backup. This requires enabling continuous
- archiving just during the backup process; restore is done using
- continuous archive recovery (<xref linkend="backup-pitr-recovery">).
- </para>
-
- <para>
- Another option is to use <application>rsync</> to perform a file
- system backup. This is done by first running <application>rsync</>
- while the database server is running, then shutting down the database
- server just long enough to do a second <application>rsync</>. The
- second <application>rsync</> will be much quicker than the first,
- because it has relatively little data to transfer, and the end result
- will be consistent because the server was down. This method
- allows a file system backup to be performed with minimal downtime.
- </para>
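-
- <para>
-  A sketch of that procedure (paths and commands are placeholders for
-  your own layout):
-
-<programlisting>
-rsync -a /usr/local/pgsql/data/ /backup/pgsql/data/          # server running
-pg_ctl stop
-rsync -a --delete /usr/local/pgsql/data/ /backup/pgsql/data/ # server stopped
-pg_ctl start
-</programlisting>
- </para>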
-
- <para>
- Note that a file system backup will typically be larger
- than an SQL dump. (<application>pg_dump</application> does not need to dump
- the contents of indexes for example, just the commands to recreate
- them.) However, taking a file system backup might be faster.
- </para>
- </sect1>
-
- <sect1 id="continuous-archiving">
- <title>Continuous Archiving and Point-in-Time Recovery (PITR)</title>
-
- <indexterm zone="backup">
- <primary>continuous archiving</primary>
- </indexterm>
-
- <indexterm zone="backup">
- <primary>point-in-time recovery</primary>
- </indexterm>
-
- <indexterm zone="backup">
- <primary>PITR</primary>
- </indexterm>
-
-<!## XC>
-&xconly;
- <para>
- This section describes PITR for <productname>PostgreSQL</>.
- Because the Coordinators and Datanodes of <productname>Postgres-XC</> are
- essentially <productname>PostgreSQL</productname> servers, you can do
- PITR for each Coordinator and Datanode manually.
- </para>
-<!## end>
-
- <para>
- At all times, <productname>PostgreSQL</> maintains a
- <firstterm>write ahead log</> (WAL) in the <filename>pg_xlog/</>
- subdirectory of the cluster's data directory. The log records
- every change made to the database's data files. This log exists
- primarily for crash-safety purposes: if the system crashes, the
- database can be restored to consistency by <quote>replaying</> the
- log entries made since the last checkpoint. However, the existence
- of the log makes it possible to use a third strategy for backing up
- databases: we can combine a file-system-level backup with backup of
- the WAL files. If recovery is needed, we restore the file system backup and
- then replay from the backed-up WAL files to bring the system to a
- current state. This approach is more complex to administer than
- either of the previous approaches, but it has some significant
- benefits:
- <itemizedlist>
- <listitem>
- <para>
- We do not need a perfectly consistent file system backup as the starting point.
- Any internal inconsistency in the backup will be corrected by log
- replay (this is not significantly different from what happens during
- crash recovery). So we do not need a file system snapshot capability,
- just <application>tar</> or a similar archiving tool.
- </para>
- </listitem>
- <listitem>
- <para>
- Since we can combine an indefinitely long sequence of WAL files
- for replay, continuous backup can be achieved simply by continuing to archive
- the WAL files. This is particularly valuable for large databases, where
- it might not be convenient to take a full backup frequently.
- </para>
- </listitem>
- <listitem>
- <para>
- It is not necessary to replay the WAL entries all the
- way to the end. We could stop the replay at any point and have a
- consistent snapshot of the database as it was at that time. Thus,
- this technique supports <firstterm>point-in-time recovery</>: it is
- possible to restore the database to its state at any time since your base
- backup was taken.
- </para>
- </listitem>
- <listitem>
- <para>
- If we continuously feed the series of WAL files to another
- machine that has been loaded with the same base backup file, we
- have a <firstterm>warm standby</> system: at any point we can bring up
- the second machine and it will have a nearly-current copy of the
- database.
- </para>
- </listitem>
- </itemizedlist>
- </para>
-
- <note>
- <para>
- <application>pg_dump</application> and
- <application>pg_dumpall</application> do not produce file-system-level
- backups and cannot be used as part of a continuous-archiving solution.
- Such dumps are <emphasis>logical</> and do not contain enough
- information to be used by WAL replay.
- </para>
- </note>
-
- <para>
- As with the plain file-system-backup technique, this method can only
- support restoration of an entire database cluster, not a subset.
- Also, it requires a lot of archival storage: the base backup might be bulky,
- and a busy system will generate many megabytes of WAL traffic that
- have to be archived. Still, it is the preferred backup technique in
- many situations where high reliability is needed.
- </para>
-
- <para>
- To recover successfully using continuous archiving (also called
- <quote>online backup</> by many database vendors), you need a continuous
- sequence of archived WAL files that extends back at least as far as the
- start time of your backup. So to get started, you should set up and test
- your procedure for archiving WAL files <emphasis>before</> you take your
- first base backup. Accordingly, we first discuss the mechanics of
- archiving WAL files.
- </para>
-
- <sect2 id="backup-archiving-wal">
- <title>Setting Up WAL Archiving</title>
-
-<!## XC>
-&xconly;
- <para>
- This section describes WAL archiving for <productname>PostgreSQL</>.
- Because the Coordinators and Datanodes of <productname>Postgres-XC</> are
- essentially <productname>PostgreSQL</productname> servers, you can
- set up WAL archiving for each Coordinator and Datanode manually.
- </para>
-<!## end>
-
- <para>
- In an abstract sense, a running <productname>PostgreSQL</> system
- produces an indefinitely long sequence of WAL records. The system
- physically divides this sequence into WAL <firstterm>segment
- files</>, which are normally 16MB apiece (although the segment size
- can be altered when building <productname>PostgreSQL</>). The segment
- files are given numeric names that reflect their position in the
- abstract WAL sequence. When not using WAL archiving, the system
- normally creates just a few segment files and then
- <quote>recycles</> them by renaming no-longer-needed segment files
- to higher segment numbers. It's assumed that segment files whose
- contents precede the checkpoint-before-last are no longer of
- interest and can be recycled.
- </para>
-
- <para>
- When archiving WAL data, we need to capture the contents of each segment
- file once it is filled, and save that data somewhere before the segment
- file is recycled for reuse. Depending on the application and the
- available hardware, there could be many different ways of <quote>saving
- the data somewhere</>: we could copy the segment files to an NFS-mounted
- directory on another machine, write them onto a tape drive (ensuring that
- you have a way of identifying the original name of each file), or batch
- them together and burn them onto CDs, or something else entirely. To
- provide the database administrator with flexibility,
- <productname>PostgreSQL</> tries not to make any assumptions about how
- the archiving will be done. Instead, <productname>PostgreSQL</> lets
- the administrator specify a shell command to be executed to copy a
- completed segment file to wherever it needs to go. The command could be
- as simple as a <literal>cp</>, or it could invoke a complex shell
- script &mdash; it's all up to you.
- </para>
-
- <para>
- To enable WAL archiving, set the <xref linkend="guc-wal-level">
- configuration parameter to <literal>archive</> (or <literal>hot_standby</>),
- <xref linkend="guc-archive-mode"> to <literal>on</>,
- and specify the shell command to use in the <xref
- linkend="guc-archive-command"> configuration parameter. In practice
- these settings will always be placed in the
- <filename>postgresql.conf</filename> file.
- In <varname>archive_command</>,
- <literal>%p</> is replaced by the path name of the file to
- archive, while <literal>%f</> is replaced by only the file name.
- (The path name is relative to the current working directory,
- i.e., the cluster's data directory.)
- Use <literal>%%</> if you need to embed an actual <literal>%</>
- character in the command. The simplest useful command is something
- like:
-<programlisting>
-archive_command = 'cp -i %p /mnt/server/archivedir/%f &lt;/dev/null' # Unix
-archive_command = 'copy "%p" "C:\\server\\archivedir\\%f"' # Windows
-</programlisting>
- which will copy archivable WAL segments to the directory
- <filename>/mnt/server/archivedir</>. (This is an example, not a
- recommendation, and might not work on all platforms.) After the
- <literal>%p</> and <literal>%f</> parameters have been replaced,
- the actual command executed might look like this:
-<programlisting>
-cp -i pg_xlog/00000001000000A900000065 /mnt/server/archivedir/00000001000000A900000065 &lt;/dev/null
-</programlisting>
- A similar command will be generated for each new file to be archived.
- </para>
-
- <para>
- The archive command will be executed under the ownership of the same
- user that the <productname>PostgreSQL</> server is running as. Since
- the series of WAL files being archived contains effectively everything
- in your database, you will want to be sure that the archived data is
- protected from prying eyes; for example, archive into a directory that
- does not have group or world read access.
- </para>
-
- <para>
- It is important that the archive command return zero exit status if and
- only if it succeeds. Upon getting a zero result,
- <productname>PostgreSQL</> will assume that the file has been
- successfully archived, and will remove or recycle it. However, a nonzero
- status tells <productname>PostgreSQL</> that the file was not archived;
- it will try again periodically until it succeeds.
- </para>
-
- <para>
- The archive command should generally be designed to refuse to overwrite
- any pre-existing archive file. This is an important safety feature to
- preserve the integrity of your archive in case of administrator error
- (such as sending the output of two different servers to the same archive
- directory).
- It is advisable to test your proposed archive command to ensure that it
- indeed does not overwrite an existing file, <emphasis>and that it returns
- nonzero status in this case</>. On many Unix platforms, <command>cp
- -i</> causes copy to prompt before overwriting a file, and
- <literal>&lt; /dev/null</> causes the prompt (and overwriting) to
- fail. If your platform does not support this behavior, you should
- add a command to test for the existence of the archive file. For
- example, something like:
-<programlisting>
-archive_command = 'test ! -f /mnt/server/archivedir/%f &amp;&amp; cp %p /mnt/server/archivedir/%f'
-</programlisting>
- works correctly on most Unix variants.
- </para>
-
- <para>
- While designing your archiving setup, consider what will happen if
- the archive command fails repeatedly because some aspect requires
- operator intervention or the archive runs out of space. For example, this
- could occur if you write to tape without an autochanger; when the tape
- fills, nothing further can be archived until the tape is swapped.
- You should ensure that any error condition or request to a human operator
- is reported appropriately so that the situation can be
- resolved reasonably quickly. The <filename>pg_xlog/</> directory will
- continue to fill with WAL segment files until the situation is resolved.
- (If the file system containing <filename>pg_xlog/</> fills up,
- <productname>PostgreSQL</> will do a PANIC shutdown. No committed
- transactions will be lost, but the database will remain offline until
- you free some space.)
- </para>
-
- <para>
- The speed of the archiving command is unimportant as long as it can keep up
- with the average rate at which your server generates WAL data. Normal
- operation continues even if the archiving process falls a little behind.
- If archiving falls significantly behind, this will increase the amount of
- data that would be lost in the event of a disaster. It will also mean that
- the <filename>pg_xlog/</> directory will contain large numbers of
- not-yet-archived segment files, which could eventually exceed available
- disk space. You are advised to monitor the archiving process to ensure that
- it is working as you intend.
- </para>
-
- <para>
- In writing your archive command, you should assume that the file names to
- be archived can be up to 64 characters long and can contain any
- combination of ASCII letters, digits, and dots. It is not necessary to
- preserve the original relative path (<literal>%p</>) but it is necessary to
- preserve the file name (<literal>%f</>).
- </para>
-
- <para>
- Note that although WAL archiving will allow you to restore any
- modifications made to the data in your <productname>PostgreSQL</> database,
- it will not restore changes made to configuration files (that is,
- <filename>postgresql.conf</>, <filename>pg_hba.conf</> and
- <filename>pg_ident.conf</>), since those are edited manually rather
- than through SQL operations.
- You might wish to keep the configuration files in a location that will
- be backed up by your regular file system backup procedures. See
- <xref linkend="runtime-config-file-locations"> for how to relocate the
- configuration files.
- </para>
-
- <para>
- The archive command is only invoked on completed WAL segments. Hence,
- if your server generates only little WAL traffic (or has slack periods
- where it does so), there could be a long delay between the completion
- of a transaction and its safe recording in archive storage. To put
- a limit on how old unarchived data can be, you can set
- <xref linkend="guc-archive-timeout"> to force the server to switch
- to a new WAL segment file at least that often. Note that archived
- files that are archived early due to a forced switch are still the same
- length as completely full files. It is therefore unwise to set a very
- short <varname>archive_timeout</> &mdash; it will bloat your archive
- storage. <varname>archive_timeout</> settings of a minute or so are
- usually reasonable.
- </para>
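-
- <para>
-  For example, to force a segment switch at least once a minute:
-
-<programlisting>
-archive_timeout = 60        # in seconds
-</programlisting>
- </para>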
-
- <para>
- Also, you can force a segment switch manually with
- <function>pg_switch_xlog</> if you want to ensure that a
- just-finished transaction is archived as soon as possible. Other utility
- functions related to WAL management are listed in <xref
- linkend="functions-admin-backup-table">.
- </para>
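-
- <para>
-  For example:
-
-<programlisting>
-SELECT pg_switch_xlog();
-</programlisting>
- </para>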
-
- <para>
- When <varname>wal_level</> is <literal>minimal</> some SQL commands
- are optimized to avoid WAL logging, as described in <xref
- linkend="populate-pitr">. If archiving or streaming replication were
- turned on during execution of one of these statements, WAL would not
- contain enough information for archive recovery. (Crash recovery is
- unaffected.) For this reason, <varname>wal_level</> can only be changed at
- server start. However, <varname>archive_command</> can be changed with a
- configuration file reload. If you wish to temporarily stop archiving,
- one way to do it is to set <varname>archive_command</> to the empty
- string (<literal>''</>).
- This will cause WAL files to accumulate in <filename>pg_xlog/</> until a
- working <varname>archive_command</> is re-established.
- </para>
- </sect2>
-
- <sect2 id="backup-base-backup">
- <title>Making a Base Backup</title>
-
-<!## XC>
-&xconly;
- <para>
- This section describes how to make a base backup of a single
- Datanode or Coordinator. Please note that you should take base
- backups of all the Datanodes and Coordinators, but they need not
- be taken at exactly the same time: you may back up each Datanode
- or Coordinator one after another, or back up some of them
- simultaneously.
- </para>
-<!## end>
-
- <para>
- The procedure for making a base backup is relatively simple:
- <orderedlist>
- <listitem>
- <para>
- Ensure that WAL archiving is enabled and working.
- </para>
- </listitem>
- <listitem>
- <para>
- Connect to the database as a superuser and issue the command:
-<programlisting>
-SELECT pg_start_backup('label');
-</programlisting>
- where <literal>label</> is any string you want to use to uniquely
- identify this backup operation. (One good practice is to use the
- full path where you intend to put the backup dump file.)
- <function>pg_start_backup</> creates a <firstterm>backup label</> file,
- called <filename>backup_label</>, in the cluster directory with
- information about your backup, including the start time and label
- string.
- </para>
-
- <para>
- It does not matter which database within the cluster you connect to
- when issuing this command. You can ignore the result returned by the function;
- but if it reports an error, deal with that before proceeding.
- </para>
-
- <para>
- By default, <function>pg_start_backup</> can take a long time to finish.
- This is because it performs a checkpoint, and the I/O
- required for the checkpoint will be spread out over a significant
- period of time, by default half your inter-checkpoint interval
- (see the configuration parameter
- <xref linkend="guc-checkpoint-completion-target">). This is
- usually what you want, because it minimizes the impact on query
- processing. If you want to start the backup as soon as
- possible, use:
-<programlisting>
-SELECT pg_start_backup('label', true);
-</programlisting>
- This forces the checkpoint to be done as quickly as possible.
- </para>
- </listitem>
- <listitem>
- <para>
- Perform the backup, using any convenient file-system-backup tool
- such as <application>tar</> or <application>cpio</> (not
- <application>pg_dump</application> or
- <application>pg_dumpall</application>). It is neither
- necessary nor desirable to stop normal operation of the database
- while you do this.
- </para>
- </listitem>
- <listitem>
- <para>
- Again connect to the database as a superuser, and issue the command:
-<programlisting>
-SELECT pg_stop_backup();
-</programlisting>
- This terminates the backup mode and performs an automatic switch to
- the next WAL segment. The reason for the switch is to arrange for
- the last WAL segment file written during the backup interval to be
- ready to archive.
- </para>
- </listitem>
- <listitem>
- <para>
- Once the WAL segment files active during the backup are archived, you are
- done. The file identified by <function>pg_stop_backup</>'s result is
- the last segment that is required to form a complete set of backup files.
- If <varname>archive_mode</> is enabled,
- <function>pg_stop_backup</> does not return until the last segment has
- been archived.
- Archiving of these files happens automatically since you have
- already configured <varname>archive_command</>. In most cases this
- happens quickly, but you are advised to monitor your archive
- system to ensure there are no delays.
- If the archive process has fallen behind
- because of failures of the archive command, it will keep retrying
- until the archive succeeds and the backup is complete.
- If you wish to place a time limit on the execution of
- <function>pg_stop_backup</>, set an appropriate
- <varname>statement_timeout</varname> value.
- </para>
- </listitem>
- </orderedlist>
- </para>
-
- <para>
- You can also use the <xref linkend="app-pgbasebackup"> tool to take
- the backup, instead of manually copying the files. This tool performs
- the equivalent of the <function>pg_start_backup()</>, copy, and
- <function>pg_stop_backup()</> steps automatically, and transfers the
- backup over a regular <productname>PostgreSQL</productname> connection
- using the replication protocol, instead of requiring file system level
- access. <command>pg_basebackup</command> does not interfere with file system level backups
- taken using <function>pg_start_backup()</>/<function>pg_stop_backup()</>.
- </para>
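-
- <para>
- For example (the directory and options are illustrative):
-<programlisting>
-pg_basebackup -D /usr/local/pgsql/backup -Ft -z -P
-</programlisting>
- takes a compressed, tar-format base backup into the given directory,
- reporting progress as it runs.
- </para>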
-
- <para>
- Some file system backup tools emit warnings or errors
- if the files they are trying to copy change while the copy proceeds.
- When taking a base backup of an active database, this situation is normal
- and not an error. However, you need to ensure that you can distinguish
- complaints of this sort from real errors. For example, some versions
- of <application>rsync</> return a separate exit code for
- <quote>vanished source files</>, and you can write a driver script to
- accept this exit code as a non-error case. Also, some versions of
- GNU <application>tar</> return an error code indistinguishable from
- a fatal error if a file was truncated while <application>tar</> was
- copying it. Fortunately, GNU <application>tar</> versions 1.16 and
- later exit with 1 if a file was changed during the backup,
- and 2 for other errors.
- </para>
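-
- <para>
- As a minimal sketch (paths are illustrative), a driver script that
- accepts <application>rsync</>'s <quote>vanished source files</> exit
- code as a non-error case might look like:
-<programlisting>
-#!/bin/sh
-rsync -a /usr/local/pgsql/data/ /mnt/server/backup/
-status=$?
-# rsync exits with 24 when source files vanished during the transfer,
-# which is expected while the database is active
-if [ "$status" -eq 24 ]; then
-    status=0
-fi
-exit $status
-</programlisting>
- </para>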
-
- <para>
- It is not necessary to be concerned about the amount of time elapsed
- between <function>pg_start_backup</> and the start of the actual backup,
- nor between the end of the backup and <function>pg_stop_backup</>; a
- few minutes' delay won't hurt anything. (However, if you normally run the
- server with <varname>full_page_writes</> disabled, you might notice a drop
- in performance between <function>pg_start_backup</> and
- <function>pg_stop_backup</>, since <varname>full_page_writes</> is
- effectively forced on during backup mode.) You must ensure that these
- steps are carried out in sequence, without any possible
- overlap, or you will invalidate the backup.
- </para>
-
- <para>
- Be certain that your backup dump includes all of the files under
- the database cluster directory (e.g., <filename>/usr/local/pgsql/data</>).
- If you are using tablespaces that do not reside underneath this directory,
- be careful to include them as well (and be sure that your backup dump
- archives symbolic links as links, otherwise the restore will corrupt
- your tablespaces).
- </para>
-
- <para>
- You can, however, omit from the backup dump the files within the
- cluster's <filename>pg_xlog/</> subdirectory. This
- slight adjustment is worthwhile because it reduces the risk
- of mistakes when restoring. This is easy to arrange if
- <filename>pg_xlog/</> is a symbolic link pointing to someplace outside
- the cluster directory, which is a common setup anyway for performance
- reasons.
- </para>
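-
- <para>
- For example (paths are illustrative), with GNU <application>tar</>:
-<programlisting>
-tar -cf backup.tar --exclude=pg_xlog -C /usr/local/pgsql data
-</programlisting>
- </para>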
-
- <para>
- To make use of the backup, you will need to keep all the WAL
- segment files generated during and after the file system backup.
- To aid you in doing this, the <function>pg_stop_backup</> function
- creates a <firstterm>backup history file</> that is immediately
- stored into the WAL archive area. This file is named after the first
- WAL segment file that you need for the file system backup.
- For example, if the starting WAL file is
- <literal>0000000100001234000055CD</> the backup history file will be
- named something like
- <literal>0000000100001234000055CD.007C9330.backup</>. (The second
- part of the file name stands for an exact position within the WAL
- file, and can ordinarily be ignored.) Once you have safely archived
- the file system backup and the WAL segment files used during the
- backup (as specified in the backup history file), all archived WAL
- segments with names numerically less are no longer needed to recover
- the file system backup and can be deleted. However, you should
- consider keeping several backup sets to be absolutely certain that
- you can recover your data.
- </para>
-
- <para>
- The backup history file is just a small text file. It contains the
- label string you gave to <function>pg_start_backup</>, as well as
- the starting and ending times and WAL segments of the backup.
- If you used the label to identify the associated dump file,
- then the archived history file is enough to tell you which dump file to
- restore.
- </para>
-
- <para>
- Since you have to keep around all the archived WAL files back to your
- last base backup, the interval between base backups should usually be
- chosen based on how much storage you want to expend on archived WAL
- files. You should also consider how long you are prepared to spend
- recovering, if recovery should be necessary &mdash; the system will have to
- replay all those WAL segments, and that could take a while if it has
- been a long time since the last base backup.
- </para>
-
- <para>
- It's also worth noting that the <function>pg_start_backup</> function
- makes a file named <filename>backup_label</> in the database cluster
- directory, which is removed by <function>pg_stop_backup</>.
- This file will of course be archived as a part of your backup dump file.
- The backup label file includes the label string you gave to
- <function>pg_start_backup</>, as well as the time at which
- <function>pg_start_backup</> was run, and the name of the starting WAL
- file. In case of confusion it is
- therefore possible to look inside a backup dump file and determine
- exactly which backup session the dump file came from.
- </para>
-
- <para>
- It is also possible to make a backup dump while the server is
- stopped. In this case, you obviously cannot use
- <function>pg_start_backup</> or <function>pg_stop_backup</>, and
- you will therefore be left to your own devices to keep track of which
- backup dump is which and how far back the associated WAL files go.
- It is generally better to follow the continuous archiving procedure above.
- </para>
- </sect2>
-
- <sect2 id="backup-pitr-recovery">
- <title>Recovering Using a Continuous Archive Backup</title>
-
-<!## XC>
-&xconly;
- <para>
- This section describes recovering with a continuous archive backup
- for <productname>PostgreSQL</>. Because the Coordinators and Datanodes
- of <productname>Postgres-XC</> are
- essentially <productname>PostgreSQL</productname> servers, you can
- perform this recovery for each Coordinator and Datanode manually.
- </para>
-<!## end>
-
- <para>
- Okay, the worst has happened and you need to recover from your backup.
- Here is the procedure:
- <orderedlist>
- <listitem>
- <para>
- Stop the server, if it's running.
- </para>
- </listitem>
- <listitem>
- <para>
- If you have the space to do so,
- copy the whole cluster data directory and any tablespaces to a temporary
- location in case you need them later. Note that this precaution will
- require that you have enough free space on your system to hold two
- copies of your existing database. If you do not have enough space,
- you should at least save the contents of the cluster's <filename>pg_xlog</>
- subdirectory, as it might contain logs which
- were not archived before the system went down.
- </para>
- </listitem>
- <listitem>
- <para>
- Remove all existing files and subdirectories under the cluster data
- directory and under the root directories of any tablespaces you are using.
- </para>
- </listitem>
- <listitem>
- <para>
- Restore the database files from your file system backup. Be sure that they
- are restored with the right ownership (the database system user, not
- <literal>root</>!) and with the right permissions. If you are using
- tablespaces,
- you should verify that the symbolic links in <filename>pg_tblspc/</>
- were correctly restored.
- </para>
- </listitem>
- <listitem>
- <para>
- Remove any files present in <filename>pg_xlog/</>; these came from the
- file system backup and are therefore probably obsolete rather than current.
- If you didn't archive <filename>pg_xlog/</> at all, then recreate
- it with proper permissions,
- being careful to ensure that you re-establish it as a symbolic link
- if you had it set up that way before.
- </para>
- </listitem>
- <listitem>
- <para>
- If you have unarchived WAL segment files that you saved in step 2,
- copy them into <filename>pg_xlog/</>. (It is best to copy them,
- not move them, so you still have the unmodified files if a
- problem occurs and you have to start over.)
- </para>
- </listitem>
- <listitem>
- <para>
- Create a recovery command file <filename>recovery.conf</> in the cluster
- data directory (see <xref linkend="recovery-config">). You might
- also want to temporarily modify <filename>pg_hba.conf</> to prevent
- ordinary users from connecting until you are sure the recovery was successful.
- </para>
- </listitem>
- <listitem>
- <para>
- Start the server. The server will go into recovery mode and
- proceed to read through the archived WAL files it needs. Should the
- recovery be terminated because of an external error, the server can
- simply be restarted and it will continue recovery. Upon completion
- of the recovery process, the server will rename
- <filename>recovery.conf</> to <filename>recovery.done</> (to prevent
- accidentally re-entering recovery mode later) and then
- commence normal database operations.
- </para>
- </listitem>
- <listitem>
- <para>
- Inspect the contents of the database to ensure you have recovered to
- the desired state. If not, return to step 1. If all is well,
- allow your users to connect by restoring <filename>pg_hba.conf</> to normal.
- </para>
- </listitem>
- </orderedlist>
- </para>
-
- <para>
- The key part of all this is to set up a recovery configuration file that
- describes how you want to recover and how far the recovery should
- run. You can use <filename>recovery.conf.sample</> (normally
- located in the installation's <filename>share/</> directory) as a
- prototype. The one thing that you absolutely must specify in
- <filename>recovery.conf</> is the <varname>restore_command</>,
- which tells <productname>PostgreSQL</> how to retrieve archived
- WAL file segments. Like the <varname>archive_command</>, this is
- a shell command string. It can contain <literal>%f</>, which is
- replaced by the name of the desired log file, and <literal>%p</>,
- which is replaced by the path name to copy the log file to.
- (The path name is relative to the current working directory,
- i.e., the cluster's data directory.)
- Write <literal>%%</> if you need to embed an actual <literal>%</>
- character in the command. The simplest useful command is
- something like:
-<programlisting>
-restore_command = 'cp /mnt/server/archivedir/%f %p'
-</programlisting>
- which will copy previously archived WAL segments from the directory
- <filename>/mnt/server/archivedir</>. Of course, you can use something
- much more complicated, perhaps even a shell script that requests the
- operator to mount an appropriate tape.
- </para>
-
- <para>
- It is important that the command return nonzero exit status on failure.
- The command <emphasis>will</> be called requesting files that are not present
- in the archive; it must return nonzero when so asked. This is not an
- error condition. Not all of the requested files will be WAL segment
- files; you should also expect requests for files with a suffix of
- <literal>.backup</> or <literal>.history</>. Also be aware that
- the base name of the <literal>%p</> path will be different from
- <literal>%f</>; do not expect them to be interchangeable.
- </para>
-
- <para>
- WAL segments that cannot be found in the archive will be sought in
- <filename>pg_xlog/</>; this allows use of recent un-archived segments.
- However, segments that are available from the archive will be used in
- preference to files in <filename>pg_xlog/</>. The system will not
- overwrite the existing contents of <filename>pg_xlog/</> when retrieving
- archived files.
- </para>
-
- <para>
- Normally, recovery will proceed through all available WAL segments,
- thereby restoring the database to the current point in time (or as
- close as possible given the available WAL segments). Therefore, a normal
- recovery will end with a <quote>file not found</> message, the exact text
- of the error message depending upon your choice of
- <varname>restore_command</>. You may also see an error message
- at the start of recovery for a file named something like
- <filename>00000001.history</>. This is also normal and does not
- indicate a problem in simple recovery situations; see
- <xref linkend="backup-timelines"> for discussion.
- </para>
-
- <para>
- If you want to recover to some previous point in time (say, right before
- the junior DBA dropped your main transaction table), just specify the
- required stopping point in <filename>recovery.conf</>. You can specify
- the stop point, known as the <quote>recovery target</>, either by
- date/time, named restore point or by completion of a specific transaction
- ID. As of this writing only the date/time and named restore point options
- are very usable, since there are no tools to help you identify with any
- accuracy which transaction ID to use.
- </para>
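-
- <para>
- For example (the timestamp is illustrative), a
- <filename>recovery.conf</> that stops replay just before the mistake
- might contain:
-<programlisting>
-restore_command = 'cp /mnt/server/archivedir/%f %p'
-recovery_target_time = '2015-07-28 17:14:00'
-</programlisting>
- </para>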
-
- <note>
- <para>
- The stop point must be after the ending time of the base backup, i.e.,
- the end time of <function>pg_stop_backup</>. You cannot use a base backup
- to recover to a time when that backup was in progress. (To
- recover to such a time, you must go back to your previous base backup
- and roll forward from there.)
- </para>
- </note>
-
- <para>
- If recovery finds corrupted WAL data, recovery will
- halt at that point and the server will not start. In such a case the
- recovery process could be re-run from the beginning, specifying a
- <quote>recovery target</> before the point of corruption so that recovery
- can complete normally.
- If recovery fails for an external reason, such as a system crash or
- if the WAL archive has become inaccessible, then the recovery can simply
- be restarted and it will restart almost from where it failed.
- Recovery restart works much like checkpointing in normal operation:
- the server periodically forces all its state to disk, and then updates
- the <filename>pg_control</> file to indicate that the already-processed
- WAL data need not be scanned again.
- </para>
-
- </sect2>
-
- <sect2 id="backup-timelines">
- <title>Timelines</title>
-
- <indexterm zone="backup">
- <primary>timelines</primary>
- </indexterm>
-
-<!## XC>
-&xconly;
- <para>
- Because the Coordinators and Datanodes of <productname>Postgres-XC</> are
- essentially <productname>PostgreSQL</productname> servers, you can
- apply timelines to each Coordinator and Datanode manually.
- </para>
-<!## end>
-
- <para>
- The ability to restore the database to a previous point in time creates
- some complexities that are akin to science-fiction stories about time
- travel and parallel universes. For example, in the original history of the database,
- suppose you dropped a critical table at 5:15PM on Tuesday evening, but
- didn't realize your mistake until Wednesday noon.
- Unfazed, you get out your backup, restore to the point-in-time 5:14PM
- Tuesday evening, and are up and running. In <emphasis>this</> history of
- the database universe, you never dropped the table. But suppose
- you later realize this wasn't such a great idea, and would like
- to return to sometime Wednesday morning in the original history.
- You won't be able
- to if, while your database was up-and-running, it overwrote some of the
- WAL segment files that led up to the time you now wish you
- could get back to. Thus, to avoid this, you need to distinguish the series of
- WAL records generated after you've done a point-in-time recovery from
- those that were generated in the original database history.
- </para>
-
- <para>
- To deal with this problem, <productname>PostgreSQL</> has a notion
- of <firstterm>timelines</>. Whenever an archive recovery completes,
- a new timeline is created to identify the series of WAL records
- generated after that recovery. The timeline
- ID number is part of WAL segment file names so a new timeline does
- not overwrite the WAL data generated by previous timelines. It is
- in fact possible to archive many different timelines. While that might
- seem like a useless feature, it's often a lifesaver. Consider the
- situation where you aren't quite sure what point-in-time to recover to,
- and so have to do several point-in-time recoveries by trial and error
- until you find the best place to branch off from the old history. Without
- timelines this process would soon generate an unmanageable mess. With
- timelines, you can recover to <emphasis>any</> prior state, including
- states in timeline branches that you abandoned earlier.
- </para>
-
- <para>
- Every time a new timeline is created, <productname>PostgreSQL</> creates
- a <quote>timeline history</> file that shows which timeline it branched
- off from and when. These history files are necessary to allow the system
- to pick the right WAL segment files when recovering from an archive that
- contains multiple timelines. Therefore, they are archived into the WAL
- archive area just like WAL segment files. The history files are just
- small text files, so it's cheap and appropriate to keep them around
- indefinitely (unlike the segment files which are large). You can, if
- you like, add comments to a history file to record your own notes about
- how and why this particular timeline was created. Such comments will be
- especially valuable when you have a thicket of different timelines as
- a result of experimentation.
- </para>
-
- <para>
- The default behavior of recovery is to recover along the same timeline
- that was current when the base backup was taken. If you wish to recover
- into some child timeline (that is, you want to return to some state that
- was itself generated after a recovery attempt), you need to specify the
- target timeline ID in <filename>recovery.conf</>. You cannot recover into
- timelines that branched off earlier than the base backup.
- </para>
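-
- <para>
- For example (the timeline ID is illustrative), adding
-<programlisting>
-recovery_target_timeline = '3'
-</programlisting>
- to <filename>recovery.conf</> requests recovery along timeline 3.
- </para>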
-
- </sect2>
-
- <sect2 id="backup-tips">
- <title>Tips and Examples</title>
-
-&pgnotice;
- <para>
- Some tips for configuring continuous archiving are given here.
- </para>
-
- <sect3 id="backup-standalone">
- <title>Standalone Hot Backups</title>
-
-&pgnotice;
- <para>
- It is possible to use <productname>PostgreSQL</>'s backup facilities to
- produce standalone hot backups. These are backups that cannot be used
- for point-in-time recovery, yet are typically much faster to back up and
- restore than <application>pg_dump</> dumps. (They are also much larger
- than <application>pg_dump</> dumps, so in some cases the speed advantage
- might be negated.)
- </para>
-
- <para>
- To prepare for standalone hot backups, set <varname>wal_level</> to
- <literal>archive</> (or <literal>hot_standby</>), <varname>archive_mode</> to
- <literal>on</>, and set up an <varname>archive_command</> that performs
- archiving only when a <emphasis>switch file</> exists. For example:
-<programlisting>
-archive_command = 'test ! -f /var/lib/pgsql/backup_in_progress || cp -i %p /var/lib/pgsql/archive/%f &lt; /dev/null'
-</programlisting>
- This command will perform archiving when
- <filename>/var/lib/pgsql/backup_in_progress</> exists, and otherwise
- silently return zero exit status (allowing <productname>PostgreSQL</>
- to recycle the unwanted WAL file).
- </para>
-
- <para>
- With this preparation, a backup can be taken using a script like the
- following:
-<programlisting>
-touch /var/lib/pgsql/backup_in_progress
-psql -c "select pg_start_backup('hot_backup');"
-tar -cf /var/lib/pgsql/backup.tar /var/lib/pgsql/data/
-psql -c "select pg_stop_backup();"
-rm /var/lib/pgsql/backup_in_progress
-tar -rf /var/lib/pgsql/backup.tar /var/lib/pgsql/archive/
-</programlisting>
- The switch file <filename>/var/lib/pgsql/backup_in_progress</> is
- created first, enabling archiving of completed WAL files to occur.
- After the backup the switch file is removed. Archived WAL files are
- then added to the backup so that both base backup and all required
- WAL files are part of the same <application>tar</> file.
- Please remember to add error handling to your backup scripts.
- </para>
-
- <para>
- If archive storage size is a concern, use <application>pg_compresslog</>,
- <ulink url="http://pglesslog.projects.postgresql.org"></ulink>, to
- remove unnecessary <xref linkend="guc-full-page-writes"> and trailing
- space from the WAL files. You can then use
- <application>gzip</application> to further compress the output of
- <application>pg_compresslog</>:
-<programlisting>
-archive_command = 'pg_compresslog %p - | gzip &gt; /var/lib/pgsql/archive/%f'
-</programlisting>
- You will then need to use <application>gunzip</> and
- <application>pg_decompresslog</> during recovery:
-<programlisting>
-restore_command = 'gunzip &lt; /mnt/server/archivedir/%f | pg_decompresslog - %p'
-</programlisting>
- </para>
- </sect3>
-
- <sect3 id="backup-scripts">
- <title><varname>archive_command</varname> Scripts</title>
-
-&pgnotice;
- <para>
- Many people choose to use scripts to define their
- <varname>archive_command</varname>, so that their
- <filename>postgresql.conf</> entry looks very simple:
-<programlisting>
-archive_command = 'local_backup_script.sh'
-</programlisting>
- Using a separate script file is advisable any time you want to use
- more than a single command in the archiving process.
- This allows all complexity to be managed within the script, which
- can be written in a popular scripting language such as
- <application>bash</> or <application>perl</>.
- Any messages written to <literal>stderr</> from the script will appear
- in the database server log, allowing complex configurations to be
- diagnosed easily if they fail.
- </para>
-
- <para>
- Examples of requirements that might be solved within a script include
- (see the sketch after this list):
- <itemizedlist>
- <listitem>
- <para>
- Copying data to secure off-site data storage
- </para>
- </listitem>
- <listitem>
- <para>
- Batching WAL files so that they are transferred every three hours,
- rather than one at a time
- </para>
- </listitem>
- <listitem>
- <para>
- Interfacing with other backup and recovery software
- </para>
- </listitem>
- <listitem>
- <para>
- Interfacing with monitoring software to report errors
- </para>
- </listitem>
- </itemizedlist>
- </para>
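-
- <para>
- As a sketch only (the hostname and paths are illustrative, and the
- script is assumed to be configured as
- <literal>local_backup_script.sh "%p" "%f"</>), such a script might
- archive locally and off-site, returning nonzero if either step fails:
-<programlisting>
-#!/bin/bash
-# Invoked by archive_command with the WAL path (%p) and file name (%f)
-WAL_PATH="$1"
-WAL_FILE="$2"
-
-# Copy into the local archive; cp -i with stdin closed fails rather
-# than overwriting an existing file
-cp -i "$WAL_PATH" "/var/lib/pgsql/archive/$WAL_FILE" &lt; /dev/null || exit 1
-
-# Copy to off-site storage; messages on stderr appear in the server log
-scp "$WAL_PATH" "backup-host:/archive/$WAL_FILE" || {
-    echo "off-site copy of $WAL_FILE failed" &gt;&amp;2
-    exit 1
-}
-exit 0
-</programlisting>
- </para>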
- </sect3>
- </sect2>
-
-
- <sect2 id="continuous-archiving-caveats">
- <title>Caveats</title>
-
- <para>
- At this writing, there are several limitations of the continuous archiving
- technique. These will probably be fixed in future releases:
-
- <itemizedlist>
- <listitem>
- <para>
- Operations on hash indexes are not presently WAL-logged, so
- replay will not update these indexes. This will mean that any new inserts
- will be ignored by the index, updated rows will apparently disappear and
- deleted rows will still retain pointers. In other words, if you modify a
- table with a hash index on it then you will get incorrect query results
- on a standby server. It is therefore recommended that you
- manually <xref linkend="sql-reindex">
- each such index after completing a recovery operation.
- </para>
- </listitem>
-
- <listitem>
- <para>
- If a <xref linkend="sql-createdatabase">
- command is executed while a base backup is being taken, and then
- the template database that the <command>CREATE DATABASE</> copied
- is modified while the base backup is still in progress, it is
- possible that recovery will cause those modifications to be
- propagated into the created database as well. This is of course
- undesirable. To avoid this risk, it is best not to modify any
- template databases while taking a base backup.
- </para>
- </listitem>
-
- <listitem>
- <para>
- <xref linkend="sql-createtablespace">
- commands are WAL-logged with the literal absolute path, and will
- therefore be replayed as tablespace creations with the same
- absolute path. This might be undesirable if the log is being
- replayed on a different machine. It can be dangerous even if the
- log is being replayed on the same machine, but into a new data
- directory: the replay will still overwrite the contents of the
- original tablespace. To avoid potential gotchas of this sort,
- the best practice is to take a new base backup after creating or
- dropping tablespaces.
- </para>
- </listitem>
- </itemizedlist>
- </para>
-
- <para>
- It should also be noted that the default <acronym>WAL</acronym>
- format is fairly bulky since it includes many disk page snapshots.
- These page snapshots are designed to support crash recovery, since
- we might need to fix partially-written disk pages. Depending on
- your system hardware and software, the risk of partial writes might
- be small enough to ignore, in which case you can significantly
- reduce the total volume of archived logs by turning off page
- snapshots using the <xref linkend="guc-full-page-writes">
- parameter. (Read the notes and warnings in <xref linkend="wal">
- before you do so.) Turning off page snapshots does not prevent
- use of the logs for PITR operations. An area for future
- development is to compress archived WAL data by removing
- unnecessary page copies even when <varname>full_page_writes</> is
- on. In the meantime, administrators might wish to reduce the number
- of page snapshots included in WAL by increasing the checkpoint
- interval parameters as much as feasible.
- </para>
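-
- <para>
- For example (values are illustrative), in
- <filename>postgresql.conf</>:
-<programlisting>
-full_page_writes = off
-checkpoint_timeout = 30min
-</programlisting>
- </para>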
- </sect2>
- </sect1>
-
-</chapter>
diff --git a/doc-xc/src/sgml/biblio.sgmlin b/doc-xc/src/sgml/biblio.sgmlin
deleted file mode 100644
index 290f28c086..0000000000
--- a/doc-xc/src/sgml/biblio.sgmlin
+++ /dev/null
@@ -1,594 +0,0 @@
-<!-- doc/src/sgml/biblio.sgml -->
-
- <bibliography id="biblio">
- <title>Bibliography</title>
-
- <para>
- Selected references and readings for <acronym>SQL</acronym>
- and <productname>PostgreSQL</productname>.
- </para>
-
- <para>
- Some white papers and technical reports from the original
- <productname>POSTGRES</productname> development team
- are available at the University of California, Berkeley, Computer Science
- Department <ulink url="http://db.cs.berkeley.edu/papers/">
- web site</ulink>.
- </para>
-
- <bibliodiv>
- <title><acronym>SQL</acronym> Reference Books</title>
- <para>Reference texts for <acronym>SQL</acronym> features.</para>
-
- <biblioentry id="BOWMAN01">
- <title>The Practical <acronym>SQL</acronym> Handbook</title>
- <titleabbrev>Bowman et al, 2001</titleabbrev>
- <subtitle>Using SQL Variants</subtitle>
- <edition>Fourth Edition</edition>
- <authorgroup>
- <author>
- <firstname>Judith</firstname>
- <surname>Bowman</surname>
- </author>
- <author>
- <firstname>Sandra</firstname>
- <surname>Emerson</surname>
- </author>
- <author>
- <firstname>Marcy</firstname>
- <surname>Darnovsky</surname>
- </author>
- </authorgroup>
- <isbn>0-201-70309-2</isbn>
- <pubdate>2001</pubdate>
- <publisher>
- <publishername>Addison-Wesley Professional</publishername>
- </publisher>
- <copyright>
- <year>2001</year>
- </copyright>
- </biblioentry>
-
- <biblioentry id="DATE97">
- <title>A Guide to the <acronym>SQL</acronym> Standard</title>
- <titleabbrev>Date and Darwen, 1997</titleabbrev>
- <subtitle>A user's guide to the standard database language <acronym>SQL</acronym></subtitle>
- <edition>Fourth Edition</edition>
- <authorgroup>
- <author>
- <firstname>C. J.</firstname>
- <surname>Date</surname>
- </author>
- <author>
- <firstname>Hugh</firstname>
- <surname>Darwen</surname>
- </author>
- </authorgroup>
- <isbn>0-201-96426-0</isbn>
- <pubdate>1997</pubdate>
- <publisher>
- <publishername>Addison-Wesley</publishername>
- </publisher>
- <copyright>
- <year>1997</year>
- <holder>Addison-Wesley Longman, Inc.</holder>
- </copyright>
- </biblioentry>
-
- <biblioentry id="DATE04">
- <title>An Introduction to Database Systems</title>
- <titleabbrev>Date, 2004</titleabbrev>
- <edition>Eighth Edition</edition>
- <authorgroup>
- <author>
- <firstname>C. J.</firstname>
- <surname>Date</surname>
- </author>
- </authorgroup>
- <isbn>0-321-19784-4</isbn>
- <pubdate>2003</pubdate>
- <publisher>
- <publishername>Addison-Wesley</publishername>
- </publisher>
- <copyright>
- <year>2004</year>
- <holder>Pearson Education, Inc.</holder>
- </copyright>
- </biblioentry>
-
- <biblioentry id="ELMA04">
- <title>Fundamentals of Database Systems</title>
- <edition>Fourth Edition</edition>
- <authorgroup>
- <author>
- <firstname>Ramez</firstname>
- <surname>Elmasri</surname>
- </author>
- <author>
- <firstname>Shamkant</firstname>
- <surname>Navathe</surname>
- </author>
- </authorgroup>
- <isbn>0-321-12226-7</isbn>
- <pubdate>2003</pubdate>
- <publisher>
- <publishername>Addison-Wesley</publishername>
- </publisher>
- <copyright>
- <year>2004</year>
- </copyright>
- </biblioentry>
-
- <biblioentry id="MELT93">
- <title>Understanding the New <acronym>SQL</acronym></title>
- <titleabbrev>Melton and Simon, 1993</titleabbrev>
- <subtitle>A complete guide</subtitle>
- <authorgroup>
- <author>
- <firstname>Jim</firstname>
- <surname>Melton</surname>
- </author>
- <author>
- <firstname>Alan R.</firstname>
- <surname>Simon</surname>
- </author>
- </authorgroup>
- <isbn>1-55860-245-3</isbn>
- <pubdate>1993</pubdate>
- <publisher>
- <publishername>Morgan Kaufmann</publishername>
- </publisher>
- <copyright>
- <year>1993</year>
- <holder>Morgan Kaufmann Publishers, Inc.</holder>
- </copyright>
- </biblioentry>
-
- <biblioentry id="ULL88">
- <title>Principles of Database and Knowledge</title>
- <subtitle>Base Systems</subtitle>
- <titleabbrev>Ullman, 1988</titleabbrev>
- <authorgroup>
- <author>
- <firstname>Jeffrey D.</firstname>
- <surname>Ullman</surname>
- </author>
- </authorgroup>
- <volumenum>Volume 1</volumenum>
- <publisher>
- <publishername>Computer Science Press</publishername>
- </publisher>
- <pubdate>1988</pubdate>
- </biblioentry>
-
- </bibliodiv>
-
- <bibliodiv>
- <title>PostgreSQL-specific Documentation</title>
- <para>This section is for related documentation.</para>
-
- <biblioentry id="SIM98">
- <title>Enhancement of the ANSI SQL Implementation of PostgreSQL</title>
- <titleabbrev>Simkovics, 1998</titleabbrev>
- <authorgroup>
- <author>
- <firstname>Stefan</firstname>
- <surname>Simkovics</surname>
-<!--
-Paul-Peters-Gasse 36
-2384 Breitenfurt
-AUSTRIA
--->
- </author>
- </authorgroup>
-<!--
- <othercredit>
- <contrib>
- with support by
- </contrib>
- <honorific>O. Univ. Prof. Dr.</honorific>
- <firstname>Georg</firstname>
- <surname>Gottlob</surname>
- <honorific>Univ. Ass. Mag.</honorific>
- <firstname>Katrin</firstname>
- <surname>Seyr</surname>
- </othercredit>
--->
- <abstract>
- <para>
- Discusses SQL history and syntax, and describes the addition of
- <literal>INTERSECT</> and <literal>EXCEPT</> constructs into
- <productname>PostgreSQL</productname>. Prepared as a Master's
- Thesis with the support of O. Univ. Prof. Dr. Georg Gottlob and
- Univ. Ass. Mag. Katrin Seyr at Vienna University of Technology.
- </para>
- </abstract>
-
- <pubdate>November 29, 1998</pubdate>
- <publisher>
- <publishername>Department of Information Systems, Vienna University of Technology</publishername>
- <address>Vienna, Austria</address>
- </publisher>
- </biblioentry>
-
- <biblioentry id="YU95">
- <title>The <productname>Postgres95</productname> User Manual</title>
- <titleabbrev>Yu and Chen, 1995</titleabbrev>
- <authorgroup>
- <author>
- <firstname>A.</firstname>
- <surname>Yu</surname>
- </author>
- <author>
- <firstname>J.</firstname>
- <surname>Chen</surname>
- </author>
- </authorgroup>
- <authorgroup>
- <collab>
- <collabname>The POSTGRES Group</collabname>
- </collab>
- </authorgroup>
-
- <pubdate>Sept. 5, 1995</pubdate>
- <publisher>
- <publishername>University of California</publishername>
- <address>Berkeley, California</address>
- </publisher>
- </biblioentry>
-
- <biblioentry id="FONG">
- <title>
- <ulink url="http://db.cs.berkeley.edu/papers/UCB-MS-zfong.pdf">
- The design and implementation of the <productname>POSTGRES</productname> query optimizer
- </ulink></title>
- <author>
- <firstname>Zelaine</firstname>
- <surname>Fong</surname>
- </author>
- <publisher>
- <publishername>University of California, Berkeley, Computer Science Department</publishername>
- </publisher>
- </biblioentry>
-
- </bibliodiv>
-
- <bibliodiv>
- <title>Proceedings and Articles</title>
- <para>This section is for articles and newsletters.</para>
-
- <biblioentry id="OLSON93">
- <title>Partial indexing in POSTGRES: research project</title>
- <titleabbrev>Olson, 1993</titleabbrev>
- <authorgroup>
- <author>
- <firstname>Nels</firstname>
- <surname>Olson</surname>
- </author>
- </authorgroup>
- <pubdate>1993</pubdate>
- <pubsnumber>UCB Engin T7.49.1993 O676</pubsnumber>
- <publisher>
- <publishername>University of California</publishername>
- <address>Berkeley, California</address>
- </publisher>
- </biblioentry>
-
- <biblioentry id="ONG90">
- <biblioset relation="article">
- <title>A Unified Framework for Version Modeling Using Production Rules in a Database System</title>
- <titleabbrev>Ong and Goh, 1990</titleabbrev>
- <authorgroup>
- <author>
- <firstname>L.</firstname>
- <surname>Ong</surname>
- </author>
- <author>
- <firstname>J.</firstname>
- <surname>Goh</surname>
- </author>
- </authorgroup>
- </biblioset>
- <biblioset relation="journal">
- <title>ERL Technical Memorandum M90/33</title>
- <pubdate>April, 1990</pubdate>
- <publisher>
- <publishername>University of California</publishername>
- <address>Berkeley, California</address>
- </publisher>
- </biblioset>
- </biblioentry>
-
- <biblioentry id="ROWE87">
- <biblioset relation="article">
- <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M87-13.pdf">
- The <productname>POSTGRES</productname> data model
- </ulink></title>
- <titleabbrev>Rowe and Stonebraker, 1987</titleabbrev>
- <authorgroup>
- <author>
- <firstname>L.</firstname>
- <surname>Rowe</surname>
- </author>
- <author>
- <firstname>M.</firstname>
- <surname>Stonebraker</surname>
- </author>
- </authorgroup>
- </biblioset>
- <confgroup>
- <conftitle>VLDB Conference</conftitle>
- <confdates>Sept. 1987</confdates>
- <address>Brighton, England</address>
- </confgroup>
- </biblioentry>
-
- <biblioentry id="SESHADRI95">
- <biblioset relation="article">
- <title>Generalized Partial Indexes
- <ulink url="http://citeseer.ist.psu.edu/seshadri95generalized.html">(cached version)
-<!--
- Original URL: http://citeseer.ist.psu.edu/seshadri95generalized.html
--->
- </ulink>
- </title>
- <titleabbrev>Seshadri, 1995</titleabbrev>
- <authorgroup>
- <author>
- <firstname>P.</firstname>
- <surname>Seshadri</surname>
- </author>
- <author>
- <firstname>A.</firstname>
- <surname>Swami</surname>
- </author>
- </authorgroup>
- </biblioset>
- <confgroup>
- <conftitle>Eleventh International Conference on Data Engineering</conftitle>
- <confdates>6-10 March 1995</confdates>
- <address>Taipeh, Taiwan</address>
- </confgroup>
- <pubdate>1995</pubdate>
- <pubsnumber>Cat. No.95CH35724</pubsnumber>
- <publisher>
- <publishername>IEEE Computer Society Press</publishername>
- <address>Los Alamitos, California</address>
- </publisher>
- <pagenums>420-7</pagenums>
- </biblioentry>
-
- <biblioentry id="STON86">
- <biblioset relation="article">
- <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M85-95.pdf">
- The design of <productname>POSTGRES</productname>
- </ulink></title>
- <titleabbrev>Stonebraker and Rowe, 1986</titleabbrev>
- <authorgroup>
- <author>
- <firstname>M.</firstname>
- <surname>Stonebraker</surname>
- </author>
- <author>
- <firstname>L.</firstname>
- <surname>Rowe</surname>
- </author>
- </authorgroup>
- </biblioset>
- <confgroup>
- <conftitle>ACM-SIGMOD Conference on Management of Data</conftitle>
- <confdates>May 1986</confdates>
- <address>Washington, DC</address>
- </confgroup>
- </biblioentry>
-
- <biblioentry id="STON87a">
- <biblioset relation="article">
- <title>The design of the <productname>POSTGRES</productname> rules system</title>
- <titleabbrev>Stonebraker, Hanson, Hong, 1987</titleabbrev>
- <authorgroup>
- <author>
- <firstname>M.</firstname>
- <surname>Stonebraker</surname>
- </author>
- <author>
- <firstname>E.</firstname>
- <surname>Hanson</surname>
- </author>
- <author>
- <firstname>C. H.</firstname>
- <surname>Hong</surname>
- </author>
- </authorgroup>
- </biblioset>
- <confgroup>
- <conftitle>IEEE Conference on Data Engineering</conftitle>
- <confdates>Feb. 1987</confdates>
- <address>Los Angeles, California</address>
- </confgroup>
- </biblioentry>
-
- <biblioentry id="STON87b">
- <biblioset relation="article">
- <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M87-06.pdf">
- The design of the <productname>POSTGRES</productname> storage system
- </ulink></title>
- <titleabbrev>Stonebraker, 1987</titleabbrev>
- <authorgroup>
- <author>
- <firstname>M.</firstname>
- <surname>Stonebraker</surname>
- </author>
- </authorgroup>
- </biblioset>
- <confgroup>
- <conftitle>VLDB Conference</conftitle>
- <confdates>Sept. 1987</confdates>
- <address>Brighton, England</address>
- </confgroup>
- </biblioentry>
-
- <biblioentry id="STON89">
- <biblioset relation="article">
- &l