author     Peter Eisentraut    2017-10-09 01:44:17 +0000
committer  Peter Eisentraut    2017-10-17 19:10:33 +0000
commit     c29c578908dc0271eeb13a4014e54bff07a29c05
tree       1aa03fb6f1864719f2f23d4b0b9d5e2865764082
parent     6ecabead4b5993c42745f2802d857b1a79f48bf9
Don't use SGML empty tags
For DocBook XML compatibility, don't use SGML empty tags (</>) anymore; replace them with the full tag name. Add a warning option to catch future occurrences.

Alexander Lakhin, Jürgen Purtz
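The substance of the change is mechanical: every minimized SGML end tag is spelled out in full. A minimal sketch of the rewrite, using invented example content rather than a hunk taken from this patch:

Before:  <command>VACUUM</> reclaims storage occupied by dead tuples.
After:   <command>VACUUM</command> reclaims storage occupied by dead tuples.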
-rw-r--r--  doc/src/sgml/Makefile | 3
-rw-r--r--  doc/src/sgml/acronyms.sgml | 18
-rw-r--r--  doc/src/sgml/adminpack.sgml | 54
-rw-r--r--  doc/src/sgml/advanced.sgml | 110
-rw-r--r--  doc/src/sgml/amcheck.sgml | 66
-rw-r--r--  doc/src/sgml/arch-dev.sgml | 64
-rw-r--r--  doc/src/sgml/array.sgml | 110
-rw-r--r--  doc/src/sgml/auth-delay.sgml | 6
-rw-r--r--  doc/src/sgml/auto-explain.sgml | 36
-rw-r--r--  doc/src/sgml/backup.sgml | 496
-rw-r--r--  doc/src/sgml/bgworker.sgml | 80
-rw-r--r--  doc/src/sgml/biblio.sgml | 2
-rw-r--r--  doc/src/sgml/bki.sgml | 86
-rw-r--r--  doc/src/sgml/bloom.sgml | 24
-rw-r--r--  doc/src/sgml/brin.sgml | 78
-rw-r--r--  doc/src/sgml/btree-gin.sgml | 18
-rw-r--r--  doc/src/sgml/btree-gist.sgml | 32
-rw-r--r--  doc/src/sgml/catalogs.sgml | 1012
-rw-r--r--  doc/src/sgml/charset.sgml | 270
-rw-r--r--  doc/src/sgml/citext.sgml | 120
-rw-r--r--  doc/src/sgml/client-auth.sgml | 328
-rw-r--r--  doc/src/sgml/config.sgml | 2156
-rw-r--r--  doc/src/sgml/contrib-spi.sgml | 80
-rw-r--r--  doc/src/sgml/contrib.sgml | 26
-rw-r--r--  doc/src/sgml/cube.sgml | 158
-rw-r--r--  doc/src/sgml/custom-scan.sgml | 136
-rw-r--r--  doc/src/sgml/datatype.sgml | 692
-rw-r--r--  doc/src/sgml/datetime.sgml | 72
-rw-r--r--  doc/src/sgml/dblink.sgml | 270
-rw-r--r--  doc/src/sgml/ddl.sgml | 390
-rw-r--r--  doc/src/sgml/dfunc.sgml | 40
-rw-r--r--  doc/src/sgml/dict-int.sgml | 18
-rw-r--r--  doc/src/sgml/dict-xsyn.sgml | 40
-rw-r--r--  doc/src/sgml/diskusage.sgml | 16
-rw-r--r--  doc/src/sgml/dml.sgml | 32
-rw-r--r--  doc/src/sgml/docguide.sgml | 2
-rw-r--r--  doc/src/sgml/earthdistance.sgml | 32
-rw-r--r--  doc/src/sgml/ecpg.sgml | 734
-rw-r--r--  doc/src/sgml/errcodes.sgml | 18
-rw-r--r--  doc/src/sgml/event-trigger.sgml | 78
-rw-r--r--  doc/src/sgml/extend.sgml | 362
-rw-r--r--  doc/src/sgml/external-projects.sgml | 18
-rw-r--r--  doc/src/sgml/fdwhandler.sgml | 880
-rw-r--r--  doc/src/sgml/file-fdw.sgml | 48
-rw-r--r--  doc/src/sgml/func.sgml | 2528
-rw-r--r--  doc/src/sgml/fuzzystrmatch.sgml | 26
-rw-r--r--  doc/src/sgml/generate-errcodes-table.pl | 4
-rw-r--r--  doc/src/sgml/generic-wal.sgml | 42
-rw-r--r--  doc/src/sgml/geqo.sgml | 12
-rw-r--r--  doc/src/sgml/gin.sgml | 286
-rw-r--r--  doc/src/sgml/gist.sgml | 402
-rw-r--r--  doc/src/sgml/high-availability.sgml | 516
-rw-r--r--  doc/src/sgml/history.sgml | 4
-rw-r--r--  doc/src/sgml/hstore.sgml | 152
-rw-r--r--  doc/src/sgml/indexam.sgml | 446
-rw-r--r--  doc/src/sgml/indices.sgml | 224
-rw-r--r--  doc/src/sgml/info.sgml | 6
-rw-r--r--  doc/src/sgml/information_schema.sgml | 364
-rw-r--r--  doc/src/sgml/install-windows.sgml | 58
-rw-r--r--  doc/src/sgml/installation.sgml | 460
-rw-r--r--  doc/src/sgml/intagg.sgml | 28
-rw-r--r--  doc/src/sgml/intarray.sgml | 68
-rw-r--r--  doc/src/sgml/intro.sgml | 4
-rw-r--r--  doc/src/sgml/isn.sgml | 28
-rw-r--r--  doc/src/sgml/json.sgml | 174
-rw-r--r--  doc/src/sgml/libpq.sgml | 1414
-rw-r--r--  doc/src/sgml/lo.sgml | 34
-rw-r--r--  doc/src/sgml/lobj.sgml | 128
-rw-r--r--  doc/src/sgml/logicaldecoding.sgml | 18
-rw-r--r--  doc/src/sgml/ltree.sgml | 234
-rw-r--r--  doc/src/sgml/maintenance.sgml | 338
-rw-r--r--  doc/src/sgml/manage-ag.sgml | 196
-rw-r--r--  doc/src/sgml/monitoring.sgml | 1440
-rw-r--r--  doc/src/sgml/mvcc.sgml | 130
-rw-r--r--  doc/src/sgml/nls.sgml | 24
-rw-r--r--  doc/src/sgml/notation.sgml | 8
-rw-r--r--  doc/src/sgml/oid2name.sgml | 60
-rw-r--r--  doc/src/sgml/pageinspect.sgml | 36
-rw-r--r--  doc/src/sgml/parallel.sgml | 96
-rw-r--r--  doc/src/sgml/perform.sgml | 344
-rw-r--r--  doc/src/sgml/pgbuffercache.sgml | 16
-rw-r--r--  doc/src/sgml/pgcrypto.sgml | 176
-rw-r--r--  doc/src/sgml/pgfreespacemap.sgml | 8
-rw-r--r--  doc/src/sgml/pgprewarm.sgml | 16
-rw-r--r--  doc/src/sgml/pgrowlocks.sgml | 10
-rw-r--r--  doc/src/sgml/pgstandby.sgml | 100
-rw-r--r--  doc/src/sgml/pgstatstatements.sgml | 106
-rw-r--r--  doc/src/sgml/pgstattuple.sgml | 30
-rw-r--r--  doc/src/sgml/pgtrgm.sgml | 90
-rw-r--r--  doc/src/sgml/pgvisibility.sgml | 12
-rw-r--r--  doc/src/sgml/planstats.sgml | 52
-rw-r--r--  doc/src/sgml/plhandler.sgml | 50
-rw-r--r--  doc/src/sgml/plperl.sgml | 148
-rw-r--r--  doc/src/sgml/plpgsql.sgml | 1204
-rw-r--r--  doc/src/sgml/plpython.sgml | 104
-rw-r--r--  doc/src/sgml/pltcl.sgml | 210
-rw-r--r--  doc/src/sgml/postgres-fdw.sgml | 198
-rw-r--r--  doc/src/sgml/postgres.sgml | 16
-rw-r--r--  doc/src/sgml/problems.sgml | 20
-rw-r--r--  doc/src/sgml/protocol.sgml | 474
-rw-r--r--  doc/src/sgml/queries.sgml | 674
-rw-r--r--  doc/src/sgml/query.sgml | 36
-rw-r--r--  doc/src/sgml/rangetypes.sgml | 66
-rw-r--r--  doc/src/sgml/recovery-config.sgml | 132
-rw-r--r--  doc/src/sgml/ref/abort.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_aggregate.sgml | 18
-rw-r--r--  doc/src/sgml/ref/alter_collation.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_conversion.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_database.sgml | 4
-rw-r--r--  doc/src/sgml/ref/alter_default_privileges.sgml | 20
-rw-r--r--  doc/src/sgml/ref/alter_domain.sgml | 20
-rw-r--r--  doc/src/sgml/ref/alter_extension.sgml | 22
-rw-r--r--  doc/src/sgml/ref/alter_foreign_data_wrapper.sgml | 18
-rw-r--r--  doc/src/sgml/ref/alter_foreign_table.sgml | 42
-rw-r--r--  doc/src/sgml/ref/alter_function.sgml | 28
-rw-r--r--  doc/src/sgml/ref/alter_group.sgml | 8
-rw-r--r--  doc/src/sgml/ref/alter_index.sgml | 10
-rw-r--r--  doc/src/sgml/ref/alter_materialized_view.sgml | 4
-rw-r--r--  doc/src/sgml/ref/alter_opclass.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_operator.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_opfamily.sgml | 32
-rw-r--r--  doc/src/sgml/ref/alter_publication.sgml | 8
-rw-r--r--  doc/src/sgml/ref/alter_role.sgml | 22
-rw-r--r--  doc/src/sgml/ref/alter_schema.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_sequence.sgml | 34
-rw-r--r--  doc/src/sgml/ref/alter_server.sgml | 12
-rw-r--r--  doc/src/sgml/ref/alter_statistics.sgml | 4
-rw-r--r--  doc/src/sgml/ref/alter_subscription.sgml | 4
-rw-r--r--  doc/src/sgml/ref/alter_system.sgml | 12
-rw-r--r--  doc/src/sgml/ref/alter_table.sgml | 140
-rw-r--r--  doc/src/sgml/ref/alter_tablespace.sgml | 4
-rw-r--r--  doc/src/sgml/ref/alter_trigger.sgml | 4
-rw-r--r--  doc/src/sgml/ref/alter_tsconfig.sgml | 20
-rw-r--r--  doc/src/sgml/ref/alter_tsdictionary.sgml | 8
-rw-r--r--  doc/src/sgml/ref/alter_tsparser.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_tstemplate.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_type.sgml | 6
-rw-r--r--  doc/src/sgml/ref/alter_user_mapping.sgml | 14
-rw-r--r--  doc/src/sgml/ref/alter_view.sgml | 10
-rw-r--r--  doc/src/sgml/ref/analyze.sgml | 16
-rw-r--r--  doc/src/sgml/ref/begin.sgml | 4
-rw-r--r--  doc/src/sgml/ref/close.sgml | 4
-rw-r--r--  doc/src/sgml/ref/cluster.sgml | 10
-rw-r--r--  doc/src/sgml/ref/clusterdb.sgml | 62
-rw-r--r--  doc/src/sgml/ref/comment.sgml | 26
-rw-r--r--  doc/src/sgml/ref/commit.sgml | 2
-rw-r--r--  doc/src/sgml/ref/commit_prepared.sgml | 2
-rw-r--r--  doc/src/sgml/ref/copy.sgml | 226
-rw-r--r--  doc/src/sgml/ref/create_access_method.sgml | 8
-rw-r--r--  doc/src/sgml/ref/create_aggregate.sgml | 164
-rw-r--r--  doc/src/sgml/ref/create_cast.sgml | 80
-rw-r--r--  doc/src/sgml/ref/create_collation.sgml | 2
-rw-r--r--  doc/src/sgml/ref/create_conversion.sgml | 6
-rw-r--r--  doc/src/sgml/ref/create_database.sgml | 54
-rw-r--r--  doc/src/sgml/ref/create_domain.sgml | 22
-rw-r--r--  doc/src/sgml/ref/create_event_trigger.sgml | 6
-rw-r--r--  doc/src/sgml/ref/create_extension.sgml | 38
-rw-r--r--  doc/src/sgml/ref/create_foreign_data_wrapper.sgml | 12
-rw-r--r--  doc/src/sgml/ref/create_foreign_table.sgml | 44
-rw-r--r--  doc/src/sgml/ref/create_function.sgml | 128
-rw-r--r--  doc/src/sgml/ref/create_index.sgml | 120
-rw-r--r--  doc/src/sgml/ref/create_language.sgml | 42
-rw-r--r--  doc/src/sgml/ref/create_materialized_view.sgml | 10
-rw-r--r--  doc/src/sgml/ref/create_opclass.sgml | 38
-rw-r--r--  doc/src/sgml/ref/create_operator.sgml | 24
-rw-r--r--  doc/src/sgml/ref/create_opfamily.sgml | 4
-rw-r--r--  doc/src/sgml/ref/create_policy.sgml | 20
-rw-r--r--  doc/src/sgml/ref/create_publication.sgml | 14
-rw-r--r--  doc/src/sgml/ref/create_role.sgml | 94
-rw-r--r--  doc/src/sgml/ref/create_rule.sgml | 32
-rw-r--r--  doc/src/sgml/ref/create_schema.sgml | 32
-rw-r--r--  doc/src/sgml/ref/create_sequence.sgml | 28
-rw-r--r--  doc/src/sgml/ref/create_server.sgml | 8
-rw-r--r--  doc/src/sgml/ref/create_statistics.sgml | 10
-rw-r--r--  doc/src/sgml/ref/create_subscription.sgml | 4
-rw-r--r--  doc/src/sgml/ref/create_table.sgml | 294
-rw-r--r--  doc/src/sgml/ref/create_table_as.sgml | 36
-rw-r--r--  doc/src/sgml/ref/create_tablespace.sgml | 22
-rw-r--r--  doc/src/sgml/ref/create_trigger.sgml | 172
-rw-r--r--  doc/src/sgml/ref/create_tsconfig.sgml | 2
-rw-r--r--  doc/src/sgml/ref/create_tstemplate.sgml | 2
-rw-r--r--  doc/src/sgml/ref/create_type.sgml | 98
-rw-r--r--  doc/src/sgml/ref/create_user.sgml | 4
-rw-r--r--  doc/src/sgml/ref/create_user_mapping.sgml | 10
-rw-r--r--  doc/src/sgml/ref/create_view.sgml | 140
-rw-r--r--  doc/src/sgml/ref/createdb.sgml | 66
-rw-r--r--  doc/src/sgml/ref/createuser.sgml | 114
-rw-r--r--  doc/src/sgml/ref/declare.sgml | 56
-rw-r--r--  doc/src/sgml/ref/delete.sgml | 62
-rw-r--r--  doc/src/sgml/ref/discard.sgml | 10
-rw-r--r--  doc/src/sgml/ref/do.sgml | 18
-rw-r--r--  doc/src/sgml/ref/drop_access_method.sgml | 4
-rw-r--r--  doc/src/sgml/ref/drop_aggregate.sgml | 10
-rw-r--r--  doc/src/sgml/ref/drop_collation.sgml | 4
-rw-r--r--  doc/src/sgml/ref/drop_conversion.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_database.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_domain.sgml | 6
-rw-r--r--  doc/src/sgml/ref/drop_extension.sgml | 6
-rw-r--r--  doc/src/sgml/ref/drop_foreign_data_wrapper.sgml | 6
-rw-r--r--  doc/src/sgml/ref/drop_foreign_table.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_function.sgml | 12
-rw-r--r--  doc/src/sgml/ref/drop_index.sgml | 12
-rw-r--r--  doc/src/sgml/ref/drop_language.sgml | 4
-rw-r--r--  doc/src/sgml/ref/drop_opclass.sgml | 12
-rw-r--r--  doc/src/sgml/ref/drop_opfamily.sgml | 4
-rw-r--r--  doc/src/sgml/ref/drop_owned.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_publication.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_role.sgml | 4
-rw-r--r--  doc/src/sgml/ref/drop_schema.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_sequence.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_server.sgml | 6
-rw-r--r--  doc/src/sgml/ref/drop_subscription.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_table.sgml | 6
-rw-r--r--  doc/src/sgml/ref/drop_tablespace.sgml | 6
-rw-r--r--  doc/src/sgml/ref/drop_tsconfig.sgml | 4
-rw-r--r--  doc/src/sgml/ref/drop_tsdictionary.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_tsparser.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_tstemplate.sgml | 2
-rw-r--r--  doc/src/sgml/ref/drop_type.sgml | 4
-rw-r--r--  doc/src/sgml/ref/drop_user_mapping.sgml | 14
-rw-r--r--  doc/src/sgml/ref/drop_view.sgml | 2
-rw-r--r--  doc/src/sgml/ref/dropdb.sgml | 48
-rw-r--r--  doc/src/sgml/ref/dropuser.sgml | 46
-rw-r--r--  doc/src/sgml/ref/ecpg-ref.sgml | 6
-rw-r--r--  doc/src/sgml/ref/end.sgml | 2
-rw-r--r--  doc/src/sgml/ref/execute.sgml | 4
-rw-r--r--  doc/src/sgml/ref/explain.sgml | 18
-rw-r--r--  doc/src/sgml/ref/fetch.sgml | 36
-rw-r--r--  doc/src/sgml/ref/grant.sgml | 98
-rw-r--r--  doc/src/sgml/ref/import_foreign_schema.sgml | 14
-rw-r--r--  doc/src/sgml/ref/initdb.sgml | 42
-rw-r--r--  doc/src/sgml/ref/insert.sgml | 86
-rw-r--r--  doc/src/sgml/ref/listen.sgml | 6
-rw-r--r--  doc/src/sgml/ref/load.sgml | 14
-rw-r--r--  doc/src/sgml/ref/lock.sgml | 72
-rw-r--r--  doc/src/sgml/ref/move.sgml | 2
-rw-r--r--  doc/src/sgml/ref/notify.sgml | 12
-rw-r--r--  doc/src/sgml/ref/pg_basebackup.sgml | 40
-rw-r--r--  doc/src/sgml/ref/pg_config-ref.sgml | 100
-rw-r--r--  doc/src/sgml/ref/pg_controldata.sgml | 8
-rw-r--r--  doc/src/sgml/ref/pg_ctl-ref.sgml | 50
-rw-r--r--  doc/src/sgml/ref/pg_dump.sgml | 280
-rw-r--r--  doc/src/sgml/ref/pg_dumpall.sgml | 102
-rw-r--r--  doc/src/sgml/ref/pg_isready.sgml | 32
-rw-r--r--  doc/src/sgml/ref/pg_receivewal.sgml | 34
-rw-r--r--  doc/src/sgml/ref/pg_recvlogical.sgml | 26
-rw-r--r--  doc/src/sgml/ref/pg_resetwal.sgml | 56
-rw-r--r--  doc/src/sgml/ref/pg_restore.sgml | 150
-rw-r--r--  doc/src/sgml/ref/pg_rewind.sgml | 48
-rw-r--r--  doc/src/sgml/ref/pg_waldump.sgml | 16
-rw-r--r--  doc/src/sgml/ref/pgarchivecleanup.sgml | 50
-rw-r--r--  doc/src/sgml/ref/pgbench.sgml | 568
-rw-r--r--  doc/src/sgml/ref/pgtestfsync.sgml | 16
-rw-r--r--  doc/src/sgml/ref/pgtesttiming.sgml | 10
-rw-r--r--  doc/src/sgml/ref/pgupgrade.sgml | 232
-rw-r--r--  doc/src/sgml/ref/postgres-ref.sgml | 76
-rw-r--r--  doc/src/sgml/ref/postmaster.sgml | 2
-rw-r--r--  doc/src/sgml/ref/prepare.sgml | 18
-rw-r--r--  doc/src/sgml/ref/prepare_transaction.sgml | 30
-rw-r--r--  doc/src/sgml/ref/psql-ref.sgml | 662
-rw-r--r--  doc/src/sgml/ref/reassign_owned.sgml | 2
-rw-r--r--  doc/src/sgml/ref/refresh_materialized_view.sgml | 4
-rw-r--r--  doc/src/sgml/ref/reindex.sgml | 34
-rw-r--r--  doc/src/sgml/ref/reindexdb.sgml | 80
-rw-r--r--  doc/src/sgml/ref/release_savepoint.sgml | 2
-rw-r--r--  doc/src/sgml/ref/reset.sgml | 10
-rw-r--r--  doc/src/sgml/ref/revoke.sgml | 48
-rw-r--r--  doc/src/sgml/ref/rollback.sgml | 2
-rw-r--r--  doc/src/sgml/ref/rollback_prepared.sgml | 2
-rw-r--r--  doc/src/sgml/ref/rollback_to.sgml | 28
-rw-r--r--  doc/src/sgml/ref/savepoint.sgml | 8
-rw-r--r--  doc/src/sgml/ref/security_label.sgml | 22
-rw-r--r--  doc/src/sgml/ref/select.sgml | 598
-rw-r--r--  doc/src/sgml/ref/set.sgml | 44
-rw-r--r--  doc/src/sgml/ref/set_constraints.sgml | 14
-rw-r--r--  doc/src/sgml/ref/set_role.sgml | 36
-rw-r--r--  doc/src/sgml/ref/set_session_auth.sgml | 16
-rw-r--r--  doc/src/sgml/ref/set_transaction.sgml | 12
-rw-r--r--  doc/src/sgml/ref/show.sgml | 2
-rw-r--r--  doc/src/sgml/ref/start_transaction.sgml | 6
-rw-r--r--  doc/src/sgml/ref/truncate.sgml | 38
-rw-r--r--  doc/src/sgml/ref/unlisten.sgml | 2
-rw-r--r--  doc/src/sgml/ref/update.sgml | 88
-rw-r--r--  doc/src/sgml/ref/vacuum.sgml | 24
-rw-r--r--  doc/src/sgml/ref/vacuumdb.sgml | 52
-rw-r--r--  doc/src/sgml/ref/values.sgml | 60
-rw-r--r--  doc/src/sgml/regress.sgml | 122
-rw-r--r--  doc/src/sgml/release-10.sgml | 736
-rw-r--r--  doc/src/sgml/release-7.4.sgml | 700
-rw-r--r--  doc/src/sgml/release-8.0.sgml | 1266
-rw-r--r--  doc/src/sgml/release-8.1.sgml | 1344
-rw-r--r--  doc/src/sgml/release-8.2.sgml | 1598
-rw-r--r--  doc/src/sgml/release-8.3.sgml | 1682
-rw-r--r--  doc/src/sgml/release-8.4.sgml | 2468
-rw-r--r--  doc/src/sgml/release-9.0.sgml | 2572
-rw-r--r--  doc/src/sgml/release-9.1.sgml | 2678
-rw-r--r--  doc/src/sgml/release-9.2.sgml | 2694
-rw-r--r--  doc/src/sgml/release-9.3.sgml | 2518
-rw-r--r--  doc/src/sgml/release-9.4.sgml | 2142
-rw-r--r--  doc/src/sgml/release-9.5.sgml | 1800
-rw-r--r--  doc/src/sgml/release-9.6.sgml | 1516
-rw-r--r--  doc/src/sgml/release-old.sgml | 314
-rw-r--r--  doc/src/sgml/release.sgml | 2
-rw-r--r--  doc/src/sgml/rowtypes.sgml | 130
-rw-r--r--  doc/src/sgml/rules.sgml | 326
-rw-r--r--  doc/src/sgml/runtime.sgml | 610
-rw-r--r--  doc/src/sgml/seg.sgml | 70
-rw-r--r--  doc/src/sgml/sepgsql.sgml | 190
-rw-r--r--  doc/src/sgml/sourcerepo.sgml | 24
-rw-r--r--  doc/src/sgml/sources.sgml | 170
-rw-r--r--  doc/src/sgml/spgist.sgml | 468
-rw-r--r--  doc/src/sgml/spi.sgml | 226
-rw-r--r--  doc/src/sgml/sslinfo.sgml | 14
-rw-r--r--  doc/src/sgml/start.sgml | 22
-rw-r--r--  doc/src/sgml/storage.sgml | 328
-rw-r--r--  doc/src/sgml/syntax.sgml | 362
-rw-r--r--  doc/src/sgml/tablefunc.sgml | 168
-rw-r--r--  doc/src/sgml/tablesample-method.sgml | 128
-rw-r--r--  doc/src/sgml/tcn.sgml | 8
-rw-r--r--  doc/src/sgml/test-decoding.sgml | 4
-rw-r--r--  doc/src/sgml/textsearch.sgml | 734
-rw-r--r--  doc/src/sgml/trigger.sgml | 220
-rw-r--r--  doc/src/sgml/tsm-system-rows.sgml | 8
-rw-r--r--  doc/src/sgml/tsm-system-time.sgml | 8
-rw-r--r--  doc/src/sgml/typeconv.sgml | 122
-rw-r--r--  doc/src/sgml/unaccent.sgml | 44
-rw-r--r--  doc/src/sgml/user-manag.sgml | 138
-rw-r--r--  doc/src/sgml/uuid-ossp.sgml | 26
-rw-r--r--  doc/src/sgml/vacuumlo.sgml | 42
-rw-r--r--  doc/src/sgml/wal.sgml | 138
-rw-r--r--  doc/src/sgml/xaggr.sgml | 160
-rw-r--r--  doc/src/sgml/xfunc.sgml | 614
-rw-r--r--  doc/src/sgml/xindex.sgml | 192
-rw-r--r--  doc/src/sgml/xml2.sgml | 58
-rw-r--r--  doc/src/sgml/xoper.sgml | 142
-rw-r--r--  doc/src/sgml/xplang.sgml | 26
-rw-r--r--  doc/src/sgml/xtypes.sgml | 68
337 files changed, 31636 insertions, 31635 deletions
diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile
index 164c00bb63..428eb569fc 100644
--- a/doc/src/sgml/Makefile
+++ b/doc/src/sgml/Makefile
@@ -66,10 +66,11 @@ ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML)
# Enable some extra warnings
# -wfully-tagged needed to throw a warning on missing tags
# for older tool chains, 2007-08-31
-override SPFLAGS += -wall -wno-unused-param -wno-empty -wfully-tagged
+override SPFLAGS += -wall -wno-unused-param -wfully-tagged
# Additional warnings for XML compatibility. The conditional is meant
# to detect whether we are using OpenSP rather than the ancient
# original SP.
+override SPFLAGS += -wempty
ifneq (,$(filter o%,$(notdir $(OSX))))
override SPFLAGS += -wdata-delim -winstance-ignore-ms -winstance-include-ms -winstance-param-entity
endif
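With -wno-empty removed from SPFLAGS and -wempty added, the SGML tool chain driven by these flags should now complain whenever a minimized tag reappears. As a rough illustration only (assuming OpenSP's onsgmls is the validator that ultimately receives SPFLAGS, as the comment above suggests; the file name, line, column, and exact message wording are invented), a stray </> would be reported along these lines:

onsgmls:doc/src/sgml/example.sgml:42:27:W: empty end-tag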
diff --git a/doc/src/sgml/acronyms.sgml b/doc/src/sgml/acronyms.sgml
index 29f85e0846..35514d4d9a 100644
--- a/doc/src/sgml/acronyms.sgml
+++ b/doc/src/sgml/acronyms.sgml
@@ -4,8 +4,8 @@
<title>Acronyms</title>
<para>
- This is a list of acronyms commonly used in the <productname>PostgreSQL</>
- documentation and in discussions about <productname>PostgreSQL</>.
+ This is a list of acronyms commonly used in the <productname>PostgreSQL</productname>
+ documentation and in discussions about <productname>PostgreSQL</productname>.
<variablelist>
@@ -153,7 +153,7 @@
<ulink
url="http://en.wikipedia.org/wiki/Data_Definition_Language">Data
Definition Language</ulink>, SQL commands such as <command>CREATE
- TABLE</>, <command>ALTER USER</>
+ TABLE</command>, <command>ALTER USER</command>
</para>
</listitem>
</varlistentry>
@@ -164,8 +164,8 @@
<para>
<ulink
url="http://en.wikipedia.org/wiki/Data_Manipulation_Language">Data
- Manipulation Language</ulink>, SQL commands such as <command>INSERT</>,
- <command>UPDATE</>, <command>DELETE</>
+ Manipulation Language</ulink>, SQL commands such as <command>INSERT</command>,
+ <command>UPDATE</command>, <command>DELETE</command>
</para>
</listitem>
</varlistentry>
@@ -281,7 +281,7 @@
<listitem>
<para>
<link linkend="config-setting">Grand Unified Configuration</link>,
- the <productname>PostgreSQL</> subsystem that handles server configuration
+ the <productname>PostgreSQL</productname> subsystem that handles server configuration
</para>
</listitem>
</varlistentry>
@@ -384,7 +384,7 @@
<term><acronym>LSN</acronym></term>
<listitem>
<para>
- Log Sequence Number, see <link linkend="datatype-pg-lsn"><type>pg_lsn</></link>
+ Log Sequence Number, see <link linkend="datatype-pg-lsn"><type>pg_lsn</type></link>
and <link linkend="wal-internals">WAL Internals</link>.
</para>
</listitem>
@@ -486,7 +486,7 @@
<term><acronym>PGSQL</acronym></term>
<listitem>
<para>
- <link linkend="postgres"><productname>PostgreSQL</></link>
+ <link linkend="postgres"><productname>PostgreSQL</productname></link>
</para>
</listitem>
</varlistentry>
@@ -495,7 +495,7 @@
<term><acronym>PGXS</acronym></term>
<listitem>
<para>
- <link linkend="extend-pgxs"><productname>PostgreSQL</> Extension System</link>
+ <link linkend="extend-pgxs"><productname>PostgreSQL</productname> Extension System</link>
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/adminpack.sgml b/doc/src/sgml/adminpack.sgml
index fddf90c4a5..b27a4a325d 100644
--- a/doc/src/sgml/adminpack.sgml
+++ b/doc/src/sgml/adminpack.sgml
@@ -8,8 +8,8 @@
</indexterm>
<para>
- <filename>adminpack</> provides a number of support functions which
- <application>pgAdmin</> and other administration and management tools can
+ <filename>adminpack</filename> provides a number of support functions which
+ <application>pgAdmin</application> and other administration and management tools can
use to provide additional functionality, such as remote management
of server log files.
Use of all these functions is restricted to superusers.
@@ -25,7 +25,7 @@
</para>
<table id="functions-adminpack-table">
- <title><filename>adminpack</> Functions</title>
+ <title><filename>adminpack</filename> Functions</title>
<tgroup cols="3">
<thead>
<row><entry>Name</entry> <entry>Return Type</entry> <entry>Description</entry>
@@ -58,7 +58,7 @@
<entry><function>pg_catalog.pg_logdir_ls()</function></entry>
<entry><type>setof record</type></entry>
<entry>
- List the log files in the <varname>log_directory</> directory
+ List the log files in the <varname>log_directory</varname> directory
</entry>
</row>
</tbody>
@@ -69,9 +69,9 @@
<primary>pg_file_write</primary>
</indexterm>
<para>
- <function>pg_file_write</> writes the specified <parameter>data</> into
- the file named by <parameter>filename</>. If <parameter>append</> is
- false, the file must not already exist. If <parameter>append</> is true,
+ <function>pg_file_write</function> writes the specified <parameter>data</parameter> into
+ the file named by <parameter>filename</parameter>. If <parameter>append</parameter> is
+ false, the file must not already exist. If <parameter>append</parameter> is true,
the file can already exist, and will be appended to if so.
Returns the number of bytes written.
</para>
@@ -80,15 +80,15 @@
<primary>pg_file_rename</primary>
</indexterm>
<para>
- <function>pg_file_rename</> renames a file. If <parameter>archivename</>
- is omitted or NULL, it simply renames <parameter>oldname</>
- to <parameter>newname</> (which must not already exist).
- If <parameter>archivename</> is provided, it first
- renames <parameter>newname</> to <parameter>archivename</> (which must
- not already exist), and then renames <parameter>oldname</>
- to <parameter>newname</>. In event of failure of the second rename step,
- it will try to rename <parameter>archivename</> back
- to <parameter>newname</> before reporting the error.
+ <function>pg_file_rename</function> renames a file. If <parameter>archivename</parameter>
+ is omitted or NULL, it simply renames <parameter>oldname</parameter>
+ to <parameter>newname</parameter> (which must not already exist).
+ If <parameter>archivename</parameter> is provided, it first
+ renames <parameter>newname</parameter> to <parameter>archivename</parameter> (which must
+ not already exist), and then renames <parameter>oldname</parameter>
+ to <parameter>newname</parameter>. In event of failure of the second rename step,
+ it will try to rename <parameter>archivename</parameter> back
+ to <parameter>newname</parameter> before reporting the error.
Returns true on success, false if the source file(s) are not present or
not writable; other cases throw errors.
</para>
@@ -97,19 +97,19 @@
<primary>pg_file_unlink</primary>
</indexterm>
<para>
- <function>pg_file_unlink</> removes the specified file.
+ <function>pg_file_unlink</function> removes the specified file.
Returns true on success, false if the specified file is not present
- or the <function>unlink()</> call fails; other cases throw errors.
+ or the <function>unlink()</function> call fails; other cases throw errors.
</para>
<indexterm>
<primary>pg_logdir_ls</primary>
</indexterm>
<para>
- <function>pg_logdir_ls</> returns the start timestamps and path
+ <function>pg_logdir_ls</function> returns the start timestamps and path
names of all the log files in the <xref linkend="guc-log-directory">
directory. The <xref linkend="guc-log-filename"> parameter must have its
- default setting (<literal>postgresql-%Y-%m-%d_%H%M%S.log</>) to use this
+ default setting (<literal>postgresql-%Y-%m-%d_%H%M%S.log</literal>) to use this
function.
</para>
@@ -119,12 +119,12 @@
and should not be used in new applications; instead use those shown
in <xref linkend="functions-admin-signal-table">
and <xref linkend="functions-admin-genfile-table">. These functions are
- provided in <filename>adminpack</> only for compatibility with old
- versions of <application>pgAdmin</>.
+ provided in <filename>adminpack</filename> only for compatibility with old
+ versions of <application>pgAdmin</application>.
</para>
<table id="functions-adminpack-deprecated-table">
- <title>Deprecated <filename>adminpack</> Functions</title>
+ <title>Deprecated <filename>adminpack</filename> Functions</title>
<tgroup cols="3">
<thead>
<row><entry>Name</entry> <entry>Return Type</entry> <entry>Description</entry>
@@ -136,22 +136,22 @@
<entry><function>pg_catalog.pg_file_read(filename text, offset bigint, nbytes bigint)</function></entry>
<entry><type>text</type></entry>
<entry>
- Alternate name for <function>pg_read_file()</>
+ Alternate name for <function>pg_read_file()</function>
</entry>
</row>
<row>
<entry><function>pg_catalog.pg_file_length(filename text)</function></entry>
<entry><type>bigint</type></entry>
<entry>
- Same as <structfield>size</> column returned
- by <function>pg_stat_file()</>
+ Same as <structfield>size</structfield> column returned
+ by <function>pg_stat_file()</function>
</entry>
</row>
<row>
<entry><function>pg_catalog.pg_logfile_rotate()</function></entry>
<entry><type>integer</type></entry>
<entry>
- Alternate name for <function>pg_rotate_logfile()</>, but note that it
+ Alternate name for <function>pg_rotate_logfile()</function>, but note that it
returns integer 0 or 1 rather than <type>boolean</type>
</entry>
</row>
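For orientation while reading the adminpack hunks above, here is a minimal usage sketch of the functions whose descriptions are being retagged; the file names are invented, and the behavior is as described in the surrounding text:

<programlisting>
SELECT pg_catalog.pg_file_write('maintenance_note.txt', 'reindex at 02:00', false);
SELECT pg_catalog.pg_file_rename('maintenance_note.txt', 'maintenance_note.old');
SELECT pg_catalog.pg_file_unlink('maintenance_note.old');
SELECT * FROM pg_catalog.pg_logdir_ls();
</programlisting>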
diff --git a/doc/src/sgml/advanced.sgml b/doc/src/sgml/advanced.sgml
index f47c01987b..bf87df4dcb 100644
--- a/doc/src/sgml/advanced.sgml
+++ b/doc/src/sgml/advanced.sgml
@@ -145,7 +145,7 @@ DETAIL: Key (city)=(Berkeley) is not present in table "cities".
</indexterm>
<para>
- <firstterm>Transactions</> are a fundamental concept of all database
+ <firstterm>Transactions</firstterm> are a fundamental concept of all database
systems. The essential point of a transaction is that it bundles
multiple steps into a single, all-or-nothing operation. The intermediate
states between the steps are not visible to other concurrent transactions,
@@ -182,8 +182,8 @@ UPDATE branches SET balance = balance + 100.00
remain a happy customer if she was debited without Bob being credited.
We need a guarantee that if something goes wrong partway through the
operation, none of the steps executed so far will take effect. Grouping
- the updates into a <firstterm>transaction</> gives us this guarantee.
- A transaction is said to be <firstterm>atomic</>: from the point of
+ the updates into a <firstterm>transaction</firstterm> gives us this guarantee.
+ A transaction is said to be <firstterm>atomic</firstterm>: from the point of
view of other transactions, it either happens completely or not at all.
</para>
@@ -216,9 +216,9 @@ UPDATE branches SET balance = balance + 100.00
</para>
<para>
- In <productname>PostgreSQL</>, a transaction is set up by surrounding
+ In <productname>PostgreSQL</productname>, a transaction is set up by surrounding
the SQL commands of the transaction with
- <command>BEGIN</> and <command>COMMIT</> commands. So our banking
+ <command>BEGIN</command> and <command>COMMIT</command> commands. So our banking
transaction would actually look like:
<programlisting>
@@ -233,23 +233,23 @@ COMMIT;
<para>
If, partway through the transaction, we decide we do not want to
commit (perhaps we just noticed that Alice's balance went negative),
- we can issue the command <command>ROLLBACK</> instead of
- <command>COMMIT</>, and all our updates so far will be canceled.
+ we can issue the command <command>ROLLBACK</command> instead of
+ <command>COMMIT</command>, and all our updates so far will be canceled.
</para>
<para>
- <productname>PostgreSQL</> actually treats every SQL statement as being
- executed within a transaction. If you do not issue a <command>BEGIN</>
+ <productname>PostgreSQL</productname> actually treats every SQL statement as being
+ executed within a transaction. If you do not issue a <command>BEGIN</command>
command,
- then each individual statement has an implicit <command>BEGIN</> and
- (if successful) <command>COMMIT</> wrapped around it. A group of
- statements surrounded by <command>BEGIN</> and <command>COMMIT</>
- is sometimes called a <firstterm>transaction block</>.
+ then each individual statement has an implicit <command>BEGIN</command> and
+ (if successful) <command>COMMIT</command> wrapped around it. A group of
+ statements surrounded by <command>BEGIN</command> and <command>COMMIT</command>
+ is sometimes called a <firstterm>transaction block</firstterm>.
</para>
<note>
<para>
- Some client libraries issue <command>BEGIN</> and <command>COMMIT</>
+ Some client libraries issue <command>BEGIN</command> and <command>COMMIT</command>
commands automatically, so that you might get the effect of transaction
blocks without asking. Check the documentation for the interface
you are using.
@@ -258,11 +258,11 @@ COMMIT;
<para>
It's possible to control the statements in a transaction in a more
- granular fashion through the use of <firstterm>savepoints</>. Savepoints
+ granular fashion through the use of <firstterm>savepoints</firstterm>. Savepoints
allow you to selectively discard parts of the transaction, while
committing the rest. After defining a savepoint with
- <command>SAVEPOINT</>, you can if needed roll back to the savepoint
- with <command>ROLLBACK TO</>. All the transaction's database changes
+ <command>SAVEPOINT</command>, you can if needed roll back to the savepoint
+ with <command>ROLLBACK TO</command>. All the transaction's database changes
between defining the savepoint and rolling back to it are discarded, but
changes earlier than the savepoint are kept.
</para>
@@ -308,7 +308,7 @@ COMMIT;
<para>
This example is, of course, oversimplified, but there's a lot of control
possible in a transaction block through the use of savepoints.
- Moreover, <command>ROLLBACK TO</> is the only way to regain control of a
+ Moreover, <command>ROLLBACK TO</command> is the only way to regain control of a
transaction block that was put in aborted state by the
system due to an error, short of rolling it back completely and starting
again.
@@ -325,7 +325,7 @@ COMMIT;
</indexterm>
<para>
- A <firstterm>window function</> performs a calculation across a set of
+ A <firstterm>window function</firstterm> performs a calculation across a set of
table rows that are somehow related to the current row. This is comparable
to the type of calculation that can be done with an aggregate function.
However, window functions do not cause rows to become grouped into a single
@@ -360,31 +360,31 @@ SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM emps
</screen>
The first three output columns come directly from the table
- <structname>empsalary</>, and there is one output row for each row in the
+ <structname>empsalary</structname>, and there is one output row for each row in the
table. The fourth column represents an average taken across all the table
- rows that have the same <structfield>depname</> value as the current row.
- (This actually is the same function as the non-window <function>avg</>
- aggregate, but the <literal>OVER</> clause causes it to be
+ rows that have the same <structfield>depname</structfield> value as the current row.
+ (This actually is the same function as the non-window <function>avg</function>
+ aggregate, but the <literal>OVER</literal> clause causes it to be
treated as a window function and computed across the window frame.)
</para>
<para>
- A window function call always contains an <literal>OVER</> clause
+ A window function call always contains an <literal>OVER</literal> clause
directly following the window function's name and argument(s). This is what
syntactically distinguishes it from a normal function or non-window
- aggregate. The <literal>OVER</> clause determines exactly how the
+ aggregate. The <literal>OVER</literal> clause determines exactly how the
rows of the query are split up for processing by the window function.
- The <literal>PARTITION BY</> clause within <literal>OVER</>
+ The <literal>PARTITION BY</literal> clause within <literal>OVER</literal>
divides the rows into groups, or partitions, that share the same
- values of the <literal>PARTITION BY</> expression(s). For each row,
+ values of the <literal>PARTITION BY</literal> expression(s). For each row,
the window function is computed across the rows that fall into the
same partition as the current row.
</para>
<para>
You can also control the order in which rows are processed by
- window functions using <literal>ORDER BY</> within <literal>OVER</>.
- (The window <literal>ORDER BY</> does not even have to match the
+ window functions using <literal>ORDER BY</literal> within <literal>OVER</literal>.
+ (The window <literal>ORDER BY</literal> does not even have to match the
order in which the rows are output.) Here is an example:
<programlisting>
@@ -409,39 +409,39 @@ FROM empsalary;
(10 rows)
</screen>
- As shown here, the <function>rank</> function produces a numerical rank
- for each distinct <literal>ORDER BY</> value in the current row's
- partition, using the order defined by the <literal>ORDER BY</> clause.
- <function>rank</> needs no explicit parameter, because its behavior
- is entirely determined by the <literal>OVER</> clause.
+ As shown here, the <function>rank</function> function produces a numerical rank
+ for each distinct <literal>ORDER BY</literal> value in the current row's
+ partition, using the order defined by the <literal>ORDER BY</literal> clause.
+ <function>rank</function> needs no explicit parameter, because its behavior
+ is entirely determined by the <literal>OVER</literal> clause.
</para>
<para>
The rows considered by a window function are those of the <quote>virtual
- table</> produced by the query's <literal>FROM</> clause as filtered by its
- <literal>WHERE</>, <literal>GROUP BY</>, and <literal>HAVING</> clauses
+ table</quote> produced by the query's <literal>FROM</literal> clause as filtered by its
+ <literal>WHERE</literal>, <literal>GROUP BY</literal>, and <literal>HAVING</literal> clauses
if any. For example, a row removed because it does not meet the
- <literal>WHERE</> condition is not seen by any window function.
+ <literal>WHERE</literal> condition is not seen by any window function.
A query can contain multiple window functions that slice up the data
- in different ways using different <literal>OVER</> clauses, but
+ in different ways using different <literal>OVER</literal> clauses, but
they all act on the same collection of rows defined by this virtual table.
</para>
<para>
- We already saw that <literal>ORDER BY</> can be omitted if the ordering
+ We already saw that <literal>ORDER BY</literal> can be omitted if the ordering
of rows is not important. It is also possible to omit <literal>PARTITION
- BY</>, in which case there is a single partition containing all rows.
+ BY</literal>, in which case there is a single partition containing all rows.
</para>
<para>
There is another important concept associated with window functions:
for each row, there is a set of rows within its partition called its
- <firstterm>window frame</>. Some window functions act only
+ <firstterm>window frame</firstterm>. Some window functions act only
on the rows of the window frame, rather than of the whole partition.
- By default, if <literal>ORDER BY</> is supplied then the frame consists of
+ By default, if <literal>ORDER BY</literal> is supplied then the frame consists of
all rows from the start of the partition up through the current row, plus
any following rows that are equal to the current row according to the
- <literal>ORDER BY</> clause. When <literal>ORDER BY</> is omitted the
+ <literal>ORDER BY</literal> clause. When <literal>ORDER BY</literal> is omitted the
default frame consists of all rows in the partition.
<footnote>
<para>
@@ -450,7 +450,7 @@ FROM empsalary;
<xref linkend="syntax-window-functions"> for details.
</para>
</footnote>
- Here is an example using <function>sum</>:
+ Here is an example using <function>sum</function>:
</para>
<programlisting>
@@ -474,11 +474,11 @@ SELECT salary, sum(salary) OVER () FROM empsalary;
</screen>
<para>
- Above, since there is no <literal>ORDER BY</> in the <literal>OVER</>
+ Above, since there is no <literal>ORDER BY</literal> in the <literal>OVER</literal>
clause, the window frame is the same as the partition, which for lack of
- <literal>PARTITION BY</> is the whole table; in other words each sum is
+ <literal>PARTITION BY</literal> is the whole table; in other words each sum is
taken over the whole table and so we get the same result for each output
- row. But if we add an <literal>ORDER BY</> clause, we get very different
+ row. But if we add an <literal>ORDER BY</literal> clause, we get very different
results:
</para>
@@ -510,8 +510,8 @@ SELECT salary, sum(salary) OVER (ORDER BY salary) FROM empsalary;
<para>
Window functions are permitted only in the <literal>SELECT</literal> list
- and the <literal>ORDER BY</> clause of the query. They are forbidden
- elsewhere, such as in <literal>GROUP BY</>, <literal>HAVING</>
+ and the <literal>ORDER BY</literal> clause of the query. They are forbidden
+ elsewhere, such as in <literal>GROUP BY</literal>, <literal>HAVING</literal>
and <literal>WHERE</literal> clauses. This is because they logically
execute after the processing of those clauses. Also, window functions
execute after non-window aggregate functions. This means it is valid to
@@ -534,15 +534,15 @@ WHERE pos &lt; 3;
</programlisting>
The above query only shows the rows from the inner query having
- <literal>rank</> less than 3.
+ <literal>rank</literal> less than 3.
</para>
<para>
When a query involves multiple window functions, it is possible to write
- out each one with a separate <literal>OVER</> clause, but this is
+ out each one with a separate <literal>OVER</literal> clause, but this is
duplicative and error-prone if the same windowing behavior is wanted
for several functions. Instead, each windowing behavior can be named
- in a <literal>WINDOW</> clause and then referenced in <literal>OVER</>.
+ in a <literal>WINDOW</literal> clause and then referenced in <literal>OVER</literal>.
For example:
<programlisting>
@@ -623,13 +623,13 @@ CREATE TABLE capitals (
<para>
In this case, a row of <classname>capitals</classname>
- <firstterm>inherits</firstterm> all columns (<structfield>name</>,
- <structfield>population</>, and <structfield>altitude</>) from its
+ <firstterm>inherits</firstterm> all columns (<structfield>name</structfield>,
+ <structfield>population</structfield>, and <structfield>altitude</structfield>) from its
<firstterm>parent</firstterm>, <classname>cities</classname>. The
type of the column <structfield>name</structfield> is
<type>text</type>, a native <productname>PostgreSQL</productname>
type for variable length character strings. State capitals have
- an extra column, <structfield>state</>, that shows their state. In
+ an extra column, <structfield>state</structfield>, that shows their state. In
<productname>PostgreSQL</productname>, a table can inherit from
zero or more other tables.
</para>
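The savepoint discussion retagged above refers to an example that lies outside the hunks shown here; a minimal sketch of the pattern it describes, with illustrative table and column names:

<programlisting>
BEGIN;
UPDATE accounts SET balance = balance - 100.00
    WHERE name = 'Alice';
SAVEPOINT my_savepoint;
UPDATE accounts SET balance = balance + 100.00
    WHERE name = 'Bob';
-- oops ... should credit Wally's account, not Bob's
ROLLBACK TO my_savepoint;
UPDATE accounts SET balance = balance + 100.00
    WHERE name = 'Wally';
COMMIT;
</programlisting>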
diff --git a/doc/src/sgml/amcheck.sgml b/doc/src/sgml/amcheck.sgml
index dd71dbd679..0dd68f0ba1 100644
--- a/doc/src/sgml/amcheck.sgml
+++ b/doc/src/sgml/amcheck.sgml
@@ -8,19 +8,19 @@
</indexterm>
<para>
- The <filename>amcheck</> module provides functions that allow you to
+ The <filename>amcheck</filename> module provides functions that allow you to
verify the logical consistency of the structure of indexes. If the
structure appears to be valid, no error is raised.
</para>
<para>
- The functions verify various <emphasis>invariants</> in the
+ The functions verify various <emphasis>invariants</emphasis> in the
structure of the representation of particular indexes. The
correctness of the access method functions behind index scans and
other important operations relies on these invariants always
holding. For example, certain functions verify, among other things,
- that all B-Tree pages have items in <quote>logical</> order (e.g.,
- for B-Tree indexes on <type>text</>, index tuples should be in
+ that all B-Tree pages have items in <quote>logical</quote> order (e.g.,
+ for B-Tree indexes on <type>text</type>, index tuples should be in
collated lexical order). If that particular invariant somehow fails
to hold, we can expect binary searches on the affected page to
incorrectly guide index scans, resulting in wrong answers to SQL
@@ -35,7 +35,7 @@
functions.
</para>
<para>
- <filename>amcheck</> functions may be used only by superusers.
+ <filename>amcheck</filename> functions may be used only by superusers.
</para>
<sect2>
@@ -82,7 +82,7 @@ ORDER BY c.relpages DESC LIMIT 10;
(10 rows)
</screen>
This example shows a session that performs verification of every
- catalog index in the database <quote>test</>. Details of just
+ catalog index in the database <quote>test</quote>. Details of just
the 10 largest indexes verified are displayed. Since no error
is raised, all indexes tested appear to be logically consistent.
Naturally, this query could easily be changed to call
@@ -90,10 +90,10 @@ ORDER BY c.relpages DESC LIMIT 10;
database where verification is supported.
</para>
<para>
- <function>bt_index_check</function> acquires an <literal>AccessShareLock</>
+ <function>bt_index_check</function> acquires an <literal>AccessShareLock</literal>
on the target index and the heap relation it belongs to. This lock mode
is the same lock mode acquired on relations by simple
- <literal>SELECT</> statements.
+ <literal>SELECT</literal> statements.
<function>bt_index_check</function> does not verify invariants
that span child/parent relationships, nor does it verify that
the target index is consistent with its heap relation. When a
@@ -132,13 +132,13 @@ ORDER BY c.relpages DESC LIMIT 10;
logical inconsistency or other problem.
</para>
<para>
- A <literal>ShareLock</> is required on the target index by
+ A <literal>ShareLock</literal> is required on the target index by
<function>bt_index_parent_check</function> (a
- <literal>ShareLock</> is also acquired on the heap relation).
+ <literal>ShareLock</literal> is also acquired on the heap relation).
These locks prevent concurrent data modification from
- <command>INSERT</>, <command>UPDATE</>, and <command>DELETE</>
+ <command>INSERT</command>, <command>UPDATE</command>, and <command>DELETE</command>
commands. The locks also prevent the underlying relation from
- being concurrently processed by <command>VACUUM</>, as well as
+ being concurrently processed by <command>VACUUM</command>, as well as
all other utility commands. Note that the function holds locks
only while running, not for the entire transaction.
</para>
@@ -159,13 +159,13 @@ ORDER BY c.relpages DESC LIMIT 10;
</sect2>
<sect2>
- <title>Using <filename>amcheck</> effectively</title>
+ <title>Using <filename>amcheck</filename> effectively</title>
<para>
- <filename>amcheck</> can be effective at detecting various types of
+ <filename>amcheck</filename> can be effective at detecting various types of
failure modes that <link
linkend="app-initdb-data-checksums"><application>data page
- checksums</></link> will always fail to catch. These include:
+ checksums</application></link> will always fail to catch. These include:
<itemizedlist>
<listitem>
@@ -176,13 +176,13 @@ ORDER BY c.relpages DESC LIMIT 10;
<para>
This includes issues caused by the comparison rules of operating
system collations changing. Comparisons of datums of a collatable
- type like <type>text</> must be immutable (just as all
+ type like <type>text</type> must be immutable (just as all
comparisons used for B-Tree index scans must be immutable), which
implies that operating system collation rules must never change.
Though rare, updates to operating system collation rules can
cause these issues. More commonly, an inconsistency in the
collation order between a master server and a standby server is
- implicated, possibly because the <emphasis>major</> operating
+ implicated, possibly because the <emphasis>major</emphasis> operating
system version in use is inconsistent. Such inconsistencies will
generally only arise on standby servers, and so can generally
only be detected on standby servers.
@@ -190,25 +190,25 @@ ORDER BY c.relpages DESC LIMIT 10;
<para>
If a problem like this arises, it may not affect each individual
index that is ordered using an affected collation, simply because
- <emphasis>indexed</> values might happen to have the same
+ <emphasis>indexed</emphasis> values might happen to have the same
absolute ordering regardless of the behavioral inconsistency. See
<xref linkend="locale"> and <xref linkend="collation"> for
- further details about how <productname>PostgreSQL</> uses
+ further details about how <productname>PostgreSQL</productname> uses
operating system locales and collations.
</para>
</listitem>
<listitem>
<para>
Corruption caused by hypothetical undiscovered bugs in the
- underlying <productname>PostgreSQL</> access method code or sort
+ underlying <productname>PostgreSQL</productname> access method code or sort
code.
</para>
<para>
Automatic verification of the structural integrity of indexes
plays a role in the general testing of new or proposed
- <productname>PostgreSQL</> features that could plausibly allow a
+ <productname>PostgreSQL</productname> features that could plausibly allow a
logical inconsistency to be introduced. One obvious testing
- strategy is to call <filename>amcheck</> functions continuously
+ strategy is to call <filename>amcheck</filename> functions continuously
when running the standard regression tests. See <xref
linkend="regress-run"> for details on running the tests.
</para>
@@ -219,12 +219,12 @@ ORDER BY c.relpages DESC LIMIT 10;
simply not be enabled.
</para>
<para>
- Note that <filename>amcheck</> examines a page as represented in some
+ Note that <filename>amcheck</filename> examines a page as represented in some
shared memory buffer at the time of verification if there is only a
shared buffer hit when accessing the block. Consequently,
- <filename>amcheck</> does not necessarily examine data read from the
+ <filename>amcheck</filename> does not necessarily examine data read from the
file system at the time of verification. Note that when checksums are
- enabled, <filename>amcheck</> may raise an error due to a checksum
+ enabled, <filename>amcheck</filename> may raise an error due to a checksum
failure when a corrupt block is read into a buffer.
</para>
</listitem>
@@ -234,7 +234,7 @@ ORDER BY c.relpages DESC LIMIT 10;
and operating system.
</para>
<para>
- <productname>PostgreSQL</> does not protect against correctable
+ <productname>PostgreSQL</productname> does not protect against correctable
memory errors and it is assumed you will operate using RAM that
uses industry standard Error Correcting Codes (ECC) or better
protection. However, ECC memory is typically only immune to
@@ -244,7 +244,7 @@ ORDER BY c.relpages DESC LIMIT 10;
</para>
</listitem>
</itemizedlist>
- In general, <filename>amcheck</> can only prove the presence of
+ In general, <filename>amcheck</filename> can only prove the presence of
corruption; it cannot prove its absence.
</para>
@@ -252,19 +252,19 @@ ORDER BY c.relpages DESC LIMIT 10;
<sect2>
<title>Repairing corruption</title>
<para>
- No error concerning corruption raised by <filename>amcheck</> should
- ever be a false positive. In practice, <filename>amcheck</> is more
+ No error concerning corruption raised by <filename>amcheck</filename> should
+ ever be a false positive. In practice, <filename>amcheck</filename> is more
likely to find software bugs than problems with hardware.
- <filename>amcheck</> raises errors in the event of conditions that,
+ <filename>amcheck</filename> raises errors in the event of conditions that,
by definition, should never happen, and so careful analysis of
- <filename>amcheck</> errors is often required.
+ <filename>amcheck</filename> errors is often required.
</para>
<para>
There is no general method of repairing problems that
- <filename>amcheck</> detects. An explanation for the root cause of
+ <filename>amcheck</filename> detects. An explanation for the root cause of
an invariant violation should be sought. <xref
linkend="pageinspect"> may play a useful role in diagnosing
- corruption that <filename>amcheck</> detects. A <command>REINDEX</>
+ corruption that <filename>amcheck</filename> detects. A <command>REINDEX</command>
may not be effective in repairing corruption.
</para>
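As a quick reference for the verification functions discussed in the amcheck hunks above, a minimal invocation sketch (the catalog index name is just an example target):

<programlisting>
CREATE EXTENSION amcheck;

-- takes only an AccessShareLock; checks a single index structure
SELECT bt_index_check('pg_catalog.pg_class_oid_index'::regclass);

-- takes a ShareLock; additionally verifies parent/child invariants
SELECT bt_index_parent_check('pg_catalog.pg_class_oid_index'::regclass);
</programlisting>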
diff --git a/doc/src/sgml/arch-dev.sgml b/doc/src/sgml/arch-dev.sgml
index c835e87215..5423aadb9c 100644
--- a/doc/src/sgml/arch-dev.sgml
+++ b/doc/src/sgml/arch-dev.sgml
@@ -118,7 +118,7 @@
<para>
<productname>PostgreSQL</productname> is implemented using a
- simple <quote>process per user</> client/server model. In this model
+ simple <quote>process per user</quote> client/server model. In this model
there is one <firstterm>client process</firstterm> connected to
exactly one <firstterm>server process</firstterm>. As we do not
know ahead of time how many connections will be made, we have to
@@ -137,9 +137,9 @@
The client process can be any program that understands the
<productname>PostgreSQL</productname> protocol described in
<xref linkend="protocol">. Many clients are based on the
- C-language library <application>libpq</>, but several independent
+ C-language library <application>libpq</application>, but several independent
implementations of the protocol exist, such as the Java
- <application>JDBC</> driver.
+ <application>JDBC</application> driver.
</para>
<para>
@@ -184,8 +184,8 @@
text) for valid syntax. If the syntax is correct a
<firstterm>parse tree</firstterm> is built up and handed back;
otherwise an error is returned. The parser and lexer are
- implemented using the well-known Unix tools <application>bison</>
- and <application>flex</>.
+ implemented using the well-known Unix tools <application>bison</application>
+ and <application>flex</application>.
</para>
<para>
@@ -251,7 +251,7 @@
back by the parser as input and does the semantic interpretation needed
to understand which tables, functions, and operators are referenced by
the query. The data structure that is built to represent this
- information is called the <firstterm>query tree</>.
+ information is called the <firstterm>query tree</firstterm>.
</para>
<para>
@@ -259,10 +259,10 @@
system catalog lookups can only be done within a transaction, and we
do not wish to start a transaction immediately upon receiving a query
string. The raw parsing stage is sufficient to identify the transaction
- control commands (<command>BEGIN</>, <command>ROLLBACK</>, etc), and
+ control commands (<command>BEGIN</command>, <command>ROLLBACK</command>, etc), and
these can then be correctly executed without any further analysis.
Once we know that we are dealing with an actual query (such as
- <command>SELECT</> or <command>UPDATE</>), it is okay to
+ <command>SELECT</command> or <command>UPDATE</command>), it is okay to
start a transaction if we're not already in one. Only then can the
transformation process be invoked.
</para>
@@ -270,10 +270,10 @@
<para>
The query tree created by the transformation process is structurally
similar to the raw parse tree in most places, but it has many differences
- in detail. For example, a <structname>FuncCall</> node in the
+ in detail. For example, a <structname>FuncCall</structname> node in the
parse tree represents something that looks syntactically like a function
- call. This might be transformed to either a <structname>FuncExpr</>
- or <structname>Aggref</> node depending on whether the referenced
+ call. This might be transformed to either a <structname>FuncExpr</structname>
+ or <structname>Aggref</structname> node depending on whether the referenced
name turns out to be an ordinary function or an aggregate function.
Also, information about the actual data types of columns and expression
results is added to the query tree.
@@ -354,10 +354,10 @@
<para>
The planner's search procedure actually works with data structures
- called <firstterm>paths</>, which are simply cut-down representations of
+ called <firstterm>paths</firstterm>, which are simply cut-down representations of
plans containing only as much information as the planner needs to make
its decisions. After the cheapest path is determined, a full-fledged
- <firstterm>plan tree</> is built to pass to the executor. This represents
+ <firstterm>plan tree</firstterm> is built to pass to the executor. This represents
the desired execution plan in sufficient detail for the executor to run it.
In the rest of this section we'll ignore the distinction between paths
and plans.
@@ -378,12 +378,12 @@
<literal>relation.attribute OPR constant</literal>. If
<literal>relation.attribute</literal> happens to match the key of the B-tree
index and <literal>OPR</literal> is one of the operators listed in
- the index's <firstterm>operator class</>, another plan is created using
+ the index's <firstterm>operator class</firstterm>, another plan is created using
the B-tree index to scan the relation. If there are further indexes
present and the restrictions in the query happen to match a key of an
index, further plans will be considered. Index scan plans are also
generated for indexes that have a sort ordering that can match the
- query's <literal>ORDER BY</> clause (if any), or a sort ordering that
+ query's <literal>ORDER BY</literal> clause (if any), or a sort ordering that
might be useful for merge joining (see below).
</para>
@@ -462,9 +462,9 @@
the base relations, plus nested-loop, merge, or hash join nodes as
needed, plus any auxiliary steps needed, such as sort nodes or
aggregate-function calculation nodes. Most of these plan node
- types have the additional ability to do <firstterm>selection</>
+ types have the additional ability to do <firstterm>selection</firstterm>
(discarding rows that do not meet a specified Boolean condition)
- and <firstterm>projection</> (computation of a derived column set
+ and <firstterm>projection</firstterm> (computation of a derived column set
based on given column values, that is, evaluation of scalar
expressions where needed). One of the responsibilities of the
planner is to attach selection conditions from the
@@ -496,7 +496,7 @@
subplan) is, let's say, a
<literal>Sort</literal> node and again recursion is needed to obtain
an input row. The child node of the <literal>Sort</literal> might
- be a <literal>SeqScan</> node, representing actual reading of a table.
+ be a <literal>SeqScan</literal> node, representing actual reading of a table.
Execution of this node causes the executor to fetch a row from the
table and return it up to the calling node. The <literal>Sort</literal>
node will repeatedly call its child to obtain all the rows to be sorted.
@@ -529,24 +529,24 @@
<para>
The executor mechanism is used to evaluate all four basic SQL query types:
- <command>SELECT</>, <command>INSERT</>, <command>UPDATE</>, and
- <command>DELETE</>. For <command>SELECT</>, the top-level executor
+ <command>SELECT</command>, <command>INSERT</command>, <command>UPDATE</command>, and
+ <command>DELETE</command>. For <command>SELECT</command>, the top-level executor
code only needs to send each row returned by the query plan tree off
- to the client. For <command>INSERT</>, each returned row is inserted
- into the target table specified for the <command>INSERT</>. This is
- done in a special top-level plan node called <literal>ModifyTable</>.
+ to the client. For <command>INSERT</command>, each returned row is inserted
+ into the target table specified for the <command>INSERT</command>. This is
+ done in a special top-level plan node called <literal>ModifyTable</literal>.
(A simple
- <command>INSERT ... VALUES</> command creates a trivial plan tree
- consisting of a single <literal>Result</> node, which computes just one
- result row, and <literal>ModifyTable</> above it to perform the insertion.
- But <command>INSERT ... SELECT</> can demand the full power
- of the executor mechanism.) For <command>UPDATE</>, the planner arranges
+ <command>INSERT ... VALUES</command> command creates a trivial plan tree
+ consisting of a single <literal>Result</literal> node, which computes just one
+ result row, and <literal>ModifyTable</literal> above it to perform the insertion.
+ But <command>INSERT ... SELECT</command> can demand the full power
+ of the executor mechanism.) For <command>UPDATE</command>, the planner arranges
that each computed row includes all the updated column values, plus
- the <firstterm>TID</> (tuple ID, or row ID) of the original target row;
- this data is fed into a <literal>ModifyTable</> node, which uses the
+ the <firstterm>TID</firstterm> (tuple ID, or row ID) of the original target row;
+ this data is fed into a <literal>ModifyTable</literal> node, which uses the
information to create a new updated row and mark the old row deleted.
- For <command>DELETE</>, the only column that is actually returned by the
- plan is the TID, and the <literal>ModifyTable</> node simply uses the TID
+ For <command>DELETE</command>, the only column that is actually returned by the
+ plan is the TID, and the <literal>ModifyTable</literal> node simply uses the TID
to visit each target row and mark it deleted.
</para>
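To make the plan-tree discussion in the arch-dev hunks above concrete, here is a minimal sketch of how the Sort-over-SeqScan shape mentioned there can be observed; the table foo and column bar are hypothetical, and plan cost annotations are omitted from the output:

<programlisting>
EXPLAIN SELECT * FROM foo ORDER BY bar;
</programlisting>
<screen>
 Sort
   Sort Key: bar
   ->  Seq Scan on foo
</screen>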
diff --git a/doc/src/sgml/array.sgml b/doc/src/sgml/array.sgml
index 88eb4be04d..9187f6e02e 100644
--- a/doc/src/sgml/array.sgml
+++ b/doc/src/sgml/array.sgml
@@ -32,7 +32,7 @@ CREATE TABLE sal_emp (
);
</programlisting>
As shown, an array data type is named by appending square brackets
- (<literal>[]</>) to the data type name of the array elements. The
+ (<literal>[]</literal>) to the data type name of the array elements. The
above command will create a table named
<structname>sal_emp</structname> with a column of type
<type>text</type> (<structfield>name</structfield>), a
@@ -69,7 +69,7 @@ CREATE TABLE tictactoe (
<para>
An alternative syntax, which conforms to the SQL standard by using
- the keyword <literal>ARRAY</>, can be used for one-dimensional arrays.
+ the keyword <literal>ARRAY</literal>, can be used for one-dimensional arrays.
<structfield>pay_by_quarter</structfield> could have been defined
as:
<programlisting>
@@ -79,7 +79,7 @@ CREATE TABLE tictactoe (
<programlisting>
pay_by_quarter integer ARRAY,
</programlisting>
- As before, however, <productname>PostgreSQL</> does not enforce the
+ As before, however, <productname>PostgreSQL</productname> does not enforce the
size restriction in any case.
</para>
</sect2>
@@ -107,8 +107,8 @@ CREATE TABLE tictactoe (
for the type, as recorded in its <literal>pg_type</literal> entry.
Among the standard data types provided in the
<productname>PostgreSQL</productname> distribution, all use a comma
- (<literal>,</>), except for type <type>box</> which uses a semicolon
- (<literal>;</>). Each <replaceable>val</replaceable> is
+ (<literal>,</literal>), except for type <type>box</type> which uses a semicolon
+ (<literal>;</literal>). Each <replaceable>val</replaceable> is
either a constant of the array element type, or a subarray. An example
of an array constant is:
<programlisting>
@@ -119,10 +119,10 @@ CREATE TABLE tictactoe (
</para>
<para>
- To set an element of an array constant to NULL, write <literal>NULL</>
+ To set an element of an array constant to NULL, write <literal>NULL</literal>
for the element value. (Any upper- or lower-case variant of
- <literal>NULL</> will do.) If you want an actual string value
- <quote>NULL</>, you must put double quotes around it.
+ <literal>NULL</literal> will do.) If you want an actual string value
+ <quote>NULL</quote>, you must put double quotes around it.
</para>
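   A minimal illustration of the distinction, assuming the default
   <literal>array_nulls = on</literal>:

    SELECT ('{a,NULL,c}'::text[])[2] IS NULL;   -- true: bare NULL is a null element
    SELECT ('{a,"NULL",c}'::text[])[2];         -- the four-character string NULL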
<para>
@@ -176,7 +176,7 @@ ERROR: multidimensional arrays must have array expressions with matching dimens
</para>
<para>
- The <literal>ARRAY</> constructor syntax can also be used:
+ The <literal>ARRAY</literal> constructor syntax can also be used:
<programlisting>
INSERT INTO sal_emp
VALUES ('Bill',
@@ -190,7 +190,7 @@ INSERT INTO sal_emp
</programlisting>
Notice that the array elements are ordinary SQL constants or
expressions; for instance, string literals are single quoted, instead of
- double quoted as they would be in an array literal. The <literal>ARRAY</>
+ double quoted as they would be in an array literal. The <literal>ARRAY</literal>
constructor syntax is discussed in more detail in
<xref linkend="sql-syntax-array-constructors">.
</para>
@@ -222,8 +222,8 @@ SELECT name FROM sal_emp WHERE pay_by_quarter[1] &lt;&gt; pay_by_quarter[2];
The array subscript numbers are written within square brackets.
By default <productname>PostgreSQL</productname> uses a
one-based numbering convention for arrays, that is,
- an array of <replaceable>n</> elements starts with <literal>array[1]</literal> and
- ends with <literal>array[<replaceable>n</>]</literal>.
+ an array of <replaceable>n</replaceable> elements starts with <literal>array[1]</literal> and
+ ends with <literal>array[<replaceable>n</replaceable>]</literal>.
</para>
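   For example (values chosen arbitrarily):

    SELECT (ARRAY['a','b','c'])[1] AS first,
           (ARRAY['a','b','c'])[3] AS last;
    -- first | last
    -- a     | c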
<para>
@@ -259,8 +259,8 @@ SELECT schedule[1:2][1:1] FROM sal_emp WHERE name = 'Bill';
If any dimension is written as a slice, i.e., contains a colon, then all
dimensions are treated as slices. Any dimension that has only a single
number (no colon) is treated as being from 1
- to the number specified. For example, <literal>[2]</> is treated as
- <literal>[1:2]</>, as in this example:
+ to the number specified. For example, <literal>[2]</literal> is treated as
+ <literal>[1:2]</literal>, as in this example:
<programlisting>
SELECT schedule[1:2][2] FROM sal_emp WHERE name = 'Bill';
@@ -272,7 +272,7 @@ SELECT schedule[1:2][2] FROM sal_emp WHERE name = 'Bill';
</programlisting>
To avoid confusion with the non-slice case, it's best to use slice syntax
- for all dimensions, e.g., <literal>[1:2][1:1]</>, not <literal>[2][1:1]</>.
+ for all dimensions, e.g., <literal>[1:2][1:1]</literal>, not <literal>[2][1:1]</literal>.
</para>
<para>
@@ -302,9 +302,9 @@ SELECT schedule[:][1:1] FROM sal_emp WHERE name = 'Bill';
An array subscript expression will return null if either the array itself or
any of the subscript expressions are null. Also, null is returned if a
subscript is outside the array bounds (this case does not raise an error).
- For example, if <literal>schedule</>
- currently has the dimensions <literal>[1:3][1:2]</> then referencing
- <literal>schedule[3][3]</> yields NULL. Similarly, an array reference
+ For example, if <literal>schedule</literal>
+ currently has the dimensions <literal>[1:3][1:2]</literal> then referencing
+ <literal>schedule[3][3]</literal> yields NULL. Similarly, an array reference
with the wrong number of subscripts yields a null rather than an error.
</para>
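   Both behaviors can be seen with throwaway expressions, for example:

    SELECT (ARRAY[1,2,3])[4] IS NULL;           -- true: subscript past the upper bound
    SELECT (ARRAY[[1,2],[3,4]])[1] IS NULL;     -- true: too few subscripts for a 2-D array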
@@ -423,16 +423,16 @@ UPDATE sal_emp SET pay_by_quarter[1:2] = '{27000,27000}'
A stored array value can be enlarged by assigning to elements not already
present. Any positions between those previously present and the newly
assigned elements will be filled with nulls. For example, if array
- <literal>myarray</> currently has 4 elements, it will have six
- elements after an update that assigns to <literal>myarray[6]</>;
- <literal>myarray[5]</> will contain null.
+ <literal>myarray</literal> currently has 4 elements, it will have six
+ elements after an update that assigns to <literal>myarray[6]</literal>;
+ <literal>myarray[5]</literal> will contain null.
Currently, enlargement in this fashion is only allowed for one-dimensional
arrays, not multidimensional arrays.
</para>
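   A minimal sketch of such an enlargement (table name hypothetical):

    CREATE TEMP TABLE arr_demo (a int[]);
    INSERT INTO arr_demo VALUES ('{1,2,3,4}');
    UPDATE arr_demo SET a[6] = 60;
    SELECT a FROM arr_demo;                     -- {1,2,3,4,NULL,60}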
<para>
Subscripted assignment allows creation of arrays that do not use one-based
- subscripts. For example one might assign to <literal>myarray[-2:7]</> to
+ subscripts. For example one might assign to <literal>myarray[-2:7]</literal> to
create an array with subscript values from -2 to 7.
</para>
@@ -457,8 +457,8 @@ SELECT ARRAY[5,6] || ARRAY[[1,2],[3,4]];
<para>
The concatenation operator allows a single element to be pushed onto the
beginning or end of a one-dimensional array. It also accepts two
- <replaceable>N</>-dimensional arrays, or an <replaceable>N</>-dimensional
- and an <replaceable>N+1</>-dimensional array.
+ <replaceable>N</replaceable>-dimensional arrays, or an <replaceable>N</replaceable>-dimensional
+ and an <replaceable>N+1</replaceable>-dimensional array.
</para>
<para>
@@ -501,10 +501,10 @@ SELECT array_dims(ARRAY[[1,2],[3,4]] || ARRAY[[5,6],[7,8],[9,0]]);
</para>
<para>
- When an <replaceable>N</>-dimensional array is pushed onto the beginning
- or end of an <replaceable>N+1</>-dimensional array, the result is
- analogous to the element-array case above. Each <replaceable>N</>-dimensional
- sub-array is essentially an element of the <replaceable>N+1</>-dimensional
+ When an <replaceable>N</replaceable>-dimensional array is pushed onto the beginning
+ or end of an <replaceable>N+1</replaceable>-dimensional array, the result is
+ analogous to the element-array case above. Each <replaceable>N</replaceable>-dimensional
+ sub-array is essentially an element of the <replaceable>N+1</replaceable>-dimensional
array's outer dimension. For example:
<programlisting>
SELECT array_dims(ARRAY[1,2] || ARRAY[[3,4],[5,6]]);
@@ -587,9 +587,9 @@ SELECT array_append(ARRAY[1, 2], NULL); -- this might have been meant
The heuristic it uses to resolve the constant's type is to assume it's of
the same type as the operator's other input &mdash; in this case,
integer array. So the concatenation operator is presumed to
- represent <function>array_cat</>, not <function>array_append</>. When
+ represent <function>array_cat</function>, not <function>array_append</function>. When
that's the wrong choice, it could be fixed by casting the constant to the
- array's element type; but explicit use of <function>array_append</> might
+ array's element type; but explicit use of <function>array_append</function> might
be a preferable solution.
</para>
</sect2>
@@ -633,7 +633,7 @@ SELECT * FROM sal_emp WHERE 10000 = ALL (pay_by_quarter);
</para>
<para>
- Alternatively, the <function>generate_subscripts</> function can be used.
+ Alternatively, the <function>generate_subscripts</function> function can be used.
For example:
<programlisting>
@@ -648,7 +648,7 @@ SELECT * FROM
</para>
<para>
- You can also search an array using the <literal>&amp;&amp;</> operator,
+ You can also search an array using the <literal>&amp;&amp;</literal> operator,
which checks whether the left operand overlaps with the right operand.
For instance:
@@ -662,8 +662,8 @@ SELECT * FROM sal_emp WHERE pay_by_quarter &amp;&amp; ARRAY[10000];
</para>
<para>
- You can also search for specific values in an array using the <function>array_position</>
- and <function>array_positions</> functions. The former returns the subscript of
+ You can also search for specific values in an array using the <function>array_position</function>
+ and <function>array_positions</function> functions. The former returns the subscript of
the first occurrence of a value in an array; the latter returns an array with the
subscripts of all occurrences of the value in the array. For example:
@@ -703,13 +703,13 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1);
The external text representation of an array value consists of items that
are interpreted according to the I/O conversion rules for the array's
element type, plus decoration that indicates the array structure.
- The decoration consists of curly braces (<literal>{</> and <literal>}</>)
+ The decoration consists of curly braces (<literal>{</literal> and <literal>}</literal>)
around the array value plus delimiter characters between adjacent items.
- The delimiter character is usually a comma (<literal>,</>) but can be
- something else: it is determined by the <literal>typdelim</> setting
+ The delimiter character is usually a comma (<literal>,</literal>) but can be
+ something else: it is determined by the <literal>typdelim</literal> setting
for the array's element type. Among the standard data types provided
in the <productname>PostgreSQL</productname> distribution, all use a comma,
- except for type <type>box</>, which uses a semicolon (<literal>;</>).
+ except for type <type>box</type>, which uses a semicolon (<literal>;</literal>).
In a multidimensional array, each dimension (row, plane,
cube, etc.) gets its own level of curly braces, and delimiters
must be written between adjacent curly-braced entities of the same level.
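   For instance, a two-dimensional value can be entered and inspected like this:

    SELECT '{{1,2,3},{4,5,6}}'::int[];              -- each row gets its own braces
    SELECT array_dims('{{1,2,3},{4,5,6}}'::int[]);  -- [1:2][1:3]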
@@ -719,7 +719,7 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1);
The array output routine will put double quotes around element values
if they are empty strings, contain curly braces, delimiter characters,
double quotes, backslashes, or white space, or match the word
- <literal>NULL</>. Double quotes and backslashes
+ <literal>NULL</literal>. Double quotes and backslashes
embedded in element values will be backslash-escaped. For numeric
data types it is safe to assume that double quotes will never appear, but
for textual data types one should be prepared to cope with either the presence
@@ -731,10 +731,10 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1);
set to one. To represent arrays with other lower bounds, the array
subscript ranges can be specified explicitly before writing the
array contents.
- This decoration consists of square brackets (<literal>[]</>)
+ This decoration consists of square brackets (<literal>[]</literal>)
around each array dimension's lower and upper bounds, with
- a colon (<literal>:</>) delimiter character in between. The
- array dimension decoration is followed by an equal sign (<literal>=</>).
+ a colon (<literal>:</literal>) delimiter character in between. The
+ array dimension decoration is followed by an equal sign (<literal>=</literal>).
For example:
<programlisting>
SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2
@@ -750,23 +750,23 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2
</para>
<para>
- If the value written for an element is <literal>NULL</> (in any case
+ If the value written for an element is <literal>NULL</literal> (in any case
variant), the element is taken to be NULL. The presence of any quotes
or backslashes disables this and allows the literal string value
- <quote>NULL</> to be entered. Also, for backward compatibility with
- pre-8.2 versions of <productname>PostgreSQL</>, the <xref
+ <quote>NULL</quote> to be entered. Also, for backward compatibility with
+ pre-8.2 versions of <productname>PostgreSQL</productname>, the <xref
linkend="guc-array-nulls"> configuration parameter can be turned
- <literal>off</> to suppress recognition of <literal>NULL</> as a NULL.
+ <literal>off</literal> to suppress recognition of <literal>NULL</literal> as a NULL.
</para>
<para>
As shown previously, when writing an array value you can use double
- quotes around any individual array element. You <emphasis>must</> do so
+ quotes around any individual array element. You <emphasis>must</emphasis> do so
if the element value would otherwise confuse the array-value parser.
For example, elements containing curly braces, commas (or the data type's
delimiter character), double quotes, backslashes, or leading or trailing
whitespace must be double-quoted. Empty strings and strings matching the
- word <literal>NULL</> must be quoted, too. To put a double quote or
+ word <literal>NULL</literal> must be quoted, too. To put a double quote or
backslash in a quoted array element value, use escape string syntax
and precede it with a backslash. Alternatively, you can avoid quotes and use
backslash-escaping to protect all data characters that would otherwise
@@ -785,17 +785,17 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2
<para>
Remember that what you write in an SQL command will first be interpreted
as a string literal, and then as an array. This doubles the number of
- backslashes you need. For example, to insert a <type>text</> array
+ backslashes you need. For example, to insert a <type>text</type> array
value containing a backslash and a double quote, you'd need to write:
<programlisting>
INSERT ... VALUES (E'{"\\\\","\\""}');
</programlisting>
The escape string processor removes one level of backslashes, so that
- what arrives at the array-value parser looks like <literal>{"\\","\""}</>.
- In turn, the strings fed to the <type>text</> data type's input routine
- become <literal>\</> and <literal>"</> respectively. (If we were working
+ what arrives at the array-value parser looks like <literal>{"\\","\""}</literal>.
+ In turn, the strings fed to the <type>text</type> data type's input routine
+ become <literal>\</literal> and <literal>"</literal> respectively. (If we were working
with a data type whose input routine also treated backslashes specially,
- <type>bytea</> for example, we might need as many as eight backslashes
+ <type>bytea</type> for example, we might need as many as eight backslashes
in the command to get one backslash into the stored array element.)
Dollar quoting (see <xref linkend="sql-syntax-dollar-quoting">) can be
used to avoid the need to double backslashes.
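   For instance, the same value could be written with dollar quoting, so the
   string-literal level of backslash doubling disappears while the array-level
   escapes remain:

    SELECT $${"\\","\""}$$::text[];   -- the two elements are a backslash and a double quote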
@@ -804,10 +804,10 @@ INSERT ... VALUES (E'{"\\\\","\\""}');
<tip>
<para>
- The <literal>ARRAY</> constructor syntax (see
+ The <literal>ARRAY</literal> constructor syntax (see
<xref linkend="sql-syntax-array-constructors">) is often easier to work
with than the array-literal syntax when writing array values in SQL
- commands. In <literal>ARRAY</>, individual element values are written the
+ commands. In <literal>ARRAY</literal>, individual element values are written the
same way they would be written when not members of an array.
</para>
</tip>
diff --git a/doc/src/sgml/auth-delay.sgml b/doc/src/sgml/auth-delay.sgml
index 9a6e3e9bb4..9221d2dfb6 100644
--- a/doc/src/sgml/auth-delay.sgml
+++ b/doc/src/sgml/auth-delay.sgml
@@ -18,7 +18,7 @@
<para>
In order to function, this module must be loaded via
- <xref linkend="guc-shared-preload-libraries"> in <filename>postgresql.conf</>.
+ <xref linkend="guc-shared-preload-libraries"> in <filename>postgresql.conf</filename>.
</para>
<sect2>
@@ -29,7 +29,7 @@
<term>
<varname>auth_delay.milliseconds</varname> (<type>int</type>)
<indexterm>
- <primary><varname>auth_delay.milliseconds</> configuration parameter</primary>
+ <primary><varname>auth_delay.milliseconds</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -42,7 +42,7 @@
</variablelist>
<para>
- These parameters must be set in <filename>postgresql.conf</>.
+ These parameters must be set in <filename>postgresql.conf</filename>.
Typical usage might be:
</para>
diff --git a/doc/src/sgml/auto-explain.sgml b/doc/src/sgml/auto-explain.sgml
index 38e6f50c80..240098c82f 100644
--- a/doc/src/sgml/auto-explain.sgml
+++ b/doc/src/sgml/auto-explain.sgml
@@ -24,10 +24,10 @@ LOAD 'auto_explain';
</programlisting>
(You must be superuser to do that.) More typical usage is to preload
- it into some or all sessions by including <literal>auto_explain</> in
+ it into some or all sessions by including <literal>auto_explain</literal> in
<xref linkend="guc-session-preload-libraries"> or
<xref linkend="guc-shared-preload-libraries"> in
- <filename>postgresql.conf</>. Then you can track unexpectedly slow queries
+ <filename>postgresql.conf</filename>. Then you can track unexpectedly slow queries
no matter when they happen. Of course there is a price in overhead for
that.
</para>
@@ -47,7 +47,7 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.log_min_duration</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>auto_explain.log_min_duration</> configuration parameter</primary>
+ <primary><varname>auto_explain.log_min_duration</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -66,13 +66,13 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.log_analyze</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>auto_explain.log_analyze</> configuration parameter</primary>
+ <primary><varname>auto_explain.log_analyze</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- <varname>auto_explain.log_analyze</varname> causes <command>EXPLAIN ANALYZE</>
- output, rather than just <command>EXPLAIN</> output, to be printed
+ <varname>auto_explain.log_analyze</varname> causes <command>EXPLAIN ANALYZE</command>
+ output, rather than just <command>EXPLAIN</command> output, to be printed
when an execution plan is logged. This parameter is off by default.
Only superusers can change this setting.
</para>
@@ -92,14 +92,14 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.log_buffers</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>auto_explain.log_buffers</> configuration parameter</primary>
+ <primary><varname>auto_explain.log_buffers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
<varname>auto_explain.log_buffers</varname> controls whether buffer
usage statistics are printed when an execution plan is logged; it's
- equivalent to the <literal>BUFFERS</> option of <command>EXPLAIN</>.
+ equivalent to the <literal>BUFFERS</literal> option of <command>EXPLAIN</command>.
This parameter has no effect
unless <varname>auto_explain.log_analyze</varname> is enabled.
This parameter is off by default.
@@ -112,14 +112,14 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.log_timing</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>auto_explain.log_timing</> configuration parameter</primary>
+ <primary><varname>auto_explain.log_timing</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
<varname>auto_explain.log_timing</varname> controls whether per-node
timing information is printed when an execution plan is logged; it's
- equivalent to the <literal>TIMING</> option of <command>EXPLAIN</>.
+ equivalent to the <literal>TIMING</literal> option of <command>EXPLAIN</command>.
The overhead of repeatedly reading the system clock can slow down
queries significantly on some systems, so it may be useful to set this
parameter to off when only actual row counts, and not exact times, are
@@ -136,7 +136,7 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.log_triggers</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>auto_explain.log_triggers</> configuration parameter</primary>
+ <primary><varname>auto_explain.log_triggers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -155,14 +155,14 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.log_verbose</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>auto_explain.log_verbose</> configuration parameter</primary>
+ <primary><varname>auto_explain.log_verbose</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
<varname>auto_explain.log_verbose</varname> controls whether verbose
details are printed when an execution plan is logged; it's
- equivalent to the <literal>VERBOSE</> option of <command>EXPLAIN</>.
+ equivalent to the <literal>VERBOSE</literal> option of <command>EXPLAIN</command>.
This parameter is off by default.
Only superusers can change this setting.
</para>
@@ -173,13 +173,13 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.log_format</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>auto_explain.log_format</> configuration parameter</primary>
+ <primary><varname>auto_explain.log_format</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
<varname>auto_explain.log_format</varname> selects the
- <command>EXPLAIN</> output format to be used.
+ <command>EXPLAIN</command> output format to be used.
The allowed values are <literal>text</literal>, <literal>xml</literal>,
<literal>json</literal>, and <literal>yaml</literal>. The default is text.
Only superusers can change this setting.
@@ -191,7 +191,7 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.log_nested_statements</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>auto_explain.log_nested_statements</> configuration parameter</primary>
+ <primary><varname>auto_explain.log_nested_statements</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -208,7 +208,7 @@ LOAD 'auto_explain';
<term>
<varname>auto_explain.sample_rate</varname> (<type>real</type>)
<indexterm>
- <primary><varname>auto_explain.sample_rate</> configuration parameter</primary>
+ <primary><varname>auto_explain.sample_rate</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -224,7 +224,7 @@ LOAD 'auto_explain';
<para>
In ordinary usage, these parameters are set
- in <filename>postgresql.conf</>, although superusers can alter them
+ in <filename>postgresql.conf</filename>, although superusers can alter them
on-the-fly within their own sessions.
Typical usage might be:
</para>
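   As a hedged per-session sketch (superuser required; the threshold value here
   is arbitrary, not a recommendation):

    LOAD 'auto_explain';
    SET auto_explain.log_min_duration = 250;    -- milliseconds
    SET auto_explain.log_analyze = true;
    SELECT count(*) FROM pg_class;              -- plan is logged if it exceeds 250 ms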
diff --git a/doc/src/sgml/backup.sgml b/doc/src/sgml/backup.sgml
index bd55e8bb77..dd9c1bff5b 100644
--- a/doc/src/sgml/backup.sgml
+++ b/doc/src/sgml/backup.sgml
@@ -3,10 +3,10 @@
<chapter id="backup">
<title>Backup and Restore</title>
- <indexterm zone="backup"><primary>backup</></>
+ <indexterm zone="backup"><primary>backup</primary></indexterm>
<para>
- As with everything that contains valuable data, <productname>PostgreSQL</>
+ As with everything that contains valuable data, <productname>PostgreSQL</productname>
databases should be backed up regularly. While the procedure is
essentially simple, it is important to have a clear understanding of
the underlying techniques and assumptions.
@@ -14,9 +14,9 @@
<para>
There are three fundamentally different approaches to backing up
- <productname>PostgreSQL</> data:
+ <productname>PostgreSQL</productname> data:
<itemizedlist>
- <listitem><para><acronym>SQL</> dump</para></listitem>
+ <listitem><para><acronym>SQL</acronym> dump</para></listitem>
<listitem><para>File system level backup</para></listitem>
<listitem><para>Continuous archiving</para></listitem>
</itemizedlist>
@@ -25,30 +25,30 @@
</para>
<sect1 id="backup-dump">
- <title><acronym>SQL</> Dump</title>
+ <title><acronym>SQL</acronym> Dump</title>
<para>
The idea behind this dump method is to generate a file with SQL
commands that, when fed back to the server, will recreate the
database in the same state as it was at the time of the dump.
- <productname>PostgreSQL</> provides the utility program
+ <productname>PostgreSQL</productname> provides the utility program
<xref linkend="app-pgdump"> for this purpose. The basic usage of this
command is:
<synopsis>
pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable class="parameter">outfile</replaceable>
</synopsis>
- As you see, <application>pg_dump</> writes its result to the
+ As you see, <application>pg_dump</application> writes its result to the
standard output. We will see below how this can be useful.
- While the above command creates a text file, <application>pg_dump</>
+ While the above command creates a text file, <application>pg_dump</application>
can create files in other formats that allow for parallelism and more
fine-grained control of object restoration.
</para>
<para>
- <application>pg_dump</> is a regular <productname>PostgreSQL</>
+ <application>pg_dump</application> is a regular <productname>PostgreSQL</productname>
client application (albeit a particularly clever one). This means
that you can perform this backup procedure from any remote host that has
- access to the database. But remember that <application>pg_dump</>
+ access to the database. But remember that <application>pg_dump</application>
does not operate with special permissions. In particular, it must
have read access to all tables that you want to back up, so in order
to back up the entire database you almost always have to run it as a
@@ -60,9 +60,9 @@ pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable cl
</para>
<para>
- To specify which database server <application>pg_dump</> should
+ To specify which database server <application>pg_dump</application> should
contact, use the command line options <option>-h
- <replaceable>host</></> and <option>-p <replaceable>port</></>. The
+ <replaceable>host</replaceable></option> and <option>-p <replaceable>port</replaceable></option>. The
default host is the local host or whatever your
<envar>PGHOST</envar> environment variable specifies. Similarly,
the default port is indicated by the <envar>PGPORT</envar>
@@ -72,30 +72,30 @@ pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable cl
</para>
<para>
- Like any other <productname>PostgreSQL</> client application,
- <application>pg_dump</> will by default connect with the database
+ Like any other <productname>PostgreSQL</productname> client application,
+ <application>pg_dump</application> will by default connect with the database
user name that is equal to the current operating system user name. To override
this, either specify the <option>-U</option> option or set the
environment variable <envar>PGUSER</envar>. Remember that
- <application>pg_dump</> connections are subject to the normal
+ <application>pg_dump</application> connections are subject to the normal
client authentication mechanisms (which are described in <xref
linkend="client-authentication">).
</para>
<para>
- An important advantage of <application>pg_dump</> over the other backup
- methods described later is that <application>pg_dump</>'s output can
- generally be re-loaded into newer versions of <productname>PostgreSQL</>,
+ An important advantage of <application>pg_dump</application> over the other backup
+ methods described later is that <application>pg_dump</application>'s output can
+ generally be re-loaded into newer versions of <productname>PostgreSQL</productname>,
whereas file-level backups and continuous archiving are both extremely
- server-version-specific. <application>pg_dump</> is also the only method
+ server-version-specific. <application>pg_dump</application> is also the only method
that will work when transferring a database to a different machine
architecture, such as going from a 32-bit to a 64-bit server.
</para>
<para>
- Dumps created by <application>pg_dump</> are internally consistent,
+ Dumps created by <application>pg_dump</application> are internally consistent,
meaning, the dump represents a snapshot of the database at the time
- <application>pg_dump</> began running. <application>pg_dump</> does not
+ <application>pg_dump</application> began running. <application>pg_dump</application> does not
block other operations on the database while it is working.
(Exceptions are those operations that need to operate with an
exclusive lock, such as most forms of <command>ALTER TABLE</command>.)
@@ -105,20 +105,20 @@ pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable cl
<title>Restoring the Dump</title>
<para>
- Text files created by <application>pg_dump</> are intended to
+ Text files created by <application>pg_dump</application> are intended to
be read in by the <application>psql</application> program. The
general command form to restore a dump is
<synopsis>
psql <replaceable class="parameter">dbname</replaceable> &lt; <replaceable class="parameter">infile</replaceable>
</synopsis>
where <replaceable class="parameter">infile</replaceable> is the
- file output by the <application>pg_dump</> command. The database <replaceable
+ file output by the <application>pg_dump</application> command. The database <replaceable
class="parameter">dbname</replaceable> will not be created by this
- command, so you must create it yourself from <literal>template0</>
- before executing <application>psql</> (e.g., with
+ command, so you must create it yourself from <literal>template0</literal>
+ before executing <application>psql</application> (e.g., with
<literal>createdb -T template0 <replaceable
- class="parameter">dbname</></literal>). <application>psql</>
- supports options similar to <application>pg_dump</> for specifying
+ class="parameter">dbname</replaceable></literal>). <application>psql</application>
+ supports options similar to <application>pg_dump</application> for specifying
the database server to connect to and the user name to use. See
the <xref linkend="app-psql"> reference page for more information.
Non-text file dumps are restored using the <xref
@@ -134,10 +134,10 @@ psql <replaceable class="parameter">dbname</replaceable> &lt; <replaceable class
</para>
<para>
- By default, the <application>psql</> script will continue to
+ By default, the <application>psql</application> script will continue to
execute after an SQL error is encountered. You might wish to run
<application>psql</application> with
- the <literal>ON_ERROR_STOP</> variable set to alter that
+ the <literal>ON_ERROR_STOP</literal> variable set to alter that
behavior and have <application>psql</application> exit with an
exit status of 3 if an SQL error occurs:
<programlisting>
@@ -147,8 +147,8 @@ psql --set ON_ERROR_STOP=on dbname &lt; infile
Alternatively, you can specify that the whole dump should be
restored as a single transaction, so the restore is either fully
completed or fully rolled back. This mode can be specified by
- passing the <option>-1</> or <option>--single-transaction</>
- command-line options to <application>psql</>. When using this
+ passing the <option>-1</option> or <option>--single-transaction</option>
+ command-line options to <application>psql</application>. When using this
mode, be aware that even a minor error can rollback a
restore that has already run for many hours. However, that might
still be preferable to manually cleaning up a complex database
@@ -156,22 +156,22 @@ psql --set ON_ERROR_STOP=on dbname &lt; infile
</para>
<para>
- The ability of <application>pg_dump</> and <application>psql</> to
+ The ability of <application>pg_dump</application> and <application>psql</application> to
write to or read from pipes makes it possible to dump a database
directly from one server to another, for example:
<programlisting>
-pg_dump -h <replaceable>host1</> <replaceable>dbname</> | psql -h <replaceable>host2</> <replaceable>dbname</>
+pg_dump -h <replaceable>host1</replaceable> <replaceable>dbname</replaceable> | psql -h <replaceable>host2</replaceable> <replaceable>dbname</replaceable>
</programlisting>
</para>
<important>
<para>
- The dumps produced by <application>pg_dump</> are relative to
- <literal>template0</>. This means that any languages, procedures,
- etc. added via <literal>template1</> will also be dumped by
- <application>pg_dump</>. As a result, when restoring, if you are
- using a customized <literal>template1</>, you must create the
- empty database from <literal>template0</>, as in the example
+ The dumps produced by <application>pg_dump</application> are relative to
+ <literal>template0</literal>. This means that any languages, procedures,
+ etc. added via <literal>template1</literal> will also be dumped by
+ <application>pg_dump</application>. As a result, when restoring, if you are
+ using a customized <literal>template1</literal>, you must create the
+ empty database from <literal>template0</literal>, as in the example
above.
</para>
</important>
@@ -183,52 +183,52 @@ pg_dump -h <replaceable>host1</> <replaceable>dbname</> | psql -h <replaceable>h
see <xref linkend="vacuum-for-statistics">
and <xref linkend="autovacuum"> for more information.
For more advice on how to load large amounts of data
- into <productname>PostgreSQL</> efficiently, refer to <xref
+ into <productname>PostgreSQL</productname> efficiently, refer to <xref
linkend="populate">.
</para>
</sect2>
<sect2 id="backup-dump-all">
- <title>Using <application>pg_dumpall</></title>
+ <title>Using <application>pg_dumpall</application></title>
<para>
- <application>pg_dump</> dumps only a single database at a time,
+ <application>pg_dump</application> dumps only a single database at a time,
and it does not dump information about roles or tablespaces
(because those are cluster-wide rather than per-database).
To support convenient dumping of the entire contents of a database
cluster, the <xref linkend="app-pg-dumpall"> program is provided.
- <application>pg_dumpall</> backs up each database in a given
+ <application>pg_dumpall</application> backs up each database in a given
cluster, and also preserves cluster-wide data such as role and
tablespace definitions. The basic usage of this command is:
<synopsis>
-pg_dumpall &gt; <replaceable>outfile</>
+pg_dumpall &gt; <replaceable>outfile</replaceable>
</synopsis>
- The resulting dump can be restored with <application>psql</>:
+ The resulting dump can be restored with <application>psql</application>:
<synopsis>
psql -f <replaceable class="parameter">infile</replaceable> postgres
</synopsis>
(Actually, you can specify any existing database name to start from,
- but if you are loading into an empty cluster then <literal>postgres</>
+ but if you are loading into an empty cluster then <literal>postgres</literal>
should usually be used.) It is always necessary to have
- database superuser access when restoring a <application>pg_dumpall</>
+ database superuser access when restoring a <application>pg_dumpall</application>
dump, as that is required to restore the role and tablespace information.
If you use tablespaces, make sure that the tablespace paths in the
dump are appropriate for the new installation.
</para>
<para>
- <application>pg_dumpall</> works by emitting commands to re-create
+ <application>pg_dumpall</application> works by emitting commands to re-create
roles, tablespaces, and empty databases, then invoking
- <application>pg_dump</> for each database. This means that while
+ <application>pg_dump</application> for each database. This means that while
each database will be internally consistent, the snapshots of
different databases are not synchronized.
</para>
<para>
Cluster-wide data can be dumped alone using the
- <application>pg_dumpall</> <option>--globals-only</> option.
+ <application>pg_dumpall</application> <option>--globals-only</option> option.
This is necessary to fully backup the cluster if running the
- <application>pg_dump</> command on individual databases.
+ <application>pg_dump</application> command on individual databases.
</para>
</sect2>
@@ -237,8 +237,8 @@ psql -f <replaceable class="parameter">infile</replaceable> postgres
<para>
Some operating systems have maximum file size limits that cause
- problems when creating large <application>pg_dump</> output files.
- Fortunately, <application>pg_dump</> can write to the standard
+ problems when creating large <application>pg_dump</application> output files.
+ Fortunately, <application>pg_dump</application> can write to the standard
output, so you can use standard Unix tools to work around this
potential problem. There are several possible methods:
</para>
@@ -268,7 +268,7 @@ cat <replaceable class="parameter">filename</replaceable>.gz | gunzip | psql <re
</formalpara>
<formalpara>
- <title>Use <command>split</>.</title>
+ <title>Use <command>split</command>.</title>
<para>
The <command>split</command> command
allows you to split the output into smaller files that are
@@ -288,10 +288,10 @@ cat <replaceable class="parameter">filename</replaceable>* | psql <replaceable c
</formalpara>
<formalpara>
- <title>Use <application>pg_dump</>'s custom dump format.</title>
+ <title>Use <application>pg_dump</application>'s custom dump format.</title>
<para>
If <productname>PostgreSQL</productname> was built on a system with the
- <application>zlib</> compression library installed, the custom dump
+ <application>zlib</application> compression library installed, the custom dump
format will compress data as it writes it to the output file. This will
produce dump file sizes similar to using <command>gzip</command>, but it
has the added advantage that tables can be restored selectively. The
@@ -301,8 +301,8 @@ cat <replaceable class="parameter">filename</replaceable>* | psql <replaceable c
pg_dump -Fc <replaceable class="parameter">dbname</replaceable> &gt; <replaceable class="parameter">filename</replaceable>
</programlisting>
- A custom-format dump is not a script for <application>psql</>, but
- instead must be restored with <application>pg_restore</>, for example:
+ A custom-format dump is not a script for <application>psql</application>, but
+ instead must be restored with <application>pg_restore</application>, for example:
<programlisting>
pg_restore -d <replaceable class="parameter">dbname</replaceable> <replaceable class="parameter">filename</replaceable>
@@ -314,12 +314,12 @@ pg_restore -d <replaceable class="parameter">dbname</replaceable> <replaceable c
</formalpara>
<para>
- For very large databases, you might need to combine <command>split</>
+ For very large databases, you might need to combine <command>split</command>
with one of the other two approaches.
</para>
<formalpara>
- <title>Use <application>pg_dump</>'s parallel dump feature.</title>
+ <title>Use <application>pg_dump</application>'s parallel dump feature.</title>
<para>
To speed up the dump of a large database, you can use
<application>pg_dump</application>'s parallel mode. This will dump
@@ -344,7 +344,7 @@ pg_dump -j <replaceable class="parameter">num</replaceable> -F d -f <replaceable
<para>
An alternative backup strategy is to directly copy the files that
- <productname>PostgreSQL</> uses to store the data in the database;
+ <productname>PostgreSQL</productname> uses to store the data in the database;
<xref linkend="creating-cluster"> explains where these files
are located. You can use whatever method you prefer
for doing file system backups; for example:
@@ -356,13 +356,13 @@ tar -cf backup.tar /usr/local/pgsql/data
<para>
There are two restrictions, however, which make this method
- impractical, or at least inferior to the <application>pg_dump</>
+ impractical, or at least inferior to the <application>pg_dump</application>
method:
<orderedlist>
<listitem>
<para>
- The database server <emphasis>must</> be shut down in order to
+ The database server <emphasis>must</emphasis> be shut down in order to
get a usable backup. Half-way measures such as disallowing all
connections will <emphasis>not</emphasis> work
(in part because <command>tar</command> and similar tools do not take
@@ -379,7 +379,7 @@ tar -cf backup.tar /usr/local/pgsql/data
If you have dug into the details of the file system layout of the
database, you might be tempted to try to back up or restore only certain
individual tables or databases from their respective files or
- directories. This will <emphasis>not</> work because the
+ directories. This will <emphasis>not</emphasis> work because the
information contained in these files is not usable without
the commit log files,
<filename>pg_xact/*</filename>, which contain the commit status of
@@ -399,7 +399,7 @@ tar -cf backup.tar /usr/local/pgsql/data
<quote>consistent snapshot</quote> of the data directory, if the
file system supports that functionality (and you are willing to
trust that it is implemented correctly). The typical procedure is
- to make a <quote>frozen snapshot</> of the volume containing the
+ to make a <quote>frozen snapshot</quote> of the volume containing the
database, then copy the whole data directory (not just parts, see
above) from the snapshot to a backup device, then release the frozen
snapshot. This will work even while the database server is running.
@@ -419,7 +419,7 @@ tar -cf backup.tar /usr/local/pgsql/data
the volumes. For example, if your data files and WAL log are on different
disks, or if tablespaces are on different file systems, it might
not be possible to use snapshot backup because the snapshots
- <emphasis>must</> be simultaneous.
+ <emphasis>must</emphasis> be simultaneous.
Read your file system documentation very carefully before trusting
the consistent-snapshot technique in such situations.
</para>
@@ -435,13 +435,13 @@ tar -cf backup.tar /usr/local/pgsql/data
</para>
<para>
- Another option is to use <application>rsync</> to perform a file
- system backup. This is done by first running <application>rsync</>
+ Another option is to use <application>rsync</application> to perform a file
+ system backup. This is done by first running <application>rsync</application>
while the database server is running, then shutting down the database
- server long enough to do an <command>rsync --checksum</>.
- (<option>--checksum</> is necessary because <command>rsync</> only
+ server long enough to do an <command>rsync --checksum</command>.
+ (<option>--checksum</option> is necessary because <command>rsync</command> only
has file modification-time granularity of one second.) The
- second <application>rsync</> will be quicker than the first,
+ second <application>rsync</application> will be quicker than the first,
because it has relatively little data to transfer, and the end result
will be consistent because the server was down. This method
allows a file system backup to be performed with minimal downtime.
@@ -471,12 +471,12 @@ tar -cf backup.tar /usr/local/pgsql/data
</indexterm>
<para>
- At all times, <productname>PostgreSQL</> maintains a
- <firstterm>write ahead log</> (WAL) in the <filename>pg_wal/</>
+ At all times, <productname>PostgreSQL</productname> maintains a
+ <firstterm>write ahead log</firstterm> (WAL) in the <filename>pg_wal/</filename>
subdirectory of the cluster's data directory. The log records
every change made to the database's data files. This log exists
primarily for crash-safety purposes: if the system crashes, the
- database can be restored to consistency by <quote>replaying</> the
+ database can be restored to consistency by <quote>replaying</quote> the
log entries made since the last checkpoint. However, the existence
of the log makes it possible to use a third strategy for backing up
databases: we can combine a file-system-level backup with backup of
@@ -492,7 +492,7 @@ tar -cf backup.tar /usr/local/pgsql/data
Any internal inconsistency in the backup will be corrected by log
replay (this is not significantly different from what happens during
crash recovery). So we do not need a file system snapshot capability,
- just <application>tar</> or a similar archiving tool.
+ just <application>tar</application> or a similar archiving tool.
</para>
</listitem>
<listitem>
@@ -508,7 +508,7 @@ tar -cf backup.tar /usr/local/pgsql/data
It is not necessary to replay the WAL entries all the
way to the end. We could stop the replay at any point and have a
consistent snapshot of the database as it was at that time. Thus,
- this technique supports <firstterm>point-in-time recovery</>: it is
+ this technique supports <firstterm>point-in-time recovery</firstterm>: it is
possible to restore the database to its state at any time since your base
backup was taken.
</para>
@@ -517,7 +517,7 @@ tar -cf backup.tar /usr/local/pgsql/data
<para>
If we continuously feed the series of WAL files to another
machine that has been loaded with the same base backup file, we
- have a <firstterm>warm standby</> system: at any point we can bring up
+ have a <firstterm>warm standby</firstterm> system: at any point we can bring up
the second machine and it will have a nearly-current copy of the
database.
</para>
@@ -530,7 +530,7 @@ tar -cf backup.tar /usr/local/pgsql/data
<application>pg_dump</application> and
<application>pg_dumpall</application> do not produce file-system-level
backups and cannot be used as part of a continuous-archiving solution.
- Such dumps are <emphasis>logical</> and do not contain enough
+ Such dumps are <emphasis>logical</emphasis> and do not contain enough
information to be used by WAL replay.
</para>
</note>
@@ -546,10 +546,10 @@ tar -cf backup.tar /usr/local/pgsql/data
<para>
To recover successfully using continuous archiving (also called
- <quote>online backup</> by many database vendors), you need a continuous
+ <quote>online backup</quote> by many database vendors), you need a continuous
sequence of archived WAL files that extends back at least as far as the
start time of your backup. So to get started, you should set up and test
- your procedure for archiving WAL files <emphasis>before</> you take your
+ your procedure for archiving WAL files <emphasis>before</emphasis> you take your
first base backup. Accordingly, we first discuss the mechanics of
archiving WAL files.
</para>
@@ -558,15 +558,15 @@ tar -cf backup.tar /usr/local/pgsql/data
<title>Setting Up WAL Archiving</title>
<para>
- In an abstract sense, a running <productname>PostgreSQL</> system
+ In an abstract sense, a running <productname>PostgreSQL</productname> system
produces an indefinitely long sequence of WAL records. The system
physically divides this sequence into WAL <firstterm>segment
- files</>, which are normally 16MB apiece (although the segment size
- can be altered during <application>initdb</>). The segment
+ files</firstterm>, which are normally 16MB apiece (although the segment size
+ can be altered during <application>initdb</application>). The segment
files are given numeric names that reflect their position in the
abstract WAL sequence. When not using WAL archiving, the system
normally creates just a few segment files and then
- <quote>recycles</> them by renaming no-longer-needed segment files
+ <quote>recycles</quote> them by renaming no-longer-needed segment files
to higher segment numbers. It's assumed that segment files whose
contents precede the checkpoint-before-last are no longer of
interest and can be recycled.
@@ -577,33 +577,33 @@ tar -cf backup.tar /usr/local/pgsql/data
file once it is filled, and save that data somewhere before the segment
file is recycled for reuse. Depending on the application and the
available hardware, there could be many different ways of <quote>saving
- the data somewhere</>: we could copy the segment files to an NFS-mounted
+ the data somewhere</quote>: we could copy the segment files to an NFS-mounted
directory on another machine, write them onto a tape drive (ensuring that
you have a way of identifying the original name of each file), or batch
them together and burn them onto CDs, or something else entirely. To
provide the database administrator with flexibility,
- <productname>PostgreSQL</> tries not to make any assumptions about how
- the archiving will be done. Instead, <productname>PostgreSQL</> lets
+ <productname>PostgreSQL</productname> tries not to make any assumptions about how
+ the archiving will be done. Instead, <productname>PostgreSQL</productname> lets
the administrator specify a shell command to be executed to copy a
completed segment file to wherever it needs to go. The command could be
- as simple as a <literal>cp</>, or it could invoke a complex shell
+ as simple as a <literal>cp</literal>, or it could invoke a complex shell
script &mdash; it's all up to you.
</para>
<para>
To enable WAL archiving, set the <xref linkend="guc-wal-level">
- configuration parameter to <literal>replica</> or higher,
- <xref linkend="guc-archive-mode"> to <literal>on</>,
+ configuration parameter to <literal>replica</literal> or higher,
+ <xref linkend="guc-archive-mode"> to <literal>on</literal>,
and specify the shell command to use in the <xref
linkend="guc-archive-command"> configuration parameter. In practice
these settings will always be placed in the
<filename>postgresql.conf</filename> file.
- In <varname>archive_command</>,
- <literal>%p</> is replaced by the path name of the file to
- archive, while <literal>%f</> is replaced by only the file name.
+ In <varname>archive_command</varname>,
+ <literal>%p</literal> is replaced by the path name of the file to
+ archive, while <literal>%f</literal> is replaced by only the file name.
(The path name is relative to the current working directory,
i.e., the cluster's data directory.)
- Use <literal>%%</> if you need to embed an actual <literal>%</>
+ Use <literal>%%</literal> if you need to embed an actual <literal>%</literal>
character in the command. The simplest useful command is something
like:
<programlisting>
@@ -611,9 +611,9 @@ archive_command = 'test ! -f /mnt/server/archivedir/%f &amp;&amp; cp %p /mnt/ser
archive_command = 'copy "%p" "C:\\server\\archivedir\\%f"' # Windows
</programlisting>
which will copy archivable WAL segments to the directory
- <filename>/mnt/server/archivedir</>. (This is an example, not a
+ <filename>/mnt/server/archivedir</filename>. (This is an example, not a
recommendation, and might not work on all platforms.) After the
- <literal>%p</> and <literal>%f</> parameters have been replaced,
+ <literal>%p</literal> and <literal>%f</literal> parameters have been replaced,
the actual command executed might look like this:
<programlisting>
test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/00000001000000A900000065 /mnt/server/archivedir/00000001000000A900000065
@@ -623,7 +623,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<para>
The archive command will be executed under the ownership of the same
- user that the <productname>PostgreSQL</> server is running as. Since
+ user that the <productname>PostgreSQL</productname> server is running as. Since
the series of WAL files being archived contains effectively everything
in your database, you will want to be sure that the archived data is
protected from prying eyes; for example, archive into a directory that
@@ -633,9 +633,9 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<para>
It is important that the archive command return zero exit status if and
only if it succeeds. Upon getting a zero result,
- <productname>PostgreSQL</> will assume that the file has been
+ <productname>PostgreSQL</productname> will assume that the file has been
successfully archived, and will remove or recycle it. However, a nonzero
- status tells <productname>PostgreSQL</> that the file was not archived;
+ status tells <productname>PostgreSQL</productname> that the file was not archived;
it will try again periodically until it succeeds.
</para>
@@ -650,14 +650,14 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<para>
It is advisable to test your proposed archive command to ensure that it
indeed does not overwrite an existing file, <emphasis>and that it returns
- nonzero status in this case</>.
+ nonzero status in this case</emphasis>.
The example command above for Unix ensures this by including a separate
- <command>test</> step. On some Unix platforms, <command>cp</> has
- switches such as <option>-i</> that can be used to do the same thing
+ <command>test</command> step. On some Unix platforms, <command>cp</command> has
+ switches such as <option>-i</option> that can be used to do the same thing
less verbosely, but you should not rely on these without verifying that
- the right exit status is returned. (In particular, GNU <command>cp</>
- will return status zero when <option>-i</> is used and the target file
- already exists, which is <emphasis>not</> the desired behavior.)
+ the right exit status is returned. (In particular, GNU <command>cp</command>
+ will return status zero when <option>-i</option> is used and the target file
+ already exists, which is <emphasis>not</emphasis> the desired behavior.)
</para>
<para>
@@ -668,10 +668,10 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
fills, nothing further can be archived until the tape is swapped.
You should ensure that any error condition or request to a human operator
is reported appropriately so that the situation can be
- resolved reasonably quickly. The <filename>pg_wal/</> directory will
+ resolved reasonably quickly. The <filename>pg_wal/</filename> directory will
continue to fill with WAL segment files until the situation is resolved.
- (If the file system containing <filename>pg_wal/</> fills up,
- <productname>PostgreSQL</> will do a PANIC shutdown. No committed
+ (If the file system containing <filename>pg_wal/</filename> fills up,
+ <productname>PostgreSQL</productname> will do a PANIC shutdown. No committed
transactions will be lost, but the database will remain offline until
you free some space.)
</para>
@@ -682,7 +682,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
operation continues even if the archiving process falls a little behind.
If archiving falls significantly behind, this will increase the amount of
data that would be lost in the event of a disaster. It will also mean that
- the <filename>pg_wal/</> directory will contain large numbers of
+ the <filename>pg_wal/</filename> directory will contain large numbers of
not-yet-archived segment files, which could eventually exceed available
disk space. You are advised to monitor the archiving process to ensure that
it is working as you intend.
@@ -692,16 +692,16 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
In writing your archive command, you should assume that the file names to
be archived can be up to 64 characters long and can contain any
combination of ASCII letters, digits, and dots. It is not necessary to
- preserve the original relative path (<literal>%p</>) but it is necessary to
- preserve the file name (<literal>%f</>).
+ preserve the original relative path (<literal>%p</literal>) but it is necessary to
+ preserve the file name (<literal>%f</literal>).
</para>
<para>
Note that although WAL archiving will allow you to restore any
- modifications made to the data in your <productname>PostgreSQL</> database,
+ modifications made to the data in your <productname>PostgreSQL</productname> database,
it will not restore changes made to configuration files (that is,
- <filename>postgresql.conf</>, <filename>pg_hba.conf</> and
- <filename>pg_ident.conf</>), since those are edited manually rather
+ <filename>postgresql.conf</filename>, <filename>pg_hba.conf</filename> and
+ <filename>pg_ident.conf</filename>), since those are edited manually rather
than through SQL operations.
You might wish to keep the configuration files in a location that will
be backed up by your regular file system backup procedures. See
@@ -719,32 +719,32 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
to a new WAL segment file at least that often. Note that archived
files that are archived early due to a forced switch are still the same
length as completely full files. It is therefore unwise to set a very
- short <varname>archive_timeout</> &mdash; it will bloat your archive
- storage. <varname>archive_timeout</> settings of a minute or so are
+ short <varname>archive_timeout</varname> &mdash; it will bloat your archive
+ storage. <varname>archive_timeout</varname> settings of a minute or so are
usually reasonable.
</para>
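<para>
For example, in <filename>postgresql.conf</filename> (the value is illustrative):
<programlisting>
archive_timeout = 60    # force a segment switch at least once a minute
</programlisting>
</para>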
<para>
Also, you can force a segment switch manually with
- <function>pg_switch_wal</> if you want to ensure that a
+ <function>pg_switch_wal</function> if you want to ensure that a
just-finished transaction is archived as soon as possible. Other utility
functions related to WAL management are listed in <xref
linkend="functions-admin-backup-table">.
</para>
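<para>
For illustration, a script that has just finished loading a batch of data might force the switch and then check which segment file the new insert position falls in (a minimal sketch using standard administration functions):
<programlisting>
-- complete the current segment so it becomes eligible for archiving
SELECT pg_switch_wal();
-- report the segment file that now contains the insert position
SELECT pg_walfile_name(pg_current_wal_lsn());
</programlisting>
</para>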
<para>
- When <varname>wal_level</> is <literal>minimal</> some SQL commands
+ When <varname>wal_level</varname> is <literal>minimal</literal> some SQL commands
are optimized to avoid WAL logging, as described in <xref
linkend="populate-pitr">. If archiving or streaming replication were
turned on during execution of one of these statements, WAL would not
contain enough information for archive recovery. (Crash recovery is
- unaffected.) For this reason, <varname>wal_level</> can only be changed at
- server start. However, <varname>archive_command</> can be changed with a
+ unaffected.) For this reason, <varname>wal_level</varname> can only be changed at
+ server start. However, <varname>archive_command</varname> can be changed with a
configuration file reload. If you wish to temporarily stop archiving,
- one way to do it is to set <varname>archive_command</> to the empty
- string (<literal>''</>).
- This will cause WAL files to accumulate in <filename>pg_wal/</> until a
- working <varname>archive_command</> is re-established.
+ one way to do it is to set <varname>archive_command</varname> to the empty
+ string (<literal>''</literal>).
+ This will cause WAL files to accumulate in <filename>pg_wal/</filename> until a
+ working <varname>archive_command</varname> is re-established.
</para>
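<para>
A sketch of that temporary-stop procedure, assuming <filename>postgresql.conf</filename> is edited directly:
<programlisting>
# archiving effectively paused; WAL accumulates in pg_wal/ until this is undone
archive_command = ''
</programlisting>
followed by a configuration reload, for example with <literal>SELECT pg_reload_conf()</literal> or <literal>pg_ctl reload</literal>.
</para>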
</sect2>
@@ -763,8 +763,8 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<para>
It is not necessary to be concerned about the amount of time it takes
to make a base backup. However, if you normally run the
- server with <varname>full_page_writes</> disabled, you might notice a drop
- in performance while the backup runs since <varname>full_page_writes</> is
+ server with <varname>full_page_writes</varname> disabled, you might notice a drop
+ in performance while the backup runs since <varname>full_page_writes</varname> is
effectively forced on during backup mode.
</para>
@@ -772,13 +772,13 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
To make use of the backup, you will need to keep all the WAL
segment files generated during and after the file system backup.
To aid you in doing this, the base backup process
- creates a <firstterm>backup history file</> that is immediately
+ creates a <firstterm>backup history file</firstterm> that is immediately
stored into the WAL archive area. This file is named after the first
WAL segment file that you need for the file system backup.
For example, if the starting WAL file is
- <literal>0000000100001234000055CD</> the backup history file will be
+ <literal>0000000100001234000055CD</literal> the backup history file will be
named something like
- <literal>0000000100001234000055CD.007C9330.backup</>. (The second
+ <literal>0000000100001234000055CD.007C9330.backup</literal>. (The second
part of the file name stands for an exact position within the WAL
file, and can ordinarily be ignored.) Once you have safely archived
the file system backup and the WAL segment files used during the
@@ -847,14 +847,14 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<programlisting>
SELECT pg_start_backup('label', false, false);
</programlisting>
- where <literal>label</> is any string you want to use to uniquely
+ where <literal>label</literal> is any string you want to use to uniquely
identify this backup operation. The connection
- calling <function>pg_start_backup</> must be maintained until the end of
+ calling <function>pg_start_backup</function> must be maintained until the end of
the backup, or the backup will be automatically aborted.
</para>
<para>
- By default, <function>pg_start_backup</> can take a long time to finish.
+ By default, <function>pg_start_backup</function> can take a long time to finish.
This is because it performs a checkpoint, and the I/O
required for the checkpoint will be spread out over a significant
period of time, by default half your inter-checkpoint interval
@@ -862,19 +862,19 @@ SELECT pg_start_backup('label', false, false);
<xref linkend="guc-checkpoint-completion-target">). This is
usually what you want, because it minimizes the impact on query
processing. If you want to start the backup as soon as
- possible, change the second parameter to <literal>true</>, which will
+ possible, change the second parameter to <literal>true</literal>, which will
issue an immediate checkpoint using as much I/O as available.
</para>
<para>
- The third parameter being <literal>false</> tells
- <function>pg_start_backup</> to initiate a non-exclusive base backup.
+ The third parameter being <literal>false</literal> tells
+ <function>pg_start_backup</function> to initiate a non-exclusive base backup.
</para>
</listitem>
<listitem>
<para>
Perform the backup, using any convenient file-system-backup tool
- such as <application>tar</> or <application>cpio</> (not
+ such as <application>tar</application> or <application>cpio</application> (not
<application>pg_dump</application> or
<application>pg_dumpall</application>). It is neither
necessary nor desirable to stop normal operation of the database
@@ -898,45 +898,45 @@ SELECT * FROM pg_stop_backup(false, true);
ready to archive.
</para>
<para>
- The <function>pg_stop_backup</> will return one row with three
+ The <function>pg_stop_backup</function> will return one row with three
values. The second of these fields should be written to a file named
- <filename>backup_label</> in the root directory of the backup. The
+ <filename>backup_label</filename> in the root directory of the backup. The
third field should be written to a file named
- <filename>tablespace_map</> unless the field is empty. These files are
+ <filename>tablespace_map</filename> unless the field is empty. These files are
vital to the backup working, and must be written without modification.
</para>
</listitem>
<listitem>
<para>
Once the WAL segment files active during the backup are archived, you are
- done. The file identified by <function>pg_stop_backup</>'s first return
+ done. The file identified by <function>pg_stop_backup</function>'s first return
value is the last segment that is required to form a complete set of
- backup files. On a primary, if <varname>archive_mode</> is enabled and the
- <literal>wait_for_archive</> parameter is <literal>true</>,
- <function>pg_stop_backup</> does not return until the last segment has
+ backup files. On a primary, if <varname>archive_mode</varname> is enabled and the
+ <literal>wait_for_archive</literal> parameter is <literal>true</literal>,
+ <function>pg_stop_backup</function> does not return until the last segment has
been archived.
- On a standby, <varname>archive_mode</> must be <literal>always</> in order
- for <function>pg_stop_backup</> to wait.
+ On a standby, <varname>archive_mode</varname> must be <literal>always</literal> in order
+ for <function>pg_stop_backup</function> to wait.
Archiving of these files happens automatically since you have
- already configured <varname>archive_command</>. In most cases this
+ already configured <varname>archive_command</varname>. In most cases this
happens quickly, but you are advised to monitor your archive
system to ensure there are no delays.
If the archive process has fallen behind
because of failures of the archive command, it will keep retrying
until the archive succeeds and the backup is complete.
If you wish to place a time limit on the execution of
- <function>pg_stop_backup</>, set an appropriate
+ <function>pg_stop_backup</function>, set an appropriate
<varname>statement_timeout</varname> value, but make note that if
- <function>pg_stop_backup</> terminates because of this your backup
+ <function>pg_stop_backup</function> terminates because of this your backup
may not be valid.
</para>
<para>
If the backup process monitors and ensures that all WAL segment files
required for the backup are successfully archived then the
- <literal>wait_for_archive</> parameter (which defaults to true) can be set
+ <literal>wait_for_archive</literal> parameter (which defaults to true) can be set
to false to have
- <function>pg_stop_backup</> return as soon as the stop backup record is
- written to the WAL. By default, <function>pg_stop_backup</> will wait
+ <function>pg_stop_backup</function> return as soon as the stop backup record is
+ written to the WAL. By default, <function>pg_stop_backup</function> will wait
until all WAL has been archived, which can take some time. This option
must be used with caution: if WAL archiving is not monitored correctly
then the backup might not include all of the WAL files and will
@@ -952,7 +952,7 @@ SELECT * FROM pg_stop_backup(false, true);
The process for an exclusive backup is mostly the same as for a
non-exclusive one, but it differs in a few key steps. This type of backup
can only be taken on a primary and does not allow concurrent backups.
- Prior to <productname>PostgreSQL</> 9.6, this
+ Prior to <productname>PostgreSQL</productname> 9.6, this
was the only low-level method available, but it is now recommended that
all users upgrade their scripts to use non-exclusive backups if possible.
</para>
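<para>
Before turning to the exclusive procedure, here is a condensed sketch of the non-exclusive sequence described above, kept in a single <application>psql</application> session so that the connection stays open for the whole backup (paths, label, and the <application>tar</application> invocation are illustrative only):
<programlisting>
psql &lt;&lt;'EOF'
SELECT pg_start_backup('nightly', false, false);
-- shell out while the session (and thus the backup) remains open
\! tar -cf /backups/base.tar --exclude=pg_wal -C /usr/local/pgsql data
SELECT * FROM pg_stop_backup(false, true);
EOF
</programlisting>
The second and third columns of the final result must still be saved as <filename>backup_label</filename> and <filename>tablespace_map</filename> in the root directory of the backup, as described above.
</para>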
@@ -971,20 +971,20 @@ SELECT * FROM pg_stop_backup(false, true);
<programlisting>
SELECT pg_start_backup('label');
</programlisting>
- where <literal>label</> is any string you want to use to uniquely
+ where <literal>label</literal> is any string you want to use to uniquely
identify this backup operation.
- <function>pg_start_backup</> creates a <firstterm>backup label</> file,
- called <filename>backup_label</>, in the cluster directory with
+ <function>pg_start_backup</function> creates a <firstterm>backup label</firstterm> file,
+ called <filename>backup_label</filename>, in the cluster directory with
information about your backup, including the start time and label string.
- The function also creates a <firstterm>tablespace map</> file,
- called <filename>tablespace_map</>, in the cluster directory with
- information about tablespace symbolic links in <filename>pg_tblspc/</> if
+ The function also creates a <firstterm>tablespace map</firstterm> file,
+ called <filename>tablespace_map</filename>, in the cluster directory with
+ information about tablespace symbolic links in <filename>pg_tblspc/</filename> if
one or more such links are present. Both files are critical to the
integrity of the backup, should you need to restore from it.
</para>
<para>
- By default, <function>pg_start_backup</> can take a long time to finish.
+ By default, <function>pg_start_backup</function> can take a long time to finish.
This is because it performs a checkpoint, and the I/O
required for the checkpoint will be spread out over a significant
period of time, by default half your inter-checkpoint interval
@@ -1002,7 +1002,7 @@ SELECT pg_start_backup('label', true);
<listitem>
<para>
Perform the backup, using any convenient file-system-backup tool
- such as <application>tar</> or <application>cpio</> (not
+ such as <application>tar</application> or <application>cpio</application> (not
<application>pg_dump</application> or
<application>pg_dumpall</application>). It is neither
necessary nor desirable to stop normal operation of the database
@@ -1012,7 +1012,7 @@ SELECT pg_start_backup('label', true);
</para>
<para>
Note that if the server crashes during the backup it may not be
- possible to restart until the <literal>backup_label</> file has been
+ possible to restart until the <literal>backup_label</literal> file has been
manually deleted from the <envar>PGDATA</envar> directory.
</para>
</listitem>
@@ -1033,22 +1033,22 @@ SELECT pg_stop_backup();
<listitem>
<para>
Once the WAL segment files active during the backup are archived, you are
- done. The file identified by <function>pg_stop_backup</>'s result is
+ done. The file identified by <function>pg_stop_backup</function>'s result is
the last segment that is required to form a complete set of backup files.
- If <varname>archive_mode</> is enabled,
- <function>pg_stop_backup</> does not return until the last segment has
+ If <varname>archive_mode</varname> is enabled,
+ <function>pg_stop_backup</function> does not return until the last segment has
been archived.
Archiving of these files happens automatically since you have
- already configured <varname>archive_command</>. In most cases this
+ already configured <varname>archive_command</varname>. In most cases this
happens quickly, but you are advised to monitor your archive
system to ensure there are no delays.
If the archive process has fallen behind
because of failures of the archive command, it will keep retrying
until the archive succeeds and the backup is complete.
If you wish to place a time limit on the execution of
- <function>pg_stop_backup</>, set an appropriate
+ <function>pg_stop_backup</function>, set an appropriate
<varname>statement_timeout</varname> value, but make note that if
- <function>pg_stop_backup</> terminates because of this your backup
+ <function>pg_stop_backup</function> terminates because of this your backup
may not be valid.
</para>
</listitem>
@@ -1063,21 +1063,21 @@ SELECT pg_stop_backup();
When taking a base backup of an active database, this situation is normal
and not an error. However, you need to ensure that you can distinguish
complaints of this sort from real errors. For example, some versions
- of <application>rsync</> return a separate exit code for
- <quote>vanished source files</>, and you can write a driver script to
+ of <application>rsync</application> return a separate exit code for
+ <quote>vanished source files</quote>, and you can write a driver script to
accept this exit code as a non-error case. Also, some versions of
- GNU <application>tar</> return an error code indistinguishable from
- a fatal error if a file was truncated while <application>tar</> was
- copying it. Fortunately, GNU <application>tar</> versions 1.16 and
+ GNU <application>tar</application> return an error code indistinguishable from
+ a fatal error if a file was truncated while <application>tar</application> was
+ copying it. Fortunately, GNU <application>tar</application> versions 1.16 and
later exit with 1 if a file was changed during the backup,
- and 2 for other errors. With GNU <application>tar</> version 1.23 and
+ and 2 for other errors. With GNU <application>tar</application> version 1.23 and
later, you can use the warning options <literal>--warning=no-file-changed
--warning=no-file-removed</literal> to hide the related warning messages.
</para>
<para>
Be certain that your backup includes all of the files under
- the database cluster directory (e.g., <filename>/usr/local/pgsql/data</>).
+ the database cluster directory (e.g., <filename>/usr/local/pgsql/data</filename>).
If you are using tablespaces that do not reside underneath this directory,
be careful to include them as well (and be sure that your backup
archives symbolic links as links, otherwise the restore will corrupt
@@ -1086,21 +1086,21 @@ SELECT pg_stop_backup();
<para>
You should, however, omit from the backup the files within the
- cluster's <filename>pg_wal/</> subdirectory. This
+ cluster's <filename>pg_wal/</filename> subdirectory. This
slight adjustment is worthwhile because it reduces the risk
of mistakes when restoring. This is easy to arrange if
- <filename>pg_wal/</> is a symbolic link pointing to someplace outside
+ <filename>pg_wal/</filename> is a symbolic link pointing to someplace outside
the cluster directory, which is a common setup anyway for performance
- reasons. You might also want to exclude <filename>postmaster.pid</>
- and <filename>postmaster.opts</>, which record information
- about the running <application>postmaster</>, not about the
- <application>postmaster</> which will eventually use this backup.
- (These files can confuse <application>pg_ctl</>.)
+ reasons. You might also want to exclude <filename>postmaster.pid</filename>
+ and <filename>postmaster.opts</filename>, which record information
+ about the running <application>postmaster</application>, not about the
+ <application>postmaster</application> which will eventually use this backup.
+ (These files can confuse <application>pg_ctl</application>.)
</para>
<para>
It is often a good idea to also omit from the backup the files
- within the cluster's <filename>pg_replslot/</> directory, so that
+ within the cluster's <filename>pg_replslot/</filename> directory, so that
replication slots that exist on the master do not become part of the
backup. Otherwise, the subsequent use of the backup to create a standby
may result in indefinite retention of WAL files on the standby, and
@@ -1114,10 +1114,10 @@ SELECT pg_stop_backup();
</para>
<para>
- The contents of the directories <filename>pg_dynshmem/</>,
- <filename>pg_notify/</>, <filename>pg_serial/</>,
- <filename>pg_snapshots/</>, <filename>pg_stat_tmp/</>,
- and <filename>pg_subtrans/</> (but not the directories themselves) can be
+ The contents of the directories <filename>pg_dynshmem/</filename>,
+ <filename>pg_notify/</filename>, <filename>pg_serial/</filename>,
+ <filename>pg_snapshots/</filename>, <filename>pg_stat_tmp/</filename>,
+ and <filename>pg_subtrans/</filename> (but not the directories themselves) can be
omitted from the backup as they will be initialized on postmaster startup.
If <xref linkend="guc-stats-temp-directory"> is set and is under the data
directory then the contents of that directory can also be omitted.
@@ -1131,13 +1131,13 @@ SELECT pg_stop_backup();
<para>
The backup label
- file includes the label string you gave to <function>pg_start_backup</>,
- as well as the time at which <function>pg_start_backup</> was run, and
+ file includes the label string you gave to <function>pg_start_backup</function>,
+ as well as the time at which <function>pg_start_backup</function> was run, and
the name of the starting WAL file. In case of confusion it is therefore
possible to look inside a backup file and determine exactly which
backup session the dump file came from. The tablespace map file includes
the symbolic link names as they exist in the directory
- <filename>pg_tblspc/</> and the full path of each symbolic link.
+ <filename>pg_tblspc/</filename> and the full path of each symbolic link.
These files are not merely for your information; their presence and
contents are critical to the proper operation of the system's recovery
process.
@@ -1146,7 +1146,7 @@ SELECT pg_stop_backup();
<para>
It is also possible to make a backup while the server is
stopped. In this case, you obviously cannot use
- <function>pg_start_backup</> or <function>pg_stop_backup</>, and
+ <function>pg_start_backup</function> or <function>pg_stop_backup</function>, and
you will therefore be left to your own devices to keep track of which
backup is which and how far back the associated WAL files go.
It is generally better to follow the continuous archiving procedure above.
@@ -1173,7 +1173,7 @@ SELECT pg_stop_backup();
location in case you need them later. Note that this precaution will
require that you have enough free space on your system to hold two
copies of your existing database. If you do not have enough space,
- you should at least save the contents of the cluster's <filename>pg_wal</>
+ you should at least save the contents of the cluster's <filename>pg_wal</filename>
subdirectory, as it might contain logs which
were not archived before the system went down.
</para>
@@ -1188,17 +1188,17 @@ SELECT pg_stop_backup();
<para>
Restore the database files from your file system backup. Be sure that they
are restored with the right ownership (the database system user, not
- <literal>root</>!) and with the right permissions. If you are using
+ <literal>root</literal>!) and with the right permissions. If you are using
tablespaces,
- you should verify that the symbolic links in <filename>pg_tblspc/</>
+ you should verify that the symbolic links in <filename>pg_tblspc/</filename>
were correctly restored.
</para>
</listitem>
<listitem>
<para>
- Remove any files present in <filename>pg_wal/</>; these came from the
+ Remove any files present in <filename>pg_wal/</filename>; these came from the
file system backup and are therefore probably obsolete rather than current.
- If you didn't archive <filename>pg_wal/</> at all, then recreate
+ If you didn't archive <filename>pg_wal/</filename> at all, then recreate
it with proper permissions,
being careful to ensure that you re-establish it as a symbolic link
if you had it set up that way before.
@@ -1207,16 +1207,16 @@ SELECT pg_stop_backup();
<listitem>
<para>
If you have unarchived WAL segment files that you saved in step 2,
- copy them into <filename>pg_wal/</>. (It is best to copy them,
+ copy them into <filename>pg_wal/</filename>. (It is best to copy them,
not move them, so you still have the unmodified files if a
problem occurs and you have to start over.)
</para>
</listitem>
<listitem>
<para>
- Create a recovery command file <filename>recovery.conf</> in the cluster
+ Create a recovery command file <filename>recovery.conf</filename> in the cluster
data directory (see <xref linkend="recovery-config">). You might
- also want to temporarily modify <filename>pg_hba.conf</> to prevent
+ also want to temporarily modify <filename>pg_hba.conf</filename> to prevent
ordinary users from connecting until you are sure the recovery was successful.
</para>
</listitem>
@@ -1227,7 +1227,7 @@ SELECT pg_stop_backup();
recovery be terminated because of an external error, the server can
simply be restarted and it will continue recovery. Upon completion
of the recovery process, the server will rename
- <filename>recovery.conf</> to <filename>recovery.done</> (to prevent
+ <filename>recovery.conf</filename> to <filename>recovery.done</filename> (to prevent
accidentally re-entering recovery mode later) and then
commence normal database operations.
</para>
@@ -1236,7 +1236,7 @@ SELECT pg_stop_backup();
<para>
Inspect the contents of the database to ensure you have recovered to
the desired state. If not, return to step 1. If all is well,
- allow your users to connect by restoring <filename>pg_hba.conf</> to normal.
+ allow your users to connect by restoring <filename>pg_hba.conf</filename> to normal.
</para>
</listitem>
</orderedlist>
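<para>
Condensed into shell form, the creation of the recovery command file and the restart might look like this (a sketch; the data directory and archive paths are illustrative):
<programlisting>
cat &gt; /usr/local/pgsql/data/recovery.conf &lt;&lt;'EOF'
restore_command = 'cp /mnt/server/archivedir/%f %p'
EOF
pg_ctl -D /usr/local/pgsql/data start

# once recovery completes, the file is renamed to recovery.done
ls /usr/local/pgsql/data/recovery.done
</programlisting>
</para>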
@@ -1245,32 +1245,32 @@ SELECT pg_stop_backup();
<para>
The key part of all this is to set up a recovery configuration file that
describes how you want to recover and how far the recovery should
- run. You can use <filename>recovery.conf.sample</> (normally
- located in the installation's <filename>share/</> directory) as a
+ run. You can use <filename>recovery.conf.sample</filename> (normally
+ located in the installation's <filename>share/</filename> directory) as a
prototype. The one thing that you absolutely must specify in
- <filename>recovery.conf</> is the <varname>restore_command</>,
- which tells <productname>PostgreSQL</> how to retrieve archived
- WAL file segments. Like the <varname>archive_command</>, this is
- a shell command string. It can contain <literal>%f</>, which is
- replaced by the name of the desired log file, and <literal>%p</>,
+ <filename>recovery.conf</filename> is the <varname>restore_command</varname>,
+ which tells <productname>PostgreSQL</productname> how to retrieve archived
+ WAL file segments. Like the <varname>archive_command</varname>, this is
+ a shell command string. It can contain <literal>%f</literal>, which is
+ replaced by the name of the desired log file, and <literal>%p</literal>,
which is replaced by the path name to copy the log file to.
(The path name is relative to the current working directory,
i.e., the cluster's data directory.)
- Write <literal>%%</> if you need to embed an actual <literal>%</>
+ Write <literal>%%</literal> if you need to embed an actual <literal>%</literal>
character in the command. The simplest useful command is
something like:
<programlisting>
restore_command = 'cp /mnt/server/archivedir/%f %p'
</programlisting>
which will copy previously archived WAL segments from the directory
- <filename>/mnt/server/archivedir</>. Of course, you can use something
+ <filename>/mnt/server/archivedir</filename>. Of course, you can use something
much more complicated, perhaps even a shell script that requests the
operator to mount an appropriate tape.
</para>
<para>
It is important that the command return nonzero exit status on failure.
- The command <emphasis>will</> be called requesting files that are not
+ The command <emphasis>will</emphasis> be called requesting files that are not
present in the archive; it must return nonzero when so asked. This is not
an error condition. An exception is that if the command was terminated by
a signal (other than <systemitem>SIGTERM</systemitem>, which is used as
@@ -1282,27 +1282,27 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
<para>
Not all of the requested files will be WAL segment
files; you should also expect requests for files with a suffix of
- <literal>.backup</> or <literal>.history</>. Also be aware that
- the base name of the <literal>%p</> path will be different from
- <literal>%f</>; do not expect them to be interchangeable.
+ <literal>.backup</literal> or <literal>.history</literal>. Also be aware that
+ the base name of the <literal>%p</literal> path will be different from
+ <literal>%f</literal>; do not expect them to be interchangeable.
</para>
<para>
WAL segments that cannot be found in the archive will be sought in
- <filename>pg_wal/</>; this allows use of recent un-archived segments.
+ <filename>pg_wal/</filename>; this allows use of recent un-archived segments.
However, segments that are available from the archive will be used in
- preference to files in <filename>pg_wal/</>.
+ preference to files in <filename>pg_wal/</filename>.
</para>
<para>
Normally, recovery will proceed through all available WAL segments,
thereby restoring the database to the current point in time (or as
close as possible given the available WAL segments). Therefore, a normal
- recovery will end with a <quote>file not found</> message, the exact text
+ recovery will end with a <quote>file not found</quote> message, the exact text
of the error message depending upon your choice of
- <varname>restore_command</>. You may also see an error message
+ <varname>restore_command</varname>. You may also see an error message
at the start of recovery for a file named something like
- <filename>00000001.history</>. This is also normal and does not
+ <filename>00000001.history</filename>. This is also normal and does not
indicate a problem in simple recovery situations; see
<xref linkend="backup-timelines"> for discussion.
</para>
@@ -1310,8 +1310,8 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
<para>
If you want to recover to some previous point in time (say, right before
the junior DBA dropped your main transaction table), just specify the
- required <link linkend="recovery-target-settings">stopping point</link> in <filename>recovery.conf</>. You can specify
- the stop point, known as the <quote>recovery target</>, either by
+ required <link linkend="recovery-target-settings">stopping point</link> in <filename>recovery.conf</filename>. You can specify
+ the stop point, known as the <quote>recovery target</quote>, either by
date/time, named restore point or by completion of a specific transaction
ID. As of this writing only the date/time and named restore point options
are very usable, since there are no tools to help you identify with any
@@ -1321,7 +1321,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
<note>
<para>
The stop point must be after the ending time of the base backup, i.e.,
- the end time of <function>pg_stop_backup</>. You cannot use a base backup
+ the end time of <function>pg_stop_backup</function>. You cannot use a base backup
to recover to a time when that backup was in progress. (To
recover to such a time, you must go back to your previous base backup
and roll forward from there.)
@@ -1332,14 +1332,14 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
If recovery finds corrupted WAL data, recovery will
halt at that point and the server will not start. In such a case the
recovery process could be re-run from the beginning, specifying a
- <quote>recovery target</> before the point of corruption so that recovery
+ <quote>recovery target</quote> before the point of corruption so that recovery
can complete normally.
If recovery fails for an external reason, such as a system crash or
if the WAL archive has become inaccessible, then the recovery can simply
be restarted and it will restart almost from where it failed.
Recovery restart works much like checkpointing in normal operation:
the server periodically forces all its state to disk, and then updates
- the <filename>pg_control</> file to indicate that the already-processed
+ the <filename>pg_control</filename> file to indicate that the already-processed
WAL data need not be scanned again.
</para>
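<para>
Putting these pieces together, a <filename>recovery.conf</filename> for a point-in-time restore might look like the following (the archive path and timestamp are placeholders):
<programlisting>
restore_command = 'cp /mnt/server/archivedir/%f %p'
recovery_target_time = '2017-10-10 17:14:00'
# alternatively, stop at a named restore point:
# recovery_target_name = 'before_table_drop'
</programlisting>
A named restore point for later use as a target can be created ahead of time with <literal>SELECT pg_create_restore_point('before_table_drop')</literal>.
</para>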
@@ -1359,7 +1359,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
suppose you dropped a critical table at 5:15PM on Tuesday evening, but
didn't realize your mistake until Wednesday noon.
Unfazed, you get out your backup, restore to the point-in-time 5:14PM
- Tuesday evening, and are up and running. In <emphasis>this</> history of
+ Tuesday evening, and are up and running. In <emphasis>this</emphasis> history of
the database universe, you never dropped the table. But suppose
you later realize this wasn't such a great idea, and would like
to return to sometime Wednesday morning in the original history.
@@ -1372,8 +1372,8 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
</para>
<para>
- To deal with this problem, <productname>PostgreSQL</> has a notion
- of <firstterm>timelines</>. Whenever an archive recovery completes,
+ To deal with this problem, <productname>PostgreSQL</productname> has a notion
+ of <firstterm>timelines</firstterm>. Whenever an archive recovery completes,
a new timeline is created to identify the series of WAL records
generated after that recovery. The timeline
ID number is part of WAL segment file names so a new timeline does
@@ -1384,13 +1384,13 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
and so have to do several point-in-time recoveries by trial and error
until you find the best place to branch off from the old history. Without
timelines this process would soon generate an unmanageable mess. With
- timelines, you can recover to <emphasis>any</> prior state, including
+ timelines, you can recover to <emphasis>any</emphasis> prior state, including
states in timeline branches that you abandoned earlier.
</para>
<para>
- Every time a new timeline is created, <productname>PostgreSQL</> creates
- a <quote>timeline history</> file that shows which timeline it branched
+ Every time a new timeline is created, <productname>PostgreSQL</productname> creates
+ a <quote>timeline history</quote> file that shows which timeline it branched
off from and when. These history files are necessary to allow the system
to pick the right WAL segment files when recovering from an archive that
contains multiple timelines. Therefore, they are archived into the WAL
@@ -1408,7 +1408,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
that was current when the base backup was taken. If you wish to recover
into some child timeline (that is, you want to return to some state that
was itself generated after a recovery attempt), you need to specify the
- target timeline ID in <filename>recovery.conf</>. You cannot recover into
+ target timeline ID in <filename>recovery.conf</filename>. You cannot recover into
timelines that branched off earlier than the base backup.
</para>
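<para>
For example, to recover onto a particular child timeline you might add the following to <filename>recovery.conf</filename> (the timeline ID is illustrative):
<programlisting>
recovery_target_timeline = '3'    # or 'latest' to follow the newest timeline in the archive
</programlisting>
</para>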
</sect2>
@@ -1424,18 +1424,18 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
<title>Standalone Hot Backups</title>
<para>
- It is possible to use <productname>PostgreSQL</>'s backup facilities to
+ It is possible to use <productname>PostgreSQL</productname>'s backup facilities to
produce standalone hot backups. These are backups that cannot be used
for point-in-time recovery, yet are typically much faster to backup and
- restore than <application>pg_dump</> dumps. (They are also much larger
- than <application>pg_dump</> dumps, so in some cases the speed advantage
+ restore than <application>pg_dump</application> dumps. (They are also much larger
+ than <application>pg_dump</application> dumps, so in some cases the speed advantage
might be negated.)
</para>
<para>
As with base backups, the easiest way to produce a standalone
hot backup is to use the <xref linkend="app-pgbasebackup">
- tool. If you include the <literal>-X</> parameter when calling
+ tool. If you include the <literal>-X</literal> parameter when calling
it, all the write-ahead log required to use the backup will be
included in the backup automatically, and no special action is
required to restore the backup.
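For example (the output directory is illustrative; connection options are omitted):
<programlisting>
pg_basebackup -D /backups/standalone -X stream -Ft -z
</programlisting>
This produces gzip-compressed tar output with the required WAL streamed into the backup.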
@@ -1445,16 +1445,16 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
If more flexibility in copying the backup files is needed, a lower
level process can be used for standalone hot backups as well.
To prepare for low level standalone hot backups, make sure
- <varname>wal_level</> is set to
- <literal>replica</> or higher, <varname>archive_mode</> to
- <literal>on</>, and set up an <varname>archive_command</> that performs
- archiving only when a <emphasis>switch file</> exists. For example:
+ <varname>wal_level</varname> is set to
+ <literal>replica</literal> or higher, <varname>archive_mode</varname> to
+ <literal>on</literal>, and set up an <varname>archive_command</varname> that performs
+ archiving only when a <emphasis>switch file</emphasis> exists. For example:
<programlisting>
archive_command = 'test ! -f /var/lib/pgsql/backup_in_progress || (test ! -f /var/lib/pgsql/archive/%f &amp;&amp; cp %p /var/lib/pgsql/archive/%f)'
</programlisting>
This command will perform archiving when
- <filename>/var/lib/pgsql/backup_in_progress</> exists, and otherwise
- silently return zero exit status (allowing <productname>PostgreSQL</>
+ <filename>/var/lib/pgsql/backup_in_progress</filename> exists, and otherwise
+ silently return zero exit status (allowing <productname>PostgreSQL</productname>
to recycle the unwanted WAL file).
</para>
@@ -1469,11 +1469,11 @@ psql -c "select pg_stop_backup();"
rm /var/lib/pgsql/backup_in_progress
tar -rf /var/lib/pgsql/backup.tar /var/lib/pgsql/archive/
</programlisting>
- The switch file <filename>/var/lib/pgsql/backup_in_progress</> is
+ The switch file <filename>/var/lib/pgsql/backup_in_progress</filename> is
created first, enabling archiving of completed WAL files to occur.
After the backup the switch file is removed. Archived WAL files are
then added to the backup so that both base backup and all required
- WAL files are part of the same <application>tar</> file.
+ WAL files are part of the same <application>tar</application> file.
Please remember to add error handling to your backup scripts.
</para>
@@ -1488,7 +1488,7 @@ tar -rf /var/lib/pgsql/backup.tar /var/lib/pgsql/archive/
<programlisting>
archive_command = 'gzip &lt; %p &gt; /var/lib/pgsql/archive/%f'
</programlisting>
- You will then need to use <application>gunzip</> during recovery:
+ You will then need to use <application>gunzip</application> during recovery:
<programlisting>
restore_command = 'gunzip &lt; /mnt/server/archivedir/%f &gt; %p'
</programlisting>
@@ -1501,7 +1501,7 @@ restore_command = 'gunzip &lt; /mnt/server/archivedir/%f &gt; %p'
<para>
Many people choose to use scripts to define their
<varname>archive_command</varname>, so that their
- <filename>postgresql.conf</> entry looks very simple:
+ <filename>postgresql.conf</filename> entry looks very simple:
<programlisting>
archive_command = 'local_backup_script.sh "%p" "%f"'
</programlisting>
@@ -1509,7 +1509,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"'
more than a single command in the archiving process.
This allows all complexity to be managed within the script, which
can be written in a popular scripting language such as
- <application>bash</> or <application>perl</>.
+ <application>bash</application> or <application>perl</application>.
</para>
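<para>
A minimal version of such a script might look like the following (a sketch only; the destination directory is a placeholder, and a production script would add logging and error reporting):
<programlisting>
#!/bin/bash
# invoked by the server as: local_backup_script.sh "%p" "%f"
# archive_command runs with the data directory as current directory,
# so the relative path in $1 resolves correctly
wal_path="$1"     # %p: path of the segment to archive
wal_name="$2"     # %f: bare file name of the segment
dest=/var/lib/pgsql/archive

# refuse to overwrite a file that was already archived
test ! -f "$dest/$wal_name" || exit 1

# copy under a temporary name, then rename, so readers never see partial files
cp "$wal_path" "$dest/$wal_name.tmp" &amp;&amp; mv "$dest/$wal_name.tmp" "$dest/$wal_name"
</programlisting>
The script's exit status is that of its final command, so failures are reported back to the archiver as required.
</para>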
<para>
@@ -1543,7 +1543,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"'
<para>
When using an <varname>archive_command</varname> script, it's desirable
to enable <xref linkend="guc-logging-collector">.
- Any messages written to <systemitem>stderr</> from the script will then
+ Any messages written to <systemitem>stderr</systemitem> from the script will then
appear in the database server log, allowing complex configurations to
be diagnosed easily if they fail.
</para>
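<para>
For example, in <filename>postgresql.conf</filename>:
<programlisting>
logging_collector = on
log_directory = 'log'    # the default, relative to the data directory
</programlisting>
</para>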
@@ -1563,7 +1563,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"'
<para>
If a <xref linkend="sql-createdatabase">
command is executed while a base backup is being taken, and then
- the template database that the <command>CREATE DATABASE</> copied
+ the template database that the <command>CREATE DATABASE</command> copied
is modified while the base backup is still in progress, it is
possible that recovery will cause those modifications to be
propagated into the created database as well. This is of course
@@ -1602,7 +1602,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"'
before you do so.) Turning off page snapshots does not prevent
use of the logs for PITR operations. An area for future
development is to compress archived WAL data by removing
- unnecessary page copies even when <varname>full_page_writes</> is
+ unnecessary page copies even when <varname>full_page_writes</varname> is
on. In the meantime, administrators might wish to reduce the number
of page snapshots included in WAL by increasing the checkpoint
interval parameters as much as feasible.
diff --git a/doc/src/sgml/bgworker.sgml b/doc/src/sgml/bgworker.sgml
index ea1b5c0c8e..0b092f6e49 100644
--- a/doc/src/sgml/bgworker.sgml
+++ b/doc/src/sgml/bgworker.sgml
@@ -11,17 +11,17 @@
PostgreSQL can be extended to run user-supplied code in separate processes.
Such processes are started, stopped and monitored by <command>postgres</command>,
which permits them to have a lifetime closely linked to the server's status.
- These processes have the option to attach to <productname>PostgreSQL</>'s
+ These processes have the option to attach to <productname>PostgreSQL</productname>'s
shared memory area and to connect to databases internally; they can also run
multiple transactions serially, just like a regular client-connected server
- process. Also, by linking to <application>libpq</> they can connect to the
+ process. Also, by linking to <application>libpq</application> they can connect to the
server and behave like a regular client application.
</para>
<warning>
<para>
There are considerable robustness and security risks in using background
- worker processes because, being written in the <literal>C</> language,
+ worker processes because, being written in the <literal>C</literal> language,
they have unrestricted access to data. Administrators wishing to enable
modules that include background worker process should exercise extreme
caution. Only carefully audited modules should be permitted to run
@@ -31,15 +31,15 @@
<para>
Background workers can be initialized at the time that
- <productname>PostgreSQL</> is started by including the module name in
- <varname>shared_preload_libraries</>. A module wishing to run a background
+ <productname>PostgreSQL</productname> is started by including the module name in
+ <varname>shared_preload_libraries</varname>. A module wishing to run a background
worker can register it by calling
<function>RegisterBackgroundWorker(<type>BackgroundWorker *worker</type>)</function>
- from its <function>_PG_init()</>. Background workers can also be started
+ from its <function>_PG_init()</function>. Background workers can also be started
after the system is up and running by calling the function
<function>RegisterDynamicBackgroundWorker(<type>BackgroundWorker
*worker, BackgroundWorkerHandle **handle</type>)</function>. Unlike
- <function>RegisterBackgroundWorker</>, which can only be called from within
+ <function>RegisterBackgroundWorker</function>, which can only be called from within
the postmaster, <function>RegisterDynamicBackgroundWorker</function> must be
called from a regular backend.
</para>
@@ -65,7 +65,7 @@ typedef struct BackgroundWorker
</para>
<para>
- <structfield>bgw_name</> and <structfield>bgw_type</structfield> are
+ <structfield>bgw_name</structfield> and <structfield>bgw_type</structfield> are
strings to be used in log messages, process listings and similar contexts.
<structfield>bgw_type</structfield> should be the same for all background
workers of the same type, so that it is possible to group such workers in a
@@ -76,7 +76,7 @@ typedef struct BackgroundWorker
</para>
<para>
- <structfield>bgw_flags</> is a bitwise-or'd bit mask indicating the
+ <structfield>bgw_flags</structfield> is a bitwise-or'd bit mask indicating the
capabilities that the module wants. Possible values are:
<variablelist>
@@ -114,14 +114,14 @@ typedef struct BackgroundWorker
<para>
<structfield>bgw_start_time</structfield> is the server state during which
- <command>postgres</> should start the process; it can be one of
- <literal>BgWorkerStart_PostmasterStart</> (start as soon as
- <command>postgres</> itself has finished its own initialization; processes
+ <command>postgres</command> should start the process; it can be one of
+ <literal>BgWorkerStart_PostmasterStart</literal> (start as soon as
+ <command>postgres</command> itself has finished its own initialization; processes
requesting this are not eligible for database connections),
- <literal>BgWorkerStart_ConsistentState</> (start as soon as a consistent state
+ <literal>BgWorkerStart_ConsistentState</literal> (start as soon as a consistent state
has been reached in a hot standby, allowing processes to connect to
databases and run read-only queries), and
- <literal>BgWorkerStart_RecoveryFinished</> (start as soon as the system has
+ <literal>BgWorkerStart_RecoveryFinished</literal> (start as soon as the system has
entered normal read-write state). Note the last two values are equivalent
in a server that's not a hot standby. Note that this setting only indicates
when the processes are to be started; they do not stop when a different state
@@ -152,9 +152,9 @@ typedef struct BackgroundWorker
</para>
<para>
- <structfield>bgw_main_arg</structfield> is the <type>Datum</> argument
+ <structfield>bgw_main_arg</structfield> is the <type>Datum</type> argument
to the background worker main function. This main function should take a
- single argument of type <type>Datum</> and return <type>void</>.
+ single argument of type <type>Datum</type> and return <type>void</type>.
<structfield>bgw_main_arg</structfield> will be passed as the argument.
In addition, the global variable <literal>MyBgworkerEntry</literal>
points to a copy of the <structname>BackgroundWorker</structname> structure
@@ -165,39 +165,39 @@ typedef struct BackgroundWorker
<para>
On Windows (and anywhere else where <literal>EXEC_BACKEND</literal> is
defined) or in dynamic background workers it is not safe to pass a
- <type>Datum</> by reference, only by value. If an argument is required, it
+ <type>Datum</type> by reference, only by value. If an argument is required, it
is safest to pass an int32 or other small value and use that as an index
- into an array allocated in shared memory. If a value like a <type>cstring</>
+ into an array allocated in shared memory. If a value like a <type>cstring</type>
or <type>text</type> is passed then the pointer won't be valid from the
new background worker process.
</para>
<para>
<structfield>bgw_extra</structfield> can contain extra data to be passed
- to the background worker. Unlike <structfield>bgw_main_arg</>, this data
+ to the background worker. Unlike <structfield>bgw_main_arg</structfield>, this data
is not passed as an argument to the worker's main function, but it can be
accessed via <literal>MyBgworkerEntry</literal>, as discussed above.
</para>
<para>
<structfield>bgw_notify_pid</structfield> is the PID of a PostgreSQL
- backend process to which the postmaster should send <literal>SIGUSR1</>
+ backend process to which the postmaster should send <literal>SIGUSR1</literal>
when the process is started or exits. It should be 0 for workers registered
at postmaster startup time, or when the backend registering the worker does
not wish to wait for the worker to start up. Otherwise, it should be
- initialized to <literal>MyProcPid</>.
+ initialized to <literal>MyProcPid</literal>.
</para>
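<para>
Putting these fields together, a module's <function>_PG_init()</function> might register a worker roughly as follows (a sketch; the library, function, and worker names are illustrative):
<programlisting>
#include "postgres.h"
#include "fmgr.h"
#include "postmaster/bgworker.h"

PG_MODULE_MAGIC;

void _PG_init(void);
void sketch_worker_main(Datum main_arg);

void
sketch_worker_main(Datum main_arg)
{
    BackgroundWorkerUnblockSignals();
    /* a real worker would loop here, sleeping on a latch between work items */
}

void
_PG_init(void)
{
    BackgroundWorker worker;

    memset(&amp;worker, 0, sizeof(worker));
    snprintf(worker.bgw_name, BGW_MAXLEN, "sketch worker");
    snprintf(worker.bgw_type, BGW_MAXLEN, "sketch worker");
    worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
    worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    snprintf(worker.bgw_library_name, BGW_MAXLEN, "sketch_worker");
    snprintf(worker.bgw_function_name, BGW_MAXLEN, "sketch_worker_main");
    worker.bgw_main_arg = (Datum) 0;
    worker.bgw_notify_pid = 0;

    RegisterBackgroundWorker(&amp;worker);
}
</programlisting>
</para>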
<para>Once running, the process can connect to a database by calling
<function>BackgroundWorkerInitializeConnection(<parameter>char *dbname</parameter>, <parameter>char *username</parameter>)</function> or
<function>BackgroundWorkerInitializeConnectionByOid(<parameter>Oid dboid</parameter>, <parameter>Oid useroid</parameter>)</function>.
This allows the process to run transactions and queries using the
- <literal>SPI</literal> interface. If <varname>dbname</> is NULL or
- <varname>dboid</> is <literal>InvalidOid</>, the session is not connected
+ <literal>SPI</literal> interface. If <varname>dbname</varname> is NULL or
+ <varname>dboid</varname> is <literal>InvalidOid</literal>, the session is not connected
to any particular database, but shared catalogs can be accessed.
- If <varname>username</> is NULL or <varname>useroid</> is
- <literal>InvalidOid</>, the process will run as the superuser created
- during <command>initdb</>.
+ If <varname>username</varname> is NULL or <varname>useroid</varname> is
+ <literal>InvalidOid</literal>, the process will run as the superuser created
+ during <command>initdb</command>.
A background worker can only call one of these two functions, and only
once. It is not possible to switch databases.
</para>
@@ -207,24 +207,24 @@ typedef struct BackgroundWorker
background worker's main function, and must be unblocked by it; this is to
allow the process to customize its signal handlers, if necessary.
Signals can be unblocked in the new process by calling
- <function>BackgroundWorkerUnblockSignals</> and blocked by calling
- <function>BackgroundWorkerBlockSignals</>.
+ <function>BackgroundWorkerUnblockSignals</function> and blocked by calling
+ <function>BackgroundWorkerBlockSignals</function>.
</para>
<para>
If <structfield>bgw_restart_time</structfield> for a background worker is
- configured as <literal>BGW_NEVER_RESTART</>, or if it exits with an exit
- code of 0 or is terminated by <function>TerminateBackgroundWorker</>,
+ configured as <literal>BGW_NEVER_RESTART</literal>, or if it exits with an exit
+ code of 0 or is terminated by <function>TerminateBackgroundWorker</function>,
it will be automatically unregistered by the postmaster on exit.
Otherwise, it will be restarted after the time period configured via
- <structfield>bgw_restart_time</>, or immediately if the postmaster
+ <structfield>bgw_restart_time</structfield>, or immediately if the postmaster
reinitializes the cluster due to a backend failure. Backends which need
to suspend execution only temporarily should use an interruptible sleep
rather than exiting; this can be achieved by calling
<function>WaitLatch()</function>. Make sure the
- <literal>WL_POSTMASTER_DEATH</> flag is set when calling that function, and
+ <literal>WL_POSTMASTER_DEATH</literal> flag is set when calling that function, and
verify the return code for a prompt exit in the emergency case that
- <command>postgres</> itself has terminated.
+ <command>postgres</command> itself has terminated.
</para>
<para>
@@ -238,29 +238,29 @@ typedef struct BackgroundWorker
opaque handle that can subsequently be passed to
<function>GetBackgroundWorkerPid(<parameter>BackgroundWorkerHandle *</parameter>, <parameter>pid_t *</parameter>)</function> or
<function>TerminateBackgroundWorker(<parameter>BackgroundWorkerHandle *</parameter>)</function>.
- <function>GetBackgroundWorkerPid</> can be used to poll the status of the
- worker: a return value of <literal>BGWH_NOT_YET_STARTED</> indicates that
+ <function>GetBackgroundWorkerPid</function> can be used to poll the status of the
+ worker: a return value of <literal>BGWH_NOT_YET_STARTED</literal> indicates that
the worker has not yet been started by the postmaster;
<literal>BGWH_STOPPED</literal> indicates that it has been started but is
no longer running; and <literal>BGWH_STARTED</literal> indicates that it is
currently running. In this last case, the PID will also be returned via the
second argument.
- <function>TerminateBackgroundWorker</> causes the postmaster to send
- <literal>SIGTERM</> to the worker if it is running, and to unregister it
+ <function>TerminateBackgroundWorker</function> causes the postmaster to send
+ <literal>SIGTERM</literal> to the worker if it is running, and to unregister it
as soon as it is not.
</para>
<para>
In some cases, a process which registers a background worker may wish to
wait for the worker to start up. This can be accomplished by initializing
- <structfield>bgw_notify_pid</structfield> to <literal>MyProcPid</> and
+ <structfield>bgw_notify_pid</structfield> to <literal>MyProcPid</literal> and
then passing the <type>BackgroundWorkerHandle *</type> obtained at
registration time to
<function>WaitForBackgroundWorkerStartup(<parameter>BackgroundWorkerHandle
*handle</parameter>, <parameter>pid_t *</parameter>)</function> function.
This function will block until the postmaster has attempted to start the
background worker, or until the postmaster dies. If the background worker
- is running, the return value will <literal>BGWH_STARTED</>, and
+ is running, the return value will be <literal>BGWH_STARTED</literal>, and
the PID will be written to the provided address. Otherwise, the return
value will be <literal>BGWH_STOPPED</literal> or
<literal>BGWH_POSTMASTER_DIED</literal>.
@@ -279,7 +279,7 @@ typedef struct BackgroundWorker
</para>
<para>
- The <filename>src/test/modules/worker_spi</> module
+ The <filename>src/test/modules/worker_spi</filename> module
contains a working example,
which demonstrates some useful techniques.
</para>
diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml
index 5462bc38e4..d7547e6e92 100644
--- a/doc/src/sgml/biblio.sgml
+++ b/doc/src/sgml/biblio.sgml
@@ -171,7 +171,7 @@ [email protected]
<abstract>
<para>
Discusses SQL history and syntax, and describes the addition of
- <literal>INTERSECT</> and <literal>EXCEPT</> constructs into
+ <literal>INTERSECT</literal> and <literal>EXCEPT</literal> constructs into
<productname>PostgreSQL</productname>. Prepared as a Master's
Thesis with the support of O. Univ. Prof. Dr. Georg Gottlob and
Univ. Ass. Mag. Katrin Seyr at Vienna University of Technology.
diff --git a/doc/src/sgml/bki.sgml b/doc/src/sgml/bki.sgml
index af6d8d1d2a..33378b46ea 100644
--- a/doc/src/sgml/bki.sgml
+++ b/doc/src/sgml/bki.sgml
@@ -21,7 +21,7 @@
input file used by <application>initdb</application> is created as
part of building and installing <productname>PostgreSQL</productname>
by a program named <filename>genbki.pl</filename>, which reads some
- specially formatted C header files in the <filename>src/include/catalog/</>
+ specially formatted C header files in the <filename>src/include/catalog/</filename>
directory of the source tree. The created <acronym>BKI</acronym> file
is called <filename>postgres.bki</filename> and is
normally installed in the
@@ -67,13 +67,13 @@
<variablelist>
<varlistentry>
<term>
- <literal>create</>
+ <literal>create</literal>
<replaceable class="parameter">tablename</replaceable>
<replaceable class="parameter">tableoid</replaceable>
- <optional><literal>bootstrap</></optional>
- <optional><literal>shared_relation</></optional>
- <optional><literal>without_oids</></optional>
- <optional><literal>rowtype_oid</> <replaceable>oid</></optional>
+ <optional><literal>bootstrap</literal></optional>
+ <optional><literal>shared_relation</literal></optional>
+ <optional><literal>without_oids</literal></optional>
+ <optional><literal>rowtype_oid</literal> <replaceable>oid</replaceable></optional>
(<replaceable class="parameter">name1</replaceable> =
<replaceable class="parameter">type1</replaceable>
<optional>FORCE NOT NULL | FORCE NULL </optional> <optional>,
@@ -93,7 +93,7 @@
<para>
The following column types are supported directly by
- <filename>bootstrap.c</>: <type>bool</type>,
+ <filename>bootstrap.c</filename>: <type>bool</type>,
<type>bytea</type>, <type>char</type> (1 byte),
<type>name</type>, <type>int2</type>,
<type>int4</type>, <type>regproc</type>, <type>regclass</type>,
@@ -104,31 +104,31 @@
<type>_oid</type> (array), <type>_char</type> (array),
<type>_aclitem</type> (array). Although it is possible to create
tables containing columns of other types, this cannot be done until
- after <structname>pg_type</> has been created and filled with
+ after <structname>pg_type</structname> has been created and filled with
appropriate entries. (That effectively means that only these
column types can be used in bootstrapped tables, but non-bootstrap
catalogs can contain any built-in type.)
</para>
<para>
- When <literal>bootstrap</> is specified,
+ When <literal>bootstrap</literal> is specified,
the table will only be created on disk; nothing is entered into
<structname>pg_class</structname>,
<structname>pg_attribute</structname>, etc, for it. Thus the
table will not be accessible by ordinary SQL operations until
- such entries are made the hard way (with <literal>insert</>
+ such entries are made the hard way (with <literal>insert</literal>
commands). This option is used for creating
<structname>pg_class</structname> etc themselves.
</para>
<para>
- The table is created as shared if <literal>shared_relation</> is
+ The table is created as shared if <literal>shared_relation</literal> is
specified.
- It will have OIDs unless <literal>without_oids</> is specified.
- The table's row type OID (<structname>pg_type</> OID) can optionally
- be specified via the <literal>rowtype_oid</> clause; if not specified,
- an OID is automatically generated for it. (The <literal>rowtype_oid</>
- clause is useless if <literal>bootstrap</> is specified, but it can be
+ It will have OIDs unless <literal>without_oids</literal> is specified.
+ The table's row type OID (<structname>pg_type</structname> OID) can optionally
+ be specified via the <literal>rowtype_oid</literal> clause; if not specified,
+ an OID is automatically generated for it. (The <literal>rowtype_oid</literal>
+ clause is useless if <literal>bootstrap</literal> is specified, but it can be
provided anyway for documentation.)
</para>
</listitem>
@@ -136,7 +136,7 @@
<varlistentry>
<term>
- <literal>open</> <replaceable class="parameter">tablename</replaceable>
+ <literal>open</literal> <replaceable class="parameter">tablename</replaceable>
</term>
<listitem>
@@ -150,7 +150,7 @@
<varlistentry>
<term>
- <literal>close</> <optional><replaceable class="parameter">tablename</replaceable></optional>
+ <literal>close</literal> <optional><replaceable class="parameter">tablename</replaceable></optional>
</term>
<listitem>
@@ -163,7 +163,7 @@
<varlistentry>
<term>
- <literal>insert</> <optional><literal>OID =</> <replaceable class="parameter">oid_value</replaceable></optional> <literal>(</> <replaceable class="parameter">value1</replaceable> <replaceable class="parameter">value2</replaceable> ... <literal>)</>
+ <literal>insert</literal> <optional><literal>OID =</literal> <replaceable class="parameter">oid_value</replaceable></optional> <literal>(</literal> <replaceable class="parameter">value1</replaceable> <replaceable class="parameter">value2</replaceable> ... <literal>)</literal>
</term>
<listitem>
@@ -188,14 +188,14 @@
<varlistentry>
<term>
- <literal>declare</> <optional><literal>unique</></optional>
- <literal>index</> <replaceable class="parameter">indexname</replaceable>
+ <literal>declare</literal> <optional><literal>unique</literal></optional>
+ <literal>index</literal> <replaceable class="parameter">indexname</replaceable>
<replaceable class="parameter">indexoid</replaceable>
- <literal>on</> <replaceable class="parameter">tablename</replaceable>
- <literal>using</> <replaceable class="parameter">amname</replaceable>
- <literal>(</> <replaceable class="parameter">opclass1</replaceable>
+ <literal>on</literal> <replaceable class="parameter">tablename</replaceable>
+ <literal>using</literal> <replaceable class="parameter">amname</replaceable>
+ <literal>(</literal> <replaceable class="parameter">opclass1</replaceable>
<replaceable class="parameter">name1</replaceable>
- <optional>, ...</optional> <literal>)</>
+ <optional>, ...</optional> <literal>)</literal>
</term>
<listitem>
@@ -220,10 +220,10 @@
<varlistentry>
<term>
- <literal>declare toast</>
+ <literal>declare toast</literal>
<replaceable class="parameter">toasttableoid</replaceable>
<replaceable class="parameter">toastindexoid</replaceable>
- <literal>on</> <replaceable class="parameter">tablename</replaceable>
+ <literal>on</literal> <replaceable class="parameter">tablename</replaceable>
</term>
<listitem>
@@ -234,14 +234,14 @@
<replaceable class="parameter">toasttableoid</replaceable>
and its index is assigned OID
<replaceable class="parameter">toastindexoid</replaceable>.
- As with <literal>declare index</>, filling of the index
+ As with <literal>declare index</literal>, filling of the index
is postponed.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>build indices</></term>
+ <term><literal>build indices</literal></term>
<listitem>
<para>
@@ -257,17 +257,17 @@
<title>Structure of the Bootstrap <acronym>BKI</acronym> File</title>
<para>
- The <literal>open</> command cannot be used until the tables it uses
+ The <literal>open</literal> command cannot be used until the tables it uses
exist and have entries for the table that is to be opened.
- (These minimum tables are <structname>pg_class</>,
- <structname>pg_attribute</>, <structname>pg_proc</>, and
- <structname>pg_type</>.) To allow those tables themselves to be filled,
- <literal>create</> with the <literal>bootstrap</> option implicitly opens
+ (These minimum tables are <structname>pg_class</structname>,
+ <structname>pg_attribute</structname>, <structname>pg_proc</structname>, and
+ <structname>pg_type</structname>.) To allow those tables themselves to be filled,
+ <literal>create</literal> with the <literal>bootstrap</literal> option implicitly opens
the created table for data insertion.
</para>
<para>
- Also, the <literal>declare index</> and <literal>declare toast</>
+ Also, the <literal>declare index</literal> and <literal>declare toast</literal>
commands cannot be used until the system catalogs they need have been
created and filled in.
</para>
@@ -278,17 +278,17 @@
<orderedlist>
<listitem>
<para>
- <literal>create bootstrap</> one of the critical tables
+ <literal>create bootstrap</literal> one of the critical tables
</para>
</listitem>
<listitem>
<para>
- <literal>insert</> data describing at least the critical tables
+ <literal>insert</literal> data describing at least the critical tables
</para>
</listitem>
<listitem>
<para>
- <literal>close</>
+ <literal>close</literal>
</para>
</listitem>
<listitem>
@@ -298,22 +298,22 @@
</listitem>
<listitem>
<para>
- <literal>create</> (without <literal>bootstrap</>) a noncritical table
+ <literal>create</literal> (without <literal>bootstrap</literal>) a noncritical table
</para>
</listitem>
<listitem>
<para>
- <literal>open</>
+ <literal>open</literal>
</para>
</listitem>
<listitem>
<para>
- <literal>insert</> desired data
+ <literal>insert</literal> desired data
</para>
</listitem>
<listitem>
<para>
- <literal>close</>
+ <literal>close</literal>
</para>
</listitem>
<listitem>
@@ -328,7 +328,7 @@
</listitem>
<listitem>
<para>
- <literal>build indices</>
+ <literal>build indices</literal>
</para>
</listitem>
</orderedlist>
diff --git a/doc/src/sgml/bloom.sgml b/doc/src/sgml/bloom.sgml
index 396348c523..e13ebf80fd 100644
--- a/doc/src/sgml/bloom.sgml
+++ b/doc/src/sgml/bloom.sgml
@@ -8,7 +8,7 @@
</indexterm>
<para>
- <literal>bloom</> provides an index access method based on
+ <literal>bloom</literal> provides an index access method based on
<ulink url="http://en.wikipedia.org/wiki/Bloom_filter">Bloom filters</ulink>.
</para>
@@ -42,29 +42,29 @@
<title>Parameters</title>
<para>
- A <literal>bloom</> index accepts the following parameters in its
- <literal>WITH</> clause:
+ A <literal>bloom</literal> index accepts the following parameters in its
+ <literal>WITH</literal> clause:
</para>
<variablelist>
<varlistentry>
- <term><literal>length</></term>
+ <term><literal>length</literal></term>
<listitem>
<para>
Length of each signature (index entry) in bits. The default
- is <literal>80</> bits and maximum is <literal>4096</>.
+ is <literal>80</literal> bits and maximum is <literal>4096</literal>.
</para>
</listitem>
</varlistentry>
</variablelist>
<variablelist>
<varlistentry>
- <term><literal>col1 &mdash; col32</></term>
+ <term><literal>col1 &mdash; col32</literal></term>
<listitem>
<para>
Number of bits generated for each index column. Each parameter's name
refers to the number of the index column that it controls. The default
- is <literal>2</> bits and maximum is <literal>4095</>. Parameters for
+ is <literal>2</literal> bits and maximum is <literal>4095</literal>. Parameters for
index columns not actually used are ignored.
</para>
</listitem>
@@ -87,8 +87,8 @@ CREATE INDEX bloomidx ON tbloom USING bloom (i1,i2,i3)
<para>
The index is created with a signature length of 80 bits, with attributes
i1 and i2 mapped to 2 bits, and attribute i3 mapped to 4 bits. We could
- have omitted the <literal>length</>, <literal>col1</>,
- and <literal>col2</> specifications since those have the default values.
+ have omitted the <literal>length</literal>, <literal>col1</literal>,
+ and <literal>col2</literal> specifications since those have the default values.
</para>
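
Editorial sketch (not part of this patch): since the paragraph notes that the length, col1, and col2 settings match the defaults, the equivalent shortened index definition, using the tbloom example from the surrounding text, would be:

-- only the non-default setting needs to be spelled out;
-- length defaults to 80 bits and each column to 2 bits
CREATE INDEX bloomidx ON tbloom USING bloom (i1, i2, i3)
       WITH (col3 = 4);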
<para>
@@ -175,7 +175,7 @@ CREATE INDEX
Note the relatively large number of false positives: 2439 rows were
selected to be visited in the heap, but none actually matched the
query. We could reduce that by specifying a larger signature length.
- In this example, creating the index with <literal>length=200</>
+ In this example, creating the index with <literal>length=200</literal>
reduced the number of false positives to 55; but it doubled the index size
(to 306 MB) and ended up being slower for this query (125 ms overall).
</para>
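
Editorial sketch (not part of this patch): the larger-signature variant measured here corresponds to something like the following, assuming the per-column settings from the earlier example are kept.

-- a 200-bit signature: fewer false positives, but a larger index
CREATE INDEX bloomidx ON tbloom USING bloom (i1, i2, i3)
       WITH (length = 200, col3 = 4);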
@@ -213,7 +213,7 @@ CREATE INDEX
<para>
An operator class for bloom indexes requires only a hash function for the
indexed data type and an equality operator for searching. This example
- shows the operator class definition for the <type>text</> data type:
+ shows the operator class definition for the <type>text</type> data type:
</para>
<programlisting>
@@ -230,7 +230,7 @@ DEFAULT FOR TYPE text USING bloom AS
<itemizedlist>
<listitem>
<para>
- Only operator classes for <type>int4</> and <type>text</> are
+ Only operator classes for <type>int4</type> and <type>text</type> are
included with the module.
</para>
</listitem>
diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml
index 8dcc29925b..91c01700ed 100644
--- a/doc/src/sgml/brin.sgml
+++ b/doc/src/sgml/brin.sgml
@@ -16,7 +16,7 @@
<acronym>BRIN</acronym> is designed for handling very large tables
in which certain columns have some natural correlation with their
physical location within the table.
- A <firstterm>block range</> is a group of pages that are physically
+ A <firstterm>block range</firstterm> is a group of pages that are physically
adjacent in the table; for each block range, some summary info is stored
by the index.
For example, a table storing a store's sale orders might have
@@ -29,7 +29,7 @@
<para>
<acronym>BRIN</acronym> indexes can satisfy queries via regular bitmap
index scans, and will return all tuples in all pages within each range if
- the summary info stored by the index is <firstterm>consistent</> with the
+ the summary info stored by the index is <firstterm>consistent</firstterm> with the
query conditions.
The query executor is in charge of rechecking these tuples and discarding
those that do not match the query conditions &mdash; in other words, these
@@ -51,9 +51,9 @@
<para>
The size of the block range is determined at index creation time by
- the <literal>pages_per_range</> storage parameter. The number of index
+ the <literal>pages_per_range</literal> storage parameter. The number of index
entries will be equal to the size of the relation in pages divided by
- the selected value for <literal>pages_per_range</>. Therefore, the smaller
+ the selected value for <literal>pages_per_range</literal>. Therefore, the smaller
the number, the larger the index becomes (because of the need to
store more index entries), but at the same time the summary data stored can
be more precise and more data blocks can be skipped during an index scan.
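
Editorial sketch (not part of this patch): a concrete use of the storage parameter discussed here, echoing the sale-orders example above; the table and column names are hypothetical.

-- default pages_per_range is 128; a smaller value gives a larger
-- but more selective BRIN index
CREATE INDEX orders_date_brin_idx ON orders USING brin (order_date)
       WITH (pages_per_range = 32);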
@@ -99,9 +99,9 @@
</para>
<para>
- The <firstterm>minmax</>
+ The <firstterm>minmax</firstterm>
operator classes store the minimum and the maximum values appearing
- in the indexed column within the range. The <firstterm>inclusion</>
+ in the indexed column within the range. The <firstterm>inclusion</firstterm>
operator classes store a value which includes the values in the indexed
column within the range.
</para>
@@ -162,21 +162,21 @@
</entry>
</row>
<row>
- <entry><literal>box_inclusion_ops</></entry>
+ <entry><literal>box_inclusion_ops</literal></entry>
<entry><type>box</type></entry>
<entry>
- <literal>&lt;&lt;</>
- <literal>&amp;&lt;</>
- <literal>&amp;&amp;</>
- <literal>&amp;&gt;</>
- <literal>&gt;&gt;</>
- <literal>~=</>
- <literal>@&gt;</>
- <literal>&lt;@</>
- <literal>&amp;&lt;|</>
- <literal>&lt;&lt;|</>
+ <literal>&lt;&lt;</literal>
+ <literal>&amp;&lt;</literal>
+ <literal>&amp;&amp;</literal>
+ <literal>&amp;&gt;</literal>
+ <literal>&gt;&gt;</literal>
+ <literal>~=</literal>
+ <literal>@&gt;</literal>
+ <literal>&lt;@</literal>
+ <literal>&amp;&lt;|</literal>
+ <literal>&lt;&lt;|</literal>
<literal>|&gt;&gt;</literal>
- <literal>|&amp;&gt;</>
+ <literal>|&amp;&gt;</literal>
</entry>
</row>
<row>
@@ -249,11 +249,11 @@
<entry><literal>network_inclusion_ops</literal></entry>
<entry><type>inet</type></entry>
<entry>
- <literal>&amp;&amp;</>
- <literal>&gt;&gt;=</>
+ <literal>&amp;&amp;</literal>
+ <literal>&gt;&gt;=</literal>
<literal>&lt;&lt;=</literal>
<literal>=</literal>
- <literal>&gt;&gt;</>
+ <literal>&gt;&gt;</literal>
<literal>&lt;&lt;</literal>
</entry>
</row>
@@ -346,18 +346,18 @@
</entry>
</row>
<row>
- <entry><literal>range_inclusion_ops</></entry>
+ <entry><literal>range_inclusion_ops</literal></entry>
<entry><type>any range type</type></entry>
<entry>
- <literal>&lt;&lt;</>
- <literal>&amp;&lt;</>
- <literal>&amp;&amp;</>
- <literal>&amp;&gt;</>
- <literal>&gt;&gt;</>
- <literal>@&gt;</>
- <literal>&lt;@</>
- <literal>-|-</>
- <literal>=</>
+ <literal>&lt;&lt;</literal>
+ <literal>&amp;&lt;</literal>
+ <literal>&amp;&amp;</literal>
+ <literal>&amp;&gt;</literal>
+ <literal>&gt;&gt;</literal>
+ <literal>@&gt;</literal>
+ <literal>&lt;@</literal>
+ <literal>-|-</literal>
+ <literal>=</literal>
<literal>&lt;</literal>
<literal>&lt;=</literal>
<literal>=</literal>
@@ -505,11 +505,11 @@
<variablelist>
<varlistentry>
- <term><function>BrinOpcInfo *opcInfo(Oid type_oid)</></term>
+ <term><function>BrinOpcInfo *opcInfo(Oid type_oid)</function></term>
<listitem>
<para>
Returns internal information about the indexed columns' summary data.
- The return value must point to a palloc'd <structname>BrinOpcInfo</>,
+ The return value must point to a palloc'd <structname>BrinOpcInfo</structname>,
which has this definition:
<programlisting>
typedef struct BrinOpcInfo
@@ -524,7 +524,7 @@ typedef struct BrinOpcInfo
TypeCacheEntry *oi_typcache[FLEXIBLE_ARRAY_MEMBER];
} BrinOpcInfo;
</programlisting>
- <structname>BrinOpcInfo</>.<structfield>oi_opaque</> can be used by the
+ <structname>BrinOpcInfo</structname>.<structfield>oi_opaque</structfield> can be used by the
operator class routines to pass information between support procedures
during an index scan.
</para>
@@ -797,8 +797,8 @@ typedef struct BrinOpcInfo
It should accept two arguments with the same data type as the operator class,
and return the union of them. The inclusion operator class can store union
values with different data types if it is defined with the
- <literal>STORAGE</> parameter. The return value of the union
- function should match the <literal>STORAGE</> data type.
+ <literal>STORAGE</literal> parameter. The return value of the union
+ function should match the <literal>STORAGE</literal> data type.
</para>
<para>
@@ -823,11 +823,11 @@ typedef struct BrinOpcInfo
on another operator strategy as shown in
<xref linkend="brin-extensibility-inclusion-table">, or the same
operator strategy as themselves. They require the dependency
- operator to be defined with the <literal>STORAGE</> data type as the
+ operator to be defined with the <literal>STORAGE</literal> data type as the
left-hand-side argument and the other supported data type to be the
right-hand-side argument of the supported operator. See
- <literal>float4_minmax_ops</> as an example of minmax, and
- <literal>box_inclusion_ops</> as an example of inclusion.
+ <literal>float4_minmax_ops</literal> as an example of minmax, and
+ <literal>box_inclusion_ops</literal> as an example of inclusion.
</para>
</sect1>
</chapter>
diff --git a/doc/src/sgml/btree-gin.sgml b/doc/src/sgml/btree-gin.sgml
index 375e7ec4be..e491fa76e7 100644
--- a/doc/src/sgml/btree-gin.sgml
+++ b/doc/src/sgml/btree-gin.sgml
@@ -8,16 +8,16 @@
</indexterm>
<para>
- <filename>btree_gin</> provides sample GIN operator classes that
+ <filename>btree_gin</filename> provides sample GIN operator classes that
implement B-tree equivalent behavior for the data types
- <type>int2</>, <type>int4</>, <type>int8</>, <type>float4</>,
- <type>float8</>, <type>timestamp with time zone</>,
- <type>timestamp without time zone</>, <type>time with time zone</>,
- <type>time without time zone</>, <type>date</>, <type>interval</>,
- <type>oid</>, <type>money</>, <type>"char"</>,
- <type>varchar</>, <type>text</>, <type>bytea</>, <type>bit</>,
- <type>varbit</>, <type>macaddr</>, <type>macaddr8</>, <type>inet</>,
- <type>cidr</>, and all <type>enum</> types.
+ <type>int2</type>, <type>int4</type>, <type>int8</type>, <type>float4</type>,
+ <type>float8</type>, <type>timestamp with time zone</type>,
+ <type>timestamp without time zone</type>, <type>time with time zone</type>,
+ <type>time without time zone</type>, <type>date</type>, <type>interval</type>,
+ <type>oid</type>, <type>money</type>, <type>"char"</type>,
+ <type>varchar</type>, <type>text</type>, <type>bytea</type>, <type>bit</type>,
+ <type>varbit</type>, <type>macaddr</type>, <type>macaddr8</type>, <type>inet</type>,
+ <type>cidr</type>, and all <type>enum</type> types.
</para>
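
Editorial sketch (not part of this patch): a common reason to install these operator classes is to build a multicolumn GIN index that mixes a scalar column with a natively GIN-indexable one; the table and columns below are hypothetical, with category of type int4 and tags a text array.

CREATE EXTENSION btree_gin;

-- without btree_gin, the int4 column could not participate in a GIN index
CREATE INDEX docs_category_tags_idx ON documents USING gin (category, tags);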
<para>
diff --git a/doc/src/sgml/btree-gist.sgml b/doc/src/sgml/btree-gist.sgml
index f3c639c2f3..dcb939f1fb 100644
--- a/doc/src/sgml/btree-gist.sgml
+++ b/doc/src/sgml/btree-gist.sgml
@@ -8,16 +8,16 @@
</indexterm>
<para>
- <filename>btree_gist</> provides GiST index operator classes that
+ <filename>btree_gist</filename> provides GiST index operator classes that
implement B-tree equivalent behavior for the data types
- <type>int2</>, <type>int4</>, <type>int8</>, <type>float4</>,
- <type>float8</>, <type>numeric</>, <type>timestamp with time zone</>,
- <type>timestamp without time zone</>, <type>time with time zone</>,
- <type>time without time zone</>, <type>date</>, <type>interval</>,
- <type>oid</>, <type>money</>, <type>char</>,
- <type>varchar</>, <type>text</>, <type>bytea</>, <type>bit</>,
- <type>varbit</>, <type>macaddr</>, <type>macaddr8</>, <type>inet</>,
- <type>cidr</>, <type>uuid</>, and all <type>enum</> types.
+ <type>int2</type>, <type>int4</type>, <type>int8</type>, <type>float4</type>,
+ <type>float8</type>, <type>numeric</type>, <type>timestamp with time zone</type>,
+ <type>timestamp without time zone</type>, <type>time with time zone</type>,
+ <type>time without time zone</type>, <type>date</type>, <type>interval</type>,
+ <type>oid</type>, <type>money</type>, <type>char</type>,
+ <type>varchar</type>, <type>text</type>, <type>bytea</type>, <type>bit</type>,
+ <type>varbit</type>, <type>macaddr</type>, <type>macaddr8</type>, <type>inet</type>,
+ <type>cidr</type>, <type>uuid</type>, and all <type>enum</type> types.
</para>
<para>
@@ -33,7 +33,7 @@
</para>
<para>
- In addition to the typical B-tree search operators, <filename>btree_gist</>
+ In addition to the typical B-tree search operators, <filename>btree_gist</filename>
also provides index support for <literal>&lt;&gt;</literal> (<quote>not
equals</quote>). This may be useful in combination with an
<link linkend="SQL-CREATETABLE-EXCLUDE">exclusion constraint</link>,
@@ -42,14 +42,14 @@
<para>
Also, for data types for which there is a natural distance metric,
- <filename>btree_gist</> defines a distance operator <literal>&lt;-&gt;</>,
+ <filename>btree_gist</filename> defines a distance operator <literal>&lt;-&gt;</literal>,
and provides GiST index support for nearest-neighbor searches using
this operator. Distance operators are provided for
- <type>int2</>, <type>int4</>, <type>int8</>, <type>float4</>,
- <type>float8</>, <type>timestamp with time zone</>,
- <type>timestamp without time zone</>,
- <type>time without time zone</>, <type>date</>, <type>interval</>,
- <type>oid</>, and <type>money</>.
+ <type>int2</type>, <type>int4</type>, <type>int8</type>, <type>float4</type>,
+ <type>float8</type>, <type>timestamp with time zone</type>,
+ <type>timestamp without time zone</type>,
+ <type>time without time zone</type>, <type>date</type>, <type>interval</type>,
+ <type>oid</type>, and <type>money</type>.
</para>
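
Editorial sketch (not part of this patch): the &lt;&gt; support mentioned above is typically exercised through an exclusion constraint; the zoo schema below is illustrative only.

CREATE EXTENSION btree_gist;

-- each cage may contain only one kind of animal:
-- rows conflict when cage values are equal and animal values differ
CREATE TABLE zoo (
    cage   integer,
    animal text,
    EXCLUDE USING gist (cage WITH =, animal WITH <>)
);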
<sect2>
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index cfec2465d2..ef60a58631 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -387,7 +387,7 @@
</para>
<table>
- <title><structname>pg_aggregate</> Columns</title>
+ <title><structname>pg_aggregate</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -410,9 +410,9 @@
<entry><type>char</type></entry>
<entry></entry>
<entry>Aggregate kind:
- <literal>n</literal> for <quote>normal</> aggregates,
- <literal>o</literal> for <quote>ordered-set</> aggregates, or
- <literal>h</literal> for <quote>hypothetical-set</> aggregates
+ <literal>n</literal> for <quote>normal</quote> aggregates,
+ <literal>o</literal> for <quote>ordered-set</quote> aggregates, or
+ <literal>h</literal> for <quote>hypothetical-set</quote> aggregates
</entry>
</row>
<row>
@@ -421,7 +421,7 @@
<entry></entry>
<entry>Number of direct (non-aggregated) arguments of an ordered-set or
hypothetical-set aggregate, counting a variadic array as one argument.
- If equal to <structfield>pronargs</>, the aggregate must be variadic
+ If equal to <structfield>pronargs</structfield>, the aggregate must be variadic
and the variadic array describes the aggregated arguments as well as
the final direct arguments.
Always zero for normal aggregates.</entry>
@@ -592,7 +592,7 @@
</para>
<table>
- <title><structname>pg_am</> Columns</title>
+ <title><structname>pg_am</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -644,7 +644,7 @@
<note>
<para>
- Before <productname>PostgreSQL</> 9.6, <structname>pg_am</structname>
+ Before <productname>PostgreSQL</productname> 9.6, <structname>pg_am</structname>
contained many additional columns representing properties of index access
methods. That data is now only directly visible at the C code level.
However, <function>pg_index_column_has_property()</function> and related
@@ -667,8 +667,8 @@
The catalog <structname>pg_amop</structname> stores information about
operators associated with access method operator families. There is one
row for each operator that is a member of an operator family. A family
- member can be either a <firstterm>search</> operator or an
- <firstterm>ordering</> operator. An operator
+ member can be either a <firstterm>search</firstterm> operator or an
+ <firstterm>ordering</firstterm> operator. An operator
can appear in more than one family, but cannot appear in more than one
search position nor more than one ordering position within a family.
(It is allowed, though unlikely, for an operator to be used for both
@@ -676,7 +676,7 @@
</para>
<table>
- <title><structname>pg_amop</> Columns</title>
+ <title><structname>pg_amop</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -728,8 +728,8 @@
<entry><structfield>amoppurpose</structfield></entry>
<entry><type>char</type></entry>
<entry></entry>
- <entry>Operator purpose, either <literal>s</> for search or
- <literal>o</> for ordering</entry>
+ <entry>Operator purpose, either <literal>s</literal> for search or
+ <literal>o</literal> for ordering</entry>
</row>
<row>
@@ -759,26 +759,26 @@
</table>
<para>
- A <quote>search</> operator entry indicates that an index of this operator
+ A <quote>search</quote> operator entry indicates that an index of this operator
family can be searched to find all rows satisfying
- <literal>WHERE</>
- <replaceable>indexed_column</>
- <replaceable>operator</>
- <replaceable>constant</>.
+ <literal>WHERE</literal>
+ <replaceable>indexed_column</replaceable>
+ <replaceable>operator</replaceable>
+ <replaceable>constant</replaceable>.
Obviously, such an operator must return <type>boolean</type>, and its left-hand input
type must match the index's column data type.
</para>
<para>
- An <quote>ordering</> operator entry indicates that an index of this
+ An <quote>ordering</quote> operator entry indicates that an index of this
operator family can be scanned to return rows in the order represented by
- <literal>ORDER BY</>
- <replaceable>indexed_column</>
- <replaceable>operator</>
- <replaceable>constant</>.
+ <literal>ORDER BY</literal>
+ <replaceable>indexed_column</replaceable>
+ <replaceable>operator</replaceable>
+ <replaceable>constant</replaceable>.
Such an operator could return any sortable data type, though again
its left-hand input type must match the index's column data type.
- The exact semantics of the <literal>ORDER BY</> are specified by the
+ The exact semantics of the <literal>ORDER BY</literal> are specified by the
<structfield>amopsortfamily</structfield> column, which must reference
a B-tree operator family for the operator's result type.
</para>
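
Editorial sketch (not part of this patch): the search/ordering distinction can be inspected directly in the catalog, for example by listing the ordering members of all families.

-- amoppurpose is 's' for search operators, 'o' for ordering operators
SELECT amopfamily, amoplefttype::regtype, amoprighttype::regtype,
       amopopr::regoperator, amoppurpose
FROM pg_amop
WHERE amoppurpose = 'o';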
@@ -787,19 +787,19 @@
<para>
At present, it's assumed that the sort order for an ordering operator
is the default for the referenced operator family, i.e., <literal>ASC NULLS
- LAST</>. This might someday be relaxed by adding additional columns
+ LAST</literal>. This might someday be relaxed by adding additional columns
to specify sort options explicitly.
</para>
</note>
<para>
- An entry's <structfield>amopmethod</> must match the
- <structname>opfmethod</> of its containing operator family (including
- <structfield>amopmethod</> here is an intentional denormalization of the
+ An entry's <structfield>amopmethod</structfield> must match the
+ <structname>opfmethod</structname> of its containing operator family (including
+ <structfield>amopmethod</structfield> here is an intentional denormalization of the
catalog structure for performance reasons). Also,
- <structfield>amoplefttype</> and <structfield>amoprighttype</> must match
- the <structfield>oprleft</> and <structfield>oprright</> fields of the
- referenced <structname>pg_operator</> entry.
+ <structfield>amoplefttype</structfield> and <structfield>amoprighttype</structfield> must match
+ the <structfield>oprleft</structfield> and <structfield>oprright</structfield> fields of the
+ referenced <structname>pg_operator</structname> entry.
</para>
</sect1>
@@ -880,14 +880,14 @@
<para>
The usual interpretation of the
- <structfield>amproclefttype</> and <structfield>amprocrighttype</> fields
+ <structfield>amproclefttype</structfield> and <structfield>amprocrighttype</structfield> fields
is that they identify the left and right input types of the operator(s)
that a particular support procedure supports. For some access methods
these match the input data type(s) of the support procedure itself, for
- others not. There is a notion of <quote>default</> support procedures for
- an index, which are those with <structfield>amproclefttype</> and
- <structfield>amprocrighttype</> both equal to the index operator class's
- <structfield>opcintype</>.
+ others not. There is a notion of <quote>default</quote> support procedures for
+ an index, which are those with <structfield>amproclefttype</structfield> and
+ <structfield>amprocrighttype</structfield> both equal to the index operator class's
+ <structfield>opcintype</structfield>.
</para>
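
Editorial sketch (not part of this patch): the "default" support procedures described here are the pg_amproc rows whose left and right types both equal an operator class's opcintype; a rough way to list them per class within each family:

-- support procedures whose declared types match the class's input type
SELECT c.opcname, p.amprocnum, p.amproc
FROM pg_amproc p
JOIN pg_opclass c ON c.opcfamily = p.amprocfamily
WHERE p.amproclefttype = c.opcintype
  AND p.amprocrighttype = c.opcintype
LIMIT 10;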
</sect1>
@@ -909,7 +909,7 @@
</para>
<table>
- <title><structname>pg_attrdef</> Columns</title>
+ <title><structname>pg_attrdef</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -964,7 +964,7 @@
The <structfield>adsrc</structfield> field is historical, and is best
not used, because it does not track outside changes that might affect
the representation of the default value. Reverse-compiling the
- <structfield>adbin</structfield> field (with <function>pg_get_expr</> for
+ <structfield>adbin</structfield> field (with <function>pg_get_expr</function> for
example) is a better way to display the default value.
</para>
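
Editorial sketch (not part of this patch): following the advice above, the stored default expressions can be displayed by decompiling adbin rather than reading adsrc.

-- pg_get_expr() decompiles the adbin node tree against the owning table
SELECT adrelid::regclass AS rel, adnum AS column_number,
       pg_get_expr(adbin, adrelid) AS default_expression
FROM pg_attrdef
LIMIT 10;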
@@ -993,7 +993,7 @@
</para>
<table>
- <title><structname>pg_attribute</> Columns</title>
+ <title><structname>pg_attribute</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -1072,7 +1072,7 @@
<entry>
Number of dimensions, if the column is an array type; otherwise 0.
(Presently, the number of dimensions of an array is not enforced,
- so any nonzero value effectively means <quote>it's an array</>.)
+ so any nonzero value effectively means <quote>it's an array</quote>.)
</entry>
</row>
@@ -1096,7 +1096,7 @@
supplied at table creation time (for example, the maximum
length of a <type>varchar</type> column). It is passed to
type-specific input functions and length coercion functions.
- The value will generally be -1 for types that do not need <structfield>atttypmod</>.
+ The value will generally be -1 for types that do not need <structfield>atttypmod</structfield>.
</entry>
</row>
@@ -1105,7 +1105,7 @@
<entry><type>bool</type></entry>
<entry></entry>
<entry>
- A copy of <literal>pg_type.typbyval</> of this column's type
+ A copy of <literal>pg_type.typbyval</literal> of this column's type
</entry>
</row>
@@ -1114,7 +1114,7 @@
<entry><type>char</type></entry>
<entry></entry>
<entry>
- Normally a copy of <literal>pg_type.typstorage</> of this
+ Normally a copy of <literal>pg_type.typstorage</literal> of this
column's type. For TOAST-able data types, this can be altered
after column creation to control storage policy.
</entry>
@@ -1125,7 +1125,7 @@
<entry><type>char</type></entry>
<entry></entry>
<entry>
- A copy of <literal>pg_type.typalign</> of this column's type
+ A copy of <literal>pg_type.typalign</literal> of this column's type
</entry>
</row>
@@ -1216,7 +1216,7 @@
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- Attribute-level options, as <quote>keyword=value</> strings
+ Attribute-level options, as <quote>keyword=value</quote> strings
</entry>
</row>
@@ -1225,7 +1225,7 @@
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- Attribute-level foreign data wrapper options, as <quote>keyword=value</> strings
+ Attribute-level foreign data wrapper options, as <quote>keyword=value</quote> strings
</entry>
</row>
@@ -1237,9 +1237,9 @@
In a dropped column's <structname>pg_attribute</structname> entry,
<structfield>atttypid</structfield> is reset to zero, but
<structfield>attlen</structfield> and the other fields copied from
- <structname>pg_type</> are still valid. This arrangement is needed
+ <structname>pg_type</structname> are still valid. This arrangement is needed
to cope with the situation where the dropped column's data type was
- later dropped, and so there is no <structname>pg_type</> row anymore.
+ later dropped, and so there is no <structname>pg_type</structname> row anymore.
<structfield>attlen</structfield> and the other fields can be used
to interpret the contents of a row of the table.
</para>
@@ -1256,9 +1256,9 @@
<para>
The catalog <structname>pg_authid</structname> contains information about
database authorization identifiers (roles). A role subsumes the concepts
- of <quote>users</> and <quote>groups</>. A user is essentially just a
- role with the <structfield>rolcanlogin</> flag set. Any role (with or
- without <structfield>rolcanlogin</>) can have other roles as members; see
+ of <quote>users</quote> and <quote>groups</quote>. A user is essentially just a
+ role with the <structfield>rolcanlogin</structfield> flag set. Any role (with or
+ without <structfield>rolcanlogin</structfield>) can have other roles as members; see
<link linkend="catalog-pg-auth-members"><structname>pg_auth_members</structname></link>.
</para>
@@ -1283,7 +1283,7 @@
</para>
<table>
- <title><structname>pg_authid</> Columns</title>
+ <title><structname>pg_authid</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -1390,20 +1390,20 @@
<para>
For an MD5 encrypted password, <structfield>rolpassword</structfield>
- column will begin with the string <literal>md5</> followed by a
+ column will begin with the string <literal>md5</literal> followed by a
32-character hexadecimal MD5 hash. The MD5 hash will be of the user's
password concatenated to their user name. For example, if user
- <literal>joe</> has password <literal>xyzzy</>, <productname>PostgreSQL</>
- will store the md5 hash of <literal>xyzzyjoe</>.
+ <literal>joe</literal> has password <literal>xyzzy</literal>, <productname>PostgreSQL</productname>
+ will store the md5 hash of <literal>xyzzyjoe</literal>.
</para>
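
Editorial sketch (not part of this patch): the stored value described here can be reproduced with the built-in md5() function, using the joe/xyzzy example from the text.

-- password concatenated with the user name, hashed, then prefixed with 'md5'
SELECT 'md5' || md5('xyzzy' || 'joe') AS rolpassword;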
<para>
If the password is encrypted with SCRAM-SHA-256, it has the format:
<synopsis>
-SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;</>$<replaceable>&lt;StoredKey&gt;</>:<replaceable>&lt;ServerKey&gt;</>
+SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&lt;salt&gt;</replaceable>$<replaceable>&lt;StoredKey&gt;</replaceable>:<replaceable>&lt;ServerKey&gt;</replaceable>
</synopsis>
- where <replaceable>salt</>, <replaceable>StoredKey</> and
- <replaceable>ServerKey</> are in Base64 encoded format. This format is
+ where <replaceable>salt</replaceable>, <replaceable>StoredKey</replaceable> and
+ <replaceable>ServerKey</replaceable> are in Base64 encoded format. This format is
the same as that specified by RFC 5803.
</para>
@@ -1435,7 +1435,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_auth_members</> Columns</title>
+ <title><structname>pg_auth_members</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -1459,7 +1459,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>member</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-authid"><structname>pg_authid</structname></link>.oid</literal></entry>
- <entry>ID of a role that is a member of <structfield>roleid</></entry>
+ <entry>ID of a role that is a member of <structfield>roleid</structfield></entry>
</row>
<row>
@@ -1473,8 +1473,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>admin_option</structfield></entry>
<entry><type>bool</type></entry>
<entry></entry>
- <entry>True if <structfield>member</> can grant membership in
- <structfield>roleid</> to others</entry>
+ <entry>True if <structfield>member</structfield> can grant membership in
+ <structfield>roleid</structfield> to others</entry>
</row>
</tbody>
</tgroup>
@@ -1501,14 +1501,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
cannot be deduced from some generic rule. For example, casting between a
domain and its base type is not explicitly represented in
<structname>pg_cast</structname>. Another important exception is that
- <quote>automatic I/O conversion casts</>, those performed using a data
- type's own I/O functions to convert to or from <type>text</> or other
+ <quote>automatic I/O conversion casts</quote>, those performed using a data
+ type's own I/O functions to convert to or from <type>text</type> or other
string types, are not explicitly represented in
<structname>pg_cast</structname>.
</para>
<table>
- <title><structname>pg_cast</> Columns</title>
+ <title><structname>pg_cast</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -1558,11 +1558,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
Indicates what contexts the cast can be invoked in.
- <literal>e</> means only as an explicit cast (using
- <literal>CAST</> or <literal>::</> syntax).
- <literal>a</> means implicitly in assignment
+ <literal>e</literal> means only as an explicit cast (using
+ <literal>CAST</literal> or <literal>::</literal> syntax).
+ <literal>a</literal> means implicitly in assignment
to a target column, as well as explicitly.
- <literal>i</> means implicitly in expressions, as well as the
+ <literal>i</literal> means implicitly in expressions, as well as the
other cases.
</entry>
</row>
@@ -1572,9 +1572,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
Indicates how the cast is performed.
- <literal>f</> means that the function specified in the <structfield>castfunc</> field is used.
- <literal>i</> means that the input/output functions are used.
- <literal>b</> means that the types are binary-coercible, thus no conversion is required.
+ <literal>f</literal> means that the function specified in the <structfield>castfunc</structfield> field is used.
+ <literal>i</literal> means that the input/output functions are used.
+ <literal>b</literal> means that the types are binary-coercible, thus no conversion is required.
</entry>
</row>
</tbody>
@@ -1586,18 +1586,18 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
always take the cast source type as their first argument type, and
return the cast destination type as their result type. A cast
function can have up to three arguments. The second argument,
- if present, must be type <type>integer</>; it receives the type
+ if present, must be type <type>integer</type>; it receives the type
modifier associated with the destination type, or -1
if there is none. The third argument,
- if present, must be type <type>boolean</>; it receives <literal>true</>
- if the cast is an explicit cast, <literal>false</> otherwise.
+ if present, must be type <type>boolean</type>; it receives <literal>true</literal>
+ if the cast is an explicit cast, <literal>false</literal> otherwise.
</para>
<para>
It is legitimate to create a <structname>pg_cast</structname> entry
in which the source and target types are the same, if the associated
function takes more than one argument. Such entries represent
- <quote>length coercion functions</> that coerce values of the type
+ <quote>length coercion functions</quote> that coerce values of the type
to be legal for a particular type modifier value.
</para>
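
Editorial sketch (not part of this patch): the castcontext and castmethod codes explained above can be examined directly.

-- e/a/i = explicit/assignment/implicit; f/i/b = function/I-O/binary-coercible
SELECT castsource::regtype, casttarget::regtype, castcontext, castmethod
FROM pg_cast
ORDER BY 1, 2
LIMIT 10;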
@@ -1624,14 +1624,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
table. This includes indexes (but see also
<structname>pg_index</structname>), sequences (but see also
<structname>pg_sequence</structname>), views, materialized
- views, composite types, and TOAST tables; see <structfield>relkind</>.
+ views, composite types, and TOAST tables; see <structfield>relkind</structfield>.
Below, when we mean all of these
kinds of objects we speak of <quote>relations</quote>. Not all
columns are meaningful for all relation types.
</para>
<table>
- <title><structname>pg_class</> Columns</title>
+ <title><structname>pg_class</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -1673,7 +1673,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><literal><link linkend="catalog-pg-type"><structname>pg_type</structname></link>.oid</literal></entry>
<entry>
The OID of the data type that corresponds to this table's row type,
- if any (zero for indexes, which have no <structname>pg_type</> entry)
+ if any (zero for indexes, which have no <structname>pg_type</structname> entry)
</entry>
</row>
@@ -1706,7 +1706,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>oid</type></entry>
<entry></entry>
<entry>Name of the on-disk file of this relation; zero means this
- is a <quote>mapped</> relation whose disk file name is determined
+ is a <quote>mapped</quote> relation whose disk file name is determined
by low-level state</entry>
</row>
@@ -1795,8 +1795,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>
- <literal>p</> = permanent table, <literal>u</> = unlogged table,
- <literal>t</> = temporary table
+ <literal>p</literal> = permanent table, <literal>u</literal> = unlogged table,
+ <literal>t</literal> = temporary table
</entry>
</row>
@@ -1805,15 +1805,15 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>
- <literal>r</> = ordinary table,
- <literal>i</> = index,
- <literal>S</> = sequence,
- <literal>t</> = TOAST table,
- <literal>v</> = view,
- <literal>m</> = materialized view,
- <literal>c</> = composite type,
- <literal>f</> = foreign table,
- <literal>p</> = partitioned table
+ <literal>r</literal> = ordinary table,
+ <literal>i</literal> = index,
+ <literal>S</literal> = sequence,
+ <literal>t</literal> = TOAST table,
+ <literal>v</literal> = view,
+ <literal>m</literal> = materialized view,
+ <literal>c</literal> = composite type,
+ <literal>f</literal> = foreign table,
+ <literal>p</literal> = partitioned table
</entry>
</row>
@@ -1834,7 +1834,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>int2</type></entry>
<entry></entry>
<entry>
- Number of <literal>CHECK</> constraints on the table; see
+ Number of <literal>CHECK</literal> constraints on the table; see
<link linkend="catalog-pg-constraint"><structname>pg_constraint</structname></link> catalog
</entry>
</row>
@@ -1917,11 +1917,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>
- Columns used to form <quote>replica identity</> for rows:
- <literal>d</> = default (primary key, if any),
- <literal>n</> = nothing,
- <literal>f</> = all columns
- <literal>i</> = index with <structfield>indisreplident</structfield> set, or default
+ Columns used to form <quote>replica identity</quote> for rows:
+ <literal>d</literal> = default (primary key, if any),
+ <literal>n</literal> = nothing,
+ <literal>f</literal> = all columns
+ <literal>i</literal> = index with <structfield>indisreplident</structfield> set, or default
</entry>
</row>
@@ -1938,9 +1938,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
All transaction IDs before this one have been replaced with a permanent
- (<quote>frozen</>) transaction ID in this table. This is used to track
+ (<quote>frozen</quote>) transaction ID in this table. This is used to track
whether the table needs to be vacuumed in order to prevent transaction
- ID wraparound or to allow <literal>pg_xact</> to be shrunk. Zero
+ ID wraparound or to allow <literal>pg_xact</literal> to be shrunk. Zero
(<symbol>InvalidTransactionId</symbol>) if the relation is not a table.
</entry>
</row>
@@ -1953,7 +1953,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
All multixact IDs before this one have been replaced by a
transaction ID in this table. This is used to track
whether the table needs to be vacuumed in order to prevent multixact ID
- wraparound or to allow <literal>pg_multixact</> to be shrunk. Zero
+ wraparound or to allow <literal>pg_multixact</literal> to be shrunk. Zero
(<symbol>InvalidMultiXactId</symbol>) if the relation is not a table.
</entry>
</row>
@@ -1975,7 +1975,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- Access-method-specific options, as <quote>keyword=value</> strings
+ Access-method-specific options, as <quote>keyword=value</quote> strings
</entry>
</row>
@@ -1993,13 +1993,13 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</table>
<para>
- Several of the Boolean flags in <structname>pg_class</> are maintained
+ Several of the Boolean flags in <structname>pg_class</structname> are maintained
lazily: they are guaranteed to be true if that's the correct state, but
may not be reset to false immediately when the condition is no longer
- true. For example, <structfield>relhasindex</> is set by
+ true. For example, <structfield>relhasindex</structfield> is set by
<command>CREATE INDEX</command>, but it is never cleared by
<command>DROP INDEX</command>. Instead, <command>VACUUM</command> clears
- <structfield>relhasindex</> if it finds the table has no indexes. This
+ <structfield>relhasindex</structfield> if it finds the table has no indexes. This
arrangement avoids race conditions and improves concurrency.
</para>
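
Editorial sketch (not part of this patch): a quick look at the relkind, relpersistence, and lazily-maintained flag values discussed in this section; restricting to ordinary tables in pg_catalog is an arbitrary choice.

-- 'r' = ordinary table; relhasindex may lag behind DROP INDEX until VACUUM
SELECT relname, relkind, relpersistence, relhasindex, age(relfrozenxid)
FROM pg_class
WHERE relnamespace = 'pg_catalog'::regnamespace
  AND relkind = 'r'
ORDER BY relname
LIMIT 10;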
</sect1>
@@ -2019,7 +2019,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_collation</> Columns</title>
+ <title><structname>pg_collation</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -2082,14 +2082,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>collcollate</structfield></entry>
<entry><type>name</type></entry>
<entry></entry>
- <entry><symbol>LC_COLLATE</> for this collation object</entry>
+ <entry><symbol>LC_COLLATE</symbol> for this collation object</entry>
</row>
<row>
<entry><structfield>collctype</structfield></entry>
<entry><type>name</type></entry>
<entry></entry>
- <entry><symbol>LC_CTYPE</> for this collation object</entry>
+ <entry><symbol>LC_CTYPE</symbol> for this collation object</entry>
</row>
<row>
@@ -2107,27 +2107,27 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</table>
<para>
- Note that the unique key on this catalog is (<structfield>collname</>,
- <structfield>collencoding</>, <structfield>collnamespace</>) not just
- (<structfield>collname</>, <structfield>collnamespace</>).
+ Note that the unique key on this catalog is (<structfield>collname</structfield>,
+ <structfield>collencoding</structfield>, <structfield>collnamespace</structfield>) not just
+ (<structfield>collname</structfield>, <structfield>collnamespace</structfield>).
<productname>PostgreSQL</productname> generally ignores all
- collations that do not have <structfield>collencoding</> equal to
+ collations that do not have <structfield>collencoding</structfield> equal to
either the current database's encoding or -1, and creation of new entries
- with the same name as an entry with <structfield>collencoding</> = -1
+ with the same name as an entry with <structfield>collencoding</structfield> = -1
is forbidden. Therefore it is sufficient to use a qualified SQL name
- (<replaceable>schema</>.<replaceable>name</>) to identify a collation,
+ (<replaceable>schema</replaceable>.<replaceable>name</replaceable>) to identify a collation,
even though this is not unique according to the catalog definition.
The reason for defining the catalog this way is that
- <application>initdb</> fills it in at cluster initialization time with
+ <application>initdb</application> fills it in at cluster initialization time with
entries for all locales available on the system, so it must be able to
hold entries for all encodings that might ever be used in the cluster.
</para>
<para>
- In the <literal>template0</> database, it could be useful to create
+ In the <literal>template0</literal> database, it could be useful to create
collations whose encoding does not match the database encoding,
since they could match the encodings of databases later cloned from
- <literal>template0</>. This would currently have to be done manually.
+ <literal>template0</literal>. This would currently have to be done manually.
</para>
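
Editorial sketch (not part of this patch): the (collname, collencoding, collnamespace) key described here can be observed directly; the same collation name may appear once per encoding.

SELECT collname, collnamespace::regnamespace, collencoding, collcollate
FROM pg_collation
ORDER BY collname, collencoding
LIMIT 10;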
</sect1>
@@ -2143,13 +2143,13 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
key, unique, foreign key, and exclusion constraints on tables.
(Column constraints are not treated specially. Every column constraint is
equivalent to some table constraint.)
- Not-null constraints are represented in the <structname>pg_attribute</>
+ Not-null constraints are represented in the <structname>pg_attribute</structname>
catalog, not here.
</para>
<para>
User-defined constraint triggers (created with <command>CREATE CONSTRAINT
- TRIGGER</>) also give rise to an entry in this table.
+ TRIGGER</command>) also give rise to an entry in this table.
</para>
<para>
@@ -2157,7 +2157,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_constraint</> Columns</title>
+ <title><structname>pg_constraint</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -2198,12 +2198,12 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>
- <literal>c</> = check constraint,
- <literal>f</> = foreign key constraint,
- <literal>p</> = primary key constraint,
- <literal>u</> = unique constraint,
- <literal>t</> = constraint trigger,
- <literal>x</> = exclusion constraint
+ <literal>c</literal> = check constraint,
+ <literal>f</literal> = foreign key constraint,
+ <literal>p</literal> = primary key constraint,
+ <literal>u</literal> = unique constraint,
+ <literal>t</literal> = constraint trigger,
+ <literal>x</literal> = exclusion constraint
</entry>
</row>
@@ -2263,11 +2263,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>Foreign key update action code:
- <literal>a</> = no action,
- <literal>r</> = restrict,
- <literal>c</> = cascade,
- <literal>n</> = set null,
- <literal>d</> = set default
+ <literal>a</literal> = no action,
+ <literal>r</literal> = restrict,
+ <literal>c</literal> = cascade,
+ <literal>n</literal> = set null,
+ <literal>d</literal> = set default
</entry>
</row>
@@ -2276,11 +2276,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>Foreign key deletion action code:
- <literal>a</> = no action,
- <literal>r</> = restrict,
- <literal>c</> = cascade,
- <literal>n</> = set null,
- <literal>d</> = set default
+ <literal>a</literal> = no action,
+ <literal>r</literal> = restrict,
+ <literal>c</literal> = cascade,
+ <literal>n</literal> = set null,
+ <literal>d</literal> = set default
</entry>
</row>
@@ -2289,9 +2289,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>Foreign key match type:
- <literal>f</> = full,
- <literal>p</> = partial,
- <literal>s</> = simple
+ <literal>f</literal> = full,
+ <literal>p</literal> = partial,
+ <literal>s</literal> = simple
</entry>
</row>
@@ -2329,7 +2329,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<row>
<entry><structfield>conkey</structfield></entry>
<entry><type>int2[]</type></entry>
- <entry><literal><link linkend="catalog-pg-attribute"><structname>pg_attribute</structname></link>.attnum</></entry>
+ <entry><literal><link linkend="catalog-pg-attribute"><structname>pg_attribute</structname></link>.attnum</literal></entry>
<entry>If a table constraint (including foreign keys, but not constraint
triggers), list of the constrained columns</entry>
</row>
@@ -2337,35 +2337,35 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<row>
<entry><structfield>confkey</structfield></entry>
<entry><type>int2[]</type></entry>
- <entry><literal><link linkend="catalog-pg-attribute"><structname>pg_attribute</structname></link>.attnum</></entry>
+ <entry><literal><link linkend="catalog-pg-attribute"><structname>pg_attribute</structname></link>.attnum</literal></entry>
<entry>If a foreign key, list of the referenced columns</entry>
</row>
<row>
<entry><structfield>conpfeqop</structfield></entry>
<entry><type>oid[]</type></entry>
- <entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</></entry>
+ <entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</literal></entry>
<entry>If a foreign key, list of the equality operators for PK = FK comparisons</entry>
</row>
<row>
<entry><structfield>conppeqop</structfield></entry>
<entry><type>oid[]</type></entry>
- <entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</></entry>
+ <entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</literal></entry>
<entry>If a foreign key, list of the equality operators for PK = PK comparisons</entry>
</row>
<row>
<entry><structfield>conffeqop</structfield></entry>
<entry><type>oid[]</type></entry>
- <entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</></entry>
+ <entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</literal></entry>
<entry>If a foreign key, list of the equality operators for FK = FK comparisons</entry>
</row>
<row>
<entry><structfield>conexclop</structfield></entry>
<entry><type>oid[]</type></entry>
- <entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</></entry>
+ <entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</literal></entry>
<entry>If an exclusion constraint, list of the per-column exclusion operators</entry>
</row>
@@ -2392,7 +2392,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
For other cases, a zero appears in <structfield>conkey</structfield>
and the associated index must be consulted to discover the expression
that is constrained. (<structfield>conkey</structfield> thus has the
- same contents as <structname>pg_index</>.<structfield>indkey</> for the
+ same contents as <structname>pg_index</structname>.<structfield>indkey</structfield> for the
index.)
</para>
@@ -2400,7 +2400,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
<structfield>consrc</structfield> is not updated when referenced objects
change; for example, it won't track renaming of columns. Rather than
- relying on this field, it's best to use <function>pg_get_constraintdef()</>
+ relying on this field, it's best to use <function>pg_get_constraintdef()</function>
to extract the definition of a check constraint.
</para>
</note>
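
Editorial sketch (not part of this patch): applying the note's advice, a check constraint's definition is best reconstructed with pg_get_constraintdef(); the table name below is hypothetical.

-- contype 'c' = check constraint, per the table above
SELECT conname, pg_get_constraintdef(oid) AS definition
FROM pg_constraint
WHERE conrelid = 'mytable'::regclass
  AND contype = 'c';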
@@ -2429,7 +2429,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_conversion</> Columns</title>
+ <title><structname>pg_conversion</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -2529,7 +2529,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_database</> Columns</title>
+ <title><structname>pg_database</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -2592,7 +2592,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
If true, then this database can be cloned by
- any user with <literal>CREATEDB</> privileges;
+ any user with <literal>CREATEDB</literal> privileges;
if false, then only superusers or the owner of
the database can clone it.
</entry>
@@ -2604,7 +2604,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
If false then no one can connect to this database. This is
- used to protect the <literal>template0</> database from being altered.
+ used to protect the <literal>template0</literal> database from being altered.
</entry>
</row>
@@ -2634,11 +2634,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
All transaction IDs before this one have been replaced with a permanent
- (<quote>frozen</>) transaction ID in this database. This is used to
+ (<quote>frozen</quote>) transaction ID in this database. This is used to
track whether the database needs to be vacuumed in order to prevent
- transaction ID wraparound or to allow <literal>pg_xact</> to be shrunk.
+ transaction ID wraparound or to allow <literal>pg_xact</literal> to be shrunk.
It is the minimum of the per-table
- <structname>pg_class</>.<structfield>relfrozenxid</> values.
+ <structname>pg_class</structname>.<structfield>relfrozenxid</structfield> values.
</entry>
</row>
@@ -2650,9 +2650,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
All multixact IDs before this one have been replaced with a
transaction ID in this database. This is used to
track whether the database needs to be vacuumed in order to prevent
- multixact ID wraparound or to allow <literal>pg_multixact</> to be shrunk.
+ multixact ID wraparound or to allow <literal>pg_multixact</literal> to be shrunk.
It is the minimum of the per-table
- <structname>pg_class</>.<structfield>relminmxid</> values.
+ <structname>pg_class</structname>.<structfield>relminmxid</structfield> values.
</entry>
</row>
@@ -2663,7 +2663,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry>
The default tablespace for the database.
Within this database, all tables for which
- <structname>pg_class</>.<structfield>reltablespace</> is zero
+ <structname>pg_class</structname>.<structfield>reltablespace</structfield> is zero
will be stored in this tablespace; in particular, all the non-shared
system catalogs will be there.
</entry>
@@ -2707,7 +2707,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_db_role_setting</> Columns</title>
+ <title><structname>pg_db_role_setting</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -2754,12 +2754,12 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</indexterm>
<para>
- The catalog <structname>pg_default_acl</> stores initial
+ The catalog <structname>pg_default_acl</structname> stores initial
privileges to be assigned to newly created objects.
</para>
<table>
- <title><structname>pg_default_acl</> Columns</title>
+ <title><structname>pg_default_acl</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -2800,10 +2800,10 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
Type of object this entry is for:
- <literal>r</> = relation (table, view),
- <literal>S</> = sequence,
- <literal>f</> = function,
- <literal>T</> = type
+ <literal>r</literal> = relation (table, view),
+ <literal>S</literal> = sequence,
+ <literal>f</literal> = function,
+ <literal>T</literal> = type
</entry>
</row>
@@ -2820,21 +2820,21 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</table>
<para>
- A <structname>pg_default_acl</> entry shows the initial privileges to
+ A <structname>pg_default_acl</structname> entry shows the initial privileges to
be assigned to an object belonging to the indicated user. There are
- currently two types of entry: <quote>global</> entries with
- <structfield>defaclnamespace</> = 0, and <quote>per-schema</> entries
+ currently two types of entry: <quote>global</quote> entries with
+ <structfield>defaclnamespace</structfield> = 0, and <quote>per-schema</quote> entries
that reference a particular schema. If a global entry is present then
- it <emphasis>overrides</> the normal hard-wired default privileges
+ it <emphasis>overrides</emphasis> the normal hard-wired default privileges
for the object type. A per-schema entry, if present, represents privileges
- to be <emphasis>added to</> the global or hard-wired default privileges.
+ to be <emphasis>added to</emphasis> the global or hard-wired default privileges.
</para>
<para>
Note that when an ACL entry in another catalog is null, it is taken
to represent the hard-wired default privileges for its object,
- <emphasis>not</> whatever might be in <structname>pg_default_acl</>
- at the moment. <structname>pg_default_acl</> is only consulted during
+ <emphasis>not</emphasis> whatever might be in <structname>pg_default_acl</structname>
+ at the moment. <structname>pg_default_acl</structname> is only consulted during
object creation.
</para>
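  <para>
   For example, a default-privileges grant creates such an entry; the role
   <literal>alice</literal> and schema <literal>app</literal> below are
   hypothetical names used only for illustration:
<programlisting>
-- Per-schema entry: future tables created by alice in schema app
-- become readable by PUBLIC (alice and app are hypothetical names).
ALTER DEFAULT PRIVILEGES FOR ROLE alice IN SCHEMA app
    GRANT SELECT ON TABLES TO PUBLIC;

-- Inspect the resulting pg_default_acl entry.
SELECT defaclrole::regrole, defaclnamespace::regnamespace,
       defaclobjtype, defaclacl
FROM pg_default_acl;
</programlisting>
  </para>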
@@ -2851,9 +2851,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
The catalog <structname>pg_depend</structname> records the dependency
relationships between database objects. This information allows
- <command>DROP</> commands to find which other objects must be dropped
- by <command>DROP CASCADE</> or prevent dropping in the <command>DROP
- RESTRICT</> case.
+ <command>DROP</command> commands to find which other objects must be dropped
+ by <command>DROP CASCADE</command> or prevent dropping in the <command>DROP
+ RESTRICT</command> case.
</para>
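  <para>
   For example, the objects recorded as depending on a given table can be
   sketched with a query such as the following (the table name
   <literal>mytab</literal> is hypothetical):
<programlisting>
-- List objects that depend on mytab, with the dependency subflavor.
SELECT pg_describe_object(classid, objid, objsubid) AS dependent_object,
       deptype
FROM pg_depend
WHERE refclassid = 'pg_class'::regclass
  AND refobjid = 'mytab'::regclass;
</programlisting>
  </para>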
<para>
@@ -2863,7 +2863,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_depend</> Columns</title>
+ <title><structname>pg_depend</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -2896,7 +2896,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
For a table column, this is the column number (the
- <structfield>objid</> and <structfield>classid</> refer to the
+ <structfield>objid</structfield> and <structfield>classid</structfield> refer to the
table itself). For all other object types, this column is
zero.
</entry>
@@ -2922,7 +2922,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
For a table column, this is the column number (the
- <structfield>refobjid</> and <structfield>refclassid</> refer
+ <structfield>refobjid</structfield> and <structfield>refclassid</structfield> refer
to the table itself). For all other object types, this column
is zero.
</entry>
@@ -2945,17 +2945,17 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
In all cases, a <structname>pg_depend</structname> entry indicates that the
referenced object cannot be dropped without also dropping the dependent
object. However, there are several subflavors identified by
- <structfield>deptype</>:
+ <structfield>deptype</structfield>:
<variablelist>
<varlistentry>
- <term><symbol>DEPENDENCY_NORMAL</> (<literal>n</>)</term>
+ <term><symbol>DEPENDENCY_NORMAL</symbol> (<literal>n</literal>)</term>
<listitem>
<para>
A normal relationship between separately-created objects. The
dependent object can be dropped without affecting the
referenced object. The referenced object can only be dropped
- by specifying <literal>CASCADE</>, in which case the dependent
+ by specifying <literal>CASCADE</literal>, in which case the dependent
object is dropped, too. Example: a table column has a normal
dependency on its data type.
</para>
@@ -2963,12 +2963,12 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</varlistentry>
<varlistentry>
- <term><symbol>DEPENDENCY_AUTO</> (<literal>a</>)</term>
+ <term><symbol>DEPENDENCY_AUTO</symbol> (<literal>a</literal>)</term>
<listitem>
<para>
The dependent object can be dropped separately from the
referenced object, and should be automatically dropped
- (regardless of <literal>RESTRICT</> or <literal>CASCADE</>
+ (regardless of <literal>RESTRICT</literal> or <literal>CASCADE</literal>
mode) if the referenced object is dropped. Example: a named
constraint on a table is made autodependent on the table, so
that it will go away if the table is dropped.
@@ -2977,41 +2977,41 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</varlistentry>
<varlistentry>
- <term><symbol>DEPENDENCY_INTERNAL</> (<literal>i</>)</term>
+ <term><symbol>DEPENDENCY_INTERNAL</symbol> (<literal>i</literal>)</term>
<listitem>
<para>
The dependent object was created as part of creation of the
referenced object, and is really just a part of its internal
- implementation. A <command>DROP</> of the dependent object
+ implementation. A <command>DROP</command> of the dependent object
will be disallowed outright (we'll tell the user to issue a
- <command>DROP</> against the referenced object, instead). A
- <command>DROP</> of the referenced object will be propagated
+ <command>DROP</command> against the referenced object, instead). A
+ <command>DROP</command> of the referenced object will be propagated
through to drop the dependent object whether
- <command>CASCADE</> is specified or not. Example: a trigger
+ <command>CASCADE</command> is specified or not. Example: a trigger
that's created to enforce a foreign-key constraint is made
internally dependent on the constraint's
- <structname>pg_constraint</> entry.
+ <structname>pg_constraint</structname> entry.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><symbol>DEPENDENCY_EXTENSION</> (<literal>e</>)</term>
+ <term><symbol>DEPENDENCY_EXTENSION</symbol> (<literal>e</literal>)</term>
<listitem>
<para>
- The dependent object is a member of the <firstterm>extension</> that is
+ The dependent object is a member of the <firstterm>extension</firstterm> that is
the referenced object (see
<link linkend="catalog-pg-extension"><structname>pg_extension</structname></link>).
The dependent object can be dropped only via
- <command>DROP EXTENSION</> on the referenced object. Functionally
+ <command>DROP EXTENSION</command> on the referenced object. Functionally
this dependency type acts the same as an internal dependency, but
- it's kept separate for clarity and to simplify <application>pg_dump</>.
+ it's kept separate for clarity and to simplify <application>pg_dump</application>.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><symbol>DEPENDENCY_AUTO_EXTENSION</> (<literal>x</>)</term>
+ <term><symbol>DEPENDENCY_AUTO_EXTENSION</symbol> (<literal>x</literal>)</term>
<listitem>
<para>
The dependent object is not a member of the extension that is the
@@ -3024,7 +3024,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</varlistentry>
<varlistentry>
- <term><symbol>DEPENDENCY_PIN</> (<literal>p</>)</term>
+ <term><symbol>DEPENDENCY_PIN</symbol> (<literal>p</literal>)</term>
<listitem>
<para>
There is no dependent object; this type of entry is a signal
@@ -3051,7 +3051,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</indexterm>
<para>
- The catalog <structname>pg_description</> stores optional descriptions
+ The catalog <structname>pg_description</structname> stores optional descriptions
(comments) for each database object. Descriptions can be manipulated
with the <xref linkend="sql-comment"> command and viewed with
<application>psql</application>'s <literal>\d</literal> commands.
@@ -3066,7 +3066,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
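  <para>
   For example, a comment set with <command>COMMENT</command> is stored as a
   row here and can be read back with the <function>obj_description</function>
   function (<literal>mytab</literal> is a hypothetical table name):
<programlisting>
COMMENT ON TABLE mytab IS 'demo comment';   -- mytab is hypothetical

-- Read it back via the convenience function ...
SELECT obj_description('mytab'::regclass, 'pg_class');

-- ... or directly from the catalog.
SELECT description
FROM pg_description
WHERE objoid = 'mytab'::regclass
  AND classoid = 'pg_class'::regclass
  AND objsubid = 0;
</programlisting>
  </para>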
<table>
- <title><structname>pg_description</> Columns</title>
+ <title><structname>pg_description</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3099,7 +3099,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
For a comment on a table column, this is the column number (the
- <structfield>objoid</> and <structfield>classoid</> refer to
+ <structfield>objoid</structfield> and <structfield>classoid</structfield> refer to
the table itself). For all other object types, this column is
zero.
</entry>
@@ -3133,7 +3133,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_enum</> Columns</title>
+ <title><structname>pg_enum</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3157,7 +3157,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>enumtypid</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-type"><structname>pg_type</structname></link>.oid</literal></entry>
- <entry>The OID of the <structname>pg_type</> entry owning this enum value</entry>
+ <entry>The OID of the <structname>pg_type</structname> entry owning this enum value</entry>
</row>
<row>
@@ -3191,7 +3191,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
When an enum type is created, its members are assigned sort-order
- positions 1..<replaceable>n</>. But members added later might be given
+ positions 1..<replaceable>n</replaceable>. But members added later might be given
negative or fractional values of <structfield>enumsortorder</structfield>.
The only requirement on these values is that they be correctly
ordered and unique within each enum type.
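  <para>
   As a rough illustration (the type <literal>mood</literal> and its labels
   are hypothetical), a value added with <command>ALTER TYPE ... ADD VALUE
   ... BEFORE</command> typically receives such a fractional position:
<programlisting>
CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');   -- hypothetical example type
ALTER TYPE mood ADD VALUE 'meh' BEFORE 'ok';        -- gets a fractional sort order

SELECT enumlabel, enumsortorder
FROM pg_enum
WHERE enumtypid = 'mood'::regtype
ORDER BY enumsortorder;
</programlisting>
  </para>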
@@ -3212,7 +3212,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_event_trigger</> Columns</title>
+ <title><structname>pg_event_trigger</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3260,10 +3260,10 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry>
Controls in which <xref linkend="guc-session-replication-role"> modes
the event trigger fires.
- <literal>O</> = trigger fires in <quote>origin</> and <quote>local</> modes,
- <literal>D</> = trigger is disabled,
- <literal>R</> = trigger fires in <quote>replica</> mode,
- <literal>A</> = trigger fires always.
+ <literal>O</literal> = trigger fires in <quote>origin</quote> and <quote>local</quote> modes,
+ <literal>D</literal> = trigger is disabled,
+ <literal>R</literal> = trigger fires in <quote>replica</quote> mode,
+ <literal>A</literal> = trigger fires always.
</entry>
</row>
@@ -3296,7 +3296,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_extension</> Columns</title>
+ <title><structname>pg_extension</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3355,16 +3355,16 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>extconfig</structfield></entry>
<entry><type>oid[]</type></entry>
<entry><literal><link linkend="catalog-pg-class"><structname>pg_class</structname></link>.oid</literal></entry>
- <entry>Array of <type>regclass</> OIDs for the extension's configuration
- table(s), or <literal>NULL</> if none</entry>
+ <entry>Array of <type>regclass</type> OIDs for the extension's configuration
+ table(s), or <literal>NULL</literal> if none</entry>
</row>
<row>
<entry><structfield>extcondition</structfield></entry>
<entry><type>text[]</type></entry>
<entry></entry>
- <entry>Array of <literal>WHERE</>-clause filter conditions for the
- extension's configuration table(s), or <literal>NULL</> if none</entry>
+ <entry>Array of <literal>WHERE</literal>-clause filter conditions for the
+ extension's configuration table(s), or <literal>NULL</literal> if none</entry>
</row>
</tbody>
@@ -3372,7 +3372,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</table>
<para>
- Note that unlike most catalogs with a <quote>namespace</> column,
+ Note that unlike most catalogs with a <quote>namespace</quote> column,
<structfield>extnamespace</structfield> is not meant to imply
that the extension belongs to that schema. Extension names are never
schema-qualified. Rather, <structfield>extnamespace</structfield>
@@ -3399,7 +3399,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_foreign_data_wrapper</> Columns</title>
+ <title><structname>pg_foreign_data_wrapper</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3474,7 +3474,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- Foreign-data wrapper specific options, as <quote>keyword=value</> strings
+ Foreign-data wrapper specific options, as <quote>keyword=value</quote> strings
</entry>
</row>
</tbody>
@@ -3498,7 +3498,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_foreign_server</> Columns</title>
+ <title><structname>pg_foreign_server</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3570,7 +3570,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- Foreign server specific options, as <quote>keyword=value</> strings
+ Foreign server specific options, as <quote>keyword=value</quote> strings
</entry>
</row>
</tbody>
@@ -3596,7 +3596,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_foreign_table</> Columns</title>
+ <title><structname>pg_foreign_table</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3613,7 +3613,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>ftrelid</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-class"><structname>pg_class</structname></link>.oid</literal></entry>
- <entry>OID of the <structname>pg_class</> entry for this foreign table</entry>
+ <entry>OID of the <structname>pg_class</structname> entry for this foreign table</entry>
</row>
<row>
@@ -3628,7 +3628,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- Foreign table options, as <quote>keyword=value</> strings
+ Foreign table options, as <quote>keyword=value</quote> strings
</entry>
</row>
</tbody>
@@ -3651,7 +3651,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_index</> Columns</title>
+ <title><structname>pg_index</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3668,14 +3668,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>indexrelid</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-class"><structname>pg_class</structname></link>.oid</literal></entry>
- <entry>The OID of the <structname>pg_class</> entry for this index</entry>
+ <entry>The OID of the <structname>pg_class</structname> entry for this index</entry>
</row>
<row>
<entry><structfield>indrelid</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-class"><structname>pg_class</structname></link>.oid</literal></entry>
- <entry>The OID of the <structname>pg_class</> entry for the table this index is for</entry>
+ <entry>The OID of the <structname>pg_class</structname> entry for the table this index is for</entry>
</row>
<row>
@@ -3698,7 +3698,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>bool</type></entry>
<entry></entry>
<entry>If true, this index represents the primary key of the table
- (<structfield>indisunique</> should always be true when this is true)</entry>
+ (<structfield>indisunique</structfield> should always be true when this is true)</entry>
</row>
<row>
@@ -3714,7 +3714,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>If true, the uniqueness check is enforced immediately on
insertion
- (irrelevant if <structfield>indisunique</> is not true)</entry>
+ (irrelevant if <structfield>indisunique</structfield> is not true)</entry>
</row>
<row>
@@ -3731,7 +3731,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry>
If true, the index is currently valid for queries. False means the
index is possibly incomplete: it must still be modified by
- <command>INSERT</>/<command>UPDATE</> operations, but it cannot safely
+ <command>INSERT</command>/<command>UPDATE</command> operations, but it cannot safely
be used for queries. If it is unique, the uniqueness property is not
guaranteed true either.
</entry>
@@ -3742,8 +3742,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>bool</type></entry>
<entry></entry>
<entry>
- If true, queries must not use the index until the <structfield>xmin</>
- of this <structname>pg_index</> row is below their <symbol>TransactionXmin</symbol>
+ If true, queries must not use the index until the <structfield>xmin</structfield>
+ of this <structname>pg_index</structname> row is below their <symbol>TransactionXmin</symbol>
event horizon, because the table may contain broken HOT chains with
incompatible rows that they can see
</entry>
@@ -3755,7 +3755,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
If true, the index is currently ready for inserts. False means the
- index must be ignored by <command>INSERT</>/<command>UPDATE</>
+ index must be ignored by <command>INSERT</command>/<command>UPDATE</command>
operations.
</entry>
</row>
@@ -3775,9 +3775,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>bool</type></entry>
<entry></entry>
<entry>
- If true this index has been chosen as <quote>replica identity</>
+ If true this index has been chosen as <quote>replica identity</quote>
using <command>ALTER TABLE ... REPLICA IDENTITY USING INDEX
- ...</>
+ ...</command>
</entry>
</row>
@@ -3836,7 +3836,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
Expression trees (in <function>nodeToString()</function>
representation) for index attributes that are not simple column
references. This is a list with one element for each zero
- entry in <structfield>indkey</>. Null if all index attributes
+ entry in <structfield>indkey</structfield>. Null if all index attributes
are simple references.
</entry>
</row>
@@ -3866,14 +3866,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</indexterm>
<para>
- The catalog <structname>pg_inherits</> records information about
+ The catalog <structname>pg_inherits</structname> records information about
table inheritance hierarchies. There is one entry for each direct
parent-child table relationship in the database. (Indirect inheritance can be determined
by following chains of entries.)
</para>
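  <para>
   For example, all direct and indirect children of a hypothetical parent
   table <literal>parent_tab</literal> could be found by following those
   chains with a recursive query:
<programlisting>
WITH RECURSIVE children AS (
    SELECT inhrelid
    FROM pg_inherits
    WHERE inhparent = 'parent_tab'::regclass   -- parent_tab is hypothetical
  UNION ALL
    SELECT i.inhrelid
    FROM pg_inherits i
    JOIN children c ON i.inhparent = c.inhrelid
)
SELECT inhrelid::regclass AS descendant FROM children;
</programlisting>
  </para>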
<table>
- <title><structname>pg_inherits</> Columns</title>
+ <title><structname>pg_inherits</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3928,7 +3928,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</indexterm>
<para>
- The catalog <structname>pg_init_privs</> records information about
+ The catalog <structname>pg_init_privs</structname> records information about
the initial privileges of objects in the system. There is one entry
for each object in the database which has a non-default (non-NULL)
initial set of privileges.
@@ -3936,7 +3936,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
Objects can have initial privileges either by having those privileges set
- when the system is initialized (by <application>initdb</>) or when the
+ when the system is initialized (by <application>initdb</application>) or when the
object is created during a <command>CREATE EXTENSION</command> and the
extension script sets initial privileges using the <command>GRANT</command>
system. Note that the system will automatically handle recording of the
@@ -3944,12 +3944,12 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
only use the <command>GRANT</command> and <command>REVOKE</command>
statements in their script to have the privileges recorded. The
<literal>privtype</literal> column indicates if the initial privilege was
- set by <application>initdb</> or during a
+ set by <application>initdb</application> or during a
<command>CREATE EXTENSION</command> command.
</para>
<para>
- Objects which have initial privileges set by <application>initdb</> will
+ Objects which have initial privileges set by <application>initdb</application> will
have entries where <literal>privtype</literal> is
<literal>'i'</literal>, while objects which have initial privileges set
by <command>CREATE EXTENSION</command> will have entries where
@@ -3957,7 +3957,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
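  <para>
   For example, the objects whose initial privileges were recorded from an
   extension script can be listed like this:
<programlisting>
SELECT pg_describe_object(classoid, objoid, objsubid) AS object,
       initprivs
FROM pg_init_privs
WHERE privtype = 'e';
</programlisting>
  </para>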
<table>
- <title><structname>pg_init_privs</> Columns</title>
+ <title><structname>pg_init_privs</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -3990,7 +3990,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
For a table column, this is the column number (the
- <structfield>objoid</> and <structfield>classoid</> refer to the
+ <structfield>objoid</structfield> and <structfield>classoid</structfield> refer to the
table itself). For all other object types, this column is
zero.
</entry>
@@ -4039,7 +4039,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_language</> Columns</title>
+ <title><structname>pg_language</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -4116,7 +4116,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
<entry>
This references a function that is responsible for executing
- <quote>inline</> anonymous code blocks
+ <quote>inline</quote> anonymous code blocks
(<xref linkend="sql-do"> blocks).
Zero if inline blocks are not supported.
</entry>
@@ -4162,24 +4162,24 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
The catalog <structname>pg_largeobject</structname> holds the data making up
<quote>large objects</quote>. A large object is identified by an OID
assigned when it is created. Each large object is broken into
- segments or <quote>pages</> small enough to be conveniently stored as rows
+ segments or <quote>pages</quote> small enough to be conveniently stored as rows
in <structname>pg_largeobject</structname>.
- The amount of data per page is defined to be <symbol>LOBLKSIZE</> (which is currently
- <literal>BLCKSZ/4</>, or typically 2 kB).
+ The amount of data per page is defined to be <symbol>LOBLKSIZE</symbol> (which is currently
+ <literal>BLCKSZ/4</literal>, or typically 2 kB).
</para>
<para>
- Prior to <productname>PostgreSQL</> 9.0, there was no permission structure
+ Prior to <productname>PostgreSQL</productname> 9.0, there was no permission structure
associated with large objects. As a result,
<structname>pg_largeobject</structname> was publicly readable and could be
used to obtain the OIDs (and contents) of all large objects in the system.
This is no longer the case; use
- <link linkend="catalog-pg-largeobject-metadata"><structname>pg_largeobject_metadata</></link>
+ <link linkend="catalog-pg-largeobject-metadata"><structname>pg_largeobject_metadata</structname></link>
to obtain a list of large object OIDs.
</para>
<table>
- <title><structname>pg_largeobject</> Columns</title>
+ <title><structname>pg_largeobject</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -4213,7 +4213,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
Actual data stored in the large object.
- This will never be more than <symbol>LOBLKSIZE</> bytes and might be less.
+ This will never be more than <symbol>LOBLKSIZE</symbol> bytes and might be less.
</entry>
</row>
</tbody>
@@ -4223,9 +4223,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
Each row of <structname>pg_largeobject</structname> holds data
for one page of a large object, beginning at
- byte offset (<literal>pageno * LOBLKSIZE</>) within the object. The implementation
+ byte offset (<literal>pageno * LOBLKSIZE</literal>) within the object. The implementation
allows sparse storage: pages might be missing, and might be shorter than
- <literal>LOBLKSIZE</> bytes even if they are not the last page of the object.
+ <literal>LOBLKSIZE</literal> bytes even if they are not the last page of the object.
Missing regions within a large object read as zeroes.
</para>
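  <para>
   As a sketch, assuming the default <symbol>LOBLKSIZE</symbol> of 2048 bytes
   and a placeholder large object OID of 16404, the stored pages and their
   byte offsets could be examined with:
<programlisting>
SELECT pageno,
       pageno * 2048      AS byte_offset,    -- 2048 = default LOBLKSIZE
       octet_length(data) AS bytes_stored
FROM pg_largeobject
WHERE loid = 16404                           -- placeholder OID
ORDER BY pageno;
</programlisting>
  </para>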
@@ -4242,11 +4242,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
The catalog <structname>pg_largeobject_metadata</structname>
holds metadata associated with large objects. The actual large object
data is stored in
- <link linkend="catalog-pg-largeobject"><structname>pg_largeobject</></link>.
+ <link linkend="catalog-pg-largeobject"><structname>pg_largeobject</structname></link>.
</para>
<table>
- <title><structname>pg_largeobject_metadata</> Columns</title>
+ <title><structname>pg_largeobject_metadata</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -4299,14 +4299,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</indexterm>
<para>
- The catalog <structname>pg_namespace</> stores namespaces.
+ The catalog <structname>pg_namespace</structname> stores namespaces.
A namespace is the structure underlying SQL schemas: each namespace
can have a separate collection of relations, types, etc. without name
conflicts.
</para>
<table>
- <title><structname>pg_namespace</> Columns</title>
+ <title><structname>pg_namespace</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -4381,7 +4381,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_opclass</> Columns</title>
+ <title><structname>pg_opclass</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -4447,14 +4447,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>opcdefault</structfield></entry>
<entry><type>bool</type></entry>
<entry></entry>
- <entry>True if this operator class is the default for <structfield>opcintype</></entry>
+ <entry>True if this operator class is the default for <structfield>opcintype</structfield></entry>
</row>
<row>
<entry><structfield>opckeytype</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-type"><structname>pg_type</structname></link>.oid</literal></entry>
- <entry>Type of data stored in index, or zero if same as <structfield>opcintype</></entry>
+ <entry>Type of data stored in index, or zero if same as <structfield>opcintype</structfield></entry>
</row>
</tbody>
@@ -4462,11 +4462,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</table>
<para>
- An operator class's <structfield>opcmethod</> must match the
- <structname>opfmethod</> of its containing operator family.
+ An operator class's <structfield>opcmethod</structfield> must match the
+ <structname>opfmethod</structname> of its containing operator family.
Also, there must be no more than one <structname>pg_opclass</structname>
- row having <structname>opcdefault</> true for any given combination of
- <structname>opcmethod</> and <structname>opcintype</>.
+ row having <structname>opcdefault</structname> true for any given combination of
+ <structname>opcmethod</structname> and <structname>opcintype</structname>.
</para>
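  <para>
   For example, the default operator class for each access method and input
   type can be listed by joining against <structname>pg_am</structname>:
<programlisting>
SELECT am.amname, opc.opcintype::regtype AS input_type, opc.opcname
FROM pg_opclass opc
JOIN pg_am am ON am.oid = opc.opcmethod
WHERE opc.opcdefault
ORDER BY am.amname, opc.opcname;
</programlisting>
  </para>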
</sect1>
@@ -4480,13 +4480,13 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</indexterm>
<para>
- The catalog <structname>pg_operator</> stores information about operators.
+ The catalog <structname>pg_operator</structname> stores information about operators.
See <xref linkend="sql-createoperator">
and <xref linkend="xoper"> for more information.
</para>
<table>
- <title><structname>pg_operator</> Columns</title>
+ <title><structname>pg_operator</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -4534,8 +4534,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>
- <literal>b</> = infix (<quote>both</quote>), <literal>l</> = prefix
- (<quote>left</quote>), <literal>r</> = postfix (<quote>right</quote>)
+ <literal>b</literal> = infix (<quote>both</quote>), <literal>l</literal> = prefix
+ (<quote>left</quote>), <literal>r</literal> = postfix (<quote>right</quote>)
</entry>
</row>
@@ -4632,7 +4632,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
Each operator family is a collection of operators and associated
support routines that implement the semantics specified for a particular
index access method. Furthermore, the operators in a family are all
- <quote>compatible</>, in a way that is specified by the access method.
+ <quote>compatible</quote>, in a way that is specified by the access method.
The operator family concept allows cross-data-type operators to be used
with indexes and to be reasoned about using knowledge of access method
semantics.
@@ -4643,7 +4643,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_opfamily</> Columns</title>
+ <title><structname>pg_opfamily</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -4720,7 +4720,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_partitioned_table</> Columns</title>
+ <title><structname>pg_partitioned_table</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -4738,7 +4738,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>partrelid</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-class"><structname>pg_class</structname></link>.oid</literal></entry>
- <entry>The OID of the <structname>pg_class</> entry for this partitioned table</entry>
+ <entry>The OID of the <structname>pg_class</structname> entry for this partitioned table</entry>
</row>
<row>
@@ -4746,8 +4746,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>
- Partitioning strategy; <literal>l</> = list partitioned table,
- <literal>r</> = range partitioned table
+ Partitioning strategy; <literal>l</literal> = list partitioned table,
+ <literal>r</literal> = range partitioned table
</entry>
</row>
@@ -4763,7 +4763,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-class"><structname>pg_class</structname></link>.oid</literal></entry>
<entry>
- The OID of the <structname>pg_class</> entry for the default partition
+ The OID of the <structname>pg_class</structname> entry for the default partition
of this partitioned table, or zero if this partitioned table does not
have a default partition.
</entry>
@@ -4813,7 +4813,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
Expression trees (in <function>nodeToString()</function>
representation) for partition key columns that are not simple column
references. This is a list with one element for each zero
- entry in <structfield>partattrs</>. Null if all partition key columns
+ entry in <structfield>partattrs</structfield>. Null if all partition key columns
are simple references.
</entry>
</row>
@@ -4833,9 +4833,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
The catalog <structname>pg_pltemplate</structname> stores
- <quote>template</> information for procedural languages.
+ <quote>template</quote> information for procedural languages.
A template for a language allows the language to be created in a
- particular database by a simple <command>CREATE LANGUAGE</> command,
+ particular database by a simple <command>CREATE LANGUAGE</command> command,
with no need to specify implementation details.
</para>
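  <para>
   For example, because <literal>plperl</literal> ships with a template row,
   the short form of <command>CREATE LANGUAGE</command> is sufficient
   (provided the <literal>plperl</literal> library is installed), and the
   template itself can be inspected directly:
<programlisting>
CREATE LANGUAGE plperl;        -- details filled in from pg_pltemplate

SELECT tmplname, tmpltrusted, tmplhandler, tmpllibrary
FROM pg_pltemplate
WHERE tmplname = 'plperl';
</programlisting>
  </para>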
@@ -4848,7 +4848,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_pltemplate</> Columns</title>
+ <title><structname>pg_pltemplate</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -4921,7 +4921,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<note>
<para>
- It is likely that <structname>pg_pltemplate</> will be removed in some
+ It is likely that <structname>pg_pltemplate</structname> will be removed in some
future release of <productname>PostgreSQL</productname>, in favor of
keeping this knowledge about procedural languages in their respective
extension installation scripts.
@@ -4944,7 +4944,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
command that it applies to (possibly all commands), the roles that it
applies to, the expression to be added as a security-barrier
qualification to queries that include the table, and the expression
- to be added as a <literal>WITH CHECK</> option for queries that attempt to
+ to be added as a <literal>WITH CHECK</literal> option for queries that attempt to
add new records to the table.
</para>
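  <para>
   As a rough example (the table <literal>accounts</literal> and its
   <literal>owner_name</literal> column are hypothetical), a policy's stored
   expressions can be read back with <function>pg_get_expr</function>:
<programlisting>
CREATE POLICY own_rows ON accounts          -- accounts/owner_name are hypothetical
    FOR UPDATE
    USING (owner_name = current_user)
    WITH CHECK (owner_name = current_user);

SELECT polname, polcmd,
       pg_get_expr(polqual, polrelid)      AS using_expr,
       pg_get_expr(polwithcheck, polrelid) AS with_check_expr
FROM pg_policy
WHERE polrelid = 'accounts'::regclass;
</programlisting>
  </para>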
@@ -4982,11 +4982,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>The command type to which the policy is applied:
- <literal>r</> for <command>SELECT</>,
- <literal>a</> for <command>INSERT</>,
- <literal>w</> for <command>UPDATE</>,
- <literal>d</> for <command>DELETE</>,
- or <literal>*</> for all</entry>
+ <literal>r</literal> for <command>SELECT</command>,
+ <literal>a</literal> for <command>INSERT</command>,
+ <literal>w</literal> for <command>UPDATE</command>,
+ <literal>d</literal> for <command>DELETE</command>,
+ or <literal>*</literal> for all</entry>
</row>
<row>
@@ -5023,8 +5023,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<note>
<para>
- Policies stored in <structname>pg_policy</> are applied only when
- <structname>pg_class</>.<structfield>relrowsecurity</> is set for
+ Policies stored in <structname>pg_policy</structname> are applied only when
+ <structname>pg_class</structname>.<structfield>relrowsecurity</structfield> is set for
their table.
</para>
</note>
@@ -5039,7 +5039,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</indexterm>
<para>
- The catalog <structname>pg_proc</> stores information about functions (or procedures).
+ The catalog <structname>pg_proc</structname> stores information about functions (or procedures).
See <xref linkend="sql-createfunction">
and <xref linkend="xfunc"> for more information.
</para>
@@ -5051,7 +5051,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_proc</> Columns</title>
+ <title><structname>pg_proc</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -5106,7 +5106,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>float4</type></entry>
<entry></entry>
<entry>Estimated execution cost (in units of
- <xref linkend="guc-cpu-operator-cost">); if <structfield>proretset</>,
+ <xref linkend="guc-cpu-operator-cost">); if <structfield>proretset</structfield>,
this is cost per row returned</entry>
</row>
@@ -5114,7 +5114,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>prorows</structfield></entry>
<entry><type>float4</type></entry>
<entry></entry>
- <entry>Estimated number of result rows (zero if not <structfield>proretset</>)</entry>
+ <entry>Estimated number of result rows (zero if not <structfield>proretset</structfield>)</entry>
</row>
<row>
@@ -5151,7 +5151,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>prosecdef</structfield></entry>
<entry><type>bool</type></entry>
<entry></entry>
- <entry>Function is a security definer (i.e., a <quote>setuid</>
+ <entry>Function is a security definer (i.e., a <quote>setuid</quote>
function)</entry>
</row>
@@ -5195,11 +5195,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<structfield>provolatile</structfield> tells whether the function's
result depends only on its input arguments, or is affected by outside
factors.
- It is <literal>i</literal> for <quote>immutable</> functions,
+ It is <literal>i</literal> for <quote>immutable</quote> functions,
which always deliver the same result for the same inputs.
- It is <literal>s</literal> for <quote>stable</> functions,
+ It is <literal>s</literal> for <quote>stable</quote> functions,
whose results (for fixed inputs) do not change within a scan.
- It is <literal>v</literal> for <quote>volatile</> functions,
+ It is <literal>v</literal> for <quote>volatile</quote> functions,
whose results might change at any time. (Use <literal>v</literal> also
for functions with side-effects, so that calls to them cannot get
optimized away.)
@@ -5251,7 +5251,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry>
An array with the data types of the function arguments. This includes
only input arguments (including <literal>INOUT</literal> and
- <literal>VARIADIC</> arguments), and thus represents
+ <literal>VARIADIC</literal> arguments), and thus represents
the call signature of the function.
</entry>
</row>
@@ -5266,7 +5266,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<literal>INOUT</literal> arguments); however, if all the
arguments are <literal>IN</literal> arguments, this field will be null.
Note that subscripting is 1-based, whereas for historical reasons
- <structfield>proargtypes</> is subscripted from 0.
+ <structfield>proargtypes</structfield> is subscripted from 0.
</entry>
</row>
@@ -5276,15 +5276,15 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
An array with the modes of the function arguments, encoded as
- <literal>i</literal> for <literal>IN</> arguments,
- <literal>o</literal> for <literal>OUT</> arguments,
- <literal>b</literal> for <literal>INOUT</> arguments,
- <literal>v</literal> for <literal>VARIADIC</> arguments,
- <literal>t</literal> for <literal>TABLE</> arguments.
+ <literal>i</literal> for <literal>IN</literal> arguments,
+ <literal>o</literal> for <literal>OUT</literal> arguments,
+ <literal>b</literal> for <literal>INOUT</literal> arguments,
+ <literal>v</literal> for <literal>VARIADIC</literal> arguments,
+ <literal>t</literal> for <literal>TABLE</literal> arguments.
If all the arguments are <literal>IN</literal> arguments,
this field will be null.
Note that subscripts correspond to positions of
- <structfield>proallargtypes</> not <structfield>proargtypes</>.
+ <structfield>proallargtypes</structfield> not <structfield>proargtypes</structfield>.
</entry>
</row>
@@ -5297,7 +5297,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
Arguments without a name are set to empty strings in the array.
If none of the arguments have a name, this field will be null.
Note that subscripts correspond to positions of
- <structfield>proallargtypes</> not <structfield>proargtypes</>.
+ <structfield>proallargtypes</structfield> not <structfield>proargtypes</structfield>.
</entry>
</row>
@@ -5308,9 +5308,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry>
Expression trees (in <function>nodeToString()</function> representation)
for default values. This is a list with
- <structfield>pronargdefaults</> elements, corresponding to the last
- <replaceable>N</> <emphasis>input</> arguments (i.e., the last
- <replaceable>N</> <structfield>proargtypes</> positions).
+ <structfield>pronargdefaults</structfield> elements, corresponding to the last
+ <replaceable>N</replaceable> <emphasis>input</emphasis> arguments (i.e., the last
+ <replaceable>N</replaceable> <structfield>proargtypes</structfield> positions).
If none of the arguments have defaults, this field will be null.
</entry>
</row>
@@ -5525,7 +5525,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_range</> Columns</title>
+ <title><structname>pg_range</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -5586,10 +5586,10 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</table>
<para>
- <structfield>rngsubopc</> (plus <structfield>rngcollation</>, if the
+ <structfield>rngsubopc</structfield> (plus <structfield>rngcollation</structfield>, if the
element type is collatable) determines the sort ordering used by the range
- type. <structfield>rngcanonical</> is used when the element type is
- discrete. <structfield>rngsubdiff</> is optional but should be supplied to
+ type. <structfield>rngcanonical</structfield> is used when the element type is
+ discrete. <structfield>rngsubdiff</structfield> is optional but should be supplied to
improve performance of GiST indexes on the range type.
</para>
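  <para>
   For example, the support functions recorded for the existing range types
   can be seen with:
<programlisting>
SELECT rngtypid::regtype   AS range_type,
       rngsubtype::regtype AS element_type,
       rngcanonical,
       rngsubdiff
FROM pg_range;
</programlisting>
  </para>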
@@ -5655,7 +5655,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_rewrite</> Columns</title>
+ <title><structname>pg_rewrite</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -5694,9 +5694,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>char</type></entry>
<entry></entry>
<entry>
- Event type that the rule is for: 1 = <command>SELECT</>, 2 =
- <command>UPDATE</>, 3 = <command>INSERT</>, 4 =
- <command>DELETE</>
+ Event type that the rule is for: 1 = <command>SELECT</command>, 2 =
+ <command>UPDATE</command>, 3 = <command>INSERT</command>, 4 =
+ <command>DELETE</command>
</entry>
</row>
@@ -5707,10 +5707,10 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry>
Controls in which <xref linkend="guc-session-replication-role"> modes
the rule fires.
- <literal>O</> = rule fires in <quote>origin</> and <quote>local</> modes,
- <literal>D</> = rule is disabled,
- <literal>R</> = rule fires in <quote>replica</> mode,
- <literal>A</> = rule fires always.
+ <literal>O</literal> = rule fires in <quote>origin</quote> and <quote>local</quote> modes,
+ <literal>D</literal> = rule is disabled,
+ <literal>R</literal> = rule fires in <quote>replica</quote> mode,
+ <literal>A</literal> = rule fires always.
</entry>
</row>
@@ -5809,7 +5809,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
For a security label on a table column, this is the column number (the
- <structfield>objoid</> and <structfield>classoid</> refer to
+ <structfield>objoid</structfield> and <structfield>classoid</structfield> refer to
the table itself). For all other object types, this column is
zero.
</entry>
@@ -5847,7 +5847,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_sequence</> Columns</title>
+ <title><structname>pg_sequence</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -5864,7 +5864,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>seqrelid</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-class"><structname>pg_class</structname></link>.oid</literal></entry>
- <entry>The OID of the <structname>pg_class</> entry for this sequence</entry>
+ <entry>The OID of the <structname>pg_class</structname> entry for this sequence</entry>
</row>
<row>
@@ -5949,7 +5949,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_shdepend</> Columns</title>
+ <title><structname>pg_shdepend</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -5990,7 +5990,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
For a table column, this is the column number (the
- <structfield>objid</> and <structfield>classid</> refer to the
+ <structfield>objid</structfield> and <structfield>classid</structfield> refer to the
table itself). For all other object types, this column is zero.
</entry>
</row>
@@ -6027,11 +6027,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
In all cases, a <structname>pg_shdepend</structname> entry indicates that
the referenced object cannot be dropped without also dropping the dependent
object. However, there are several subflavors identified by
- <structfield>deptype</>:
+ <structfield>deptype</structfield>:
<variablelist>
<varlistentry>
- <term><symbol>SHARED_DEPENDENCY_OWNER</> (<literal>o</>)</term>
+ <term><symbol>SHARED_DEPENDENCY_OWNER</symbol> (<literal>o</literal>)</term>
<listitem>
<para>
The referenced object (which must be a role) is the owner of the
@@ -6041,20 +6041,20 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</varlistentry>
<varlistentry>
- <term><symbol>SHARED_DEPENDENCY_ACL</> (<literal>a</>)</term>
+ <term><symbol>SHARED_DEPENDENCY_ACL</symbol> (<literal>a</literal>)</term>
<listitem>
<para>
The referenced object (which must be a role) is mentioned in the
ACL (access control list, i.e., privileges list) of the
- dependent object. (A <symbol>SHARED_DEPENDENCY_ACL</> entry is
+ dependent object. (A <symbol>SHARED_DEPENDENCY_ACL</symbol> entry is
not made for the owner of the object, since the owner will have
- a <symbol>SHARED_DEPENDENCY_OWNER</> entry anyway.)
+ a <symbol>SHARED_DEPENDENCY_OWNER</symbol> entry anyway.)
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><symbol>SHARED_DEPENDENCY_POLICY</> (<literal>r</>)</term>
+ <term><symbol>SHARED_DEPENDENCY_POLICY</symbol> (<literal>r</literal>)</term>
<listitem>
<para>
The referenced object (which must be a role) is mentioned as the
@@ -6064,7 +6064,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</varlistentry>
<varlistentry>
- <term><symbol>SHARED_DEPENDENCY_PIN</> (<literal>p</>)</term>
+ <term><symbol>SHARED_DEPENDENCY_PIN</symbol> (<literal>p</literal>)</term>
<listitem>
<para>
There is no dependent object; this type of entry is a signal
@@ -6111,7 +6111,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_shdescription</> Columns</title>
+ <title><structname>pg_shdescription</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -6235,16 +6235,16 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<para>
- Normally there is one entry, with <structfield>stainherit</> =
- <literal>false</>, for each table column that has been analyzed.
+ Normally there is one entry, with <structfield>stainherit</structfield> =
+ <literal>false</literal>, for each table column that has been analyzed.
If the table has inheritance children, a second entry with
- <structfield>stainherit</> = <literal>true</> is also created. This row
+ <structfield>stainherit</structfield> = <literal>true</literal> is also created. This row
represents the column's statistics over the inheritance tree, i.e.,
statistics for the data you'd see with
- <literal>SELECT <replaceable>column</> FROM <replaceable>table</>*</literal>,
- whereas the <structfield>stainherit</> = <literal>false</> row represents
+ <literal>SELECT <replaceable>column</replaceable> FROM <replaceable>table</replaceable>*</literal>,
+ whereas the <structfield>stainherit</structfield> = <literal>false</literal> row represents
the results of
- <literal>SELECT <replaceable>column</> FROM ONLY <replaceable>table</></literal>.
+ <literal>SELECT <replaceable>column</replaceable> FROM ONLY <replaceable>table</replaceable></literal>.
</para>
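  <para>
   For example, through the <structname>pg_stats</structname> view (whose
   <structfield>inherited</structfield> column exposes
   <structfield>stainherit</structfield>), both flavors of statistics for an
   analyzed inheritance parent can be compared;
   <literal>parent_tab</literal> is a hypothetical table name:
<programlisting>
SELECT tablename, attname, inherited, n_distinct, null_frac
FROM pg_stats
WHERE tablename = 'parent_tab';   -- parent_tab is hypothetical
</programlisting>
  </para>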
<para>
@@ -6254,7 +6254,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
references the index. No entry is made for an ordinary non-expression
index column, however, since it would be redundant with the entry
for the underlying table column. Currently, entries for index expressions
- always have <structfield>stainherit</> = <literal>false</>.
+ always have <structfield>stainherit</structfield> = <literal>false</literal>.
</para>
<para>
@@ -6281,7 +6281,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_statistic</> Columns</title>
+ <title><structname>pg_statistic</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -6339,56 +6339,56 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
A value less than zero is the negative of a multiplier for the number
of rows in the table; for example, a column in which about 80% of the
values are nonnull and each nonnull value appears about twice on
- average could be represented by <structfield>stadistinct</> = -0.4.
+ average could be represented by <structfield>stadistinct</structfield> = -0.4.
A zero value means the number of distinct values is unknown.
</entry>
</row>
<row>
- <entry><structfield>stakind<replaceable>N</></structfield></entry>
+ <entry><structfield>stakind<replaceable>N</replaceable></structfield></entry>
<entry><type>int2</type></entry>
<entry></entry>
<entry>
A code number indicating the kind of statistics stored in the
- <replaceable>N</>th <quote>slot</quote> of the
+ <replaceable>N</replaceable>th <quote>slot</quote> of the
<structname>pg_statistic</structname> row.
</entry>
</row>
<row>
- <entry><structfield>staop<replaceable>N</></structfield></entry>
+ <entry><structfield>staop<replaceable>N</replaceable></structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-operator"><structname>pg_operator</structname></link>.oid</literal></entry>
<entry>
An operator used to derive the statistics stored in the
- <replaceable>N</>th <quote>slot</quote>. For example, a
+ <replaceable>N</replaceable>th <quote>slot</quote>. For example, a
histogram slot would show the <literal>&lt;</literal> operator
that defines the sort order of the data.
</entry>
</row>
<row>
- <entry><structfield>stanumbers<replaceable>N</></structfield></entry>
+ <entry><structfield>stanumbers<replaceable>N</replaceable></structfield></entry>
<entry><type>float4[]</type></entry>
<entry></entry>
<entry>
Numerical statistics of the appropriate kind for the
- <replaceable>N</>th <quote>slot</quote>, or null if the slot
+ <replaceable>N</replaceable>th <quote>slot</quote>, or null if the slot
kind does not involve numerical values
</entry>
</row>
<row>
- <entry><structfield>stavalues<replaceable>N</></structfield></entry>
+ <entry><structfield>stavalues<replaceable>N</replaceable></structfield></entry>
<entry><type>anyarray</type></entry>
<entry></entry>
<entry>
Column data values of the appropriate kind for the
- <replaceable>N</>th <quote>slot</quote>, or null if the slot
+ <replaceable>N</replaceable>th <quote>slot</quote>, or null if the slot
kind does not store any data values. Each array's element
values are actually of the specific column's data type, or a related
type such as an array's element type, so there is no way to define
- these columns' type more specifically than <type>anyarray</>.
+ these columns' type more specifically than <type>anyarray</type>.
</entry>
</row>
</tbody>
@@ -6407,12 +6407,12 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
The catalog <structname>pg_statistic_ext</structname>
holds extended planner statistics.
- Each row in this catalog corresponds to a <firstterm>statistics object</>
+ Each row in this catalog corresponds to a <firstterm>statistics object</firstterm>
created with <xref linkend="sql-createstatistics">.
</para>
<table>
- <title><structname>pg_statistic_ext</> Columns</title>
+ <title><structname>pg_statistic_ext</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -6485,7 +6485,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>pg_ndistinct</type></entry>
<entry></entry>
<entry>
- N-distinct counts, serialized as <structname>pg_ndistinct</> type
+ N-distinct counts, serialized as <structname>pg_ndistinct</structname> type
</entry>
</row>
@@ -6495,7 +6495,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
Functional dependency statistics, serialized
- as <structname>pg_dependencies</> type
+ as <structname>pg_dependencies</structname> type
</entry>
</row>
@@ -6507,7 +6507,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
The <structfield>stxkind</structfield> field is filled at creation of the
statistics object, indicating which statistic type(s) are desired.
The fields after it are initially NULL and are filled only when the
- corresponding statistic has been computed by <command>ANALYZE</>.
+ corresponding statistic has been computed by <command>ANALYZE</command>.
</para>
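  <para>
   For example (the table <literal>tbl</literal> and its columns
   <literal>a</literal> and <literal>b</literal> are hypothetical):
<programlisting>
CREATE STATISTICS tbl_a_b_stats (ndistinct, dependencies) ON a, b FROM tbl;
ANALYZE tbl;        -- fills in stxndistinct and stxdependencies

SELECT stxname, stxkind,
       stxndistinct    IS NOT NULL AS ndistinct_computed,
       stxdependencies IS NOT NULL AS dependencies_computed
FROM pg_statistic_ext
WHERE stxrelid = 'tbl'::regclass;
</programlisting>
  </para>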
</sect1>
@@ -6677,10 +6677,10 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
State code:
- <literal>i</> = initialize,
- <literal>d</> = data is being copied,
- <literal>s</> = synchronized,
- <literal>r</> = ready (normal replication)
+ <literal>i</literal> = initialize,
+ <literal>d</literal> = data is being copied,
+ <literal>s</literal> = synchronized,
+ <literal>r</literal> = ready (normal replication)
</entry>
</row>
@@ -6689,7 +6689,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>pg_lsn</type></entry>
<entry></entry>
<entry>
- End LSN for <literal>s</> and <literal>r</> states.
+ End LSN for <literal>s</literal> and <literal>r</literal> states.
</entry>
</row>
</tbody>
@@ -6718,7 +6718,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_tablespace</> Columns</title>
+ <title><structname>pg_tablespace</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -6769,7 +6769,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- Tablespace-level options, as <quote>keyword=value</> strings
+ Tablespace-level options, as <quote>keyword=value</quote> strings
</entry>
</row>
</tbody>
@@ -6792,7 +6792,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_transform</> Columns</title>
+ <title><structname>pg_transform</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -6861,7 +6861,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_trigger</> Columns</title>
+ <title><structname>pg_trigger</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -6916,10 +6916,10 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry>
Controls in which <xref linkend="guc-session-replication-role"> modes
the trigger fires.
- <literal>O</> = trigger fires in <quote>origin</> and <quote>local</> modes,
- <literal>D</> = trigger is disabled,
- <literal>R</> = trigger fires in <quote>replica</> mode,
- <literal>A</> = trigger fires always.
+ <literal>O</literal> = trigger fires in <quote>origin</quote> and <quote>local</quote> modes,
+ <literal>D</literal> = trigger is disabled,
+ <literal>R</literal> = trigger fires in <quote>replica</quote> mode,
+ <literal>A</literal> = trigger fires always.
</entry>
</row>
@@ -6928,7 +6928,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>bool</type></entry>
<entry></entry>
<entry>True if trigger is internally generated (usually, to enforce
- the constraint identified by <structfield>tgconstraint</>)</entry>
+ the constraint identified by <structfield>tgconstraint</structfield>)</entry>
</row>
<row>
@@ -6950,7 +6950,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>tgconstraint</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-constraint"><structname>pg_constraint</structname></link>.oid</literal></entry>
- <entry>The <structname>pg_constraint</> entry associated with the trigger, if any</entry>
+ <entry>The <structname>pg_constraint</structname> entry associated with the trigger, if any</entry>
</row>
<row>
@@ -6994,7 +6994,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>pg_node_tree</type></entry>
<entry></entry>
<entry>Expression tree (in <function>nodeToString()</function>
- representation) for the trigger's <literal>WHEN</> condition, or null
+ representation) for the trigger's <literal>WHEN</literal> condition, or null
if none</entry>
</row>
@@ -7002,7 +7002,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>tgoldtable</structfield></entry>
<entry><type>name</type></entry>
<entry></entry>
- <entry><literal>REFERENCING</> clause name for <literal>OLD TABLE</>,
+ <entry><literal>REFERENCING</literal> clause name for <literal>OLD TABLE</literal>,
or null if none</entry>
</row>
@@ -7010,7 +7010,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>tgnewtable</structfield></entry>
<entry><type>name</type></entry>
<entry></entry>
- <entry><literal>REFERENCING</> clause name for <literal>NEW TABLE</>,
+ <entry><literal>REFERENCING</literal> clause name for <literal>NEW TABLE</literal>,
or null if none</entry>
</row>
</tbody>
@@ -7019,18 +7019,18 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
Currently, column-specific triggering is supported only for
- <literal>UPDATE</> events, and so <structfield>tgattr</> is relevant
+ <literal>UPDATE</literal> events, and so <structfield>tgattr</structfield> is relevant
only for that event type. <structfield>tgtype</structfield> might
contain bits for other event types as well, but those are presumed
- to be table-wide regardless of what is in <structfield>tgattr</>.
+ to be table-wide regardless of what is in <structfield>tgattr</structfield>.
</para>
<note>
<para>
- When <structfield>tgconstraint</> is nonzero,
- <structfield>tgconstrrelid</>, <structfield>tgconstrindid</>,
- <structfield>tgdeferrable</>, and <structfield>tginitdeferred</> are
- largely redundant with the referenced <structname>pg_constraint</> entry.
+ When <structfield>tgconstraint</structfield> is nonzero,
+ <structfield>tgconstrrelid</structfield>, <structfield>tgconstrindid</structfield>,
+ <structfield>tgdeferrable</structfield>, and <structfield>tginitdeferred</structfield> are
+ largely redundant with the referenced <structname>pg_constraint</structname> entry.
However, it is possible for a non-deferrable trigger to be associated
with a deferrable constraint: foreign key constraints can have some
deferrable and some non-deferrable triggers.
@@ -7070,7 +7070,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_ts_config</> Columns</title>
+ <title><structname>pg_ts_config</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -7145,7 +7145,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_ts_config_map</> Columns</title>
+ <title><structname>pg_ts_config_map</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -7162,7 +7162,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>mapcfg</structfield></entry>
<entry><type>oid</type></entry>
<entry><literal><link linkend="catalog-pg-ts-config"><structname>pg_ts_config</structname></link>.oid</literal></entry>
- <entry>The OID of the <structname>pg_ts_config</> entry owning this map entry</entry>
+ <entry>The OID of the <structname>pg_ts_config</structname> entry owning this map entry</entry>
</row>
<row>
@@ -7177,7 +7177,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>integer</type></entry>
<entry></entry>
<entry>Order in which to consult this entry (lower
- <structfield>mapseqno</>s first)</entry>
+ <structfield>mapseqno</structfield>s first)</entry>
</row>
<row>
@@ -7206,7 +7206,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
needed; the dictionary itself provides values for the user-settable
parameters supported by the template. This division of labor allows
dictionaries to be created by unprivileged users. The parameters
- are specified by a text string <structfield>dictinitoption</>,
+ are specified by a text string <structfield>dictinitoption</structfield>,
whose format and meaning vary depending on the template.
</para>
@@ -7216,7 +7216,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_ts_dict</> Columns</title>
+ <title><structname>pg_ts_dict</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -7299,7 +7299,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_ts_parser</> Columns</title>
+ <title><structname>pg_ts_parser</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -7396,7 +7396,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_ts_template</> Columns</title>
+ <title><structname>pg_ts_template</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -7470,7 +7470,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_type</> Columns</title>
+ <title><structname>pg_type</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -7521,7 +7521,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
For a fixed-size type, <structfield>typlen</structfield> is the number
of bytes in the internal representation of the type. But for a
variable-length type, <structfield>typlen</structfield> is negative.
- -1 indicates a <quote>varlena</> type (one that has a length word),
+ -1 indicates a <quote>varlena</quote> type (one that has a length word),
-2 indicates a null-terminated C string.
</entry>
</row>
@@ -7566,7 +7566,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry>
<structfield>typcategory</structfield> is an arbitrary classification
of data types that is used by the parser to determine which implicit
- casts should be <quote>preferred</>.
+ casts should be <quote>preferred</quote>.
See <xref linkend="catalog-typcategory-table">.
</entry>
</row>
@@ -7711,7 +7711,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<structfield>typalign</structfield> is the alignment required
when storing a value of this type. It applies to storage on
disk as well as most representations of the value inside
- <productname>PostgreSQL</>.
+ <productname>PostgreSQL</productname>.
When multiple values are stored consecutively, such
as in the representation of a complete row on disk, padding is
inserted before a datum of this type so that it begins on the
@@ -7723,16 +7723,16 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
Possible values are:
<itemizedlist>
<listitem>
- <para><literal>c</> = <type>char</type> alignment, i.e., no alignment needed.</para>
+ <para><literal>c</literal> = <type>char</type> alignment, i.e., no alignment needed.</para>
</listitem>
<listitem>
- <para><literal>s</> = <type>short</type> alignment (2 bytes on most machines).</para>
+ <para><literal>s</literal> = <type>short</type> alignment (2 bytes on most machines).</para>
</listitem>
<listitem>
- <para><literal>i</> = <type>int</type> alignment (4 bytes on most machines).</para>
+ <para><literal>i</literal> = <type>int</type> alignment (4 bytes on most machines).</para>
</listitem>
<listitem>
- <para><literal>d</> = <type>double</type> alignment (8 bytes on many machines, but by no means all).</para>
+ <para><literal>d</literal> = <type>double</type> alignment (8 bytes on many machines, but by no means all).</para>
</listitem>
</itemizedlist>
</para><note>
@@ -7757,24 +7757,24 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
Possible values are
<itemizedlist>
<listitem>
- <para><literal>p</>: Value must always be stored plain.</para>
+ <para><literal>p</literal>: Value must always be stored plain.</para>
</listitem>
<listitem>
<para>
- <literal>e</>: Value can be stored in a <quote>secondary</quote>
+ <literal>e</literal>: Value can be stored in a <quote>secondary</quote>
relation (if relation has one, see
<literal>pg_class.reltoastrelid</literal>).
</para>
</listitem>
<listitem>
- <para><literal>m</>: Value can be stored compressed inline.</para>
+ <para><literal>m</literal>: Value can be stored compressed inline.</para>
</listitem>
<listitem>
- <para><literal>x</>: Value can be stored compressed inline or stored in <quote>secondary</quote> storage.</para>
+ <para><literal>x</literal>: Value can be stored compressed inline or stored in <quote>secondary</quote> storage.</para>
</listitem>
</itemizedlist>
- Note that <literal>m</> columns can also be moved out to secondary
- storage, but only as a last resort (<literal>e</> and <literal>x</> columns are
+ Note that <literal>m</literal> columns can also be moved out to secondary
+ storage, but only as a last resort (<literal>e</literal> and <literal>x</literal> columns are
moved first).
</para></entry>
</row>
@@ -7805,9 +7805,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>int4</type></entry>
<entry></entry>
<entry><para>
- Domains use <structfield>typtypmod</structfield> to record the <literal>typmod</>
+ Domains use <structfield>typtypmod</structfield> to record the <literal>typmod</literal>
to be applied to their base type (-1 if base type does not use a
- <literal>typmod</>). -1 if this type is not a domain.
+ <literal>typmod</literal>). -1 if this type is not a domain.
</para></entry>
</row>
@@ -7817,7 +7817,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry><para>
<structfield>typndims</structfield> is the number of array dimensions
- for a domain over an array (that is, <structfield>typbasetype</> is
+ for a domain over an array (that is, <structfield>typbasetype</structfield> is
an array type).
Zero for types other than domains over array types.
</para></entry>
@@ -7842,7 +7842,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>pg_node_tree</type></entry>
<entry></entry>
<entry><para>
- If <structfield>typdefaultbin</> is not null, it is the
+ If <structfield>typdefaultbin</structfield> is not null, it is the
<function>nodeToString()</function>
representation of a default expression for the type. This is
only used for domains.
@@ -7854,12 +7854,12 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>text</type></entry>
<entry></entry>
<entry><para>
- <structfield>typdefault</> is null if the type has no associated
- default value. If <structfield>typdefaultbin</> is not null,
- <structfield>typdefault</> must contain a human-readable version of the
- default expression represented by <structfield>typdefaultbin</>. If
- <structfield>typdefaultbin</> is null and <structfield>typdefault</> is
- not, then <structfield>typdefault</> is the external representation of
+ <structfield>typdefault</structfield> is null if the type has no associated
+ default value. If <structfield>typdefaultbin</structfield> is not null,
+ <structfield>typdefault</structfield> must contain a human-readable version of the
+ default expression represented by <structfield>typdefaultbin</structfield>. If
+ <structfield>typdefaultbin</structfield> is null and <structfield>typdefault</structfield> is
+ not, then <structfield>typdefault</structfield> is the external representation of
the type's default value, which can be fed to the type's input
converter to produce a constant.
</para></entry>
@@ -7882,13 +7882,13 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
<xref linkend="catalog-typcategory-table"> lists the system-defined values
- of <structfield>typcategory</>. Any future additions to this list will
+ of <structfield>typcategory</structfield>. Any future additions to this list will
also be upper-case ASCII letters. All other ASCII characters are reserved
for user-defined categories.
</para>
<table id="catalog-typcategory-table">
- <title><structfield>typcategory</> Codes</title>
+ <title><structfield>typcategory</structfield> Codes</title>
<tgroup cols="2">
<thead>
@@ -7957,7 +7957,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</row>
<row>
<entry><literal>X</literal></entry>
- <entry><type>unknown</> type</entry>
+ <entry><type>unknown</type> type</entry>
</row>
</tbody>
</tgroup>
@@ -7982,7 +7982,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_user_mapping</> Columns</title>
+ <title><structname>pg_user_mapping</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -8023,7 +8023,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- User mapping specific options, as <quote>keyword=value</> strings
+ User mapping specific options, as <quote>keyword=value</quote> strings
</entry>
</row>
</tbody>
@@ -8241,7 +8241,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_available_extensions</> Columns</title>
+ <title><structname>pg_available_extensions</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -8303,7 +8303,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_available_extension_versions</> Columns</title>
+ <title><structname>pg_available_extension_versions</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -8385,11 +8385,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
The view <structname>pg_config</structname> describes the
compile-time configuration parameters of the currently installed
- version of <productname>PostgreSQL</>. It is intended, for example, to
+ version of <productname>PostgreSQL</productname>. It is intended, for example, to
be used by software packages that want to interface to
- <productname>PostgreSQL</> to facilitate finding the required header
+ <productname>PostgreSQL</productname> to facilitate finding the required header
files and libraries. It provides the same basic information as the
- <xref linkend="app-pgconfig"> <productname>PostgreSQL</> client
+ <xref linkend="app-pgconfig"> <productname>PostgreSQL</productname> client
application.
</para>
@@ -8399,7 +8399,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_config</> Columns</title>
+ <title><structname>pg_config</structname> Columns</title>
<tgroup cols="3">
<thead>
<row>
@@ -8470,15 +8470,15 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<note>
<para>
Cursors are used internally to implement some of the components
- of <productname>PostgreSQL</>, such as procedural languages.
- Therefore, the <structname>pg_cursors</> view might include cursors
+ of <productname>PostgreSQL</productname>, such as procedural languages.
+ Therefore, the <structname>pg_cursors</structname> view might include cursors
that have not been explicitly created by the user.
</para>
</note>
</para>
<table>
- <title><structname>pg_cursors</> Columns</title>
+ <title><structname>pg_cursors</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -8526,7 +8526,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>is_scrollable</structfield></entry>
<entry><type>boolean</type></entry>
<entry>
- <literal>true</> if the cursor is scrollable (that is, it
+ <literal>true</literal> if the cursor is scrollable (that is, it
allows rows to be retrieved in a nonsequential manner);
<literal>false</literal> otherwise
</entry>
@@ -8557,16 +8557,16 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
The view <structname>pg_file_settings</structname> provides a summary of
the contents of the server's configuration file(s). A row appears in
- this view for each <quote>name = value</> entry appearing in the files,
+ this view for each <quote>name = value</quote> entry appearing in the files,
with annotations indicating whether the value could be applied
successfully. Additional row(s) may appear for problems not linked to
- a <quote>name = value</> entry, such as syntax errors in the files.
+ a <quote>name = value</quote> entry, such as syntax errors in the files.
</para>
<para>
This view is helpful for checking whether planned changes in the
configuration files will work, or for diagnosing a previous failure.
- Note that this view reports on the <emphasis>current</> contents of the
+ Note that this view reports on the <emphasis>current</emphasis> contents of the
files, not on what was last applied by the server. (The
<link linkend="view-pg-settings"><structname>pg_settings</structname></link>
view is usually sufficient to determine that.)
@@ -8578,7 +8578,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_file_settings</> Columns</title>
+ <title><structname>pg_file_settings</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -8604,7 +8604,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<row>
<entry><structfield>seqno</structfield></entry>
<entry><structfield>integer</structfield></entry>
- <entry>Order in which the entries are processed (1..<replaceable>n</>)</entry>
+ <entry>Order in which the entries are processed (1..<replaceable>n</replaceable>)</entry>
</row>
<row>
<entry><structfield>name</structfield></entry>
@@ -8634,14 +8634,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
If the configuration file contains syntax errors or invalid parameter
names, the server will not attempt to apply any settings from it, and
- therefore all the <structfield>applied</> fields will read as false.
+ therefore all the <structfield>applied</structfield> fields will read as false.
In such a case there will be one or more rows with
non-null <structfield>error</structfield> fields indicating the
problem(s). Otherwise, individual settings will be applied if possible.
If an individual setting cannot be applied (e.g., invalid value, or the
setting cannot be changed after server start) it will have an appropriate
message in the <structfield>error</structfield> field. Another way that
- an entry might have <structfield>applied</> = false is that it is
+ an entry might have <structfield>applied</structfield> = false is that it is
overridden by a later entry for the same parameter name; this case is not
considered an error so nothing appears in
the <structfield>error</structfield> field.
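Illustrative aside (not part of the patch): after a configuration reload, the problems described above can be surfaced with a query along these lines, using only columns of this view.

    SELECT name, sourceline, error
    FROM pg_file_settings
    WHERE applied = false OR error IS NOT NULL;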
@@ -8666,12 +8666,12 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
compatibility: it emulates a catalog that existed in
<productname>PostgreSQL</productname> before version 8.1.
It shows the names and members of all roles that are marked as not
- <structfield>rolcanlogin</>, which is an approximation to the set
+ <structfield>rolcanlogin</structfield>, which is an approximation to the set
of roles that are being used as groups.
</para>
<table>
- <title><structname>pg_group</> Columns</title>
+ <title><structname>pg_group</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -8720,7 +8720,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
The view <structname>pg_hba_file_rules</structname> provides a summary of
the contents of the client authentication configuration
- file, <filename>pg_hba.conf</>. A row appears in this view for each
+ file, <filename>pg_hba.conf</filename>. A row appears in this view for each
non-empty, non-comment line in the file, with annotations indicating
whether the rule could be applied successfully.
</para>
@@ -8728,7 +8728,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
This view can be helpful for checking whether planned changes in the
authentication configuration file will work, or for diagnosing a previous
- failure. Note that this view reports on the <emphasis>current</> contents
+ failure. Note that this view reports on the <emphasis>current</emphasis> contents
of the file, not on what was last loaded by the server.
</para>
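As a quick illustration (again, not part of this change), the kind of check described above might look like:

    SELECT line_number, error
    FROM pg_hba_file_rules
    WHERE error IS NOT NULL;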
@@ -8738,7 +8738,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_hba_file_rules</> Columns</title>
+ <title><structname>pg_hba_file_rules</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -8753,7 +8753,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry><structfield>line_number</structfield></entry>
<entry><structfield>integer</structfield></entry>
<entry>
- Line number of this rule in <filename>pg_hba.conf</>
+ Line number of this rule in <filename>pg_hba.conf</filename>
</entry>
</row>
<row>
@@ -8809,7 +8809,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<para>
Usually, a row reflecting an incorrect entry will have values for only
- the <structfield>line_number</> and <structfield>error</> fields.
+ the <structfield>line_number</structfield> and <structfield>error</structfield> fields.
</para>
<para>
@@ -8831,7 +8831,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
</para>
<table>
- <title><structname>pg_indexes</> Columns</title>
+ <title><structname>pg_indexes</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -8912,12 +8912,12 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
in the same way as in <structname>pg_description</structname> or
<structname>pg_depend</structname>). Also, the right to extend a
relation is represented as a separate lockable object.
- Also, <quote>advisory</> locks can be taken on numbers that have
+ Also, <quote>advisory</quote> locks can be taken on numbers that have
user-defined meanings.
</para>
<table>
- <title><structname>pg_locks</> Columns</title>
+ <title><structname>pg_locks</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -8935,15 +8935,15 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
Type of the lockable object:
- <literal>relation</>,
- <literal>extend</>,
- <literal>page</>,
- <literal>tuple</>,
- <literal>transactionid</>,
- <literal>virtualxid</>,
- <literal>object</>,
- <literal>userlock</>, or
- <literal>advisory</>
+ <literal>relation</literal>,
+ <literal>extend</literal>,
+ <literal>page</literal>,
+ <literal>tuple</literal>,
+ <literal>transactionid</literal>,
+ <literal>virtualxid</literal>,
+ <literal>object</literal>,
+ <literal>userlock</literal>, or
+ <literal>advisory</literal>
</entry>
</row>
<row>
@@ -9025,7 +9025,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
<entry></entry>
<entry>
Column number targeted by the lock (the
- <structfield>classid</> and <structfield>objid</> refer to the
+ <structfield>classid</structfield> and <structfield>objid</structfield> refer to the
table itself),
or zero if the target is some other general database object,
or null if the target is not a general database object
@@ -9107,23 +9107,23 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</>:<replaceable>&lt;salt&gt;<
Advisory locks can be acquired on keys consisting of either a single
<type>bigint</type> value or two integer values.
A <type>bigint</type> key is displayed with its
- high-order half in the <structfield>classid</> column, its low-order half
- in the <structfield>objid</> column, and <structfield>objsubid</> equal
+ high-order half in the <structfield>classid</structfield> column, its low-order half
+ in the <structfield>objid</structfield> column, and <structfield>objsubid</structfield> equal
to 1. The original <type>bigint</type> value can be reassembled with the
expression <literal>(classid::bigint &lt;&lt; 32) |
objid::bigint</literal>. Integer keys are displayed with the
first key in the
- <structfield>classid</> column, the second key in the <structfield>objid</>
- column, and <structfield>objsubid</> equal to 2. The actual meaning of
+ <structfield>classid</structfield> column, the second key in the <structfield>objid</structfield>
+ column, and <structfield>objsubid</structfield> equal to 2. The actual meaning of
the keys is up to the user. Advisory locks are local to each database,
- so the <structfield>database</> column is meaningful for an advisory lock.
+ so the <structfield>database</structfield> column is meaningful for an advisory lock.
</para>
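For illustration only, the reassembly expression quoted above can be applied directly to the view, for example to list the bigint advisory lock keys currently held:

    SELECT (classid::bigint << 32) | objid::bigint AS advisory_key,
           mode, granted
    FROM pg_locks
    WHERE locktype = 'advisory' AND objsubid = 1;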
<para>
<structname>pg_locks</structname> provides a global view of all locks
in the database cluster, not only those relevant to the current database.
Although its <structfield>relation</structfield> column can be joined
- against <structname>pg_class</>.<structfield>oid</> to identify locked
+ against <structname>pg_class</structname>.<structfield>oid</structfield> to identify locked
relations, this will only work correctly for relations in the current
database (those for which the <structfield>database</structfield> column
is either the current database's OID or zero).
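A minimal sketch of that join, for illustration only; as the paragraph notes, it is reliable only for relations of the current database:

    SELECT c.relname, l.mode, l.granted
    FROM pg_locks l
    JOIN pg_class c ON c.oid = l.relation
    WHERE l.locktype = 'relation';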
@@ -9141,7 +9141,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_stat_activity psa
ON pl.pid = psa.pid;
</programlisting>
Also, if you are using prepared transactions, the
- <structfield>virtualtransaction</> column can be joined to the
+ <structfield>virtualtransaction</structfield> column can be joined to the
<structfield>transaction</structfield> column of the <link
linkend="view-pg-prepared-xacts"><structname>pg_prepared_xacts</structname></link>
view to get more information on prepared transactions that hold locks.
@@ -9163,7 +9163,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
information about which processes are ahead of which others in lock wait
queues, nor information about which processes are parallel workers running
on behalf of which other client sessions. It is better to use
- the <function>pg_blocking_pids()</> function
+ the <function>pg_blocking_pids()</function> function
(see <xref linkend="functions-info-session-table">) to identify which
process(es) a waiting process is blocked behind.
</para>
@@ -9172,10 +9172,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
The <structname>pg_locks</structname> view displays data from both the
regular lock manager and the predicate lock manager, which are
separate systems; in addition, the regular lock manager subdivides its
- locks into regular and <firstterm>fast-path</> locks.
+ locks into regular and <firstterm>fast-path</firstterm> locks.
This data is not guaranteed to be entirely consistent.
When the view is queried,
- data on fast-path locks (with <structfield>fastpath</> = <literal>true</>)
+ data on fast-path locks (with <structfield>fastpath</structfield> = <literal>true</literal>)
is gathered from each backend one at a time, without freezing the state of
the entire lock manager, so it is possible for locks to be taken or
released while information is gathered. Note, however, that these locks are
@@ -9218,7 +9218,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_matviews</> Columns</title>
+ <title><structname>pg_matviews</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -9291,7 +9291,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_policies</> Columns</title>
+ <title><structname>pg_policies</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -9381,7 +9381,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_prepared_statements</> Columns</title>
+ <title><structname>pg_prepared_statements</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -9467,7 +9467,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_prepared_xacts</> Columns</title>
+ <title><structname>pg_prepared_xacts</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -9706,7 +9706,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry><structfield>slot_type</structfield></entry>
<entry><type>text</type></entry>
<entry></entry>
- <entry>The slot type - <literal>physical</> or <literal>logical</></entry>
+ <entry>The slot type - <literal>physical</literal> or <literal>logical</literal></entry>
</row>
<row>
@@ -9787,7 +9787,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry></entry>
<entry>The address (<literal>LSN</literal>) up to which the logical
slot's consumer has confirmed receiving data. Data older than this is
- not available anymore. <literal>NULL</> for physical slots.
+ not available anymore. <literal>NULL</literal> for physical slots.
</entry>
</row>
@@ -9817,7 +9817,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_roles</> Columns</title>
+ <title><structname>pg_roles</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -9900,7 +9900,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry><structfield>rolpassword</structfield></entry>
<entry><type>text</type></entry>
<entry></entry>
- <entry>Not the password (always reads as <literal>********</>)</entry>
+ <entry>Not the password (always reads as <literal>********</literal>)</entry>
</row>
<row>
@@ -9953,7 +9953,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_rules</> Columns</title>
+ <title><structname>pg_rules</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -9994,9 +9994,9 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</table>
<para>
- The <structname>pg_rules</> view excludes the <literal>ON SELECT</> rules
+ The <structname>pg_rules</structname> view excludes the <literal>ON SELECT</literal> rules
of views and materialized views; those can be seen in
- <structname>pg_views</> and <structname>pg_matviews</>.
+ <structname>pg_views</structname> and <structname>pg_matviews</structname>.
</para>
</sect1>
@@ -10011,11 +10011,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<para>
The view <structname>pg_seclabels</structname> provides information about
security labels. It is an easier-to-query version of the
- <link linkend="catalog-pg-seclabel"><structname>pg_seclabel</></> catalog.
+ <link linkend="catalog-pg-seclabel"><structname>pg_seclabel</structname></link> catalog.
</para>
<table>
- <title><structname>pg_seclabels</> Columns</title>
+ <title><structname>pg_seclabels</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -10045,7 +10045,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry></entry>
<entry>
For a security label on a table column, this is the column number (the
- <structfield>objoid</> and <structfield>classoid</> refer to
+ <structfield>objoid</structfield> and <structfield>classoid</structfield> refer to
the table itself). For all other object types, this column is
zero.
</entry>
@@ -10105,7 +10105,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_sequences</> Columns</title>
+ <title><structname>pg_sequences</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -10206,12 +10206,12 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
interface to the <xref linkend="sql-show">
and <xref linkend="sql-set"> commands.
It also provides access to some facts about each parameter that are
- not directly available from <command>SHOW</>, such as minimum and
+ not directly available from <command>SHOW</command>, such as minimum and
maximum values.
</para>
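For example (illustrative, not part of the patch), the minimum and maximum values mentioned above can be read straight from the view; the parameter name is arbitrary:

    SELECT name, setting, unit, min_val, max_val, context
    FROM pg_settings
    WHERE name = 'work_mem';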
<table>
- <title><structname>pg_settings</> Columns</title>
+ <title><structname>pg_settings</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -10260,8 +10260,8 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<row>
<entry><structfield>vartype</structfield></entry>
<entry><type>text</type></entry>
- <entry>Parameter type (<literal>bool</>, <literal>enum</>,
- <literal>integer</>, <literal>real</>, or <literal>string</>)
+ <entry>Parameter type (<literal>bool</literal>, <literal>enum</literal>,
+ <literal>integer</literal>, <literal>real</literal>, or <literal>string</literal>)
</entry>
</row>
<row>
@@ -10306,7 +10306,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
values set from sources other than configuration files, or when
examined by a user who is neither a superuser nor a member of
<literal>pg_read_all_settings</literal>); helpful when using
- <literal>include</> directives in configuration files</entry>
+ <literal>include</literal> directives in configuration files</entry>
</row>
<row>
<entry><structfield>sourceline</structfield></entry>
@@ -10384,7 +10384,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
Changes to these settings can be made in
<filename>postgresql.conf</filename> without restarting the server.
They can also be set for a particular session in the connection request
- packet (for example, via <application>libpq</>'s <literal>PGOPTIONS</>
+ packet (for example, via <application>libpq</application>'s <literal>PGOPTIONS</literal>
environment variable), but only if the connecting user is a superuser.
However, these settings never change in a session after it is started.
If you change them in <filename>postgresql.conf</filename>, send a
@@ -10402,7 +10402,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
Changes to these settings can be made in
<filename>postgresql.conf</filename> without restarting the server.
They can also be set for a particular session in the connection request
- packet (for example, via <application>libpq</>'s <literal>PGOPTIONS</>
+ packet (for example, via <application>libpq</application>'s <literal>PGOPTIONS</literal>
environment variable); any user can make such a change for their session.
However, these settings never change in a session after it is started.
If you change them in <filename>postgresql.conf</filename>, send a
@@ -10418,10 +10418,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<listitem>
<para>
These settings can be set from <filename>postgresql.conf</filename>,
- or within a session via the <command>SET</> command; but only superusers
- can change them via <command>SET</>. Changes in
+ or within a session via the <command>SET</command> command; but only superusers
+ can change them via <command>SET</command>. Changes in
<filename>postgresql.conf</filename> will affect existing sessions
- only if no session-local value has been established with <command>SET</>.
+ only if no session-local value has been established with <command>SET</command>.
</para>
</listitem>
</varlistentry>
@@ -10431,10 +10431,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<listitem>
<para>
These settings can be set from <filename>postgresql.conf</filename>,
- or within a session via the <command>SET</> command. Any user is
+ or within a session via the <command>SET</command> command. Any user is
allowed to change their session-local value. Changes in
<filename>postgresql.conf</filename> will affect existing sessions
- only if no session-local value has been established with <command>SET</>.
+ only if no session-local value has been established with <command>SET</command>.
</para>
</listitem>
</varlistentry>
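A brief sketch of the user-settable case just described (illustrative; the parameter name is only an example):

    SET statement_timeout = '5s';    -- any user may set a session-local value
    SHOW statement_timeout;
    RESET statement_timeout;         -- revert to the configured default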
@@ -10473,7 +10473,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
compatibility: it emulates a catalog that existed in
<productname>PostgreSQL</productname> before version 8.1.
It shows properties of all roles that are marked as
- <structfield>rolcanlogin</> in
+ <structfield>rolcanlogin</structfield> in
<link linkend="catalog-pg-authid"><structname>pg_authid</structname></link>.
</para>
@@ -10486,7 +10486,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_shadow</> Columns</title>
+ <title><structname>pg_shadow</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -10600,7 +10600,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_stats</> Columns</title>
+ <title><structname>pg_stats</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -10663,7 +10663,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
If greater than zero, the estimated number of distinct values in the
column. If less than zero, the negative of the number of distinct
values divided by the number of rows. (The negated form is used when
- <command>ANALYZE</> believes that the number of distinct values is
+ <command>ANALYZE</command> believes that the number of distinct values is
likely to increase as the table grows; the positive form is used when
the column seems to have a fixed number of possible values.) For
example, -1 indicates a unique column in which the number of distinct
@@ -10699,10 +10699,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry>
A list of values that divide the column's values into groups of
approximately equal population. The values in
- <structfield>most_common_vals</>, if present, are omitted from this
+ <structfield>most_common_vals</structfield>, if present, are omitted from this
histogram calculation. (This column is null if the column data type
- does not have a <literal>&lt;</> operator or if the
- <structfield>most_common_vals</> list accounts for the entire
+ does not have a <literal>&lt;</literal> operator or if the
+ <structfield>most_common_vals</structfield> list accounts for the entire
population.)
</entry>
</row>
@@ -10717,7 +10717,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
When the value is near -1 or +1, an index scan on the column will
be estimated to be cheaper than when it is near zero, due to reduction
of random access to the disk. (This column is null if the column data
- type does not have a <literal>&lt;</> operator.)
+ type does not have a <literal>&lt;</literal> operator.)
</entry>
</row>
@@ -10761,7 +10761,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<para>
The maximum number of entries in the array fields can be controlled on a
- column-by-column basis using the <command>ALTER TABLE SET STATISTICS</>
+ column-by-column basis using the <command>ALTER TABLE SET STATISTICS</command>
command, or globally by setting the
<xref linkend="guc-default-statistics-target"> run-time parameter.
</para>
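A hypothetical example of both forms (illustrative; the table and column names are invented):

    ALTER TABLE measurements ALTER COLUMN city SET STATISTICS 500;
    SET default_statistics_target = 500;   -- adjusts the run-time parameter for the session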
@@ -10781,7 +10781,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_tables</> Columns</title>
+ <title><structname>pg_tables</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -10862,7 +10862,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_timezone_abbrevs</> Columns</title>
+ <title><structname>pg_timezone_abbrevs</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -10910,7 +10910,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<para>
The view <structname>pg_timezone_names</structname> provides a list
- of time zone names that are recognized by <command>SET TIMEZONE</>,
+ of time zone names that are recognized by <command>SET TIMEZONE</command>,
along with their associated abbreviations, UTC offsets,
and daylight-savings status. (Technically,
<productname>PostgreSQL</productname> does not use UTC because leap
@@ -10919,11 +10919,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
linkend="view-pg-timezone-abbrevs"><structname>pg_timezone_abbrevs</structname></link>, many of these names imply a set of daylight-savings transition
date rules. Therefore, the associated information changes across local DST
boundaries. The displayed information is computed based on the current
- value of <function>CURRENT_TIMESTAMP</>.
+ value of <function>CURRENT_TIMESTAMP</function>.
</para>
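For instance (illustrative; the zone name is just an example):

    SELECT name, abbrev, utc_offset, is_dst
    FROM pg_timezone_names
    WHERE name = 'Europe/Stockholm';

    SET TIMEZONE TO 'Europe/Stockholm';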
<table>
- <title><structname>pg_timezone_names</> Columns</title>
+ <title><structname>pg_timezone_names</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -10976,7 +10976,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_user</> Columns</title>
+ <title><structname>pg_user</structname> Columns</title>
<tgroup cols="3">
<thead>
@@ -11032,7 +11032,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<row>
<entry><structfield>passwd</structfield></entry>
<entry><type>text</type></entry>
- <entry>Not the password (always reads as <literal>********</>)</entry>
+ <entry>Not the password (always reads as <literal>********</literal>)</entry>
</row>
<row>
@@ -11069,7 +11069,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_user_mappings</> Columns</title>
+ <title><structname>pg_user_mappings</structname> Columns</title>
<tgroup cols="4">
<thead>
@@ -11126,7 +11126,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry><type>text[]</type></entry>
<entry></entry>
<entry>
- User mapping specific options, as <quote>keyword=value</> strings
+ User mapping specific options, as <quote>keyword=value</quote> strings
</entry>
</row>
</tbody>
@@ -11141,12 +11141,12 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<listitem>
<para>
current user is the user being mapped, and owns the server or
- holds <literal>USAGE</> privilege on it
+ holds <literal>USAGE</literal> privilege on it
</para>
</listitem>
<listitem>
<para>
- current user is the server owner and mapping is for <literal>PUBLIC</>
+ current user is the server owner and mapping is for <literal>PUBLIC</literal>
</para>
</listitem>
<listitem>
@@ -11173,7 +11173,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</para>
<table>
- <title><structname>pg_views</> Columns</title>
+ <title><structname>pg_views</structname> Columns</title>
<tgroup cols="4">
<thead>
diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml
index 63f7de5b43..3874a3f1ea 100644
--- a/doc/src/sgml/charset.sgml
+++ b/doc/src/sgml/charset.sgml
@@ -35,12 +35,12 @@
<sect1 id="locale">
<title>Locale Support</title>
- <indexterm zone="locale"><primary>locale</></>
+ <indexterm zone="locale"><primary>locale</primary></indexterm>
<para>
- <firstterm>Locale</> support refers to an application respecting
+ <firstterm>Locale</firstterm> support refers to an application respecting
cultural preferences regarding alphabets, sorting, number
- formatting, etc. <productname>PostgreSQL</> uses the standard ISO
+ formatting, etc. <productname>PostgreSQL</productname> uses the standard ISO
C and <acronym>POSIX</acronym> locale facilities provided by the server operating
system. For additional information refer to the documentation of your
system.
@@ -67,14 +67,14 @@ initdb --locale=sv_SE
<para>
This example for Unix systems sets the locale to Swedish
- (<literal>sv</>) as spoken
- in Sweden (<literal>SE</>). Other possibilities might include
- <literal>en_US</> (U.S. English) and <literal>fr_CA</> (French
+ (<literal>sv</literal>) as spoken
+ in Sweden (<literal>SE</literal>). Other possibilities might include
+ <literal>en_US</literal> (U.S. English) and <literal>fr_CA</literal> (French
Canadian). If more than one character set can be used for a
locale then the specifications can take the form
- <replaceable>language_territory.codeset</>. For example,
- <literal>fr_BE.UTF-8</> represents the French language (fr) as
- spoken in Belgium (BE), with a <acronym>UTF-8</> character set
+ <replaceable>language_territory.codeset</replaceable>. For example,
+ <literal>fr_BE.UTF-8</literal> represents the French language (fr) as
+ spoken in Belgium (BE), with a <acronym>UTF-8</acronym> character set
encoding.
</para>
@@ -82,9 +82,9 @@ initdb --locale=sv_SE
What locales are available on your
system under what names depends on what was provided by the operating
system vendor and what was installed. On most Unix systems, the command
- <literal>locale -a</> will provide a list of available locales.
- Windows uses more verbose locale names, such as <literal>German_Germany</>
- or <literal>Swedish_Sweden.1252</>, but the principles are the same.
+ <literal>locale -a</literal> will provide a list of available locales.
+ Windows uses more verbose locale names, such as <literal>German_Germany</literal>
+ or <literal>Swedish_Sweden.1252</literal>, but the principles are the same.
</para>
<para>
@@ -97,28 +97,28 @@ initdb --locale=sv_SE
<tgroup cols="2">
<tbody>
<row>
- <entry><envar>LC_COLLATE</></>
- <entry>String sort order</>
+ <entry><envar>LC_COLLATE</envar></entry>
+ <entry>String sort order</entry>
</row>
<row>
- <entry><envar>LC_CTYPE</></>
- <entry>Character classification (What is a letter? Its upper-case equivalent?)</>
+ <entry><envar>LC_CTYPE</envar></entry>
+ <entry>Character classification (What is a letter? Its upper-case equivalent?)</entry>
</row>
<row>
- <entry><envar>LC_MESSAGES</></>
- <entry>Language of messages</>
+ <entry><envar>LC_MESSAGES</envar></entry>
+ <entry>Language of messages</entry>
</row>
<row>
- <entry><envar>LC_MONETARY</></>
- <entry>Formatting of currency amounts</>
+ <entry><envar>LC_MONETARY</envar></entry>
+ <entry>Formatting of currency amounts</entry>
</row>
<row>
- <entry><envar>LC_NUMERIC</></>
- <entry>Formatting of numbers</>
+ <entry><envar>LC_NUMERIC</envar></entry>
+ <entry>Formatting of numbers</entry>
</row>
<row>
- <entry><envar>LC_TIME</></>
- <entry>Formatting of dates and times</>
+ <entry><envar>LC_TIME</envar></entry>
+ <entry>Formatting of dates and times</entry>
</row>
</tbody>
</tgroup>
@@ -133,8 +133,8 @@ initdb --locale=sv_SE
<para>
If you want the system to behave as if it had no locale support,
- use the special locale name <literal>C</>, or equivalently
- <literal>POSIX</>.
+ use the special locale name <literal>C</literal>, or equivalently
+ <literal>POSIX</literal>.
</para>
<para>
@@ -192,14 +192,14 @@ initdb --locale=sv_SE
settings for the purpose of setting the language of messages. If
in doubt, please refer to the documentation of your operating
system, in particular the documentation about
- <application>gettext</>.
+ <application>gettext</application>.
</para>
</note>
<para>
To enable messages to be translated to the user's preferred language,
<acronym>NLS</acronym> must have been selected at build time
- (<literal>configure --enable-nls</>). All other locale support is
+ (<literal>configure --enable-nls</literal>). All other locale support is
built in automatically.
</para>
</sect2>
@@ -213,63 +213,63 @@ initdb --locale=sv_SE
<itemizedlist>
<listitem>
<para>
- Sort order in queries using <literal>ORDER BY</> or the standard
+ Sort order in queries using <literal>ORDER BY</literal> or the standard
comparison operators on textual data
- <indexterm><primary>ORDER BY</><secondary>and locales</></indexterm>
+ <indexterm><primary>ORDER BY</primary><secondary>and locales</secondary></indexterm>
</para>
</listitem>
<listitem>
<para>
- The <function>upper</>, <function>lower</>, and <function>initcap</>
+ The <function>upper</function>, <function>lower</function>, and <function>initcap</function>
functions
- <indexterm><primary>upper</><secondary>and locales</></indexterm>
- <indexterm><primary>lower</><secondary>and locales</></indexterm>
+ <indexterm><primary>upper</primary><secondary>and locales</secondary></indexterm>
+ <indexterm><primary>lower</primary><secondary>and locales</secondary></indexterm>
</para>
</listitem>
<listitem>
<para>
- Pattern matching operators (<literal>LIKE</>, <literal>SIMILAR TO</>,
+ Pattern matching operators (<literal>LIKE</literal>, <literal>SIMILAR TO</literal>,
and POSIX-style regular expressions); locales affect both case
insensitive matching and the classification of characters by
character-class regular expressions
- <indexterm><primary>LIKE</><secondary>and locales</></indexterm>
- <indexterm><primary>regular expressions</><secondary>and locales</></indexterm>
+ <indexterm><primary>LIKE</primary><secondary>and locales</secondary></indexterm>
+ <indexterm><primary>regular expressions</primary><secondary>and locales</secondary></indexterm>
</para>
</listitem>
<listitem>
<para>
- The <function>to_char</> family of functions
- <indexterm><primary>to_char</><secondary>and locales</></indexterm>
+ The <function>to_char</function> family of functions
+ <indexterm><primary>to_char</primary><secondary>and locales</secondary></indexterm>
</para>
</listitem>
<listitem>
<para>
- The ability to use indexes with <literal>LIKE</> clauses
+ The ability to use indexes with <literal>LIKE</literal> clauses
</para>
</listitem>
</itemizedlist>
</para>
<para>
- The drawback of using locales other than <literal>C</> or
- <literal>POSIX</> in <productname>PostgreSQL</> is its performance
+ The drawback of using locales other than <literal>C</literal> or
+ <literal>POSIX</literal> in <productname>PostgreSQL</productname> is its performance
impact. It slows character handling and prevents ordinary indexes
- from being used by <literal>LIKE</>. For this reason use locales
+ from being used by <literal>LIKE</literal>. For this reason use locales
only if you actually need them.
</para>
<para>
- As a workaround to allow <productname>PostgreSQL</> to use indexes
- with <literal>LIKE</> clauses under a non-C locale, several custom
+ As a workaround to allow <productname>PostgreSQL</productname> to use indexes
+ with <literal>LIKE</literal> clauses under a non-C locale, several custom
operator classes exist. These allow the creation of an index that
performs a strict character-by-character comparison, ignoring
locale comparison rules. Refer to <xref linkend="indexes-opclass">
for more information. Another approach is to create indexes using
- the <literal>C</> collation, as discussed in
+ the <literal>C</literal> collation, as discussed in
<xref linkend="collation">.
</para>
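A sketch of the two workarounds mentioned above (illustrative; the table, column, and index names are invented, and text_pattern_ops is one of the custom operator classes referred to):

    CREATE INDEX test1_a_c_idx ON test1 (a COLLATE "C");
    CREATE INDEX test1_a_pattern_idx ON test1 (a text_pattern_ops);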
</sect2>
@@ -286,20 +286,20 @@ initdb --locale=sv_SE
</para>
<para>
- Check that <productname>PostgreSQL</> is actually using the locale
- that you think it is. The <envar>LC_COLLATE</> and <envar>LC_CTYPE</>
+ Check that <productname>PostgreSQL</productname> is actually using the locale
+ that you think it is. The <envar>LC_COLLATE</envar> and <envar>LC_CTYPE</envar>
settings are determined when a database is created, and cannot be
changed except by creating a new database. Other locale
- settings including <envar>LC_MESSAGES</> and <envar>LC_MONETARY</>
+ settings including <envar>LC_MESSAGES</envar> and <envar>LC_MONETARY</envar>
are initially determined by the environment the server is started
in, but can be changed on-the-fly. You can check the active locale
- settings using the <command>SHOW</> command.
+ settings using the <command>SHOW</command> command.
</para>
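The active settings mentioned above can be inspected, for example, with:

    SHOW lc_collate;
    SHOW lc_ctype;
    SHOW lc_messages;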
<para>
- The directory <filename>src/test/locale</> in the source
+ The directory <filename>src/test/locale</filename> in the source
distribution contains a test suite for
- <productname>PostgreSQL</>'s locale support.
+ <productname>PostgreSQL</productname>'s locale support.
</para>
<para>
@@ -313,7 +313,7 @@ initdb --locale=sv_SE
<para>
Maintaining catalogs of message translations requires the on-going
efforts of many volunteers that want to see
- <productname>PostgreSQL</> speak their preferred language well.
+ <productname>PostgreSQL</productname> speak their preferred language well.
If messages in your language are currently not available or not fully
translated, your assistance would be appreciated. If you want to
help, refer to <xref linkend="nls"> or write to the developers'
@@ -326,7 +326,7 @@ initdb --locale=sv_SE
<sect1 id="collation">
<title>Collation Support</title>
- <indexterm zone="collation"><primary>collation</></>
+ <indexterm zone="collation"><primary>collation</primary></indexterm>
<para>
The collation feature allows specifying the sort order and character
@@ -370,9 +370,9 @@ initdb --locale=sv_SE
function or operator call is derived from the arguments, as described
below. In addition to comparison operators, collations are taken into
account by functions that convert between lower and upper case
- letters, such as <function>lower</>, <function>upper</>, and
- <function>initcap</>; by pattern matching operators; and by
- <function>to_char</> and related functions.
+ letters, such as <function>lower</function>, <function>upper</function>, and
+ <function>initcap</function>; by pattern matching operators; and by
+ <function>to_char</function> and related functions.
</para>
<para>
@@ -452,7 +452,7 @@ SELECT a &lt; ('foo' COLLATE "fr_FR") FROM test1;
SELECT a &lt; b FROM test1;
</programlisting>
the parser cannot determine which collation to apply, since the
- <structfield>a</> and <structfield>b</> columns have conflicting
+ <structfield>a</structfield> and <structfield>b</structfield> columns have conflicting
implicit collations. Since the <literal>&lt;</literal> operator
does need to know which collation to use, this will result in an
error. The error can be resolved by attaching an explicit collation
@@ -468,7 +468,7 @@ SELECT a COLLATE "de_DE" &lt; b FROM test1;
<programlisting>
SELECT a || b FROM test1;
</programlisting>
- does not result in an error, because the <literal>||</> operator
+ does not result in an error, because the <literal>||</literal> operator
does not care about collations: its result is the same regardless
of the collation.
</para>
@@ -486,8 +486,8 @@ SELECT * FROM test1 ORDER BY a || 'foo';
<programlisting>
SELECT * FROM test1 ORDER BY a || b;
</programlisting>
- results in an error, because even though the <literal>||</> operator
- doesn't need to know a collation, the <literal>ORDER BY</> clause does.
+ results in an error, because even though the <literal>||</literal> operator
+ doesn't need to know a collation, the <literal>ORDER BY</literal> clause does.
As before, the conflict can be resolved with an explicit collation
specifier:
<programlisting>
@@ -508,7 +508,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
operating system C library. These are the locales that most tools
provided by the operating system use. Another provider
is <literal>icu</literal>, which uses the external
- ICU<indexterm><primary>ICU</></> library. ICU locales can only be
+ ICU<indexterm><primary>ICU</primary></indexterm> library. ICU locales can only be
used if support for ICU was configured when PostgreSQL was built.
</para>
@@ -541,14 +541,14 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
<title>Standard Collations</title>
<para>
- On all platforms, the collations named <literal>default</>,
- <literal>C</>, and <literal>POSIX</> are available. Additional
+ On all platforms, the collations named <literal>default</literal>,
+ <literal>C</literal>, and <literal>POSIX</literal> are available. Additional
collations may be available depending on operating system support.
- The <literal>default</> collation selects the <symbol>LC_COLLATE</symbol>
+ The <literal>default</literal> collation selects the <symbol>LC_COLLATE</symbol>
and <symbol>LC_CTYPE</symbol> values specified at database creation time.
- The <literal>C</> and <literal>POSIX</> collations both specify
- <quote>traditional C</> behavior, in which only the ASCII letters
- <quote><literal>A</></quote> through <quote><literal>Z</></quote>
+ The <literal>C</literal> and <literal>POSIX</literal> collations both specify
+ <quote>traditional C</quote> behavior, in which only the ASCII letters
+ <quote><literal>A</literal></quote> through <quote><literal>Z</literal></quote>
are treated as letters, and sorting is done strictly by character
code byte values.
</para>
@@ -565,7 +565,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
<para>
If the operating system provides support for using multiple locales
- within a single program (<function>newlocale</> and related functions),
+ within a single program (<function>newlocale</function> and related functions),
or if support for ICU is configured,
then when a database cluster is initialized, <command>initdb</command>
populates the system catalog <literal>pg_collation</literal> with
@@ -618,8 +618,8 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
within a given database even though it would not be unique globally.
Use of the stripped collation names is recommended, since it will
make one less thing you need to change if you decide to change to
- another database encoding. Note however that the <literal>default</>,
- <literal>C</>, and <literal>POSIX</> collations can be used regardless of
+ another database encoding. Note however that the <literal>default</literal>,
+ <literal>C</literal>, and <literal>POSIX</literal> collations can be used regardless of
the database encoding.
</para>
@@ -630,7 +630,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
<programlisting>
SELECT a COLLATE "C" &lt; b COLLATE "POSIX" FROM test1;
</programlisting>
- will draw an error even though the <literal>C</> and <literal>POSIX</>
+ will draw an error even though the <literal>C</literal> and <literal>POSIX</literal>
collations have identical behaviors. Mixing stripped and non-stripped
collation names is therefore not recommended.
</para>
@@ -691,7 +691,7 @@ SELECT a COLLATE "C" &lt; b COLLATE "POSIX" FROM test1;
database encoding is one of these, ICU collation entries
in <literal>pg_collation</literal> are ignored. Attempting to use one
will draw an error along the lines of <quote>collation "de-x-icu" for
- encoding "WIN874" does not exist</>.
+ encoding "WIN874" does not exist</quote>.
</para>
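To see which ICU collations were loaded into pg_collation, and whether each one is tied to a particular encoding, a query along these lines can be used (the result naturally depends on whether ICU support was built in):
<programlisting>
SELECT collname, collencoding    -- collencoding = -1 means usable with any encoding
FROM pg_collation
WHERE collprovider = 'i'
ORDER BY collname
LIMIT 5;
</programlisting>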
</sect4>
</sect3>
@@ -889,30 +889,30 @@ CREATE COLLATION french FROM "fr-x-icu";
<sect1 id="multibyte">
<title>Character Set Support</title>
- <indexterm zone="multibyte"><primary>character set</></>
+ <indexterm zone="multibyte"><primary>character set</primary></indexterm>
<para>
The character set support in <productname>PostgreSQL</productname>
allows you to store text in a variety of character sets (also called
encodings), including
single-byte character sets such as the ISO 8859 series and
- multiple-byte character sets such as <acronym>EUC</> (Extended Unix
+ multiple-byte character sets such as <acronym>EUC</acronym> (Extended Unix
Code), UTF-8, and Mule internal code. All supported character sets
can be used transparently by clients, but a few are not supported
for use within the server (that is, as a server-side encoding).
The default character set is selected while
initializing your <productname>PostgreSQL</productname> database
- cluster using <command>initdb</>. It can be overridden when you
+ cluster using <command>initdb</command>. It can be overridden when you
create a database, so you can have multiple
databases each with a different character set.
</para>
<para>
An important restriction, however, is that each database's character set
- must be compatible with the database's <envar>LC_CTYPE</> (character
- classification) and <envar>LC_COLLATE</> (string sort order) locale
- settings. For <literal>C</> or
- <literal>POSIX</> locale, any character set is allowed, but for other
+ must be compatible with the database's <envar>LC_CTYPE</envar> (character
+ classification) and <envar>LC_COLLATE</envar> (string sort order) locale
+ settings. For <literal>C</literal> or
+ <literal>POSIX</literal> locale, any character set is allowed, but for other
libc-provided locales there is only one character set that will work
correctly.
(On Windows, however, UTF-8 encoding can be used with any locale.)
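The encoding and locale settings that were fixed for each database at creation time can be inspected directly, for example:
<programlisting>
SELECT datname,
       pg_encoding_to_char(encoding) AS encoding,
       datcollate,
       datctype
FROM pg_database;
</programlisting>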
@@ -954,7 +954,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>No</entry>
<entry>No</entry>
<entry>1-2</entry>
- <entry><literal>WIN950</>, <literal>Windows950</></entry>
+ <entry><literal>WIN950</literal>, <literal>Windows950</literal></entry>
</row>
<row>
<entry><literal>EUC_CN</literal></entry>
@@ -1017,11 +1017,11 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>No</entry>
<entry>No</entry>
<entry>1-2</entry>
- <entry><literal>WIN936</>, <literal>Windows936</></entry>
+ <entry><literal>WIN936</literal>, <literal>Windows936</literal></entry>
</row>
<row>
<entry><literal>ISO_8859_5</literal></entry>
- <entry>ISO 8859-5, <acronym>ECMA</> 113</entry>
+ <entry>ISO 8859-5, <acronym>ECMA</acronym> 113</entry>
<entry>Latin/Cyrillic</entry>
<entry>Yes</entry>
<entry>Yes</entry>
@@ -1030,7 +1030,7 @@ CREATE COLLATION french FROM "fr-x-icu";
</row>
<row>
<entry><literal>ISO_8859_6</literal></entry>
- <entry>ISO 8859-6, <acronym>ECMA</> 114</entry>
+ <entry>ISO 8859-6, <acronym>ECMA</acronym> 114</entry>
<entry>Latin/Arabic</entry>
<entry>Yes</entry>
<entry>Yes</entry>
@@ -1039,7 +1039,7 @@ CREATE COLLATION french FROM "fr-x-icu";
</row>
<row>
<entry><literal>ISO_8859_7</literal></entry>
- <entry>ISO 8859-7, <acronym>ECMA</> 118</entry>
+ <entry>ISO 8859-7, <acronym>ECMA</acronym> 118</entry>
<entry>Latin/Greek</entry>
<entry>Yes</entry>
<entry>Yes</entry>
@@ -1048,7 +1048,7 @@ CREATE COLLATION french FROM "fr-x-icu";
</row>
<row>
<entry><literal>ISO_8859_8</literal></entry>
- <entry>ISO 8859-8, <acronym>ECMA</> 121</entry>
+ <entry>ISO 8859-8, <acronym>ECMA</acronym> 121</entry>
<entry>Latin/Hebrew</entry>
<entry>Yes</entry>
<entry>Yes</entry>
@@ -1057,7 +1057,7 @@ CREATE COLLATION french FROM "fr-x-icu";
</row>
<row>
<entry><literal>JOHAB</literal></entry>
- <entry><acronym>JOHAB</></entry>
+ <entry><acronym>JOHAB</acronym></entry>
<entry>Korean (Hangul)</entry>
<entry>No</entry>
<entry>No</entry>
@@ -1071,7 +1071,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>KOI8</></entry>
+ <entry><literal>KOI8</literal></entry>
</row>
<row>
<entry><literal>KOI8U</literal></entry>
@@ -1084,57 +1084,57 @@ CREATE COLLATION french FROM "fr-x-icu";
</row>
<row>
<entry><literal>LATIN1</literal></entry>
- <entry>ISO 8859-1, <acronym>ECMA</> 94</entry>
+ <entry>ISO 8859-1, <acronym>ECMA</acronym> 94</entry>
<entry>Western European</entry>
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO88591</></entry>
+ <entry><literal>ISO88591</literal></entry>
</row>
<row>
<entry><literal>LATIN2</literal></entry>
- <entry>ISO 8859-2, <acronym>ECMA</> 94</entry>
+ <entry>ISO 8859-2, <acronym>ECMA</acronym> 94</entry>
<entry>Central European</entry>
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO88592</></entry>
+ <entry><literal>ISO88592</literal></entry>
</row>
<row>
<entry><literal>LATIN3</literal></entry>
- <entry>ISO 8859-3, <acronym>ECMA</> 94</entry>
+ <entry>ISO 8859-3, <acronym>ECMA</acronym> 94</entry>
<entry>South European</entry>
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO88593</></entry>
+ <entry><literal>ISO88593</literal></entry>
</row>
<row>
<entry><literal>LATIN4</literal></entry>
- <entry>ISO 8859-4, <acronym>ECMA</> 94</entry>
+ <entry>ISO 8859-4, <acronym>ECMA</acronym> 94</entry>
<entry>North European</entry>
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO88594</></entry>
+ <entry><literal>ISO88594</literal></entry>
</row>
<row>
<entry><literal>LATIN5</literal></entry>
- <entry>ISO 8859-9, <acronym>ECMA</> 128</entry>
+ <entry>ISO 8859-9, <acronym>ECMA</acronym> 128</entry>
<entry>Turkish</entry>
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO88599</></entry>
+ <entry><literal>ISO88599</literal></entry>
</row>
<row>
<entry><literal>LATIN6</literal></entry>
- <entry>ISO 8859-10, <acronym>ECMA</> 144</entry>
+ <entry>ISO 8859-10, <acronym>ECMA</acronym> 144</entry>
<entry>Nordic</entry>
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO885910</></entry>
+ <entry><literal>ISO885910</literal></entry>
</row>
<row>
<entry><literal>LATIN7</literal></entry>
@@ -1143,7 +1143,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO885913</></entry>
+ <entry><literal>ISO885913</literal></entry>
</row>
<row>
<entry><literal>LATIN8</literal></entry>
@@ -1152,7 +1152,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO885914</></entry>
+ <entry><literal>ISO885914</literal></entry>
</row>
<row>
<entry><literal>LATIN9</literal></entry>
@@ -1161,16 +1161,16 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ISO885915</></entry>
+ <entry><literal>ISO885915</literal></entry>
</row>
<row>
<entry><literal>LATIN10</literal></entry>
- <entry>ISO 8859-16, <acronym>ASRO</> SR 14111</entry>
+ <entry>ISO 8859-16, <acronym>ASRO</acronym> SR 14111</entry>
<entry>Romanian</entry>
<entry>Yes</entry>
<entry>No</entry>
<entry>1</entry>
- <entry><literal>ISO885916</></entry>
+ <entry><literal>ISO885916</literal></entry>
</row>
<row>
<entry><literal>MULE_INTERNAL</literal></entry>
@@ -1188,7 +1188,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>No</entry>
<entry>No</entry>
<entry>1-2</entry>
- <entry><literal>Mskanji</>, <literal>ShiftJIS</>, <literal>WIN932</>, <literal>Windows932</></entry>
+ <entry><literal>Mskanji</literal>, <literal>ShiftJIS</literal>, <literal>WIN932</literal>, <literal>Windows932</literal></entry>
</row>
<row>
<entry><literal>SHIFT_JIS_2004</literal></entry>
@@ -1202,7 +1202,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<row>
<entry><literal>SQL_ASCII</literal></entry>
<entry>unspecified (see text)</entry>
- <entry><emphasis>any</></entry>
+ <entry><emphasis>any</emphasis></entry>
<entry>Yes</entry>
<entry>No</entry>
<entry>1</entry>
@@ -1215,16 +1215,16 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>No</entry>
<entry>No</entry>
<entry>1-2</entry>
- <entry><literal>WIN949</>, <literal>Windows949</></entry>
+ <entry><literal>WIN949</literal>, <literal>Windows949</literal></entry>
</row>
<row>
<entry><literal>UTF8</literal></entry>
<entry>Unicode, 8-bit</entry>
- <entry><emphasis>all</></entry>
+ <entry><emphasis>all</emphasis></entry>
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1-4</entry>
- <entry><literal>Unicode</></entry>
+ <entry><literal>Unicode</literal></entry>
</row>
<row>
<entry><literal>WIN866</literal></entry>
@@ -1233,7 +1233,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ALT</></entry>
+ <entry><literal>ALT</literal></entry>
</row>
<row>
<entry><literal>WIN874</literal></entry>
@@ -1260,7 +1260,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>WIN</></entry>
+ <entry><literal>WIN</literal></entry>
</row>
<row>
<entry><literal>WIN1252</literal></entry>
@@ -1323,30 +1323,30 @@ CREATE COLLATION french FROM "fr-x-icu";
<entry>Yes</entry>
<entry>Yes</entry>
<entry>1</entry>
- <entry><literal>ABC</>, <literal>TCVN</>, <literal>TCVN5712</>, <literal>VSCII</></entry>
+ <entry><literal>ABC</literal>, <literal>TCVN</literal>, <literal>TCVN5712</literal>, <literal>VSCII</literal></entry>
</row>
</tbody>
</tgroup>
</table>
<para>
- Not all client <acronym>API</>s support all the listed character sets. For example, the
- <productname>PostgreSQL</>
- JDBC driver does not support <literal>MULE_INTERNAL</>, <literal>LATIN6</>,
- <literal>LATIN8</>, and <literal>LATIN10</>.
+ Not all client <acronym>API</acronym>s support all the listed character sets. For example, the
+ <productname>PostgreSQL</productname>
+ JDBC driver does not support <literal>MULE_INTERNAL</literal>, <literal>LATIN6</literal>,
+ <literal>LATIN8</literal>, and <literal>LATIN10</literal>.
</para>
<para>
- The <literal>SQL_ASCII</> setting behaves considerably differently
+ The <literal>SQL_ASCII</literal> setting behaves considerably differently
from the other settings. When the server character set is
- <literal>SQL_ASCII</>, the server interprets byte values 0-127
+ <literal>SQL_ASCII</literal>, the server interprets byte values 0-127
according to the ASCII standard, while byte values 128-255 are taken
as uninterpreted characters. No encoding conversion will be done when
- the setting is <literal>SQL_ASCII</>. Thus, this setting is not so
+ the setting is <literal>SQL_ASCII</literal>. Thus, this setting is not so
much a declaration that a specific encoding is in use, as a declaration
of ignorance about the encoding. In most cases, if you are
working with any non-ASCII data, it is unwise to use the
- <literal>SQL_ASCII</> setting because
+ <literal>SQL_ASCII</literal> setting because
<productname>PostgreSQL</productname> will be unable to help you by
converting or validating non-ASCII characters.
</para>
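To check whether a given database was created with SQL_ASCII (or any other server encoding), either of the following can be run while connected to that database:
<programlisting>
SHOW server_encoding;
SELECT pg_encoding_to_char(encoding)
FROM pg_database
WHERE datname = current_database();
</programlisting>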
@@ -1356,7 +1356,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<title>Setting the Character Set</title>
<para>
- <command>initdb</> defines the default character set (encoding)
+ <command>initdb</command> defines the default character set (encoding)
for a <productname>PostgreSQL</productname> cluster. For example,
<screen>
@@ -1367,8 +1367,8 @@ initdb -E EUC_JP
<literal>EUC_JP</literal> (Extended Unix Code for Japanese). You
can use <option>--encoding</option> instead of
<option>-E</option> if you prefer longer option strings.
- If no <option>-E</> or <option>--encoding</option> option is
- given, <command>initdb</> attempts to determine the appropriate
+ If no <option>-E</option> or <option>--encoding</option> option is
+ given, <command>initdb</command> attempts to determine the appropriate
encoding to use based on the specified or default locale.
</para>
@@ -1388,7 +1388,7 @@ createdb -E EUC_KR -T template0 --lc-collate=ko_KR.euckr --lc-ctype=ko_KR.euckr
CREATE DATABASE korean WITH ENCODING 'EUC_KR' LC_COLLATE='ko_KR.euckr' LC_CTYPE='ko_KR.euckr' TEMPLATE=template0;
</programlisting>
- Notice that the above commands specify copying the <literal>template0</>
+ Notice that the above commands specify copying the <literal>template0</literal>
database. When copying any other database, the encoding and locale
settings cannot be changed from those of the source database, because
that might result in corrupt data. For more information see
@@ -1420,7 +1420,7 @@ $ <userinput>psql -l</userinput>
<important>
<para>
On most modern operating systems, <productname>PostgreSQL</productname>
- can determine which character set is implied by the <envar>LC_CTYPE</>
+ can determine which character set is implied by the <envar>LC_CTYPE</envar>
setting, and it will enforce that only the matching database encoding is
used. On older systems it is your responsibility to ensure that you use
the encoding expected by the locale you have selected. A mistake in
@@ -1430,9 +1430,9 @@ $ <userinput>psql -l</userinput>
<para>
<productname>PostgreSQL</productname> will allow superusers to create
- databases with <literal>SQL_ASCII</> encoding even when
- <envar>LC_CTYPE</> is not <literal>C</> or <literal>POSIX</>. As noted
- above, <literal>SQL_ASCII</> does not enforce that the data stored in
+ databases with <literal>SQL_ASCII</literal> encoding even when
+ <envar>LC_CTYPE</envar> is not <literal>C</literal> or <literal>POSIX</literal>. As noted
+ above, <literal>SQL_ASCII</literal> does not enforce that the data stored in
the database has any particular encoding, and so this choice poses risks
of locale-dependent misbehavior. Using this combination of settings is
deprecated and may someday be forbidden altogether.
@@ -1447,7 +1447,7 @@ $ <userinput>psql -l</userinput>
<productname>PostgreSQL</productname> supports automatic
character set conversion between server and client for certain
character set combinations. The conversion information is stored in the
- <literal>pg_conversion</> system catalog. <productname>PostgreSQL</>
+ <literal>pg_conversion</literal> system catalog. <productname>PostgreSQL</productname>
comes with some predefined conversions, as shown in <xref
linkend="multibyte-translation-table">. You can create a new
conversion using the SQL command <command>CREATE CONVERSION</command>.
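The predefined conversions can also be listed straight from pg_conversion; a small sample query such as the following shows the source and target encodings of each conversion:
<programlisting>
SELECT conname,
       pg_encoding_to_char(conforencoding) AS source_encoding,
       pg_encoding_to_char(contoencoding)  AS target_encoding
FROM pg_conversion
ORDER BY conname
LIMIT 5;
</programlisting>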
@@ -1763,7 +1763,7 @@ $ <userinput>psql -l</userinput>
<listitem>
<para>
- <application>libpq</> (<xref linkend="libpq-control">) has functions to control the client encoding.
+ <application>libpq</application> (<xref linkend="libpq-control">) has functions to control the client encoding.
</para>
</listitem>
@@ -1774,14 +1774,14 @@ $ <userinput>psql -l</userinput>
Setting the client encoding can be done with this SQL command:
<programlisting>
-SET CLIENT_ENCODING TO '<replaceable>value</>';
+SET CLIENT_ENCODING TO '<replaceable>value</replaceable>';
</programlisting>
Also you can use the standard SQL syntax <literal>SET NAMES</literal>
for this purpose:
<programlisting>
-SET NAMES '<replaceable>value</>';
+SET NAMES '<replaceable>value</replaceable>';
</programlisting>
To query the current client encoding:
@@ -1813,7 +1813,7 @@ RESET client_encoding;
<para>
Using the configuration variable <xref
linkend="guc-client-encoding">. If the
- <varname>client_encoding</> variable is set, that client
+ <varname>client_encoding</varname> variable is set, that client
encoding is automatically selected when a connection to the
server is made. (This can subsequently be overridden using any
of the other methods mentioned above.)
@@ -1832,9 +1832,9 @@ RESET client_encoding;
</para>
<para>
- If the client character set is defined as <literal>SQL_ASCII</>,
+ If the client character set is defined as <literal>SQL_ASCII</literal>,
encoding conversion is disabled, regardless of the server's character
- set. Just as for the server, use of <literal>SQL_ASCII</> is unwise
+ set. Just as for the server, use of <literal>SQL_ASCII</literal> is unwise
unless you are working with all-ASCII data.
</para>
</sect2>
diff --git a/doc/src/sgml/citext.sgml b/doc/src/sgml/citext.sgml
index 9b4c68f7d4..82251de852 100644
--- a/doc/src/sgml/citext.sgml
+++ b/doc/src/sgml/citext.sgml
@@ -8,10 +8,10 @@
</indexterm>
<para>
- The <filename>citext</> module provides a case-insensitive
- character string type, <type>citext</>. Essentially, it internally calls
- <function>lower</> when comparing values. Otherwise, it behaves almost
- exactly like <type>text</>.
+ The <filename>citext</filename> module provides a case-insensitive
+ character string type, <type>citext</type>. Essentially, it internally calls
+ <function>lower</function> when comparing values. Otherwise, it behaves almost
+ exactly like <type>text</type>.
</para>
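A minimal sketch of the behavior, assuming the extension has been installed in the current database:
<programlisting>
CREATE EXTENSION IF NOT EXISTS citext;
SELECT 'Fred'::citext = 'FRED'::citext;   -- true: comparison is case-insensitive
SELECT 'Fred'::text = 'FRED'::text;       -- false: plain text compares case-sensitively
</programlisting>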
<sect2>
@@ -19,7 +19,7 @@
<para>
The standard approach to doing case-insensitive matches
- in <productname>PostgreSQL</> has been to use the <function>lower</>
+ in <productname>PostgreSQL</productname> has been to use the <function>lower</function>
function when comparing values, for example
<programlisting>
@@ -35,19 +35,19 @@ SELECT * FROM tab WHERE lower(col) = LOWER(?);
<listitem>
<para>
It makes your SQL statements verbose, and you always have to remember to
- use <function>lower</> on both the column and the query value.
+ use <function>lower</function> on both the column and the query value.
</para>
</listitem>
<listitem>
<para>
It won't use an index, unless you create a functional index using
- <function>lower</>.
+ <function>lower</function>.
</para>
</listitem>
<listitem>
<para>
- If you declare a column as <literal>UNIQUE</> or <literal>PRIMARY
- KEY</>, the implicitly generated index is case-sensitive. So it's
+ If you declare a column as <literal>UNIQUE</literal> or <literal>PRIMARY
+ KEY</literal>, the implicitly generated index is case-sensitive. So it's
useless for case-insensitive searches, and it won't enforce
uniqueness case-insensitively.
</para>
@@ -55,13 +55,13 @@ SELECT * FROM tab WHERE lower(col) = LOWER(?);
</itemizedlist>
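The index limitation mentioned above is usually worked around with an expression index on lower(); a sketch using the tab and col names from the example query (the index name is arbitrary):
<programlisting>
CREATE INDEX tab_lower_col_idx ON tab (lower(col));
SELECT * FROM tab WHERE lower(col) = lower('Value');   -- can use the expression index
</programlisting>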
<para>
- The <type>citext</> data type allows you to eliminate calls
- to <function>lower</> in SQL queries, and allows a primary key to
- be case-insensitive. <type>citext</> is locale-aware, just
- like <type>text</>, which means that the matching of upper case and
+ The <type>citext</type> data type allows you to eliminate calls
+ to <function>lower</function> in SQL queries, and allows a primary key to
+ be case-insensitive. <type>citext</type> is locale-aware, just
+ like <type>text</type>, which means that the matching of upper case and
lower case characters is dependent on the rules of
- the database's <literal>LC_CTYPE</> setting. Again, this behavior is
- identical to the use of <function>lower</> in queries. But because it's
+ the database's <literal>LC_CTYPE</literal> setting. Again, this behavior is
+ identical to the use of <function>lower</function> in queries. But because it's
done transparently by the data type, you don't have to remember to do
anything special in your queries.
</para>
@@ -89,9 +89,9 @@ INSERT INTO users VALUES ( 'Bj&oslash;rn', md5(random()::text) );
SELECT * FROM users WHERE nick = 'Larry';
</programlisting>
- The <command>SELECT</> statement will return one tuple, even though
- the <structfield>nick</> column was set to <literal>larry</> and the query
- was for <literal>Larry</>.
+ The <command>SELECT</command> statement will return one tuple, even though
+ the <structfield>nick</structfield> column was set to <literal>larry</literal> and the query
+ was for <literal>Larry</literal>.
</para>
</sect2>
@@ -99,82 +99,82 @@ SELECT * FROM users WHERE nick = 'Larry';
<title>String Comparison Behavior</title>
<para>
- <type>citext</> performs comparisons by converting each string to lower
- case (as though <function>lower</> were called) and then comparing the
+ <type>citext</type> performs comparisons by converting each string to lower
+ case (as though <function>lower</function> were called) and then comparing the
results normally. Thus, for example, two strings are considered equal
- if <function>lower</> would produce identical results for them.
+ if <function>lower</function> would produce identical results for them.
</para>
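In other words, the following two comparisons are expected to give the same answer:
<programlisting>
SELECT lower('MixedCase') = lower('MIXEDCASE');     -- true
SELECT 'MixedCase'::citext = 'MIXEDCASE'::citext;   -- also true
</programlisting>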
<para>
In order to emulate a case-insensitive collation as closely as possible,
- there are <type>citext</>-specific versions of a number of string-processing
+ there are <type>citext</type>-specific versions of a number of string-processing
operators and functions. So, for example, the regular expression
- operators <literal>~</> and <literal>~*</> exhibit the same behavior when
- applied to <type>citext</>: they both match case-insensitively.
+ operators <literal>~</literal> and <literal>~*</literal> exhibit the same behavior when
+ applied to <type>citext</type>: they both match case-insensitively.
The same is true
- for <literal>!~</> and <literal>!~*</>, as well as for the
- <literal>LIKE</> operators <literal>~~</> and <literal>~~*</>, and
- <literal>!~~</> and <literal>!~~*</>. If you'd like to match
- case-sensitively, you can cast the operator's arguments to <type>text</>.
+ for <literal>!~</literal> and <literal>!~*</literal>, as well as for the
+ <literal>LIKE</literal> operators <literal>~~</literal> and <literal>~~*</literal>, and
+ <literal>!~~</literal> and <literal>!~~*</literal>. If you'd like to match
+ case-sensitively, you can cast the operator's arguments to <type>text</type>.
</para>
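For example, the same pattern matches or not depending on whether the left-hand value is left as citext or cast back to text (the literal values are arbitrary):
<programlisting>
SELECT 'Larry'::citext ~ 'larry';         -- true: regular-expression matching on citext is case-insensitive
SELECT 'Larry'::citext::text ~ 'larry';   -- false: casting to text restores case-sensitive matching
SELECT 'Larry'::citext LIKE 'larry%';     -- true: LIKE on citext is case-insensitive as well
</programlisting>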
<para>
Similarly, all of the following functions perform matching
- case-insensitively if their arguments are <type>citext</>:
+ case-insensitively if their arguments are <type>citext</type>:
</para>
<itemizedlist>
<listitem>
<para>
- <function>regexp_match()</>
+ <function>regexp_match()</function>
</para>
</listitem>
<listitem>
<para>
- <function>regexp_matches()</>
+ <function>regexp_matches()</function>
</para>
</listitem>
<listitem>
<para>
- <function>regexp_replace()</>
+ <function>regexp_replace()</function>
</para>
</listitem>
<listitem>
<para>
- <function>regexp_split_to_array()</>
+ <function>regexp_split_to_array()</function>
</para>
</listitem>
<listitem>
<para>
- <function>regexp_split_to_table()</>
+ <function>regexp_split_to_table()</function>
</para>
</listitem>
<listitem>
<para>
- <function>replace()</>
+ <function>replace()</function>
</para>
</listitem>
<listitem>
<para>
- <function>split_part()</>
+ <function>split_part()</function>
</para>
</listitem>
<listitem>
<para>
- <function>strpos()</>
+ <function>strpos()</function>
</para>
</listitem>
<listitem>
<para>
- <function>translate()</>
+ <function>translate()</function>
</para>
</listitem>
</itemizedlist>
<para>
For the regexp functions, if you want to match case-sensitively, you can
- specify the <quote>c</> flag to force a case-sensitive match. Otherwise,
- you must cast to <type>text</> before using one of these functions if
+ specify the <quote>c</quote> flag to force a case-sensitive match. Otherwise,
+ you must cast to <type>text</type> before using one of these functions if
you want case-sensitive behavior.
</para>
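A small sketch of the c flag with regexp_match (the literal values are arbitrary):
<programlisting>
SELECT regexp_match('ABC'::citext, 'abc');        -- {ABC}: matched case-insensitively
SELECT regexp_match('ABC'::citext, 'abc', 'c');   -- NULL: the 'c' flag forces a case-sensitive match
</programlisting>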
@@ -186,13 +186,13 @@ SELECT * FROM users WHERE nick = 'Larry';
<itemizedlist>
<listitem>
<para>
- <type>citext</>'s case-folding behavior depends on
- the <literal>LC_CTYPE</> setting of your database. How it compares
+ <type>citext</type>'s case-folding behavior depends on
+ the <literal>LC_CTYPE</literal> setting of your database. How it compares
values is therefore determined when the database is created.
It is not truly
case-insensitive in the terms defined by the Unicode standard.
Effectively, what this means is that, as long as you're happy with your
- collation, you should be happy with <type>citext</>'s comparisons. But
+ collation, you should be happy with <type>citext</type>'s comparisons. But
if you have data in different languages stored in your database, users
of one language may find their query results are not as expected if the
collation is for another language.
@@ -201,38 +201,38 @@ SELECT * FROM users WHERE nick = 'Larry';
<listitem>
<para>
- As of <productname>PostgreSQL</> 9.1, you can attach a
- <literal>COLLATE</> specification to <type>citext</> columns or data
- values. Currently, <type>citext</> operators will honor a non-default
- <literal>COLLATE</> specification while comparing case-folded strings,
+ As of <productname>PostgreSQL</productname> 9.1, you can attach a
+ <literal>COLLATE</literal> specification to <type>citext</type> columns or data
+ values. Currently, <type>citext</type> operators will honor a non-default
+ <literal>COLLATE</literal> specification while comparing case-folded strings,
but the initial folding to lower case is always done according to the
- database's <literal>LC_CTYPE</> setting (that is, as though
- <literal>COLLATE "default"</> were given). This may be changed in a
- future release so that both steps follow the input <literal>COLLATE</>
+ database's <literal>LC_CTYPE</literal> setting (that is, as though
+ <literal>COLLATE "default"</literal> were given). This may be changed in a
+ future release so that both steps follow the input <literal>COLLATE</literal>
specification.
</para>
</listitem>
<listitem>
<para>
- <type>citext</> is not as efficient as <type>text</> because the
+ <type>citext</type> is not as efficient as <type>text</type> because the
operator functions and the B-tree comparison functions must make copies
of the data and convert it to lower case for comparisons. It is,
- however, slightly more efficient than using <function>lower</> to get
+ however, slightly more efficient than using <function>lower</function> to get
case-insensitive matching.
</para>
</listitem>
<listitem>
<para>
- <type>citext</> doesn't help much if you need data to compare
+ <type>citext</type> doesn't help much if you need data to compare
case-sensitively in some contexts and case-insensitively in other
- contexts. The standard answer is to use the <type>text</> type and
- manually use the <function>lower</> function when you need to compare
+ contexts. The standard answer is to use the <type>text</type> type and
+ manually use the <function>lower</function> function when you need to compare
case-insensitively; this works all right if case-insensitive comparison
is needed only infrequently. If you need case-insensitive behavior most
of the time and case-sensitive infrequently, consider storing the data
- as <type>citext</> and explicitly casting the column to <type>text</>
+ as <type>citext</type> and explicitly casting the column to <type>text</type>
when you want case-sensitive comparison. In either situation, you will
need two indexes if you want both types of searches to be fast.
</para>
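A sketch of the two-index arrangement, with hypothetical table, column, and index names:
<programlisting>
CREATE TABLE accounts (login citext PRIMARY KEY);            -- primary key index serves case-insensitive lookups
CREATE INDEX accounts_login_text_idx ON accounts ((login::text));
SELECT * FROM accounts WHERE login = 'admin';                -- case-insensitive, uses the primary key index
SELECT * FROM accounts WHERE login::text = 'Admin';          -- case-sensitive, can use the expression index
</programlisting>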
@@ -240,9 +240,9 @@ SELECT * FROM users WHERE nick = 'Larry';
<listitem>
<para>
- The schema containing the <type>citext</> operators must be
- in the current <varname>search_path</> (typically <literal>public</>);
- if it is not, the normal case-sensitive <type>text</> operators
+ The schema containing the <type>citext</type> operators must be
+ in the current <varname>search_path</varname> (typically <literal>public</literal>);
+ if it is not, the normal case-sensitive <type>text</type> operators
will be invoked instead.
</para>
</listitem>
@@ -257,7 +257,7 @@ SELECT * FROM users WHERE nick = 'Larry';
</para>
<para>
- Inspired by the original <type>citext</> module by Donald Fraser.
+ Inspired by the original <type>citext</type> module by Donald Fraser.
</para>
</sect2>
diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml
index 78c594bbba..722f3da813 100644
--- a/doc/src/sgml/client-auth.sgml
+++ b/doc/src/sgml/client-auth.sgml
@@ -21,9 +21,9 @@
<para>
As explained in <xref linkend="user-manag">,
<productname>PostgreSQL</productname> actually does privilege
- management in terms of <quote>roles</>. In this chapter, we
- consistently use <firstterm>database user</> to mean <quote>role with the
- <literal>LOGIN</> privilege</quote>.
+ management in terms of <quote>roles</quote>. In this chapter, we
+ consistently use <firstterm>database user</firstterm> to mean <quote>role with the
+ <literal>LOGIN</literal> privilege</quote>.
</para>
</note>
@@ -66,7 +66,7 @@
which traditionally is named
<filename>pg_hba.conf</filename> and is stored in the database
cluster's data directory.
- (<acronym>HBA</> stands for host-based authentication.) A default
+ (<acronym>HBA</acronym> stands for host-based authentication.) A default
<filename>pg_hba.conf</filename> file is installed when the data
directory is initialized by <command>initdb</command>. It is
possible to place the authentication configuration file elsewhere,
@@ -82,7 +82,7 @@
up of a number of fields which are separated by spaces and/or tabs.
Fields can contain white space if the field value is double-quoted.
Quoting one of the keywords in a database, user, or address field (e.g.,
- <literal>all</> or <literal>replication</>) makes the word lose its special
+ <literal>all</literal> or <literal>replication</literal>) makes the word lose its special
meaning, and just match a database, user, or host with that name.
</para>
@@ -92,8 +92,8 @@
and the authentication method to be used for connections matching
these parameters. The first record with a matching connection type,
client address, requested database, and user name is used to perform
- authentication. There is no <quote>fall-through</> or
- <quote>backup</>: if one record is chosen and the authentication
+ authentication. There is no <quote>fall-through</quote> or
+ <quote>backup</quote>: if one record is chosen and the authentication
fails, subsequent records are not considered. If no record matches,
access is denied.
</para>
@@ -138,7 +138,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
the server is started with an appropriate value for the
<xref linkend="guc-listen-addresses"> configuration parameter,
since the default behavior is to listen for TCP/IP connections
- only on the local loopback address <literal>localhost</>.
+ only on the local loopback address <literal>localhost</literal>.
</para>
</note>
</listitem>
@@ -169,7 +169,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<term><literal>hostnossl</literal></term>
<listitem>
<para>
- This record type has the opposite behavior of <literal>hostssl</>;
+ This record type has the opposite behavior of <literal>hostssl</literal>;
it only matches connection attempts made over
TCP/IP that do not use <acronym>SSL</acronym>.
</para>
@@ -182,24 +182,24 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
Specifies which database name(s) this record matches. The value
<literal>all</literal> specifies that it matches all databases.
- The value <literal>sameuser</> specifies that the record
+ The value <literal>sameuser</literal> specifies that the record
matches if the requested database has the same name as the
- requested user. The value <literal>samerole</> specifies that
+ requested user. The value <literal>samerole</literal> specifies that
the requested user must be a member of the role with the same
- name as the requested database. (<literal>samegroup</> is an
- obsolete but still accepted spelling of <literal>samerole</>.)
+ name as the requested database. (<literal>samegroup</literal> is an
+ obsolete but still accepted spelling of <literal>samerole</literal>.)
Superusers are not considered to be members of a role for the
- purposes of <literal>samerole</> unless they are explicitly
+ purposes of <literal>samerole</literal> unless they are explicitly
members of the role, directly or indirectly, and not just by
virtue of being a superuser.
- The value <literal>replication</> specifies that the record
+ The value <literal>replication</literal> specifies that the record
matches if a physical replication connection is requested (note that
replication connections do not specify any particular database).
Otherwise, this is the name of
a specific <productname>PostgreSQL</productname> database.
Multiple database names can be supplied by separating them with
commas. A separate file containing database names can be specified by
- preceding the file name with <literal>@</>.
+ preceding the file name with <literal>@</literal>.
</para>
</listitem>
</varlistentry>
@@ -211,18 +211,18 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
Specifies which database user name(s) this record
matches. The value <literal>all</literal> specifies that it
matches all users. Otherwise, this is either the name of a specific
- database user, or a group name preceded by <literal>+</>.
+ database user, or a group name preceded by <literal>+</literal>.
(Recall that there is no real distinction between users and groups
- in <productname>PostgreSQL</>; a <literal>+</> mark really means
+ in <productname>PostgreSQL</productname>; a <literal>+</literal> mark really means
<quote>match any of the roles that are directly or indirectly members
- of this role</>, while a name without a <literal>+</> mark matches
+ of this role</quote>, while a name without a <literal>+</literal> mark matches
only that specific role.) For this purpose, a superuser is only
considered to be a member of a role if they are explicitly a member
of the role, directly or indirectly, and not just by virtue of
being a superuser.
Multiple user names can be supplied by separating them with commas.
A separate file containing user names can be specified by preceding the
- file name with <literal>@</>.
+ file name with <literal>@</literal>.
</para>
</listitem>
</varlistentry>
@@ -239,7 +239,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
An IP address range is specified using standard numeric notation
for the range's starting address, then a slash (<literal>/</literal>)
- and a <acronym>CIDR</> mask length. The mask
+ and a <acronym>CIDR</acronym> mask length. The mask
length indicates the number of high-order bits of the client
IP address that must match. Bits to the right of this should
be zero in the given IP address.
@@ -317,7 +317,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
This field only applies to <literal>host</literal>,
- <literal>hostssl</literal>, and <literal>hostnossl</> records.
+ <literal>hostssl</literal>, and <literal>hostnossl</literal> records.
</para>
<note>
@@ -360,17 +360,17 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<listitem>
<para>
These two fields can be used as an alternative to the
- <replaceable>IP-address</><literal>/</><replaceable>mask-length</>
+ <replaceable>IP-address</replaceable><literal>/</literal><replaceable>mask-length</replaceable>
notation. Instead of
specifying the mask length, the actual mask is specified in a
- separate column. For example, <literal>255.0.0.0</> represents an IPv4
- CIDR mask length of 8, and <literal>255.255.255.255</> represents a
+ separate column. For example, <literal>255.0.0.0</literal> represents an IPv4
+ CIDR mask length of 8, and <literal>255.255.255.255</literal> represents a
CIDR mask length of 32.
</para>
<para>
These fields only apply to <literal>host</literal>,
- <literal>hostssl</literal>, and <literal>hostnossl</> records.
+ <literal>hostssl</literal>, and <literal>hostnossl</literal> records.
</para>
</listitem>
</varlistentry>
@@ -385,7 +385,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<variablelist>
<varlistentry>
- <term><literal>trust</></term>
+ <term><literal>trust</literal></term>
<listitem>
<para>
Allow the connection unconditionally. This method
@@ -399,12 +399,12 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>reject</></term>
+ <term><literal>reject</literal></term>
<listitem>
<para>
Reject the connection unconditionally. This is useful for
- <quote>filtering out</> certain hosts from a group, for example a
- <literal>reject</> line could block a specific host from connecting,
+ <quote>filtering out</quote> certain hosts from a group, for example a
+ <literal>reject</literal> line could block a specific host from connecting,
while a later line allows the remaining hosts in a specific
network to connect.
</para>
@@ -412,7 +412,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>scram-sha-256</></term>
+ <term><literal>scram-sha-256</literal></term>
<listitem>
<para>
Perform SCRAM-SHA-256 authentication to verify the user's
@@ -422,7 +422,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>md5</></term>
+ <term><literal>md5</literal></term>
<listitem>
<para>
Perform SCRAM-SHA-256 or MD5 authentication to verify the
@@ -433,7 +433,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>password</></term>
+ <term><literal>password</literal></term>
<listitem>
<para>
Require the client to supply an unencrypted password for
@@ -446,7 +446,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>gss</></term>
+ <term><literal>gss</literal></term>
<listitem>
<para>
Use GSSAPI to authenticate the user. This is only
@@ -457,7 +457,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>sspi</></term>
+ <term><literal>sspi</literal></term>
<listitem>
<para>
Use SSPI to authenticate the user. This is only
@@ -468,7 +468,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>ident</></term>
+ <term><literal>ident</literal></term>
<listitem>
<para>
Obtain the operating system user name of the client
@@ -483,7 +483,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>peer</></term>
+ <term><literal>peer</literal></term>
<listitem>
<para>
Obtain the client's operating system user name from the operating
@@ -495,17 +495,17 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>ldap</></term>
+ <term><literal>ldap</literal></term>
<listitem>
<para>
- Authenticate using an <acronym>LDAP</> server. See <xref
+ Authenticate using an <acronym>LDAP</acronym> server. See <xref
linkend="auth-ldap"> for details.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>radius</></term>
+ <term><literal>radius</literal></term>
<listitem>
<para>
Authenticate using a RADIUS server. See <xref
@@ -515,7 +515,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>cert</></term>
+ <term><literal>cert</literal></term>
<listitem>
<para>
Authenticate using SSL client certificates. See
@@ -525,7 +525,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>pam</></term>
+ <term><literal>pam</literal></term>
<listitem>
<para>
Authenticate using the Pluggable Authentication Modules
@@ -536,7 +536,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</varlistentry>
<varlistentry>
- <term><literal>bsd</></term>
+ <term><literal>bsd</literal></term>
<listitem>
<para>
Authenticate using the BSD Authentication service provided by the
@@ -554,17 +554,17 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<term><replaceable>auth-options</replaceable></term>
<listitem>
<para>
- After the <replaceable>auth-method</> field, there can be field(s) of
- the form <replaceable>name</><literal>=</><replaceable>value</> that
+ After the <replaceable>auth-method</replaceable> field, there can be field(s) of
+ the form <replaceable>name</replaceable><literal>=</literal><replaceable>value</replaceable> that
specify options for the authentication method. Details about which
options are available for which authentication methods appear below.
</para>
<para>
In addition to the method-specific options listed below, there is one
- method-independent authentication option <literal>clientcert</>, which
- can be specified in any <literal>hostssl</> record. When set
- to <literal>1</>, this option requires the client to present a valid
+ method-independent authentication option <literal>clientcert</literal>, which
+ can be specified in any <literal>hostssl</literal> record. When set
+ to <literal>1</literal>, this option requires the client to present a valid
(trusted) SSL certificate, in addition to the other requirements of the
authentication method.
</para>
@@ -574,11 +574,11 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
</para>
<para>
- Files included by <literal>@</> constructs are read as lists of names,
+ Files included by <literal>@</literal> constructs are read as lists of names,
which can be separated by either whitespace or commas. Comments are
introduced by <literal>#</literal>, just as in
- <filename>pg_hba.conf</filename>, and nested <literal>@</> constructs are
- allowed. Unless the file name following <literal>@</> is an absolute
+ <filename>pg_hba.conf</filename>, and nested <literal>@</literal> constructs are
+ allowed. Unless the file name following <literal>@</literal> is an absolute
path, it is taken to be relative to the directory containing the
referencing file.
</para>
@@ -589,10 +589,10 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
significant. Typically, earlier records will have tight connection
match parameters and weaker authentication methods, while later
records will have looser match parameters and stronger authentication
- methods. For example, one might wish to use <literal>trust</>
+ methods. For example, one might wish to use <literal>trust</literal>
authentication for local TCP/IP connections but require a password for
remote TCP/IP connections. In this case a record specifying
- <literal>trust</> authentication for connections from 127.0.0.1 would
+ <literal>trust</literal> authentication for connections from 127.0.0.1 would
appear before a record specifying password authentication for a wider
range of allowed client IP addresses.
</para>
@@ -603,7 +603,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<systemitem>SIGHUP</systemitem><indexterm><primary>SIGHUP</primary></indexterm>
signal. If you edit the file on an
active system, you will need to signal the postmaster
- (using <literal>pg_ctl reload</> or <literal>kill -HUP</>) to make it
+ (using <literal>pg_ctl reload</literal> or <literal>kill -HUP</literal>) to make it
re-read the file.
</para>
@@ -618,7 +618,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
The system view
<link linkend="view-pg-hba-file-rules"><structname>pg_hba_file_rules</structname></link>
- can be helpful for pre-testing changes to the <filename>pg_hba.conf</>
+ can be helpful for pre-testing changes to the <filename>pg_hba.conf</filename>
file, or for diagnosing problems if loading of the file did not have the
desired effects. Rows in the view with
non-null <structfield>error</structfield> fields indicate problems in the
@@ -629,9 +629,9 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
To connect to a particular database, a user must not only pass the
<filename>pg_hba.conf</filename> checks, but must have the
- <literal>CONNECT</> privilege for the database. If you wish to
+ <literal>CONNECT</literal> privilege for the database. If you wish to
restrict which users can connect to which databases, it's usually
- easier to control this by granting/revoking <literal>CONNECT</> privilege
+ easier to control this by granting/revoking <literal>CONNECT</literal> privilege
than to put the rules in <filename>pg_hba.conf</filename> entries.
</para>
</tip>
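Granting and revoking the privilege looks like this; the database and role names are placeholders:
<programlisting>
REVOKE CONNECT ON DATABASE mydb FROM PUBLIC;
GRANT CONNECT ON DATABASE mydb TO app_user;
</programlisting>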
@@ -760,21 +760,21 @@ local db1,db2,@demodbs all md5
<para>
User name maps are defined in the ident map file, which by default is named
- <filename>pg_ident.conf</><indexterm><primary>pg_ident.conf</primary></indexterm>
+ <filename>pg_ident.conf</filename><indexterm><primary>pg_ident.conf</primary></indexterm>
and is stored in the
cluster's data directory. (It is possible to place the map file
elsewhere, however; see the <xref linkend="guc-ident-file">
configuration parameter.)
The ident map file contains lines of the general form:
<synopsis>
-<replaceable>map-name</> <replaceable>system-username</> <replaceable>database-username</>
+<replaceable>map-name</replaceable> <replaceable>system-username</replaceable> <replaceable>database-username</replaceable>
</synopsis>
Comments and whitespace are handled in the same way as in
- <filename>pg_hba.conf</>. The
- <replaceable>map-name</> is an arbitrary name that will be used to
+ <filename>pg_hba.conf</filename>. The
+ <replaceable>map-name</replaceable> is an arbitrary name that will be used to
refer to this mapping in <filename>pg_hba.conf</filename>. The other
two fields specify an operating system user name and a matching
- database user name. The same <replaceable>map-name</> can be
+ database user name. The same <replaceable>map-name</replaceable> can be
used repeatedly to specify multiple user-mappings within a single map.
</para>
<para>
@@ -788,13 +788,13 @@ local db1,db2,@demodbs all md5
user has requested to connect as.
</para>
<para>
- If the <replaceable>system-username</> field starts with a slash (<literal>/</>),
+ If the <replaceable>system-username</replaceable> field starts with a slash (<literal>/</literal>),
the remainder of the field is treated as a regular expression.
(See <xref linkend="posix-syntax-details"> for details of
- <productname>PostgreSQL</>'s regular expression syntax.) The regular
+ <productname>PostgreSQL</productname>'s regular expression syntax.) The regular
expression can include a single capture, or parenthesized subexpression,
- which can then be referenced in the <replaceable>database-username</>
- field as <literal>\1</> (backslash-one). This allows the mapping of
+ which can then be referenced in the <replaceable>database-username</replaceable>
+ field as <literal>\1</literal> (backslash-one). This allows the mapping of
multiple user names in a single line, which is particularly useful for
simple syntax substitutions. For example, these entries
<programlisting>
@@ -802,14 +802,14 @@ mymap /^(.*)@mydomain\.com$ \1
mymap /^(.*)@otherdomain\.com$ guest
</programlisting>
will remove the domain part for users with system user names that end with
- <literal>@mydomain.com</>, and allow any user whose system name ends with
- <literal>@otherdomain.com</> to log in as <literal>guest</>.
+ <literal>@mydomain.com</literal>, and allow any user whose system name ends with
+ <literal>@otherdomain.com</literal> to log in as <literal>guest</literal>.
</para>
<tip>
<para>
Keep in mind that by default, a regular expression can match just part of
- a string. It's usually wise to use <literal>^</> and <literal>$</>, as
+ a string. It's usually wise to use <literal>^</literal> and <literal>$</literal>, as
shown in the above example, to force the match to be to the entire
system user name.
</para>
@@ -821,28 +821,28 @@ mymap /^(.*)@otherdomain\.com$ guest
<systemitem>SIGHUP</systemitem><indexterm><primary>SIGHUP</primary></indexterm>
signal. If you edit the file on an
active system, you will need to signal the postmaster
- (using <literal>pg_ctl reload</> or <literal>kill -HUP</>) to make it
+ (using <literal>pg_ctl reload</literal> or <literal>kill -HUP</literal>) to make it
re-read the file.
</para>
<para>
A <filename>pg_ident.conf</filename> file that could be used in
- conjunction with the <filename>pg_hba.conf</> file in <xref
+ conjunction with the <filename>pg_hba.conf</filename> file in <xref
linkend="example-pg-hba.conf"> is shown in <xref
linkend="example-pg-ident.conf">. In this example, anyone
logged in to a machine on the 192.168 network that does not have the
- operating system user name <literal>bryanh</>, <literal>ann</>, or
- <literal>robert</> would not be granted access. Unix user
- <literal>robert</> would only be allowed access when he tries to
- connect as <productname>PostgreSQL</> user <literal>bob</>, not
- as <literal>robert</> or anyone else. <literal>ann</> would
- only be allowed to connect as <literal>ann</>. User
- <literal>bryanh</> would be allowed to connect as either
- <literal>bryanh</> or as <literal>guest1</>.
+ operating system user name <literal>bryanh</literal>, <literal>ann</literal>, or
+ <literal>robert</literal> would not be granted access. Unix user
+ <literal>robert</literal> would only be allowed access when he tries to
+ connect as <productname>PostgreSQL</productname> user <literal>bob</literal>, not
+ as <literal>robert</literal> or anyone else. <literal>ann</literal> would
+ only be allowed to connect as <literal>ann</literal>. User
+ <literal>bryanh</literal> would be allowed to connect as either
+ <literal>bryanh</literal> or as <literal>guest1</literal>.
</para>
<example id="example-pg-ident.conf">
- <title>An Example <filename>pg_ident.conf</> File</title>
+ <title>An Example <filename>pg_ident.conf</filename> File</title>
<programlisting>
# MAPNAME SYSTEM-USERNAME PG-USERNAME
@@ -866,21 +866,21 @@ omicron bryanh guest1
<title>Trust Authentication</title>
<para>
- When <literal>trust</> authentication is specified,
+ When <literal>trust</literal> authentication is specified,
<productname>PostgreSQL</productname> assumes that anyone who can
connect to the server is authorized to access the database with
whatever database user name they specify (even superuser names).
- Of course, restrictions made in the <literal>database</> and
- <literal>user</> columns still apply.
+ Of course, restrictions made in the <literal>database</literal> and
+ <literal>user</literal> columns still apply.
This method should only be used when there is adequate
operating-system-level protection on connections to the server.
</para>
<para>
- <literal>trust</> authentication is appropriate and very
+ <literal>trust</literal> authentication is appropriate and very
convenient for local connections on a single-user workstation. It
- is usually <emphasis>not</> appropriate by itself on a multiuser
- machine. However, you might be able to use <literal>trust</> even
+ is usually <emphasis>not</emphasis> appropriate by itself on a multiuser
+ machine. However, you might be able to use <literal>trust</literal> even
on a multiuser machine, if you restrict access to the server's
Unix-domain socket file using file-system permissions. To do this, set the
<varname>unix_socket_permissions</varname> (and possibly
@@ -895,17 +895,17 @@ omicron bryanh guest1
Setting file-system permissions only helps for Unix-socket connections.
Local TCP/IP connections are not restricted by file-system permissions.
Therefore, if you want to use file-system permissions for local security,
- remove the <literal>host ... 127.0.0.1 ...</> line from
- <filename>pg_hba.conf</>, or change it to a
- non-<literal>trust</> authentication method.
+ remove the <literal>host ... 127.0.0.1 ...</literal> line from
+ <filename>pg_hba.conf</filename>, or change it to a
+ non-<literal>trust</literal> authentication method.
</para>
<para>
- <literal>trust</> authentication is only suitable for TCP/IP connections
+ <literal>trust</literal> authentication is only suitable for TCP/IP connections
if you trust every user on every machine that is allowed to connect
- to the server by the <filename>pg_hba.conf</> lines that specify
- <literal>trust</>. It is seldom reasonable to use <literal>trust</>
- for any TCP/IP connections other than those from <systemitem>localhost</> (127.0.0.1).
+ to the server by the <filename>pg_hba.conf</filename> lines that specify
+ <literal>trust</literal>. It is seldom reasonable to use <literal>trust</literal>
+ for any TCP/IP connections other than those from <systemitem>localhost</systemitem> (127.0.0.1).
</para>
</sect2>
@@ -914,10 +914,10 @@ omicron bryanh guest1
<title>Password Authentication</title>
<indexterm>
- <primary>MD5</>
+ <primary>MD5</primary>
</indexterm>
<indexterm>
- <primary>SCRAM</>
+ <primary>SCRAM</primary>
</indexterm>
<indexterm>
<primary>password</primary>
@@ -936,7 +936,7 @@ omicron bryanh guest1
<term><literal>scram-sha-256</literal></term>
<listitem>
<para>
- The method <literal>scram-sha-256</> performs SCRAM-SHA-256
+ The method <literal>scram-sha-256</literal> performs SCRAM-SHA-256
authentication, as described in
<ulink url="https://tools.ietf.org/html/rfc7677">RFC 7677</ulink>. It
is a challenge-response scheme that prevents password sniffing on
@@ -955,7 +955,7 @@ omicron bryanh guest1
<term><literal>md5</literal></term>
<listitem>
<para>
- The method <literal>md5</> uses a custom less secure challenge-response
+ The method <literal>md5</literal> uses a custom less secure challenge-response
mechanism. It prevents password sniffing and avoids storing passwords
on the server in plain text but provides no protection if an attacker
manages to steal the password hash from the server. Also, the MD5 hash
@@ -982,10 +982,10 @@ omicron bryanh guest1
<term><literal>password</literal></term>
<listitem>
<para>
- The method <literal>password</> sends the password in clear-text and is
- therefore vulnerable to password <quote>sniffing</> attacks. It should
+ The method <literal>password</literal> sends the password in clear-text and is
+ therefore vulnerable to password <quote>sniffing</quote> attacks. It should
always be avoided if possible. If the connection is protected by SSL
- encryption then <literal>password</> can be used safely, though.
+ encryption then <literal>password</literal> can be used safely, though.
(Though SSL certificate authentication might be a better choice if one
is depending on using SSL).
</para>
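The server can only perform SCRAM authentication for a role whose password was stored as a SCRAM verifier; a minimal sketch of re-hashing a role's password accordingly (the role name is a placeholder):
<programlisting>
SET password_encryption = 'scram-sha-256';
ALTER ROLE app_user PASSWORD 'new-secret';   -- stored as a SCRAM-SHA-256 verifier
</programlisting>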
@@ -996,7 +996,7 @@ omicron bryanh guest1
<para>
<productname>PostgreSQL</productname> database passwords are
separate from operating system user passwords. The password for
- each database user is stored in the <literal>pg_authid</> system
+ each database user is stored in the <literal>pg_authid</literal> system
catalog. Passwords can be managed with the SQL commands
<xref linkend="sql-createuser"> and
<xref linkend="sql-alterrole">,
@@ -1060,7 +1060,7 @@ omicron bryanh guest1
</para>
<para>
- GSSAPI support has to be enabled when <productname>PostgreSQL</> is built;
+ GSSAPI support has to be enabled when <productname>PostgreSQL</productname> is built;
see <xref linkend="installation"> for more information.
</para>
@@ -1068,13 +1068,13 @@ omicron bryanh guest1
When <productname>GSSAPI</productname> uses
<productname>Kerberos</productname>, it uses a standard principal
in the format
- <literal><replaceable>servicename</>/<replaceable>hostname</>@<replaceable>realm</></literal>.
+ <literal><replaceable>servicename</replaceable>/<replaceable>hostname</replaceable>@<replaceable>realm</replaceable></literal>.
The PostgreSQL server will accept any principal that is included in the keytab used by
the server, but care needs to be taken to specify the correct principal details when
- making the connection from the client using the <literal>krbsrvname</> connection parameter. (See
+ making the connection from the client using the <literal>krbsrvname</literal> connection parameter. (See
also <xref linkend="libpq-paramkeywords">.) The installation default can be
changed from the default <literal>postgres</literal> at build time using
- <literal>./configure --with-krb-srvnam=</><replaceable>whatever</>.
+ <literal>./configure --with-krb-srvnam=</literal><replaceable>whatever</replaceable>.
In most environments,
this parameter never needs to be changed.
Some Kerberos implementations might require a different service name,
@@ -1082,31 +1082,31 @@ omicron bryanh guest1
to be in upper case (<literal>POSTGRES</literal>).
</para>
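For example, a client connecting to a server built with a non-default service name could pass it explicitly through libpq's krbsrvname connection parameter (the host name below is a placeholder):

    psql "host=db.example.org dbname=postgres krbsrvname=POSTGRES"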
<para>
- <replaceable>hostname</> is the fully qualified host name of the
+ <replaceable>hostname</replaceable> is the fully qualified host name of the
server machine. The service principal's realm is the preferred realm
of the server machine.
</para>
<para>
- Client principals can be mapped to different <productname>PostgreSQL</>
- database user names with <filename>pg_ident.conf</>. For example,
- <literal>pgusername@realm</> could be mapped to just <literal>pgusername</>.
- Alternatively, you can use the full <literal>username@realm</> principal as
- the role name in <productname>PostgreSQL</> without any mapping.
+ Client principals can be mapped to different <productname>PostgreSQL</productname>
+ database user names with <filename>pg_ident.conf</filename>. For example,
+ <literal>pgusername@realm</literal> could be mapped to just <literal>pgusername</literal>.
+ Alternatively, you can use the full <literal>username@realm</literal> principal as
+ the role name in <productname>PostgreSQL</productname> without any mapping.
</para>
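A minimal sketch of such a mapping, assuming a realm of EXAMPLE.ORG: a regular-expression entry in pg_ident.conf strips the realm, and a pg_hba.conf entry references the map.

    # pg_ident.conf
    # MAPNAME   SYSTEM-USERNAME          PG-USERNAME
    krbmap      /^(.*)@EXAMPLE\.ORG$     \1

    # pg_hba.conf
    host  all  all  0.0.0.0/0  gss  map=krbmap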
<para>
- <productname>PostgreSQL</> also supports a parameter to strip the realm from
+ <productname>PostgreSQL</productname> also supports a parameter to strip the realm from
the principal. This method is supported for backwards compatibility and is
strongly discouraged as it is then impossible to distinguish different users
with the same user name but coming from different realms. To enable this,
- set <literal>include_realm</> to 0. For simple single-realm
+ set <literal>include_realm</literal> to 0. For simple single-realm
installations, doing that combined with setting the
- <literal>krb_realm</> parameter (which checks that the principal's realm
+ <literal>krb_realm</literal> parameter (which checks that the principal's realm
matches exactly what is in the <literal>krb_realm</literal> parameter)
is still secure; but this is a
less capable approach compared to specifying an explicit mapping in
- <filename>pg_ident.conf</>.
+ <filename>pg_ident.conf</filename>.
</para>
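For a single-realm installation that nevertheless takes this discouraged shortcut, the corresponding pg_hba.conf entry might look like the following sketch (realm and address are placeholders):

    host  all  all  192.168.1.0/24  gss  include_realm=0 krb_realm=EXAMPLE.ORG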
<para>
@@ -1116,8 +1116,8 @@ omicron bryanh guest1
of the key file is specified by the <xref
linkend="guc-krb-server-keyfile"> configuration
parameter. The default is
- <filename>/usr/local/pgsql/etc/krb5.keytab</> (or whatever
- directory was specified as <varname>sysconfdir</> at build time).
+ <filename>/usr/local/pgsql/etc/krb5.keytab</filename> (or whatever
+ directory was specified as <varname>sysconfdir</varname> at build time).
For security reasons, it is recommended to use a separate keytab
just for the <productname>PostgreSQL</productname> server rather
than opening up permissions on the system keytab file.
@@ -1127,17 +1127,17 @@ omicron bryanh guest1
Kerberos documentation for details. The following example is
for MIT-compatible Kerberos 5 implementations:
<screen>
-<prompt>kadmin% </><userinput>ank -randkey postgres/server.my.domain.org</>
-<prompt>kadmin% </><userinput>ktadd -k krb5.keytab postgres/server.my.domain.org</>
+<prompt>kadmin% </prompt><userinput>ank -randkey postgres/server.my.domain.org</userinput>
+<prompt>kadmin% </prompt><userinput>ktadd -k krb5.keytab postgres/server.my.domain.org</userinput>
</screen>
</para>
<para>
When connecting to the database make sure you have a ticket for a
principal matching the requested database user name. For example, for
- database user name <literal>fred</>, principal
- <literal>[email protected]</> would be able to connect. To also allow
- principal <literal>fred/[email protected]</>, use a user name
+ database user name <literal>fred</literal>, principal
+ <literal>[email protected]</literal> would be able to connect. To also allow
+ principal <literal>fred/[email protected]</literal>, use a user name
map, as described in <xref linkend="auth-username-maps">.
</para>
@@ -1155,8 +1155,8 @@ omicron bryanh guest1
in multi-realm environments unless <literal>krb_realm</literal> is
also used. It is recommended to
leave <literal>include_realm</literal> set to the default (1) and to
- provide an explicit mapping in <filename>pg_ident.conf</> to convert
- principal names to <productname>PostgreSQL</> user names.
+ provide an explicit mapping in <filename>pg_ident.conf</filename> to convert
+ principal names to <productname>PostgreSQL</productname> user names.
</para>
</listitem>
</varlistentry>
@@ -1236,8 +1236,8 @@ omicron bryanh guest1
in multi-realm environments unless <literal>krb_realm</literal> is
also used. It is recommended to
leave <literal>include_realm</literal> set to the default (1) and to
- provide an explicit mapping in <filename>pg_ident.conf</> to convert
- principal names to <productname>PostgreSQL</> user names.
+ provide an explicit mapping in <filename>pg_ident.conf</filename> to convert
+ principal names to <productname>PostgreSQL</productname> user names.
</para>
</listitem>
</varlistentry>
@@ -1270,9 +1270,9 @@ omicron bryanh guest1
By default, these two names are identical for new user accounts.
</para>
<para>
- Note that <application>libpq</> uses the SAM-compatible name if no
+ Note that <application>libpq</application> uses the SAM-compatible name if no
explicit user name is specified. If you use
- <application>libpq</> or a driver based on it, you should
+ <application>libpq</application> or a driver based on it, you should
leave this option disabled or explicitly specify user name in the
connection string.
</para>
@@ -1357,8 +1357,8 @@ omicron bryanh guest1
is to answer questions like <quote>What user initiated the
connection that goes out of your port <replaceable>X</replaceable>
and connects to my port <replaceable>Y</replaceable>?</quote>.
- Since <productname>PostgreSQL</> knows both <replaceable>X</> and
- <replaceable>Y</> when a physical connection is established, it
+ Since <productname>PostgreSQL</productname> knows both <replaceable>X</replaceable> and
+ <replaceable>Y</replaceable> when a physical connection is established, it
can interrogate the ident server on the host of the connecting
client and can theoretically determine the operating system user
for any given connection.
@@ -1386,9 +1386,9 @@ omicron bryanh guest1
<para>
Some ident servers have a nonstandard option that causes the returned
user name to be encrypted, using a key that only the originating
- machine's administrator knows. This option <emphasis>must not</> be
- used when using the ident server with <productname>PostgreSQL</>,
- since <productname>PostgreSQL</> does not have any way to decrypt the
+ machine's administrator knows. This option <emphasis>must not</emphasis> be
+ used when using the ident server with <productname>PostgreSQL</productname>,
+ since <productname>PostgreSQL</productname> does not have any way to decrypt the
returned string to determine the actual user name.
</para>
</sect2>
@@ -1424,11 +1424,11 @@ omicron bryanh guest1
<para>
Peer authentication is only available on operating systems providing
- the <function>getpeereid()</> function, the <symbol>SO_PEERCRED</symbol>
+ the <function>getpeereid()</function> function, the <symbol>SO_PEERCRED</symbol>
socket parameter, or similar mechanisms. Currently that includes
- <systemitem class="osname">Linux</>,
- most flavors of <systemitem class="osname">BSD</> including
- <systemitem class="osname">macOS</>,
+ <systemitem class="osname">Linux</systemitem>,
+ most flavors of <systemitem class="osname">BSD</systemitem> including
+ <systemitem class="osname">macOS</systemitem>,
and <systemitem class="osname">Solaris</systemitem>.
</para>
@@ -1454,23 +1454,23 @@ omicron bryanh guest1
LDAP authentication can operate in two modes. In the first mode,
which we will call the simple bind mode,
the server will bind to the distinguished name constructed as
- <replaceable>prefix</> <replaceable>username</> <replaceable>suffix</>.
- Typically, the <replaceable>prefix</> parameter is used to specify
- <literal>cn=</>, or <replaceable>DOMAIN</><literal>\</> in an Active
- Directory environment. <replaceable>suffix</> is used to specify the
+ <replaceable>prefix</replaceable> <replaceable>username</replaceable> <replaceable>suffix</replaceable>.
+ Typically, the <replaceable>prefix</replaceable> parameter is used to specify
+ <literal>cn=</literal>, or <replaceable>DOMAIN</replaceable><literal>\</literal> in an Active
+ Directory environment. <replaceable>suffix</replaceable> is used to specify the
remaining part of the DN in a non-Active Directory environment.
</para>
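In simple bind mode the pg_hba.conf entry might therefore look like this, binding as cn=<username>, dc=example, dc=net (server name and DN components are placeholders):

    host  all  all  192.168.1.0/24  ldap  ldapserver=ldap.example.net ldapprefix="cn=" ldapsuffix=", dc=example, dc=net"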
<para>
In the second mode, which we will call the search+bind mode,
the server first binds to the LDAP directory with
- a fixed user name and password, specified with <replaceable>ldapbinddn</>
- and <replaceable>ldapbindpasswd</>, and performs a search for the user trying
+ a fixed user name and password, specified with <replaceable>ldapbinddn</replaceable>
+ and <replaceable>ldapbindpasswd</replaceable>, and performs a search for the user trying
to log in to the database. If no user and password is configured, an
anonymous bind will be attempted to the directory. The search will be
- performed over the subtree at <replaceable>ldapbasedn</>, and will try to
+ performed over the subtree at <replaceable>ldapbasedn</replaceable>, and will try to
do an exact match of the attribute specified in
- <replaceable>ldapsearchattribute</>.
+ <replaceable>ldapsearchattribute</replaceable>.
Once the user has been found in
this search, the server disconnects and re-binds to the directory as
this user, using the password specified by the client, to verify that the
@@ -1572,7 +1572,7 @@ omicron bryanh guest1
<para>
Attribute to match against the user name in the search when doing
search+bind authentication. If no attribute is specified, the
- <literal>uid</> attribute will be used.
+ <literal>uid</literal> attribute will be used.
</para>
</listitem>
</varlistentry>
@@ -1719,11 +1719,11 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
When using RADIUS authentication, an Access Request message will be sent
to the configured RADIUS server. This request will be of type
<literal>Authenticate Only</literal>, and include parameters for
- <literal>user name</>, <literal>password</> (encrypted) and
- <literal>NAS Identifier</>. The request will be encrypted using
+ <literal>user name</literal>, <literal>password</literal> (encrypted) and
+ <literal>NAS Identifier</literal>. The request will be encrypted using
a secret shared with the server. The RADIUS server will respond to
- this server with either <literal>Access Accept</> or
- <literal>Access Reject</>. There is no support for RADIUS accounting.
+ this server with either <literal>Access Accept</literal> or
+ <literal>Access Reject</literal>. There is no support for RADIUS accounting.
</para>
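A hypothetical pg_hba.conf entry using this method, with the RADIUS server name and shared secret as placeholders:

    host  all  all  192.168.1.0/24  radius  radiusservers=radius.example.net radiussecrets="sharedsecret"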
<para>
@@ -1762,8 +1762,8 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
<note>
<para>
The encryption vector used will only be cryptographically
- strong if <productname>PostgreSQL</> is built with support for
- <productname>OpenSSL</>. In other cases, the transmission to the
+ strong if <productname>PostgreSQL</productname> is built with support for
+ <productname>OpenSSL</productname>. In other cases, the transmission to the
RADIUS server should only be considered obfuscated, not secured, and
external security measures should be applied if necessary.
</para>
@@ -1777,7 +1777,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
<listitem>
<para>
The port number on the RADIUS servers to connect to. If no port
- is specified, the default port <literal>1812</> will be used.
+ is specified, the default port <literal>1812</literal> will be used.
</para>
</listitem>
</varlistentry>
@@ -1786,12 +1786,12 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
<term><literal>radiusidentifiers</literal></term>
<listitem>
<para>
- The string used as <literal>NAS Identifier</> in the RADIUS
+ The string used as <literal>NAS Identifier</literal> in the RADIUS
requests. This parameter can be used as a second parameter
identifying for example which database user the user is attempting
to authenticate as, which can be used for policy matching on
the RADIUS server. If no identifier is specified, the default
- <literal>postgresql</> will be used.
+ <literal>postgresql</literal> will be used.
</para>
</listitem>
</varlistentry>
@@ -1836,11 +1836,11 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
</para>
<para>
- In a <filename>pg_hba.conf</> record specifying certificate
- authentication, the authentication option <literal>clientcert</> is
- assumed to be <literal>1</>, and it cannot be turned off since a client
- certificate is necessary for this method. What the <literal>cert</>
- method adds to the basic <literal>clientcert</> certificate validity test
+ In a <filename>pg_hba.conf</filename> record specifying certificate
+ authentication, the authentication option <literal>clientcert</literal> is
+ assumed to be <literal>1</literal>, and it cannot be turned off since a client
+ certificate is necessary for this method. What the <literal>cert</literal>
+ method adds to the basic <literal>clientcert</literal> certificate validity test
is a check that the <literal>cn</literal> attribute matches the database
user name.
</para>
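Since a client certificate is required, cert entries only make sense on SSL connections; a minimal sketch of such a line:

    hostssl  all  all  0.0.0.0/0  cert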
@@ -1863,7 +1863,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
exist in the database before PAM can be used for authentication. For more
information about PAM, please read the
<ulink url="http://www.kernel.org/pub/linux/libs/pam/">
- <productname>Linux-PAM</> Page</ulink>.
+ <productname>Linux-PAM</productname> Page</ulink>.
</para>
<para>
@@ -1896,7 +1896,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
<note>
<para>
- If PAM is set up to read <filename>/etc/shadow</>, authentication
+ If PAM is set up to read <filename>/etc/shadow</filename>, authentication
will fail because the PostgreSQL server is started by a non-root
user. However, this is not an issue when PAM is configured to use
LDAP or other authentication methods.
@@ -1922,11 +1922,11 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
</para>
<para>
- BSD Authentication in <productname>PostgreSQL</> uses
+ BSD Authentication in <productname>PostgreSQL</productname> uses
the <literal>auth-postgresql</literal> login type and authenticates with
the <literal>postgresql</literal> login class if that's defined
in <filename>login.conf</filename>. By default that login class does not
- exist, and <productname>PostgreSQL</> will use the default login class.
+ exist, and <productname>PostgreSQL</productname> will use the default login class.
</para>
<note>
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index b012a26991..aeda826d87 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -70,9 +70,9 @@
(typically eight kilobytes), milliseconds, seconds, or minutes.
An unadorned numeric value for one of these settings will use the
setting's default unit, which can be learned from
- <structname>pg_settings</>.<structfield>unit</>.
+ <structname>pg_settings</structname>.<structfield>unit</structfield>.
For convenience, settings can be given with a unit specified explicitly,
- for example <literal>'120 ms'</> for a time value, and they will be
+ for example <literal>'120 ms'</literal> for a time value, and they will be
converted to whatever the parameter's actual unit is. Note that the
value must be written as a string (with quotes) to use this feature.
The unit name is case-sensitive, and there can be whitespace between
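For instance, in a psql session the default unit of a parameter can be looked up and an explicit unit supplied when setting it:

    SELECT name, setting, unit FROM pg_settings WHERE name = 'statement_timeout';
    SET statement_timeout = '120 ms';   -- quoted string; converted to the parameter's own unit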
@@ -105,7 +105,7 @@
Enumerated-type parameters are written in the same way as string
parameters, but are restricted to have one of a limited set of
values. The values allowable for such a parameter can be found from
- <structname>pg_settings</>.<structfield>enumvals</>.
+ <structname>pg_settings</structname>.<structfield>enumvals</structfield>.
Enum parameter values are case-insensitive.
</para>
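For example, the allowed values of an enumerated parameter such as wal_level can be listed with:

    SELECT name, enumvals FROM pg_settings WHERE name = 'wal_level';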
</listitem>
@@ -117,7 +117,7 @@
<para>
The most fundamental way to set these parameters is to edit the file
- <filename>postgresql.conf</><indexterm><primary>postgresql.conf</></>,
+ <filename>postgresql.conf</filename><indexterm><primary>postgresql.conf</primary></indexterm>,
which is normally kept in the data directory. A default copy is
installed when the database cluster directory is initialized.
An example of what this file might look like is:
@@ -150,8 +150,8 @@ shared_buffers = 128MB
<primary>SIGHUP</primary>
</indexterm>
The configuration file is reread whenever the main server process
- receives a <systemitem>SIGHUP</> signal; this signal is most easily
- sent by running <literal>pg_ctl reload</> from the command line or by
+ receives a <systemitem>SIGHUP</systemitem> signal; this signal is most easily
+ sent by running <literal>pg_ctl reload</literal> from the command line or by
calling the SQL function <function>pg_reload_conf()</function>. The main
server process also propagates this signal to all currently running
server processes, so that existing sessions also adopt the new values
@@ -161,26 +161,26 @@ shared_buffers = 128MB
can only be set at server start; any changes to their entries in the
configuration file will be ignored until the server is restarted.
Invalid parameter settings in the configuration file are likewise
- ignored (but logged) during <systemitem>SIGHUP</> processing.
+ ignored (but logged) during <systemitem>SIGHUP</systemitem> processing.
</para>
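Either of the following is enough to trigger such a reload (the data directory path is illustrative):

    pg_ctl reload -D /usr/local/pgsql/data
    psql -c "SELECT pg_reload_conf();"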
<para>
- In addition to <filename>postgresql.conf</>,
+ In addition to <filename>postgresql.conf</filename>,
a <productname>PostgreSQL</productname> data directory contains a file
- <filename>postgresql.auto.conf</><indexterm><primary>postgresql.auto.conf</></>,
- which has the same format as <filename>postgresql.conf</> but should
+ <filename>postgresql.auto.conf</filename><indexterm><primary>postgresql.auto.conf</primary></indexterm>,
+ which has the same format as <filename>postgresql.conf</filename> but should
never be edited manually. This file holds settings provided through
the <xref linkend="SQL-ALTERSYSTEM"> command. This file is automatically
- read whenever <filename>postgresql.conf</> is, and its settings take
- effect in the same way. Settings in <filename>postgresql.auto.conf</>
- override those in <filename>postgresql.conf</>.
+ read whenever <filename>postgresql.conf</filename> is, and its settings take
+ effect in the same way. Settings in <filename>postgresql.auto.conf</filename>
+ override those in <filename>postgresql.conf</filename>.
</para>
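A short sketch of the round trip through postgresql.auto.conf:

    ALTER SYSTEM SET work_mem = '64MB';   -- written to postgresql.auto.conf
    SELECT pg_reload_conf();              -- re-read the configuration files
    ALTER SYSTEM RESET work_mem;          -- remove the override again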
<para>
The system view
<link linkend="view-pg-file-settings"><structname>pg_file_settings</structname></link>
can be helpful for pre-testing changes to the configuration file, or for
- diagnosing problems if a <systemitem>SIGHUP</> signal did not have the
+ diagnosing problems if a <systemitem>SIGHUP</systemitem> signal did not have the
desired effects.
</para>
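For example, a quick check for file entries that did not take effect:

    SELECT sourcefile, sourceline, name, setting, applied, error
      FROM pg_file_settings
     WHERE NOT applied OR error IS NOT NULL;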
</sect2>
@@ -193,7 +193,7 @@ shared_buffers = 128MB
commands to establish configuration defaults.
The already-mentioned <xref linkend="SQL-ALTERSYSTEM"> command
provides a SQL-accessible means of changing global defaults; it is
- functionally equivalent to editing <filename>postgresql.conf</>.
+ functionally equivalent to editing <filename>postgresql.conf</filename>.
In addition, there are two commands that allow setting of defaults
on a per-database or per-role basis:
</para>
@@ -215,7 +215,7 @@ shared_buffers = 128MB
</itemizedlist>
<para>
- Values set with <command>ALTER DATABASE</> and <command>ALTER ROLE</>
+ Values set with <command>ALTER DATABASE</command> and <command>ALTER ROLE</command>
are applied only when starting a fresh database session. They
override values obtained from the configuration files or server
command line, and constitute defaults for the rest of the session.
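For example (database and role names are hypothetical):

    ALTER DATABASE analytics SET work_mem = '64MB';
    ALTER ROLE reporting IN DATABASE analytics SET statement_timeout = '5min';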
@@ -224,7 +224,7 @@ shared_buffers = 128MB
</para>
<para>
- Once a client is connected to the database, <productname>PostgreSQL</>
+ Once a client is connected to the database, <productname>PostgreSQL</productname>
provides two additional SQL commands (and equivalent functions) to
interact with session-local configuration settings:
</para>
@@ -251,14 +251,14 @@ shared_buffers = 128MB
<para>
In addition, the system view <link
- linkend="view-pg-settings"><structname>pg_settings</></> can be
+ linkend="view-pg-settings"><structname>pg_settings</structname></link> can be
used to view and change session-local values:
</para>
<itemizedlist>
<listitem>
<para>
- Querying this view is similar to using <command>SHOW ALL</> but
+ Querying this view is similar to using <command>SHOW ALL</command> but
provides more detail. It is also more flexible, since it's possible
to specify filter conditions or join against other relations.
</para>
@@ -267,8 +267,8 @@ shared_buffers = 128MB
<listitem>
<para>
Using <xref linkend="SQL-UPDATE"> on this view, specifically
- updating the <structname>setting</> column, is the equivalent
- of issuing <command>SET</> commands. For example, the equivalent of
+ updating the <structname>setting</structname> column, is the equivalent
+ of issuing <command>SET</command> commands. For example, the equivalent of
<programlisting>
SET configuration_parameter TO DEFAULT;
</programlisting>
@@ -289,7 +289,7 @@ UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter
In addition to setting global defaults or attaching
overrides at the database or role level, you can pass settings to
<productname>PostgreSQL</productname> via shell facilities.
- Both the server and <application>libpq</> client library
+ Both the server and <application>libpq</application> client library
accept parameter values via the shell.
</para>
@@ -298,26 +298,26 @@ UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter
<para>
During server startup, parameter settings can be
passed to the <command>postgres</command> command via the
- <option>-c</> command-line parameter. For example,
+ <option>-c</option> command-line parameter. For example,
<programlisting>
postgres -c log_connections=yes -c log_destination='syslog'
</programlisting>
Settings provided in this way override those set via
- <filename>postgresql.conf</> or <command>ALTER SYSTEM</>,
+ <filename>postgresql.conf</filename> or <command>ALTER SYSTEM</command>,
so they cannot be changed globally without restarting the server.
</para>
</listitem>
<listitem>
<para>
- When starting a client session via <application>libpq</>,
+ When starting a client session via <application>libpq</application>,
parameter settings can be
specified using the <envar>PGOPTIONS</envar> environment variable.
Settings established in this way constitute defaults for the life
of the session, but do not affect other sessions.
For historical reasons, the format of <envar>PGOPTIONS</envar> is
similar to that used when launching the <command>postgres</command>
- command; specifically, the <option>-c</> flag must be specified.
+ command; specifically, the <option>-c</option> flag must be specified.
For example,
<programlisting>
env PGOPTIONS="-c geqo=off -c statement_timeout=5min" psql
@@ -338,20 +338,20 @@ env PGOPTIONS="-c geqo=off -c statement_timeout=5min" psql
<title>Managing Configuration File Contents</title>
<para>
- <productname>PostgreSQL</> provides several features for breaking
- down complex <filename>postgresql.conf</> files into sub-files.
+ <productname>PostgreSQL</productname> provides several features for breaking
+ down complex <filename>postgresql.conf</filename> files into sub-files.
These features are especially useful when managing multiple servers
with related, but not identical, configurations.
</para>
<para>
<indexterm>
- <primary><literal>include</></primary>
+ <primary><literal>include</literal></primary>
<secondary>in configuration file</secondary>
</indexterm>
In addition to individual parameter settings,
- the <filename>postgresql.conf</> file can contain <firstterm>include
- directives</>, which specify another file to read and process as if
+ the <filename>postgresql.conf</filename> file can contain <firstterm>include
+ directives</firstterm>, which specify another file to read and process as if
it were inserted into the configuration file at this point. This
feature allows a configuration file to be divided into physically
separate parts. Include directives simply look like:
@@ -365,23 +365,23 @@ include 'filename'
<para>
<indexterm>
- <primary><literal>include_if_exists</></primary>
+ <primary><literal>include_if_exists</literal></primary>
<secondary>in configuration file</secondary>
</indexterm>
- There is also an <literal>include_if_exists</> directive, which acts
- the same as the <literal>include</> directive, except
+ There is also an <literal>include_if_exists</literal> directive, which acts
+ the same as the <literal>include</literal> directive, except
when the referenced file does not exist or cannot be read. A regular
- <literal>include</> will consider this an error condition, but
- <literal>include_if_exists</> merely logs a message and continues
+ <literal>include</literal> will consider this an error condition, but
+ <literal>include_if_exists</literal> merely logs a message and continues
processing the referencing configuration file.
</para>
<para>
<indexterm>
- <primary><literal>include_dir</></primary>
+ <primary><literal>include_dir</literal></primary>
<secondary>in configuration file</secondary>
</indexterm>
- The <filename>postgresql.conf</> file can also contain
+ The <filename>postgresql.conf</filename> file can also contain
<literal>include_dir</literal> directives, which specify an entire
directory of configuration files to include. These look like
<programlisting>
@@ -401,36 +401,36 @@ include_dir 'directory'
<para>
Include files or directories can be used to logically separate portions
of the database configuration, rather than having a single large
- <filename>postgresql.conf</> file. Consider a company that has two
+ <filename>postgresql.conf</filename> file. Consider a company that has two
database servers, each with a different amount of memory. There are
likely elements of the configuration both will share, for things such
as logging. But memory-related parameters on the server will vary
between the two. And there might be server specific customizations,
too. One way to manage this situation is to break the custom
configuration changes for your site into three files. You could add
- this to the end of your <filename>postgresql.conf</> file to include
+ this to the end of your <filename>postgresql.conf</filename> file to include
them:
<programlisting>
include 'shared.conf'
include 'memory.conf'
include 'server.conf'
</programlisting>
- All systems would have the same <filename>shared.conf</>. Each
+ All systems would have the same <filename>shared.conf</filename>. Each
server with a particular amount of memory could share the
- same <filename>memory.conf</>; you might have one for all servers
+ same <filename>memory.conf</filename>; you might have one for all servers
with 8GB of RAM, another for those having 16GB. And
- finally <filename>server.conf</> could have truly server-specific
+ finally <filename>server.conf</filename> could have truly server-specific
configuration information in it.
</para>
<para>
Another possibility is to create a configuration file directory and
- put this information into files there. For example, a <filename>conf.d</>
- directory could be referenced at the end of <filename>postgresql.conf</>:
+ put this information into files there. For example, a <filename>conf.d</filename>
+ directory could be referenced at the end of <filename>postgresql.conf</filename>:
<programlisting>
include_dir 'conf.d'
</programlisting>
- Then you could name the files in the <filename>conf.d</> directory
+ Then you could name the files in the <filename>conf.d</filename> directory
like this:
<programlisting>
00shared.conf
@@ -441,8 +441,8 @@ include_dir 'conf.d'
files will be loaded. This is important because only the last
setting encountered for a particular parameter while the server is
reading configuration files will be used. In this example,
- something set in <filename>conf.d/02server.conf</> would override a
- value set in <filename>conf.d/01memory.conf</>.
+ something set in <filename>conf.d/02server.conf</filename> would override a
+ value set in <filename>conf.d/01memory.conf</filename>.
</para>
<para>
@@ -483,7 +483,7 @@ include_dir 'conf.d'
<varlistentry id="guc-data-directory" xreflabel="data_directory">
<term><varname>data_directory</varname> (<type>string</type>)
<indexterm>
- <primary><varname>data_directory</> configuration parameter</primary>
+ <primary><varname>data_directory</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -497,13 +497,13 @@ include_dir 'conf.d'
<varlistentry id="guc-config-file" xreflabel="config_file">
<term><varname>config_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>config_file</> configuration parameter</primary>
+ <primary><varname>config_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the main server configuration file
- (customarily called <filename>postgresql.conf</>).
+ (customarily called <filename>postgresql.conf</filename>).
This parameter can only be set on the <command>postgres</command> command line.
</para>
</listitem>
@@ -512,13 +512,13 @@ include_dir 'conf.d'
<varlistentry id="guc-hba-file" xreflabel="hba_file">
<term><varname>hba_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>hba_file</> configuration parameter</primary>
+ <primary><varname>hba_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the configuration file for host-based authentication
- (customarily called <filename>pg_hba.conf</>).
+ (customarily called <filename>pg_hba.conf</filename>).
This parameter can only be set at server start.
</para>
</listitem>
@@ -527,13 +527,13 @@ include_dir 'conf.d'
<varlistentry id="guc-ident-file" xreflabel="ident_file">
<term><varname>ident_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>ident_file</> configuration parameter</primary>
+ <primary><varname>ident_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the configuration file for user name mapping
- (customarily called <filename>pg_ident.conf</>).
+ (customarily called <filename>pg_ident.conf</filename>).
This parameter can only be set at server start.
See also <xref linkend="auth-username-maps">.
</para>
@@ -543,7 +543,7 @@ include_dir 'conf.d'
<varlistentry id="guc-external-pid-file" xreflabel="external_pid_file">
<term><varname>external_pid_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>external_pid_file</> configuration parameter</primary>
+ <primary><varname>external_pid_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -569,10 +569,10 @@ include_dir 'conf.d'
data directory, the <command>postgres</command> <option>-D</option>
command-line option or <envar>PGDATA</envar> environment variable
must point to the directory containing the configuration files,
- and the <varname>data_directory</> parameter must be set in
+ and the <varname>data_directory</varname> parameter must be set in
<filename>postgresql.conf</filename> (or on the command line) to show
where the data directory is actually located. Notice that
- <varname>data_directory</> overrides <option>-D</option> and
+ <varname>data_directory</varname> overrides <option>-D</option> and
<envar>PGDATA</envar> for the location
of the data directory, but not for the location of the configuration
files.
@@ -580,12 +580,12 @@ include_dir 'conf.d'
<para>
If you wish, you can specify the configuration file names and locations
- individually using the parameters <varname>config_file</>,
- <varname>hba_file</> and/or <varname>ident_file</>.
- <varname>config_file</> can only be specified on the
+ individually using the parameters <varname>config_file</varname>,
+ <varname>hba_file</varname> and/or <varname>ident_file</varname>.
+ <varname>config_file</varname> can only be specified on the
<command>postgres</command> command line, but the others can be
set within the main configuration file. If all three parameters plus
- <varname>data_directory</> are explicitly set, then it is not necessary
+ <varname>data_directory</varname> are explicitly set, then it is not necessary
to specify <option>-D</option> or <envar>PGDATA</envar>.
</para>
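A sketch of such a split setup, with the configuration files kept under /etc and only the data directory elsewhere (all paths are placeholders):

    # /etc/postgresql/postgresql.conf
    data_directory = '/var/lib/pgsql/data'
    hba_file       = '/etc/postgresql/pg_hba.conf'
    ident_file     = '/etc/postgresql/pg_ident.conf'

    # start the server pointing at that configuration file
    postgres -c config_file=/etc/postgresql/postgresql.conf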
@@ -607,7 +607,7 @@ include_dir 'conf.d'
<varlistentry id="guc-listen-addresses" xreflabel="listen_addresses">
<term><varname>listen_addresses</varname> (<type>string</type>)
<indexterm>
- <primary><varname>listen_addresses</> configuration parameter</primary>
+ <primary><varname>listen_addresses</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -615,15 +615,15 @@ include_dir 'conf.d'
Specifies the TCP/IP address(es) on which the server is
to listen for connections from client applications.
The value takes the form of a comma-separated list of host names
- and/or numeric IP addresses. The special entry <literal>*</>
+ and/or numeric IP addresses. The special entry <literal>*</literal>
corresponds to all available IP interfaces. The entry
- <literal>0.0.0.0</> allows listening for all IPv4 addresses and
- <literal>::</> allows listening for all IPv6 addresses.
+ <literal>0.0.0.0</literal> allows listening for all IPv4 addresses and
+ <literal>::</literal> allows listening for all IPv6 addresses.
If the list is empty, the server does not listen on any IP interface
at all, in which case only Unix-domain sockets can be used to connect
to it.
- The default value is <systemitem class="systemname">localhost</>,
- which allows only local TCP/IP <quote>loopback</> connections to be
+ The default value is <systemitem class="systemname">localhost</systemitem>,
+ which allows only local TCP/IP <quote>loopback</quote> connections to be
made. While client authentication (<xref
linkend="client-authentication">) allows fine-grained control
over who can access the server, <varname>listen_addresses</varname>
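For example, to accept TCP/IP connections on the loopback interface and one specific address (the address is a placeholder):

    listen_addresses = 'localhost, 192.168.1.10'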
@@ -638,7 +638,7 @@ include_dir 'conf.d'
<varlistentry id="guc-port" xreflabel="port">
<term><varname>port</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>port</> configuration parameter</primary>
+ <primary><varname>port</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -653,7 +653,7 @@ include_dir 'conf.d'
<varlistentry id="guc-max-connections" xreflabel="max_connections">
<term><varname>max_connections</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_connections</> configuration parameter</primary>
+ <primary><varname>max_connections</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -661,7 +661,7 @@ include_dir 'conf.d'
Determines the maximum number of concurrent connections to the
database server. The default is typically 100 connections, but
might be less if your kernel settings will not support it (as
- determined during <application>initdb</>). This parameter can
+ determined during <application>initdb</application>). This parameter can
only be set at server start.
</para>
@@ -678,17 +678,17 @@ include_dir 'conf.d'
<term><varname>superuser_reserved_connections</varname>
(<type>integer</type>)
<indexterm>
- <primary><varname>superuser_reserved_connections</> configuration parameter</primary>
+ <primary><varname>superuser_reserved_connections</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Determines the number of connection <quote>slots</quote> that
- are reserved for connections by <productname>PostgreSQL</>
+ are reserved for connections by <productname>PostgreSQL</productname>
superusers. At most <xref linkend="guc-max-connections">
connections can ever be active simultaneously. Whenever the
number of active concurrent connections is at least
- <varname>max_connections</> minus
+ <varname>max_connections</varname> minus
<varname>superuser_reserved_connections</varname>, new
connections will be accepted only for superusers, and no
new replication connections will be accepted.
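With the defaults, the arithmetic looks like this:

    max_connections = 100                 # typical default
    superuser_reserved_connections = 3    # default; ordinary users get at most 97 concurrent connections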
@@ -705,7 +705,7 @@ include_dir 'conf.d'
<varlistentry id="guc-unix-socket-directories" xreflabel="unix_socket_directories">
<term><varname>unix_socket_directories</varname> (<type>string</type>)
<indexterm>
- <primary><varname>unix_socket_directories</> configuration parameter</primary>
+ <primary><varname>unix_socket_directories</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -726,10 +726,10 @@ include_dir 'conf.d'
<para>
In addition to the socket file itself, which is named
- <literal>.s.PGSQL.<replaceable>nnnn</></literal> where
- <replaceable>nnnn</> is the server's port number, an ordinary file
- named <literal>.s.PGSQL.<replaceable>nnnn</>.lock</literal> will be
- created in each of the <varname>unix_socket_directories</> directories.
+ <literal>.s.PGSQL.<replaceable>nnnn</replaceable></literal> where
+ <replaceable>nnnn</replaceable> is the server's port number, an ordinary file
+ named <literal>.s.PGSQL.<replaceable>nnnn</replaceable>.lock</literal> will be
+ created in each of the <varname>unix_socket_directories</varname> directories.
Neither file should ever be removed manually.
</para>
@@ -743,7 +743,7 @@ include_dir 'conf.d'
<varlistentry id="guc-unix-socket-group" xreflabel="unix_socket_group">
<term><varname>unix_socket_group</varname> (<type>string</type>)
<indexterm>
- <primary><varname>unix_socket_group</> configuration parameter</primary>
+ <primary><varname>unix_socket_group</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -768,7 +768,7 @@ include_dir 'conf.d'
<varlistentry id="guc-unix-socket-permissions" xreflabel="unix_socket_permissions">
<term><varname>unix_socket_permissions</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>unix_socket_permissions</> configuration parameter</primary>
+ <primary><varname>unix_socket_permissions</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -804,7 +804,7 @@ include_dir 'conf.d'
<para>
This parameter is irrelevant on systems, notably Solaris as of Solaris
10, that ignore socket permissions entirely. There, one can achieve a
- similar effect by pointing <varname>unix_socket_directories</> to a
+ similar effect by pointing <varname>unix_socket_directories</varname> to a
directory having search permission limited to the desired audience.
This parameter is also irrelevant on Windows, which does not have
Unix-domain sockets.
@@ -815,7 +815,7 @@ include_dir 'conf.d'
<varlistentry id="guc-bonjour" xreflabel="bonjour">
<term><varname>bonjour</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>bonjour</> configuration parameter</primary>
+ <primary><varname>bonjour</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -830,14 +830,14 @@ include_dir 'conf.d'
<varlistentry id="guc-bonjour-name" xreflabel="bonjour_name">
<term><varname>bonjour_name</varname> (<type>string</type>)
<indexterm>
- <primary><varname>bonjour_name</> configuration parameter</primary>
+ <primary><varname>bonjour_name</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the <productname>Bonjour</productname> service
name. The computer name is used if this parameter is set to the
- empty string <literal>''</> (which is the default). This parameter is
+ empty string <literal>''</literal> (which is the default). This parameter is
ignored if the server was not compiled with
<productname>Bonjour</productname> support.
This parameter can only be set at server start.
@@ -848,7 +848,7 @@ include_dir 'conf.d'
<varlistentry id="guc-tcp-keepalives-idle" xreflabel="tcp_keepalives_idle">
<term><varname>tcp_keepalives_idle</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>tcp_keepalives_idle</> configuration parameter</primary>
+ <primary><varname>tcp_keepalives_idle</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -857,7 +857,7 @@ include_dir 'conf.d'
should send a keepalive message to the client. A value of 0 uses
the system default.
This parameter is supported only on systems that support
- <symbol>TCP_KEEPIDLE</> or an equivalent socket option, and on
+ <symbol>TCP_KEEPIDLE</symbol> or an equivalent socket option, and on
Windows; on other systems, it must be zero.
In sessions connected via a Unix-domain socket, this parameter is
ignored and always reads as zero.
@@ -874,7 +874,7 @@ include_dir 'conf.d'
<varlistentry id="guc-tcp-keepalives-interval" xreflabel="tcp_keepalives_interval">
<term><varname>tcp_keepalives_interval</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>tcp_keepalives_interval</> configuration parameter</primary>
+ <primary><varname>tcp_keepalives_interval</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -883,7 +883,7 @@ include_dir 'conf.d'
that is not acknowledged by the client should be retransmitted.
A value of 0 uses the system default.
This parameter is supported only on systems that support
- <symbol>TCP_KEEPINTVL</> or an equivalent socket option, and on
+ <symbol>TCP_KEEPINTVL</symbol> or an equivalent socket option, and on
Windows; on other systems, it must be zero.
In sessions connected via a Unix-domain socket, this parameter is
ignored and always reads as zero.
@@ -900,7 +900,7 @@ include_dir 'conf.d'
<varlistentry id="guc-tcp-keepalives-count" xreflabel="tcp_keepalives_count">
<term><varname>tcp_keepalives_count</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>tcp_keepalives_count</> configuration parameter</primary>
+ <primary><varname>tcp_keepalives_count</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -909,7 +909,7 @@ include_dir 'conf.d'
the server's connection to the client is considered dead.
A value of 0 uses the system default.
This parameter is supported only on systems that support
- <symbol>TCP_KEEPCNT</> or an equivalent socket option;
+ <symbol>TCP_KEEPCNT</symbol> or an equivalent socket option;
on other systems, it must be zero.
In sessions connected via a Unix-domain socket, this parameter is
ignored and always reads as zero.
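Taken together, a postgresql.conf fragment tightening the three keepalive settings might read (the values are illustrative):

    tcp_keepalives_idle = 60        # seconds of inactivity before the first keepalive probe
    tcp_keepalives_interval = 10    # seconds between unacknowledged probes
    tcp_keepalives_count = 6        # lost probes before the connection is considered dead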
@@ -930,10 +930,10 @@ include_dir 'conf.d'
<variablelist>
<varlistentry id="guc-authentication-timeout" xreflabel="authentication_timeout">
<term><varname>authentication_timeout</varname> (<type>integer</type>)
- <indexterm><primary>timeout</><secondary>client authentication</></indexterm>
- <indexterm><primary>client authentication</><secondary>timeout during</></indexterm>
+ <indexterm><primary>timeout</primary><secondary>client authentication</secondary></indexterm>
+ <indexterm><primary>client authentication</primary><secondary>timeout during</secondary></indexterm>
<indexterm>
- <primary><varname>authentication_timeout</> configuration parameter</primary>
+ <primary><varname>authentication_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
@@ -943,8 +943,8 @@ include_dir 'conf.d'
would-be client has not completed the authentication protocol in
this much time, the server closes the connection. This prevents
hung clients from occupying a connection indefinitely.
- The default is one minute (<literal>1m</>).
- This parameter can only be set in the <filename>postgresql.conf</>
+ The default is one minute (<literal>1m</literal>).
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -953,16 +953,16 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl" xreflabel="ssl">
<term><varname>ssl</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>ssl</> configuration parameter</primary>
+ <primary><varname>ssl</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Enables <acronym>SSL</> connections. Please read
+ Enables <acronym>SSL</acronym> connections. Please read
<xref linkend="ssl-tcp"> before using this.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
- The default is <literal>off</>.
+ The default is <literal>off</literal>.
</para>
</listitem>
</varlistentry>
@@ -970,7 +970,7 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl-ca-file" xreflabel="ssl_ca_file">
<term><varname>ssl_ca_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>ssl_ca_file</> configuration parameter</primary>
+ <primary><varname>ssl_ca_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -978,7 +978,7 @@ include_dir 'conf.d'
Specifies the name of the file containing the SSL server certificate
authority (CA).
Relative paths are relative to the data directory.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
The default is empty, meaning no CA file is loaded,
and client certificate verification is not performed.
@@ -989,14 +989,14 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl-cert-file" xreflabel="ssl_cert_file">
<term><varname>ssl_cert_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>ssl_cert_file</> configuration parameter</primary>
+ <primary><varname>ssl_cert_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the name of the file containing the SSL server certificate.
Relative paths are relative to the data directory.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
The default is <filename>server.crt</filename>.
</para>
@@ -1006,7 +1006,7 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl-crl-file" xreflabel="ssl_crl_file">
<term><varname>ssl_crl_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>ssl_crl_file</> configuration parameter</primary>
+ <primary><varname>ssl_crl_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1014,7 +1014,7 @@ include_dir 'conf.d'
Specifies the name of the file containing the SSL server certificate
revocation list (CRL).
Relative paths are relative to the data directory.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
The default is empty, meaning no CRL file is loaded.
</para>
@@ -1024,14 +1024,14 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl-key-file" xreflabel="ssl_key_file">
<term><varname>ssl_key_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>ssl_key_file</> configuration parameter</primary>
+ <primary><varname>ssl_key_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the name of the file containing the SSL server private key.
Relative paths are relative to the data directory.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
The default is <filename>server.key</filename>.
</para>
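Putting the four file-related settings together, a postgresql.conf fragment enabling SSL with client certificate verification might look like this (root.crt is an illustrative CA file name; the other two are the documented defaults):

    ssl = on
    ssl_cert_file = 'server.crt'
    ssl_key_file  = 'server.key'
    ssl_ca_file   = 'root.crt'      # non-empty, so client certificates are verified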
@@ -1041,19 +1041,19 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl-ciphers" xreflabel="ssl_ciphers">
<term><varname>ssl_ciphers</varname> (<type>string</type>)
<indexterm>
- <primary><varname>ssl_ciphers</> configuration parameter</primary>
+ <primary><varname>ssl_ciphers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Specifies a list of <acronym>SSL</> cipher suites that are allowed to be
+ Specifies a list of <acronym>SSL</acronym> cipher suites that are allowed to be
used on secure connections. See
- the <citerefentry><refentrytitle>ciphers</></citerefentry> manual page
- in the <application>OpenSSL</> package for the syntax of this setting
+ the <citerefentry><refentrytitle>ciphers</refentrytitle></citerefentry> manual page
+ in the <application>OpenSSL</application> package for the syntax of this setting
and a list of supported values.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
- The default value is <literal>HIGH:MEDIUM:+3DES:!aNULL</>. The
+ The default value is <literal>HIGH:MEDIUM:+3DES:!aNULL</literal>. The
default is usually a reasonable choice unless you have specific
security requirements.
</para>
@@ -1065,7 +1065,7 @@ include_dir 'conf.d'
<term><literal>HIGH</literal></term>
<listitem>
<para>
- Cipher suites that use ciphers from <literal>HIGH</> group (e.g.,
+ Cipher suites that use ciphers from <literal>HIGH</literal> group (e.g.,
AES, Camellia, 3DES)
</para>
</listitem>
@@ -1075,7 +1075,7 @@ include_dir 'conf.d'
<term><literal>MEDIUM</literal></term>
<listitem>
<para>
- Cipher suites that use ciphers from <literal>MEDIUM</> group
+ Cipher suites that use ciphers from <literal>MEDIUM</literal> group
(e.g., RC4, SEED)
</para>
</listitem>
@@ -1085,11 +1085,11 @@ include_dir 'conf.d'
<term><literal>+3DES</literal></term>
<listitem>
<para>
- The OpenSSL default order for <literal>HIGH</> is problematic
+ The OpenSSL default order for <literal>HIGH</literal> is problematic
because it orders 3DES higher than AES128. This is wrong because
3DES offers less security than AES128, and it is also much
- slower. <literal>+3DES</> reorders it after all other
- <literal>HIGH</> and <literal>MEDIUM</> ciphers.
+ slower. <literal>+3DES</literal> reorders it after all other
+ <literal>HIGH</literal> and <literal>MEDIUM</literal> ciphers.
</para>
</listitem>
</varlistentry>
@@ -1111,7 +1111,7 @@ include_dir 'conf.d'
Available cipher suite details will vary across OpenSSL versions. Use
the command
<literal>openssl ciphers -v 'HIGH:MEDIUM:+3DES:!aNULL'</literal> to
- see actual details for the currently installed <application>OpenSSL</>
+ see actual details for the currently installed <application>OpenSSL</application>
version. Note that this list is filtered at run time based on the
server key type.
</para>
@@ -1121,16 +1121,16 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl-prefer-server-ciphers" xreflabel="ssl_prefer_server_ciphers">
<term><varname>ssl_prefer_server_ciphers</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>ssl_prefer_server_ciphers</> configuration parameter</primary>
+ <primary><varname>ssl_prefer_server_ciphers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies whether to use the server's SSL cipher preferences, rather
than the client's.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
- The default is <literal>true</>.
+ The default is <literal>true</literal>.
</para>
<para>
@@ -1146,28 +1146,28 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl-ecdh-curve" xreflabel="ssl_ecdh_curve">
<term><varname>ssl_ecdh_curve</varname> (<type>string</type>)
<indexterm>
- <primary><varname>ssl_ecdh_curve</> configuration parameter</primary>
+ <primary><varname>ssl_ecdh_curve</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Specifies the name of the curve to use in <acronym>ECDH</> key
+ Specifies the name of the curve to use in <acronym>ECDH</acronym> key
exchange. It needs to be supported by all clients that connect.
It does not need to be the same curve used by the server's Elliptic
Curve key.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
- The default is <literal>prime256v1</>.
+ The default is <literal>prime256v1</literal>.
</para>
<para>
OpenSSL names for the most common curves are:
- <literal>prime256v1</> (NIST P-256),
- <literal>secp384r1</> (NIST P-384),
- <literal>secp521r1</> (NIST P-521).
+ <literal>prime256v1</literal> (NIST P-256),
+ <literal>secp384r1</literal> (NIST P-384),
+ <literal>secp521r1</literal> (NIST P-521).
The full list of available curves can be shown with the command
<command>openssl ecparam -list_curves</command>. Not all of them
- are usable in <acronym>TLS</> though.
+ are usable in <acronym>TLS</acronym> though.
</para>
</listitem>
</varlistentry>
@@ -1175,17 +1175,17 @@ include_dir 'conf.d'
<varlistentry id="guc-password-encryption" xreflabel="password_encryption">
<term><varname>password_encryption</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>password_encryption</> configuration parameter</primary>
+ <primary><varname>password_encryption</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
When a password is specified in <xref linkend="sql-createrole"> or
<xref linkend="sql-alterrole">, this parameter determines the algorithm
- to use to encrypt the password. The default value is <literal>md5</>,
- which stores the password as an MD5 hash (<literal>on</> is also
- accepted, as alias for <literal>md5</>). Setting this parameter to
- <literal>scram-sha-256</> will encrypt the password with SCRAM-SHA-256.
+ to use to encrypt the password. The default value is <literal>md5</literal>,
+ which stores the password as an MD5 hash (<literal>on</literal> is also
+ accepted, as alias for <literal>md5</literal>). Setting this parameter to
+ <literal>scram-sha-256</literal> will encrypt the password with SCRAM-SHA-256.
</para>
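For example, to have a newly set password stored as a SCRAM-SHA-256 verifier rather than an MD5 hash:

    SET password_encryption = 'scram-sha-256';
    ALTER ROLE alice PASSWORD 'new-password';   -- role name and password are placeholders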
<para>
Note that older clients might lack support for the SCRAM authentication
@@ -1198,7 +1198,7 @@ include_dir 'conf.d'
<varlistentry id="guc-ssl-dh-params-file" xreflabel="ssl_dh_params_file">
<term><varname>ssl_dh_params_file</varname> (<type>string</type>)
<indexterm>
- <primary><varname>ssl_dh_params_file</> configuration parameter</primary>
+ <primary><varname>ssl_dh_params_file</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1213,7 +1213,7 @@ include_dir 'conf.d'
</para>
<para>
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -1222,7 +1222,7 @@ include_dir 'conf.d'
<varlistentry id="guc-krb-server-keyfile" xreflabel="krb_server_keyfile">
<term><varname>krb_server_keyfile</varname> (<type>string</type>)
<indexterm>
- <primary><varname>krb_server_keyfile</> configuration parameter</primary>
+ <primary><varname>krb_server_keyfile</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1230,7 +1230,7 @@ include_dir 'conf.d'
Sets the location of the Kerberos server key file. See
<xref linkend="gssapi-auth">
for details. This parameter can only be set in the
- <filename>postgresql.conf</> file or on the server command line.
+ <filename>postgresql.conf</filename> file or on the server command line.
</para>
</listitem>
</varlistentry>
@@ -1245,8 +1245,8 @@ include_dir 'conf.d'
<para>
Sets whether GSSAPI user names should be treated
case-insensitively.
- The default is <literal>off</> (case sensitive). This parameter can only be
- set in the <filename>postgresql.conf</> file or on the server command line.
+ The default is <literal>off</literal> (case sensitive). This parameter can only be
+ set in the <filename>postgresql.conf</filename> file or on the server command line.
</para>
</listitem>
</varlistentry>
@@ -1254,43 +1254,43 @@ include_dir 'conf.d'
<varlistentry id="guc-db-user-namespace" xreflabel="db_user_namespace">
<term><varname>db_user_namespace</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>db_user_namespace</> configuration parameter</primary>
+ <primary><varname>db_user_namespace</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
This parameter enables per-database user names. It is off by default.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
<para>
- If this is on, you should create users as <replaceable>username@dbname</>.
- When <replaceable>username</> is passed by a connecting client,
- <literal>@</> and the database name are appended to the user
+ If this is on, you should create users as <replaceable>username@dbname</replaceable>.
+ When <replaceable>username</replaceable> is passed by a connecting client,
+ <literal>@</literal> and the database name are appended to the user
name and that database-specific user name is looked up by the
server. Note that when you create users with names containing
- <literal>@</> within the SQL environment, you will need to
+ <literal>@</literal> within the SQL environment, you will need to
quote the user name.
</para>
<para>
With this parameter enabled, you can still create ordinary global
- users. Simply append <literal>@</> when specifying the user
- name in the client, e.g. <literal>joe@</>. The <literal>@</>
+ users. Simply append <literal>@</literal> when specifying the user
+ name in the client, e.g. <literal>joe@</literal>. The <literal>@</literal>
will be stripped off before the user name is looked up by the
server.
</para>
<para>
- <varname>db_user_namespace</> causes the client's and
+ <varname>db_user_namespace</varname> causes the client's and
server's user name representation to differ.
Authentication checks are always done with the server's user name
so authentication methods must be configured for the
server's user name, not the client's. Because
- <literal>md5</> uses the user name as salt on both the
- client and server, <literal>md5</> cannot be used with
- <varname>db_user_namespace</>.
+ <literal>md5</literal> uses the user name as salt on both the
+ client and server, <literal>md5</literal> cannot be used with
+ <varname>db_user_namespace</varname>.
</para>
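(Hedged example, not from the commit: enabling the per-database user names described above; the joe@ form is taken from the surrounding text.)
    db_user_namespace = on   # users are then created as username@dbname; append a bare '@' (e.g. joe@) for global users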
<note>
@@ -1317,15 +1317,15 @@ include_dir 'conf.d'
<varlistentry id="guc-shared-buffers" xreflabel="shared_buffers">
<term><varname>shared_buffers</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>shared_buffers</> configuration parameter</primary>
+ <primary><varname>shared_buffers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the amount of memory the database server uses for shared
memory buffers. The default is typically 128 megabytes
- (<literal>128MB</>), but might be less if your kernel settings will
- not support it (as determined during <application>initdb</>).
+ (<literal>128MB</literal>), but might be less if your kernel settings will
+ not support it (as determined during <application>initdb</application>).
This setting must be at least 128 kilobytes. (Non-default
values of <symbol>BLCKSZ</symbol> change the minimum.) However,
settings significantly higher than the minimum are usually needed
@@ -1358,7 +1358,7 @@ include_dir 'conf.d'
<varlistentry id="guc-huge-pages" xreflabel="huge_pages">
<term><varname>huge_pages</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>huge_pages</> configuration parameter</primary>
+ <primary><varname>huge_pages</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1392,7 +1392,7 @@ include_dir 'conf.d'
<varlistentry id="guc-temp-buffers" xreflabel="temp_buffers">
<term><varname>temp_buffers</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>temp_buffers</> configuration parameter</primary>
+ <primary><varname>temp_buffers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1400,7 +1400,7 @@ include_dir 'conf.d'
Sets the maximum number of temporary buffers used by each database
session. These are session-local buffers used only for access to
temporary tables. The default is eight megabytes
- (<literal>8MB</>). The setting can be changed within individual
+ (<literal>8MB</literal>). The setting can be changed within individual
sessions, but only before the first use of temporary tables
within the session; subsequent attempts to change the value will
have no effect on that session.
@@ -1408,10 +1408,10 @@ include_dir 'conf.d'
<para>
A session will allocate temporary buffers as needed up to the limit
- given by <varname>temp_buffers</>. The cost of setting a large
+ given by <varname>temp_buffers</varname>. The cost of setting a large
value in sessions that do not actually need many temporary
buffers is only a buffer descriptor, or about 64 bytes, per
- increment in <varname>temp_buffers</>. However if a buffer is
+ increment in <varname>temp_buffers</varname>. However if a buffer is
actually used an additional 8192 bytes will be consumed for it
(or in general, <symbol>BLCKSZ</symbol> bytes).
</para>
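(A minimal sketch of the two buffer settings discussed above; the sizes are assumptions for illustration, not recommendations made by this patch.)
    shared_buffers = 4GB     # default is typically 128MB; changing it requires a server restart
    temp_buffers = 32MB      # per-session limit for temporary-table buffers; default 8MB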
@@ -1421,13 +1421,13 @@ include_dir 'conf.d'
<varlistentry id="guc-max-prepared-transactions" xreflabel="max_prepared_transactions">
<term><varname>max_prepared_transactions</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_prepared_transactions</> configuration parameter</primary>
+ <primary><varname>max_prepared_transactions</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the maximum number of transactions that can be in the
- <quote>prepared</> state simultaneously (see <xref
+ <quote>prepared</quote> state simultaneously (see <xref
linkend="sql-prepare-transaction">).
Setting this parameter to zero (which is the default)
disables the prepared-transaction feature.
@@ -1454,14 +1454,14 @@ include_dir 'conf.d'
<varlistentry id="guc-work-mem" xreflabel="work_mem">
<term><varname>work_mem</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>work_mem</> configuration parameter</primary>
+ <primary><varname>work_mem</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the amount of memory to be used by internal sort operations
and hash tables before writing to temporary disk files. The value
- defaults to four megabytes (<literal>4MB</>).
+ defaults to four megabytes (<literal>4MB</literal>).
Note that for a complex query, several sort or hash operations might be
running in parallel; each operation will be allowed to use as much memory
as this value specifies before it starts to write data into temporary
@@ -1469,10 +1469,10 @@ include_dir 'conf.d'
concurrently. Therefore, the total memory used could be many
times the value of <varname>work_mem</varname>; it is necessary to
keep this fact in mind when choosing the value. Sort operations are
- used for <literal>ORDER BY</>, <literal>DISTINCT</>, and
+ used for <literal>ORDER BY</literal>, <literal>DISTINCT</literal>, and
merge joins.
Hash tables are used in hash joins, hash-based aggregation, and
- hash-based processing of <literal>IN</> subqueries.
+ hash-based processing of <literal>IN</literal> subqueries.
</para>
</listitem>
</varlistentry>
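(Illustrative only: a work_mem value in the spirit of the paragraph above; each sort or hash operation in each session may use this much, so the figure is an assumption, not guidance from the patch.)
    work_mem = 64MB          # default 4MB; applies per sort/hash operation, not per session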
@@ -1480,15 +1480,15 @@ include_dir 'conf.d'
<varlistentry id="guc-maintenance-work-mem" xreflabel="maintenance_work_mem">
<term><varname>maintenance_work_mem</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>maintenance_work_mem</> configuration parameter</primary>
+ <primary><varname>maintenance_work_mem</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the maximum amount of memory to be used by maintenance
operations, such as <command>VACUUM</command>, <command>CREATE
- INDEX</>, and <command>ALTER TABLE ADD FOREIGN KEY</>. It defaults
- to 64 megabytes (<literal>64MB</>). Since only one of these
+ INDEX</command>, and <command>ALTER TABLE ADD FOREIGN KEY</command>. It defaults
+ to 64 megabytes (<literal>64MB</literal>). Since only one of these
operations can be executed at a time by a database session, and
an installation normally doesn't have many of them running
concurrently, it's safe to set this value significantly larger
@@ -1508,7 +1508,7 @@ include_dir 'conf.d'
<varlistentry id="guc-autovacuum-work-mem" xreflabel="autovacuum_work_mem">
<term><varname>autovacuum_work_mem</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>autovacuum_work_mem</> configuration parameter</primary>
+ <primary><varname>autovacuum_work_mem</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1525,26 +1525,26 @@ include_dir 'conf.d'
<varlistentry id="guc-max-stack-depth" xreflabel="max_stack_depth">
<term><varname>max_stack_depth</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_stack_depth</> configuration parameter</primary>
+ <primary><varname>max_stack_depth</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the maximum safe depth of the server's execution stack.
The ideal setting for this parameter is the actual stack size limit
- enforced by the kernel (as set by <literal>ulimit -s</> or local
+ enforced by the kernel (as set by <literal>ulimit -s</literal> or local
equivalent), less a safety margin of a megabyte or so. The safety
margin is needed because the stack depth is not checked in every
routine in the server, but only in key potentially-recursive routines
such as expression evaluation. The default setting is two
- megabytes (<literal>2MB</>), which is conservatively small and
+ megabytes (<literal>2MB</literal>), which is conservatively small and
unlikely to risk crashes. However, it might be too small to allow
execution of complex functions. Only superusers can change this
setting.
</para>
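(Hypothetical example of the sizing rule above, assuming the kernel stack limit reported by ulimit -s is 8192 kB.)
    max_stack_depth = 7MB    # kernel limit minus a ~1MB safety margin; default is 2MB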
<para>
- Setting <varname>max_stack_depth</> higher than
+ Setting <varname>max_stack_depth</varname> higher than
the actual kernel limit will mean that a runaway recursive function
can crash an individual backend process. On platforms where
<productname>PostgreSQL</productname> can determine the kernel limit,
@@ -1558,25 +1558,25 @@ include_dir 'conf.d'
<varlistentry id="guc-dynamic-shared-memory-type" xreflabel="dynamic_shared_memory_type">
<term><varname>dynamic_shared_memory_type</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>dynamic_shared_memory_type</> configuration parameter</primary>
+ <primary><varname>dynamic_shared_memory_type</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the dynamic shared memory implementation that the server
- should use. Possible values are <literal>posix</> (for POSIX shared
- memory allocated using <literal>shm_open</>), <literal>sysv</literal>
- (for System V shared memory allocated via <literal>shmget</>),
- <literal>windows</> (for Windows shared memory), <literal>mmap</>
+ should use. Possible values are <literal>posix</literal> (for POSIX shared
+ memory allocated using <literal>shm_open</literal>), <literal>sysv</literal>
+ (for System V shared memory allocated via <literal>shmget</literal>),
+ <literal>windows</literal> (for Windows shared memory), <literal>mmap</literal>
(to simulate shared memory using memory-mapped files stored in the
- data directory), and <literal>none</> (to disable this feature).
+ data directory), and <literal>none</literal> (to disable this feature).
Not all values are supported on all platforms; the first supported
option is the default for that platform. The use of the
- <literal>mmap</> option, which is not the default on any platform,
+ <literal>mmap</literal> option, which is not the default on any platform,
is generally discouraged because the operating system may write
modified pages back to disk repeatedly, increasing system I/O load;
however, it may be useful for debugging, when the
- <literal>pg_dynshmem</> directory is stored on a RAM disk, or when
+ <literal>pg_dynshmem</literal> directory is stored on a RAM disk, or when
other shared memory facilities are not available.
</para>
</listitem>
@@ -1592,7 +1592,7 @@ include_dir 'conf.d'
<varlistentry id="guc-temp-file-limit" xreflabel="temp_file_limit">
<term><varname>temp_file_limit</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>temp_file_limit</> configuration parameter</primary>
+ <primary><varname>temp_file_limit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1601,13 +1601,13 @@ include_dir 'conf.d'
for temporary files, such as sort and hash temporary files, or the
storage file for a held cursor. A transaction attempting to exceed
this limit will be canceled.
- The value is specified in kilobytes, and <literal>-1</> (the
+ The value is specified in kilobytes, and <literal>-1</literal> (the
default) means no limit.
Only superusers can change this setting.
</para>
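(Sketch only; the limit shown is an arbitrary illustration of the per-process cap described above.)
    temp_file_limit = 10GB   # -1 (the default) means no limit; counts temporary files, not explicit temporary tables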
<para>
This setting constrains the total space used at any instant by all
- temporary files used by a given <productname>PostgreSQL</> process.
+ temporary files used by a given <productname>PostgreSQL</productname> process.
It should be noted that disk space used for explicit temporary
tables, as opposed to temporary files used behind-the-scenes in query
execution, does <emphasis>not</emphasis> count against this limit.
@@ -1625,7 +1625,7 @@ include_dir 'conf.d'
<varlistentry id="guc-max-files-per-process" xreflabel="max_files_per_process">
<term><varname>max_files_per_process</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_files_per_process</> configuration parameter</primary>
+ <primary><varname>max_files_per_process</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1637,7 +1637,7 @@ include_dir 'conf.d'
allow individual processes to open many more files than the system
can actually support if many processes all try to open
that many files. If you find yourself seeing <quote>Too many open
- files</> failures, try reducing this setting.
+ files</quote> failures, try reducing this setting.
This parameter can only be set at server start.
</para>
</listitem>
@@ -1684,7 +1684,7 @@ include_dir 'conf.d'
<varlistentry id="guc-vacuum-cost-delay" xreflabel="vacuum_cost_delay">
<term><varname>vacuum_cost_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_cost_delay</> configuration parameter</primary>
+ <primary><varname>vacuum_cost_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1702,7 +1702,7 @@ include_dir 'conf.d'
<para>
When using cost-based vacuuming, appropriate values for
- <varname>vacuum_cost_delay</> are usually quite small, perhaps
+ <varname>vacuum_cost_delay</varname> are usually quite small, perhaps
10 or 20 milliseconds. Adjusting vacuum's resource consumption
is best done by changing the other vacuum cost parameters.
</para>
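(Example values for cost-based vacuuming, following the 10–20 millisecond guidance in the paragraph above; treat them as a starting point, not a recommendation of this commit.)
    vacuum_cost_delay = 10ms   # default 0 disables cost-based delays for manual VACUUM
    vacuum_cost_limit = 200    # default accumulated cost at which the delay is taken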
@@ -1712,7 +1712,7 @@ include_dir 'conf.d'
<varlistentry id="guc-vacuum-cost-page-hit" xreflabel="vacuum_cost_page_hit">
<term><varname>vacuum_cost_page_hit</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_cost_page_hit</> configuration parameter</primary>
+ <primary><varname>vacuum_cost_page_hit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1728,7 +1728,7 @@ include_dir 'conf.d'
<varlistentry id="guc-vacuum-cost-page-miss" xreflabel="vacuum_cost_page_miss">
<term><varname>vacuum_cost_page_miss</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_cost_page_miss</> configuration parameter</primary>
+ <primary><varname>vacuum_cost_page_miss</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1744,7 +1744,7 @@ include_dir 'conf.d'
<varlistentry id="guc-vacuum-cost-page-dirty" xreflabel="vacuum_cost_page_dirty">
<term><varname>vacuum_cost_page_dirty</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_cost_page_dirty</> configuration parameter</primary>
+ <primary><varname>vacuum_cost_page_dirty</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1760,7 +1760,7 @@ include_dir 'conf.d'
<varlistentry id="guc-vacuum-cost-limit" xreflabel="vacuum_cost_limit">
<term><varname>vacuum_cost_limit</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_cost_limit</> configuration parameter</primary>
+ <primary><varname>vacuum_cost_limit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1792,8 +1792,8 @@ include_dir 'conf.d'
<para>
There is a separate server
- process called the <firstterm>background writer</>, whose function
- is to issue writes of <quote>dirty</> (new or modified) shared
+ process called the <firstterm>background writer</firstterm>, whose function
+ is to issue writes of <quote>dirty</quote> (new or modified) shared
buffers. It writes shared buffers so server processes handling
user queries seldom or never need to wait for a write to occur.
However, the background writer does cause a net overall
@@ -1808,7 +1808,7 @@ include_dir 'conf.d'
<varlistentry id="guc-bgwriter-delay" xreflabel="bgwriter_delay">
<term><varname>bgwriter_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>bgwriter_delay</> configuration parameter</primary>
+ <primary><varname>bgwriter_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1816,16 +1816,16 @@ include_dir 'conf.d'
Specifies the delay between activity rounds for the
background writer. In each round the writer issues writes
for some number of dirty buffers (controllable by the
- following parameters). It then sleeps for <varname>bgwriter_delay</>
+ following parameters). It then sleeps for <varname>bgwriter_delay</varname>
milliseconds, and repeats. When there are no dirty buffers in the
buffer pool, though, it goes into a longer sleep regardless of
- <varname>bgwriter_delay</>. The default value is 200
- milliseconds (<literal>200ms</>). Note that on many systems, the
+ <varname>bgwriter_delay</varname>. The default value is 200
+ milliseconds (<literal>200ms</literal>). Note that on many systems, the
effective resolution of sleep delays is 10 milliseconds; setting
- <varname>bgwriter_delay</> to a value that is not a multiple of 10
+ <varname>bgwriter_delay</varname> to a value that is not a multiple of 10
might have the same results as setting it to the next higher multiple
of 10. This parameter can only be set in the
- <filename>postgresql.conf</> file or on the server command line.
+ <filename>postgresql.conf</filename> file or on the server command line.
</para>
</listitem>
</varlistentry>
@@ -1833,7 +1833,7 @@ include_dir 'conf.d'
<varlistentry id="guc-bgwriter-lru-maxpages" xreflabel="bgwriter_lru_maxpages">
<term><varname>bgwriter_lru_maxpages</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>bgwriter_lru_maxpages</> configuration parameter</primary>
+ <primary><varname>bgwriter_lru_maxpages</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1843,7 +1843,7 @@ include_dir 'conf.d'
background writing. (Note that checkpoints, which are managed by
a separate, dedicated auxiliary process, are unaffected.)
The default value is 100 buffers.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -1852,7 +1852,7 @@ include_dir 'conf.d'
<varlistentry id="guc-bgwriter-lru-multiplier" xreflabel="bgwriter_lru_multiplier">
<term><varname>bgwriter_lru_multiplier</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>bgwriter_lru_multiplier</> configuration parameter</primary>
+ <primary><varname>bgwriter_lru_multiplier</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1860,18 +1860,18 @@ include_dir 'conf.d'
The number of dirty buffers written in each round is based on the
number of new buffers that have been needed by server processes
during recent rounds. The average recent need is multiplied by
- <varname>bgwriter_lru_multiplier</> to arrive at an estimate of the
+ <varname>bgwriter_lru_multiplier</varname> to arrive at an estimate of the
number of buffers that will be needed during the next round. Dirty
buffers are written until there are that many clean, reusable buffers
- available. (However, no more than <varname>bgwriter_lru_maxpages</>
+ available. (However, no more than <varname>bgwriter_lru_maxpages</varname>
buffers will be written per round.)
- Thus, a setting of 1.0 represents a <quote>just in time</> policy
+ Thus, a setting of 1.0 represents a <quote>just in time</quote> policy
of writing exactly the number of buffers predicted to be needed.
Larger values provide some cushion against spikes in demand,
while smaller values intentionally leave writes to be done by
server processes.
The default is 2.0.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
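(The background-writer defaults mentioned in the entries above, written out as a postgresql.conf excerpt for orientation rather than as tuning advice.)
    bgwriter_delay = 200ms           # sleep between rounds; effective resolution is often 10ms
    bgwriter_lru_maxpages = 100      # upper bound on buffers written per round
    bgwriter_lru_multiplier = 2.0    # 1.0 is a just-in-time policy; larger values add cushion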
@@ -1880,7 +1880,7 @@ include_dir 'conf.d'
<varlistentry id="guc-bgwriter-flush-after" xreflabel="bgwriter_flush_after">
<term><varname>bgwriter_flush_after</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>bgwriter_flush_after</> configuration parameter</primary>
+ <primary><varname>bgwriter_flush_after</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1897,10 +1897,10 @@ include_dir 'conf.d'
cache, where performance might degrade. This setting may have no
effect on some platforms. The valid range is between
<literal>0</literal>, which disables forced writeback, and
- <literal>2MB</literal>. The default is <literal>512kB</> on Linux,
- <literal>0</> elsewhere. (If <symbol>BLCKSZ</symbol> is not 8kB,
+ <literal>2MB</literal>. The default is <literal>512kB</literal> on Linux,
+ <literal>0</literal> elsewhere. (If <symbol>BLCKSZ</symbol> is not 8kB,
the default and maximum values scale proportionally to it.)
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -1923,15 +1923,15 @@ include_dir 'conf.d'
<varlistentry id="guc-effective-io-concurrency" xreflabel="effective_io_concurrency">
<term><varname>effective_io_concurrency</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>effective_io_concurrency</> configuration parameter</primary>
+ <primary><varname>effective_io_concurrency</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the number of concurrent disk I/O operations that
- <productname>PostgreSQL</> expects can be executed
+ <productname>PostgreSQL</productname> expects can be executed
simultaneously. Raising this value will increase the number of I/O
- operations that any individual <productname>PostgreSQL</> session
+ operations that any individual <productname>PostgreSQL</productname> session
attempts to initiate in parallel. The allowed range is 1 to 1000,
or zero to disable issuance of asynchronous I/O requests. Currently,
this setting only affects bitmap heap scans.
@@ -1951,7 +1951,7 @@ include_dir 'conf.d'
</para>
<para>
- Asynchronous I/O depends on an effective <function>posix_fadvise</>
+ Asynchronous I/O depends on an effective <function>posix_fadvise</function>
function, which some operating systems lack. If the function is not
present then setting this parameter to anything but zero will result
in an error. On some operating systems (e.g., Solaris), the function
@@ -1970,7 +1970,7 @@ include_dir 'conf.d'
<varlistentry id="guc-max-worker-processes" xreflabel="max_worker_processes">
<term><varname>max_worker_processes</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_worker_processes</> configuration parameter</primary>
+ <primary><varname>max_worker_processes</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -1997,7 +1997,7 @@ include_dir 'conf.d'
<varlistentry id="guc-max-parallel-workers-per-gather" xreflabel="max_parallel_workers_per_gather">
<term><varname>max_parallel_workers_per_gather</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_parallel_workers_per_gather</> configuration parameter</primary>
+ <primary><varname>max_parallel_workers_per_gather</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2021,7 +2021,7 @@ include_dir 'conf.d'
account when choosing a value for this setting, as well as when
configuring other settings that control resource utilization, such
as <xref linkend="guc-work-mem">. Resource limits such as
- <varname>work_mem</> are applied individually to each worker,
+ <varname>work_mem</varname> are applied individually to each worker,
which means the total utilization may be much higher across all
processes than it would normally be for any single process.
For example, a parallel query using 4 workers may use up to 5 times
@@ -2039,7 +2039,7 @@ include_dir 'conf.d'
<varlistentry id="guc-max-parallel-workers" xreflabel="max_parallel_workers">
<term><varname>max_parallel_workers</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_parallel_workers</> configuration parameter</primary>
+ <primary><varname>max_parallel_workers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2059,7 +2059,7 @@ include_dir 'conf.d'
<varlistentry id="guc-backend-flush-after" xreflabel="backend_flush_after">
<term><varname>backend_flush_after</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>backend_flush_after</> configuration parameter</primary>
+ <primary><varname>backend_flush_after</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2076,7 +2076,7 @@ include_dir 'conf.d'
than the OS's page cache, where performance might degrade. This
setting may have no effect on some platforms. The valid range is
between <literal>0</literal>, which disables forced writeback,
- and <literal>2MB</literal>. The default is <literal>0</>, i.e., no
+ and <literal>2MB</literal>. The default is <literal>0</literal>, i.e., no
forced writeback. (If <symbol>BLCKSZ</symbol> is not 8kB,
the maximum value scales proportionally to it.)
</para>
@@ -2086,13 +2086,13 @@ include_dir 'conf.d'
<varlistentry id="guc-old-snapshot-threshold" xreflabel="old_snapshot_threshold">
<term><varname>old_snapshot_threshold</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>old_snapshot_threshold</> configuration parameter</primary>
+ <primary><varname>old_snapshot_threshold</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the minimum time that a snapshot can be used without risk of a
- <literal>snapshot too old</> error occurring when using the snapshot.
+ <literal>snapshot too old</literal> error occurring when using the snapshot.
This parameter can only be set at server start.
</para>
@@ -2107,12 +2107,12 @@ include_dir 'conf.d'
</para>
<para>
- A value of <literal>-1</> disables this feature, and is the default.
+ A value of <literal>-1</literal> disables this feature, and is the default.
Useful values for production work probably range from a small number
of hours to a few days. The setting will be coerced to a granularity
- of minutes, and small numbers (such as <literal>0</> or
- <literal>1min</>) are only allowed because they may sometimes be
- useful for testing. While a setting as high as <literal>60d</> is
+ of minutes, and small numbers (such as <literal>0</literal> or
+ <literal>1min</literal>) are only allowed because they may sometimes be
+ useful for testing. While a setting as high as <literal>60d</literal> is
allowed, please note that in many workloads extreme bloat or
transaction ID wraparound may occur in much shorter time frames.
</para>
@@ -2120,10 +2120,10 @@ include_dir 'conf.d'
<para>
When this feature is enabled, freed space at the end of a relation
cannot be released to the operating system, since that could remove
- information needed to detect the <literal>snapshot too old</>
+ information needed to detect the <literal>snapshot too old</literal>
condition. All space allocated to a relation remains associated with
that relation for reuse only within that relation unless explicitly
- freed (for example, with <command>VACUUM FULL</>).
+ freed (for example, with <command>VACUUM FULL</command>).
</para>
<para>
@@ -2135,7 +2135,7 @@ include_dir 'conf.d'
Some tables cannot safely be vacuumed early, and so will not be
affected by this setting, such as system catalogs. For such tables
this setting will neither reduce bloat nor create a possibility
- of a <literal>snapshot too old</> error on scanning.
+ of a <literal>snapshot too old</literal> error on scanning.
</para>
</listitem>
</varlistentry>
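(Illustrative: enabling the snapshot-age limit described above with an assumed value of a few hours.)
    old_snapshot_threshold = 6h   # -1 (default) disables; coerced to whole minutes; set only at server start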
@@ -2158,45 +2158,45 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-level" xreflabel="wal_level">
<term><varname>wal_level</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>wal_level</> configuration parameter</primary>
+ <primary><varname>wal_level</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- <varname>wal_level</> determines how much information is written to
- the WAL. The default value is <literal>replica</>, which writes enough
+ <varname>wal_level</varname> determines how much information is written to
+ the WAL. The default value is <literal>replica</literal>, which writes enough
data to support WAL archiving and replication, including running
- read-only queries on a standby server. <literal>minimal</> removes all
+ read-only queries on a standby server. <literal>minimal</literal> removes all
logging except the information required to recover from a crash or
immediate shutdown. Finally,
- <literal>logical</> adds information necessary to support logical
+ <literal>logical</literal> adds information necessary to support logical
decoding. Each level includes the information logged at all lower
levels. This parameter can only be set at server start.
</para>
<para>
- In <literal>minimal</> level, WAL-logging of some bulk
+ In <literal>minimal</literal> level, WAL-logging of some bulk
operations can be safely skipped, which can make those
operations much faster (see <xref linkend="populate-pitr">).
Operations in which this optimization can be applied include:
<simplelist>
- <member><command>CREATE TABLE AS</></member>
- <member><command>CREATE INDEX</></member>
- <member><command>CLUSTER</></member>
- <member><command>COPY</> into tables that were created or truncated in the same
+ <member><command>CREATE TABLE AS</command></member>
+ <member><command>CREATE INDEX</command></member>
+ <member><command>CLUSTER</command></member>
+ <member><command>COPY</command> into tables that were created or truncated in the same
transaction</member>
</simplelist>
But minimal WAL does not contain enough information to reconstruct the
- data from a base backup and the WAL logs, so <literal>replica</> or
+ data from a base backup and the WAL logs, so <literal>replica</literal> or
higher must be used to enable WAL archiving
(<xref linkend="guc-archive-mode">) and streaming replication.
</para>
<para>
- In <literal>logical</> level, the same information is logged as
- with <literal>replica</>, plus information needed to allow
+ In <literal>logical</literal> level, the same information is logged as
+ with <literal>replica</literal>, plus information needed to allow
extracting logical change sets from the WAL. Using a level of
- <literal>logical</> will increase the WAL volume, particularly if many
+ <literal>logical</literal> will increase the WAL volume, particularly if many
tables are configured for <literal>REPLICA IDENTITY FULL</literal> and
- many <command>UPDATE</> and <command>DELETE</> statements are
+ many <command>UPDATE</command> and <command>DELETE</command> statements are
executed.
</para>
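(A sketch of the wal_level choices just described; which level is needed depends on whether archiving, streaming replication, or logical decoding is in use.)
    wal_level = replica   # 'minimal' omits WAL needed for archiving/replication; 'logical' adds logical-decoding data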
<para>
@@ -2210,14 +2210,14 @@ include_dir 'conf.d'
<varlistentry id="guc-fsync" xreflabel="fsync">
<term><varname>fsync</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>fsync</> configuration parameter</primary>
+ <primary><varname>fsync</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- If this parameter is on, the <productname>PostgreSQL</> server
+ If this parameter is on, the <productname>PostgreSQL</productname> server
will try to make sure that updates are physically written to
- disk, by issuing <function>fsync()</> system calls or various
+ disk, by issuing <function>fsync()</function> system calls or various
equivalent methods (see <xref linkend="guc-wal-sync-method">).
This ensures that the database cluster can recover to a
consistent state after an operating system or hardware crash.
@@ -2249,7 +2249,7 @@ include_dir 'conf.d'
off to on, it is necessary to force all modified buffers in the
kernel to durable storage. This can be done while the cluster
is shutdown or while <varname>fsync</varname> is on by running <command>initdb
- --sync-only</command>, running <command>sync</>, unmounting the
+ --sync-only</command>, running <command>sync</command>, unmounting the
file system, or rebooting the server.
</para>
@@ -2261,7 +2261,7 @@ include_dir 'conf.d'
</para>
<para>
- <varname>fsync</varname> can only be set in the <filename>postgresql.conf</>
+ <varname>fsync</varname> can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
If you turn this parameter off, also consider turning off
<xref linkend="guc-full-page-writes">.
@@ -2272,26 +2272,26 @@ include_dir 'conf.d'
<varlistentry id="guc-synchronous-commit" xreflabel="synchronous_commit">
<term><varname>synchronous_commit</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>synchronous_commit</> configuration parameter</primary>
+ <primary><varname>synchronous_commit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies whether transaction commit will wait for WAL records
- to be written to disk before the command returns a <quote>success</>
- indication to the client. Valid values are <literal>on</>,
- <literal>remote_apply</>, <literal>remote_write</>, <literal>local</>,
- and <literal>off</>. The default, and safe, setting
- is <literal>on</>. When <literal>off</>, there can be a delay between
+ to be written to disk before the command returns a <quote>success</quote>
+ indication to the client. Valid values are <literal>on</literal>,
+ <literal>remote_apply</literal>, <literal>remote_write</literal>, <literal>local</literal>,
+ and <literal>off</literal>. The default, and safe, setting
+ is <literal>on</literal>. When <literal>off</literal>, there can be a delay between
when success is reported to the client and when the transaction is
really guaranteed to be safe against a server crash. (The maximum
delay is three times <xref linkend="guc-wal-writer-delay">.) Unlike
- <xref linkend="guc-fsync">, setting this parameter to <literal>off</>
+ <xref linkend="guc-fsync">, setting this parameter to <literal>off</literal>
does not create any risk of database inconsistency: an operating
system or database crash might
result in some recent allegedly-committed transactions being lost, but
the database state will be just the same as if those transactions had
- been aborted cleanly. So, turning <varname>synchronous_commit</> off
+ been aborted cleanly. So, turning <varname>synchronous_commit</varname> off
can be a useful alternative when performance is more important than
exact certainty about the durability of a transaction. For more
discussion see <xref linkend="wal-async-commit">.
@@ -2300,32 +2300,32 @@ include_dir 'conf.d'
If <xref linkend="guc-synchronous-standby-names"> is non-empty, this
parameter also controls whether or not transaction commits will wait
for their WAL records to be replicated to the standby server(s).
- When set to <literal>on</>, commits will wait until replies
+ When set to <literal>on</literal>, commits will wait until replies
from the current synchronous standby(s) indicate they have received
the commit record of the transaction and flushed it to disk. This
ensures the transaction will not be lost unless both the primary and
all synchronous standbys suffer corruption of their database storage.
- When set to <literal>remote_apply</>, commits will wait until replies
+ When set to <literal>remote_apply</literal>, commits will wait until replies
from the current synchronous standby(s) indicate they have received the
commit record of the transaction and applied it, so that it has become
visible to queries on the standby(s).
- When set to <literal>remote_write</>, commits will wait until replies
+ When set to <literal>remote_write</literal>, commits will wait until replies
from the current synchronous standby(s) indicate they have
received the commit record of the transaction and written it out to
their operating system. This setting is sufficient to
ensure data preservation even if a standby instance of
- <productname>PostgreSQL</> were to crash, but not if the standby
+ <productname>PostgreSQL</productname> were to crash, but not if the standby
suffers an operating-system-level crash, since the data has not
necessarily reached stable storage on the standby.
- Finally, the setting <literal>local</> causes commits to wait for
+ Finally, the setting <literal>local</literal> causes commits to wait for
local flush to disk, but not for replication. This is not usually
desirable when synchronous replication is in use, but is provided for
completeness.
</para>
<para>
- If <varname>synchronous_standby_names</> is empty, the settings
- <literal>on</>, <literal>remote_apply</>, <literal>remote_write</>
- and <literal>local</> all provide the same synchronization level:
+ If <varname>synchronous_standby_names</varname> is empty, the settings
+ <literal>on</literal>, <literal>remote_apply</literal>, <literal>remote_write</literal>
+ and <literal>local</literal> all provide the same synchronization level:
transaction commits only wait for local flush to disk.
</para>
<para>
@@ -2335,7 +2335,7 @@ include_dir 'conf.d'
transactions commit synchronously and others asynchronously.
For example, to make a single multistatement transaction commit
asynchronously when the default is the opposite, issue <command>SET
- LOCAL synchronous_commit TO OFF</> within the transaction.
+ LOCAL synchronous_commit TO OFF</command> within the transaction.
</para>
</listitem>
</varlistentry>
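(Hedged example of the commit-durability settings above; remote_apply is shown only to illustrate the strictest standby-visibility mode, and per-transaction overrides remain possible with SET LOCAL synchronous_commit TO OFF as the text notes.)
    synchronous_commit = remote_apply   # on (default) / remote_write / local / off trade durability against latency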
@@ -2343,7 +2343,7 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-sync-method" xreflabel="wal_sync_method">
<term><varname>wal_sync_method</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>wal_sync_method</> configuration parameter</primary>
+ <primary><varname>wal_sync_method</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2356,41 +2356,41 @@ include_dir 'conf.d'
<itemizedlist>
<listitem>
<para>
- <literal>open_datasync</> (write WAL files with <function>open()</> option <symbol>O_DSYNC</>)
+ <literal>open_datasync</literal> (write WAL files with <function>open()</function> option <symbol>O_DSYNC</symbol>)
</para>
</listitem>
<listitem>
<para>
- <literal>fdatasync</> (call <function>fdatasync()</> at each commit)
+ <literal>fdatasync</literal> (call <function>fdatasync()</function> at each commit)
</para>
</listitem>
<listitem>
<para>
- <literal>fsync</> (call <function>fsync()</> at each commit)
+ <literal>fsync</literal> (call <function>fsync()</function> at each commit)
</para>
</listitem>
<listitem>
<para>
- <literal>fsync_writethrough</> (call <function>fsync()</> at each commit, forcing write-through of any disk write cache)
+ <literal>fsync_writethrough</literal> (call <function>fsync()</function> at each commit, forcing write-through of any disk write cache)
</para>
</listitem>
<listitem>
<para>
- <literal>open_sync</> (write WAL files with <function>open()</> option <symbol>O_SYNC</>)
+ <literal>open_sync</literal> (write WAL files with <function>open()</function> option <symbol>O_SYNC</symbol>)
</para>
</listitem>
</itemizedlist>
<para>
- The <literal>open_</>* options also use <literal>O_DIRECT</> if available.
+ The <literal>open_</literal>* options also use <literal>O_DIRECT</literal> if available.
Not all of these choices are available on all platforms.
The default is the first method in the above list that is supported
- by the platform, except that <literal>fdatasync</> is the default on
+ by the platform, except that <literal>fdatasync</literal> is the default on
Linux. The default is not necessarily ideal; it might be
necessary to change this setting or other aspects of your system
configuration in order to create a crash-safe configuration or
achieve optimal performance.
These aspects are discussed in <xref linkend="wal-reliability">.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
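(Illustrative, assuming a Linux host: pinning the flush method discussed above; the pg_test_fsync utility can help compare methods before changing the default.)
    wal_sync_method = fdatasync   # the default on Linux; elsewhere the first supported method in the list is used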
@@ -2399,12 +2399,12 @@ include_dir 'conf.d'
<varlistentry id="guc-full-page-writes" xreflabel="full_page_writes">
<term><varname>full_page_writes</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>full_page_writes</> configuration parameter</primary>
+ <primary><varname>full_page_writes</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When this parameter is on, the <productname>PostgreSQL</> server
+ When this parameter is on, the <productname>PostgreSQL</productname> server
writes the entire content of each disk page to WAL during the
first modification of that page after a checkpoint.
This is needed because
@@ -2436,9 +2436,9 @@ include_dir 'conf.d'
</para>
<para>
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
- The default is <literal>on</>.
+ The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -2446,12 +2446,12 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-log-hints" xreflabel="wal_log_hints">
<term><varname>wal_log_hints</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>wal_log_hints</> configuration parameter</primary>
+ <primary><varname>wal_log_hints</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When this parameter is <literal>on</>, the <productname>PostgreSQL</>
+ When this parameter is <literal>on</literal>, the <productname>PostgreSQL</productname>
server writes the entire content of each disk page to WAL during the
first modification of that page after a checkpoint, even for
non-critical modifications of so-called hint bits.
@@ -2465,7 +2465,7 @@ include_dir 'conf.d'
</para>
<para>
- This parameter can only be set at server start. The default value is <literal>off</>.
+ This parameter can only be set at server start. The default value is <literal>off</literal>.
</para>
</listitem>
</varlistentry>
@@ -2473,16 +2473,16 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-compression" xreflabel="wal_compression">
<term><varname>wal_compression</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>wal_compression</> configuration parameter</primary>
+ <primary><varname>wal_compression</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When this parameter is <literal>on</>, the <productname>PostgreSQL</>
+ When this parameter is <literal>on</literal>, the <productname>PostgreSQL</productname>
server compresses a full page image written to WAL when
<xref linkend="guc-full-page-writes"> is on or during a base backup.
A compressed page image will be decompressed during WAL replay.
- The default value is <literal>off</>.
+ The default value is <literal>off</literal>.
Only superusers can change this setting.
</para>
@@ -2498,7 +2498,7 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-buffers" xreflabel="wal_buffers">
<term><varname>wal_buffers</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_buffers</> configuration parameter</primary>
+ <primary><varname>wal_buffers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2530,24 +2530,24 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-writer-delay" xreflabel="wal_writer_delay">
<term><varname>wal_writer_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_writer_delay</> configuration parameter</primary>
+ <primary><varname>wal_writer_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies how often the WAL writer flushes WAL. After flushing WAL it
- sleeps for <varname>wal_writer_delay</> milliseconds, unless woken up
+ sleeps for <varname>wal_writer_delay</varname> milliseconds, unless woken up
by an asynchronously committing transaction. If the last flush
- happened less than <varname>wal_writer_delay</> milliseconds ago and
- less than <varname>wal_writer_flush_after</> bytes of WAL have been
+ happened less than <varname>wal_writer_delay</varname> milliseconds ago and
+ less than <varname>wal_writer_flush_after</varname> bytes of WAL have been
produced since, then WAL is only written to the operating system, not
flushed to disk.
- The default value is 200 milliseconds (<literal>200ms</>). Note that
+ The default value is 200 milliseconds (<literal>200ms</literal>). Note that
on many systems, the effective resolution of sleep delays is 10
- milliseconds; setting <varname>wal_writer_delay</> to a value that is
+ milliseconds; setting <varname>wal_writer_delay</varname> to a value that is
not a multiple of 10 might have the same results as setting it to the
next higher multiple of 10. This parameter can only be set in the
- <filename>postgresql.conf</> file or on the server command line.
+ <filename>postgresql.conf</filename> file or on the server command line.
</para>
</listitem>
</varlistentry>
@@ -2555,19 +2555,19 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-writer-flush-after" xreflabel="wal_writer_flush_after">
<term><varname>wal_writer_flush_after</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_writer_flush_after</> configuration parameter</primary>
+ <primary><varname>wal_writer_flush_after</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies how often the WAL writer flushes WAL. If the last flush
- happened less than <varname>wal_writer_delay</> milliseconds ago and
- less than <varname>wal_writer_flush_after</> bytes of WAL have been
+ happened less than <varname>wal_writer_delay</varname> milliseconds ago and
+ less than <varname>wal_writer_flush_after</varname> bytes of WAL have been
produced since, then WAL is only written to the operating system, not
- flushed to disk. If <varname>wal_writer_flush_after</> is set
- to <literal>0</> then WAL data is flushed immediately. The default is
+ flushed to disk. If <varname>wal_writer_flush_after</varname> is set
+ to <literal>0</literal> then WAL data is flushed immediately. The default is
<literal>1MB</literal>. This parameter can only be set in the
- <filename>postgresql.conf</> file or on the server command line.
+ <filename>postgresql.conf</filename> file or on the server command line.
</para>
</listitem>
</varlistentry>
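(The WAL-writer defaults from the two entries above, as a postgresql.conf excerpt for reference.)
    wal_writer_delay = 200ms       # flush cadence for asynchronously committed WAL
    wal_writer_flush_after = 1MB   # 0 flushes immediately; below this volume WAL is written but not flushed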
@@ -2575,7 +2575,7 @@ include_dir 'conf.d'
<varlistentry id="guc-commit-delay" xreflabel="commit_delay">
<term><varname>commit_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>commit_delay</> configuration parameter</primary>
+ <primary><varname>commit_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2592,15 +2592,15 @@ include_dir 'conf.d'
<varname>commit_siblings</varname> other transactions are active
when a flush is about to be initiated. Also, no delays are
performed if <varname>fsync</varname> is disabled.
- The default <varname>commit_delay</> is zero (no delay).
+ The default <varname>commit_delay</varname> is zero (no delay).
Only superusers can change this setting.
</para>
<para>
- In <productname>PostgreSQL</> releases prior to 9.3,
+ In <productname>PostgreSQL</productname> releases prior to 9.3,
<varname>commit_delay</varname> behaved differently and was much
less effective: it affected only commits, rather than all WAL flushes,
and waited for the entire configured delay even if the WAL flush
- was completed sooner. Beginning in <productname>PostgreSQL</> 9.3,
+ was completed sooner. Beginning in <productname>PostgreSQL</productname> 9.3,
the first process that becomes ready to flush waits for the configured
interval, while subsequent processes wait only until the leader
completes the flush operation.
@@ -2611,13 +2611,13 @@ include_dir 'conf.d'
<varlistentry id="guc-commit-siblings" xreflabel="commit_siblings">
<term><varname>commit_siblings</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>commit_siblings</> configuration parameter</primary>
+ <primary><varname>commit_siblings</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Minimum number of concurrent open transactions to require
- before performing the <varname>commit_delay</> delay. A larger
+ before performing the <varname>commit_delay</varname> delay. A larger
value makes it more probable that at least one other
transaction will become ready to commit during the delay
interval. The default is five transactions.
@@ -2634,17 +2634,17 @@ include_dir 'conf.d'
<varlistentry id="guc-checkpoint-timeout" xreflabel="checkpoint_timeout">
<term><varname>checkpoint_timeout</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>checkpoint_timeout</> configuration parameter</primary>
+ <primary><varname>checkpoint_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Maximum time between automatic WAL checkpoints, in seconds.
The valid range is between 30 seconds and one day.
- The default is five minutes (<literal>5min</>).
+ The default is five minutes (<literal>5min</literal>).
Increasing this parameter can increase the amount of time needed
for crash recovery.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -2653,14 +2653,14 @@ include_dir 'conf.d'
<varlistentry id="guc-checkpoint-completion-target" xreflabel="checkpoint_completion_target">
<term><varname>checkpoint_completion_target</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>checkpoint_completion_target</> configuration parameter</primary>
+ <primary><varname>checkpoint_completion_target</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the target of checkpoint completion, as a fraction of
total time between checkpoints. The default is 0.5.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
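(Example checkpoint spacing; the 15min/0.9 pair is a common starting point and an assumption here, not something this patch prescribes.)
    checkpoint_timeout = 15min            # default 5min; longer spacing lengthens crash recovery
    checkpoint_completion_target = 0.9    # default 0.5; spreads checkpoint writes over more of the interval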
@@ -2669,7 +2669,7 @@ include_dir 'conf.d'
<varlistentry id="guc-checkpoint-flush-after" xreflabel="checkpoint_flush_after">
<term><varname>checkpoint_flush_after</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>checkpoint_flush_after</> configuration parameter</primary>
+ <primary><varname>checkpoint_flush_after</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2686,10 +2686,10 @@ include_dir 'conf.d'
than the OS's page cache, where performance might degrade. This
setting may have no effect on some platforms. The valid range is
between <literal>0</literal>, which disables forced writeback,
- and <literal>2MB</literal>. The default is <literal>256kB</> on
- Linux, <literal>0</> elsewhere. (If <symbol>BLCKSZ</symbol> is not
+ and <literal>2MB</literal>. The default is <literal>256kB</literal> on
+ Linux, <literal>0</literal> elsewhere. (If <symbol>BLCKSZ</symbol> is not
8kB, the default and maximum values scale proportionally to it.)
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -2698,7 +2698,7 @@ include_dir 'conf.d'
<varlistentry id="guc-checkpoint-warning" xreflabel="checkpoint_warning">
<term><varname>checkpoint_warning</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>checkpoint_warning</> configuration parameter</primary>
+ <primary><varname>checkpoint_warning</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2706,11 +2706,11 @@ include_dir 'conf.d'
Write a message to the server log if checkpoints caused by
the filling of checkpoint segment files happen closer together
than this many seconds (which suggests that
- <varname>max_wal_size</> ought to be raised). The default is
- 30 seconds (<literal>30s</>). Zero disables the warning.
+ <varname>max_wal_size</varname> ought to be raised). The default is
+ 30 seconds (<literal>30s</literal>). Zero disables the warning.
No warnings will be generated if <varname>checkpoint_timeout</varname>
is less than <varname>checkpoint_warning</varname>.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -2719,19 +2719,19 @@ include_dir 'conf.d'
<varlistentry id="guc-max-wal-size" xreflabel="max_wal_size">
<term><varname>max_wal_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_wal_size</> configuration parameter</primary>
+ <primary><varname>max_wal_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Maximum size to let the WAL grow to between automatic WAL
checkpoints. This is a soft limit; WAL size can exceed
- <varname>max_wal_size</> under special circumstances, like
- under heavy load, a failing <varname>archive_command</>, or a high
- <varname>wal_keep_segments</> setting. The default is 1 GB.
+ <varname>max_wal_size</varname> under special circumstances, like
+ under heavy load, a failing <varname>archive_command</varname>, or a high
+ <varname>wal_keep_segments</varname> setting. The default is 1 GB.
Increasing this parameter can increase the amount of time needed for
crash recovery.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -2740,7 +2740,7 @@ include_dir 'conf.d'
<varlistentry id="guc-min-wal-size" xreflabel="min_wal_size">
<term><varname>min_wal_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>min_wal_size</> configuration parameter</primary>
+ <primary><varname>min_wal_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2750,7 +2750,7 @@ include_dir 'conf.d'
This can be used to ensure that enough WAL space is reserved to
handle spikes in WAL usage, for example when running large batch
jobs. The default is 80 MB.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
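(Illustrative WAL sizing for the two limits above; the figures are assumptions for a busier server.)
    max_wal_size = 2GB     # soft limit on WAL between checkpoints; default 1GB
    min_wal_size = 512MB   # keep this much WAL recycled to absorb spikes; default 80MB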
@@ -2765,29 +2765,29 @@ include_dir 'conf.d'
<varlistentry id="guc-archive-mode" xreflabel="archive_mode">
<term><varname>archive_mode</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>archive_mode</> configuration parameter</primary>
+ <primary><varname>archive_mode</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When <varname>archive_mode</> is enabled, completed WAL segments
+ When <varname>archive_mode</varname> is enabled, completed WAL segments
are sent to archive storage by setting
- <xref linkend="guc-archive-command">. In addition to <literal>off</>,
- to disable, there are two modes: <literal>on</>, and
- <literal>always</>. During normal operation, there is no
- difference between the two modes, but when set to <literal>always</>
+ <xref linkend="guc-archive-command">. In addition to <literal>off</literal>,
+ to disable, there are two modes: <literal>on</literal>, and
+ <literal>always</literal>. During normal operation, there is no
+ difference between the two modes, but when set to <literal>always</literal>
the WAL archiver is enabled also during archive recovery or standby
- mode. In <literal>always</> mode, all files restored from the archive
+ mode. In <literal>always</literal> mode, all files restored from the archive
or streamed with streaming replication will be archived (again). See
<xref linkend="continuous-archiving-in-standby"> for details.
</para>
<para>
- <varname>archive_mode</> and <varname>archive_command</> are
- separate variables so that <varname>archive_command</> can be
+ <varname>archive_mode</varname> and <varname>archive_command</varname> are
+ separate variables so that <varname>archive_command</varname> can be
changed without leaving archiving mode.
This parameter can only be set at server start.
- <varname>archive_mode</> cannot be enabled when
- <varname>wal_level</> is set to <literal>minimal</>.
+ <varname>archive_mode</varname> cannot be enabled when
+ <varname>wal_level</varname> is set to <literal>minimal</literal>.
</para>
</listitem>
</varlistentry>
@@ -2795,32 +2795,32 @@ include_dir 'conf.d'
<varlistentry id="guc-archive-command" xreflabel="archive_command">
<term><varname>archive_command</varname> (<type>string</type>)
<indexterm>
- <primary><varname>archive_command</> configuration parameter</primary>
+ <primary><varname>archive_command</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
The local shell command to execute to archive a completed WAL file
- segment. Any <literal>%p</> in the string is
+ segment. Any <literal>%p</literal> in the string is
replaced by the path name of the file to archive, and any
- <literal>%f</> is replaced by only the file name.
+ <literal>%f</literal> is replaced by only the file name.
(The path name is relative to the working directory of the server,
i.e., the cluster's data directory.)
- Use <literal>%%</> to embed an actual <literal>%</> character in the
+ Use <literal>%%</literal> to embed an actual <literal>%</literal> character in the
command. It is important for the command to return a zero
exit status only if it succeeds. For more information see
<xref linkend="backup-archiving-wal">.
</para>
<para>
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line. It is ignored unless
- <varname>archive_mode</> was enabled at server start.
- If <varname>archive_command</> is an empty string (the default) while
- <varname>archive_mode</> is enabled, WAL archiving is temporarily
+ <varname>archive_mode</varname> was enabled at server start.
+ If <varname>archive_command</varname> is an empty string (the default) while
+ <varname>archive_mode</varname> is enabled, WAL archiving is temporarily
disabled, but the server continues to accumulate WAL segment files in
the expectation that a command will soon be provided. Setting
- <varname>archive_command</> to a command that does nothing but
- return true, e.g. <literal>/bin/true</> (<literal>REM</> on
+ <varname>archive_command</varname> to a command that does nothing but
+ return true, e.g. <literal>/bin/true</literal> (<literal>REM</literal> on
Windows), effectively disables
archiving, but also breaks the chain of WAL files needed for
archive recovery, so it should only be used in unusual circumstances.
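
   For illustration only, a minimal archiving sketch using the %p/%f substitutions
   described above; /mnt/server/archivedir is a placeholder path:

       archive_mode = on
       # %p is replaced by the path of the WAL file to archive, %f by its file name only;
       # the command must return a zero exit status only on success.
       archive_command = 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'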
@@ -2831,7 +2831,7 @@ include_dir 'conf.d'
<varlistentry id="guc-archive-timeout" xreflabel="archive_timeout">
<term><varname>archive_timeout</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>archive_timeout</> configuration parameter</primary>
+ <primary><varname>archive_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2841,7 +2841,7 @@ include_dir 'conf.d'
traffic (or has slack periods where it does so), there could be a
long delay between the completion of a transaction and its safe
recording in archive storage. To limit how old unarchived
- data can be, you can set <varname>archive_timeout</> to force the
+ data can be, you can set <varname>archive_timeout</varname> to force the
server to switch to a new WAL segment file periodically. When this
parameter is greater than zero, the server will switch to a new
segment file whenever this many seconds have elapsed since the last
@@ -2850,13 +2850,13 @@ include_dir 'conf.d'
no database activity). Note that archived files that are closed
early due to a forced switch are still the same length as completely
full files. Therefore, it is unwise to use a very short
- <varname>archive_timeout</> &mdash; it will bloat your archive
- storage. <varname>archive_timeout</> settings of a minute or so are
+ <varname>archive_timeout</varname> &mdash; it will bloat your archive
+ storage. <varname>archive_timeout</varname> settings of a minute or so are
usually reasonable. You should consider using streaming replication,
instead of archiving, if you want data to be copied off the master
server more quickly than that.
This parameter can only be set in the
- <filename>postgresql.conf</> file or on the server command line.
+ <filename>postgresql.conf</filename> file or on the server command line.
</para>
</listitem>
</varlistentry>
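
   Illustrative only: forcing a WAL segment switch at most once per minute, in line
   with the guidance above that very short settings bloat archive storage:

       archive_timeout = 60    # seconds; 0 disables forced segment switches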
@@ -2871,7 +2871,7 @@ include_dir 'conf.d'
<para>
These settings control the behavior of the built-in
- <firstterm>streaming replication</> feature (see
+ <firstterm>streaming replication</firstterm> feature (see
<xref linkend="streaming-replication">). Servers will be either a
Master or a Standby server. Masters can send data, while Standby(s)
are always receivers of replicated data. When cascading replication
@@ -2898,7 +2898,7 @@ include_dir 'conf.d'
<varlistentry id="guc-max-wal-senders" xreflabel="max_wal_senders">
<term><varname>max_wal_senders</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_wal_senders</> configuration parameter</primary>
+ <primary><varname>max_wal_senders</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2914,8 +2914,8 @@ include_dir 'conf.d'
a timeout is reached, so this parameter should be set slightly
higher than the maximum number of expected clients so disconnected
clients can immediately reconnect. This parameter can only
- be set at server start. <varname>wal_level</> must be set to
- <literal>replica</> or higher to allow connections from standby
+ be set at server start. <varname>wal_level</varname> must be set to
+ <literal>replica</literal> or higher to allow connections from standby
servers.
</para>
</listitem>
@@ -2924,7 +2924,7 @@ include_dir 'conf.d'
<varlistentry id="guc-max-replication-slots" xreflabel="max_replication_slots">
<term><varname>max_replication_slots</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_replication_slots</> configuration parameter</primary>
+ <primary><varname>max_replication_slots</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2944,17 +2944,17 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-keep-segments" xreflabel="wal_keep_segments">
<term><varname>wal_keep_segments</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_keep_segments</> configuration parameter</primary>
+ <primary><varname>wal_keep_segments</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the minimum number of past log file segments kept in the
- <filename>pg_wal</>
+ <filename>pg_wal</filename>
directory, in case a standby server needs to fetch them for streaming
replication. Each segment is normally 16 megabytes. If a standby
server connected to the sending server falls behind by more than
- <varname>wal_keep_segments</> segments, the sending server might remove
+ <varname>wal_keep_segments</varname> segments, the sending server might remove
a WAL segment still needed by the standby, in which case the
replication connection will be terminated. Downstream connections
will also eventually fail as a result. (However, the standby
@@ -2964,15 +2964,15 @@ include_dir 'conf.d'
<para>
This sets only the minimum number of segments retained in
- <filename>pg_wal</>; the system might need to retain more segments
+ <filename>pg_wal</filename>; the system might need to retain more segments
for WAL archival or to recover from a checkpoint. If
- <varname>wal_keep_segments</> is zero (the default), the system
+ <varname>wal_keep_segments</varname> is zero (the default), the system
doesn't keep any extra segments for standby purposes, so the number
of old WAL segments available to standby servers is a function of
the location of the previous checkpoint and status of WAL
archiving.
This parameter can only be set in the
- <filename>postgresql.conf</> file or on the server command line.
+ <filename>postgresql.conf</filename> file or on the server command line.
</para>
</listitem>
</varlistentry>
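
   Illustrative only: retaining extra segments for standbys that may fall behind;
   32 is an arbitrary example (about 512 MB at the normal 16 MB segment size):

       wal_keep_segments = 32    # minimum past segments kept in pg_wal for standby use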
@@ -2980,7 +2980,7 @@ include_dir 'conf.d'
<varlistentry id="guc-wal-sender-timeout" xreflabel="wal_sender_timeout">
<term><varname>wal_sender_timeout</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_sender_timeout</> configuration parameter</primary>
+ <primary><varname>wal_sender_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -2990,7 +2990,7 @@ include_dir 'conf.d'
the sending server to detect a standby crash or network outage.
A value of zero disables the timeout mechanism. This parameter
can only be set in
- the <filename>postgresql.conf</> file or on the server command line.
+ the <filename>postgresql.conf</filename> file or on the server command line.
The default value is 60 seconds.
</para>
</listitem>
@@ -2999,13 +2999,13 @@ include_dir 'conf.d'
<varlistentry id="guc-track-commit-timestamp" xreflabel="track_commit_timestamp">
<term><varname>track_commit_timestamp</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>track_commit_timestamp</> configuration parameter</primary>
+ <primary><varname>track_commit_timestamp</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Record commit time of transactions. This parameter
- can only be set in <filename>postgresql.conf</> file or on the server
+         can only be set in the <filename>postgresql.conf</filename> file or on the server
command line. The default value is <literal>off</literal>.
</para>
</listitem>
@@ -3034,13 +3034,13 @@ include_dir 'conf.d'
<varlistentry id="guc-synchronous-standby-names" xreflabel="synchronous_standby_names">
<term><varname>synchronous_standby_names</varname> (<type>string</type>)
<indexterm>
- <primary><varname>synchronous_standby_names</> configuration parameter</primary>
+ <primary><varname>synchronous_standby_names</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies a list of standby servers that can support
- <firstterm>synchronous replication</>, as described in
+ <firstterm>synchronous replication</firstterm>, as described in
<xref linkend="synchronous-replication">.
There will be one or more active synchronous standbys;
transactions waiting for commit will be allowed to proceed after
@@ -3050,15 +3050,15 @@ include_dir 'conf.d'
that are both currently connected and streaming data in real-time
(as shown by a state of <literal>streaming</literal> in the
<link linkend="monitoring-stats-views-table">
- <literal>pg_stat_replication</></link> view).
+ <literal>pg_stat_replication</literal></link> view).
Specifying more than one synchronous standby can allow for very high
availability and protection against data loss.
</para>
<para>
The name of a standby server for this purpose is the
- <varname>application_name</> setting of the standby, as set in the
+ <varname>application_name</varname> setting of the standby, as set in the
standby's connection information. In case of a physical replication
- standby, this should be set in the <varname>primary_conninfo</>
+ standby, this should be set in the <varname>primary_conninfo</varname>
setting in <filename>recovery.conf</filename>; the default
is <literal>walreceiver</literal>. For logical replication, this can
be set in the connection information of the subscription, and it
@@ -3078,54 +3078,54 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
wait for replies from,
and <replaceable class="parameter">standby_name</replaceable>
is the name of a standby server.
- <literal>FIRST</> and <literal>ANY</> specify the method to choose
+ <literal>FIRST</literal> and <literal>ANY</literal> specify the method to choose
synchronous standbys from the listed servers.
</para>
<para>
- The keyword <literal>FIRST</>, coupled with
+ The keyword <literal>FIRST</literal>, coupled with
<replaceable class="parameter">num_sync</replaceable>, specifies a
priority-based synchronous replication and makes transaction commits
wait until their WAL records are replicated to
<replaceable class="parameter">num_sync</replaceable> synchronous
standbys chosen based on their priorities. For example, a setting of
- <literal>FIRST 3 (s1, s2, s3, s4)</> will cause each commit to wait for
+ <literal>FIRST 3 (s1, s2, s3, s4)</literal> will cause each commit to wait for
replies from three higher-priority standbys chosen from standby servers
- <literal>s1</>, <literal>s2</>, <literal>s3</> and <literal>s4</>.
+ <literal>s1</literal>, <literal>s2</literal>, <literal>s3</literal> and <literal>s4</literal>.
The standbys whose names appear earlier in the list are given higher
priority and will be considered as synchronous. Other standby servers
appearing later in this list represent potential synchronous standbys.
If any of the current synchronous standbys disconnects for whatever
reason, it will be replaced immediately with the next-highest-priority
- standby. The keyword <literal>FIRST</> is optional.
+ standby. The keyword <literal>FIRST</literal> is optional.
</para>
<para>
- The keyword <literal>ANY</>, coupled with
+ The keyword <literal>ANY</literal>, coupled with
<replaceable class="parameter">num_sync</replaceable>, specifies a
quorum-based synchronous replication and makes transaction commits
- wait until their WAL records are replicated to <emphasis>at least</>
+ wait until their WAL records are replicated to <emphasis>at least</emphasis>
<replaceable class="parameter">num_sync</replaceable> listed standbys.
- For example, a setting of <literal>ANY 3 (s1, s2, s3, s4)</> will cause
+ For example, a setting of <literal>ANY 3 (s1, s2, s3, s4)</literal> will cause
each commit to proceed as soon as at least any three standbys of
- <literal>s1</>, <literal>s2</>, <literal>s3</> and <literal>s4</>
+ <literal>s1</literal>, <literal>s2</literal>, <literal>s3</literal> and <literal>s4</literal>
reply.
</para>
<para>
- <literal>FIRST</> and <literal>ANY</> are case-insensitive. If these
+ <literal>FIRST</literal> and <literal>ANY</literal> are case-insensitive. If these
keywords are used as the name of a standby server,
its <replaceable class="parameter">standby_name</replaceable> must
be double-quoted.
</para>
<para>
- The third syntax was used before <productname>PostgreSQL</>
+ The third syntax was used before <productname>PostgreSQL</productname>
version 9.6 and is still supported. It's the same as the first syntax
- with <literal>FIRST</> and
+ with <literal>FIRST</literal> and
<replaceable class="parameter">num_sync</replaceable> equal to 1.
- For example, <literal>FIRST 1 (s1, s2)</> and <literal>s1, s2</> have
- the same meaning: either <literal>s1</> or <literal>s2</> is chosen
+ For example, <literal>FIRST 1 (s1, s2)</literal> and <literal>s1, s2</literal> have
+ the same meaning: either <literal>s1</literal> or <literal>s2</literal> is chosen
as a synchronous standby.
</para>
<para>
- The special entry <literal>*</> matches any standby name.
+ The special entry <literal>*</literal> matches any standby name.
</para>
<para>
There is no mechanism to enforce uniqueness of standby names. In case
@@ -3136,7 +3136,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<para>
Each <replaceable class="parameter">standby_name</replaceable>
should have the form of a valid SQL identifier, unless it
- is <literal>*</>. You can use double-quoting if necessary. But note
+ is <literal>*</literal>. You can use double-quoting if necessary. But note
that <replaceable class="parameter">standby_name</replaceable>s are
compared to standby application names case-insensitively, whether
double-quoted or not.
@@ -3149,10 +3149,10 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
synchronous replication is enabled, individual transactions can be
configured not to wait for replication by setting the
<xref linkend="guc-synchronous-commit"> parameter to
- <literal>local</> or <literal>off</>.
+ <literal>local</literal> or <literal>off</literal>.
</para>
<para>
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
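
   For illustration, the three syntaxes described above restated as alternative
   postgresql.conf settings, using the standby names from the text:

       synchronous_standby_names = 'FIRST 3 (s1, s2, s3, s4)'  # priority-based: three highest-priority connected standbys
       synchronous_standby_names = 'ANY 3 (s1, s2, s3, s4)'    # quorum-based: any three of the listed standbys
       synchronous_standby_names = 's1, s2'                    # pre-9.6 syntax, equivalent to FIRST 1 (s1, s2)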
@@ -3161,13 +3161,13 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-vacuum-defer-cleanup-age" xreflabel="vacuum_defer_cleanup_age">
<term><varname>vacuum_defer_cleanup_age</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_defer_cleanup_age</> configuration parameter</primary>
+ <primary><varname>vacuum_defer_cleanup_age</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Specifies the number of transactions by which <command>VACUUM</> and
- <acronym>HOT</> updates will defer cleanup of dead row versions. The
+ Specifies the number of transactions by which <command>VACUUM</command> and
+ <acronym>HOT</acronym> updates will defer cleanup of dead row versions. The
default is zero transactions, meaning that dead row versions can be
removed as soon as possible, that is, as soon as they are no longer
visible to any open transaction. You may wish to set this to a
@@ -3178,16 +3178,16 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
is measured in terms of number of write transactions occurring on the
primary server, it is difficult to predict just how much additional
grace time will be made available to standby queries.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
<para>
- You should also consider setting <varname>hot_standby_feedback</>
+ You should also consider setting <varname>hot_standby_feedback</varname>
on standby server(s) as an alternative to using this parameter.
</para>
<para>
This does not prevent cleanup of dead rows which have reached the age
- specified by <varname>old_snapshot_threshold</>.
+ specified by <varname>old_snapshot_threshold</varname>.
</para>
</listitem>
</varlistentry>
@@ -3209,7 +3209,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-hot-standby" xreflabel="hot_standby">
<term><varname>hot_standby</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>hot_standby</> configuration parameter</primary>
+ <primary><varname>hot_standby</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3226,7 +3226,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-max-standby-archive-delay" xreflabel="max_standby_archive_delay">
<term><varname>max_standby_archive_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_standby_archive_delay</> configuration parameter</primary>
+ <primary><varname>max_standby_archive_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3235,16 +3235,16 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
standby server should wait before canceling standby queries that
conflict with about-to-be-applied WAL entries, as described in
<xref linkend="hot-standby-conflict">.
- <varname>max_standby_archive_delay</> applies when WAL data is
+ <varname>max_standby_archive_delay</varname> applies when WAL data is
being read from WAL archive (and is therefore not current).
The default is 30 seconds. Units are milliseconds if not specified.
A value of -1 allows the standby to wait forever for conflicting
queries to complete.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
<para>
- Note that <varname>max_standby_archive_delay</> is not the same as the
+ Note that <varname>max_standby_archive_delay</varname> is not the same as the
maximum length of time a query can run before cancellation; rather it
is the maximum total time allowed to apply any one WAL segment's data.
Thus, if one query has resulted in significant delay earlier in the
@@ -3257,7 +3257,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-max-standby-streaming-delay" xreflabel="max_standby_streaming_delay">
<term><varname>max_standby_streaming_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_standby_streaming_delay</> configuration parameter</primary>
+ <primary><varname>max_standby_streaming_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3266,16 +3266,16 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
standby server should wait before canceling standby queries that
conflict with about-to-be-applied WAL entries, as described in
<xref linkend="hot-standby-conflict">.
- <varname>max_standby_streaming_delay</> applies when WAL data is
+ <varname>max_standby_streaming_delay</varname> applies when WAL data is
being received via streaming replication.
The default is 30 seconds. Units are milliseconds if not specified.
A value of -1 allows the standby to wait forever for conflicting
queries to complete.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
<para>
- Note that <varname>max_standby_streaming_delay</> is not the same as
+ Note that <varname>max_standby_streaming_delay</varname> is not the same as
the maximum length of time a query can run before cancellation; rather
it is the maximum total time allowed to apply WAL data once it has
been received from the primary server. Thus, if one query has
@@ -3289,7 +3289,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-wal-receiver-status-interval" xreflabel="wal_receiver_status_interval">
<term><varname>wal_receiver_status_interval</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_receiver_status_interval</> configuration parameter</primary>
+ <primary><varname>wal_receiver_status_interval</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3298,7 +3298,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
process on the standby to send information about replication progress
to the primary or upstream standby, where it can be seen using the
<link linkend="monitoring-stats-views-table">
- <literal>pg_stat_replication</></link> view. The standby will report
+ <literal>pg_stat_replication</literal></link> view. The standby will report
the last write-ahead log location it has written, the last position it
has flushed to disk, and the last position it has applied.
This parameter's
@@ -3307,7 +3307,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
often as specified by this parameter. Thus, the apply position may
lag slightly behind the true position. Setting this parameter to zero
disables status updates completely. This parameter can only be set in
- the <filename>postgresql.conf</> file or on the server command line.
+ the <filename>postgresql.conf</filename> file or on the server command line.
The default value is 10 seconds.
</para>
</listitem>
@@ -3316,7 +3316,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-hot-standby-feedback" xreflabel="hot_standby_feedback">
<term><varname>hot_standby_feedback</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>hot_standby_feedback</> configuration parameter</primary>
+ <primary><varname>hot_standby_feedback</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3327,9 +3327,9 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
be used to eliminate query cancels caused by cleanup records, but
can cause database bloat on the primary for some workloads.
Feedback messages will not be sent more frequently than once per
- <varname>wal_receiver_status_interval</>. The default value is
+ <varname>wal_receiver_status_interval</varname>. The default value is
<literal>off</literal>. This parameter can only be set in the
- <filename>postgresql.conf</> file or on the server command line.
+ <filename>postgresql.conf</filename> file or on the server command line.
</para>
<para>
If cascaded replication is in use the feedback is passed upstream
@@ -3338,10 +3338,10 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
</para>
<para>
This setting does not override the behavior of
- <varname>old_snapshot_threshold</> on the primary; a snapshot on the
+ <varname>old_snapshot_threshold</varname> on the primary; a snapshot on the
standby which exceeds the primary's age threshold can become invalid,
resulting in cancellation of transactions on the standby. This is
- because <varname>old_snapshot_threshold</> is intended to provide an
+ because <varname>old_snapshot_threshold</varname> is intended to provide an
absolute limit on the time which dead rows can contribute to bloat,
which would otherwise be violated because of the configuration of a
standby.
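
   Illustrative only: enabling feedback on a standby so the primary defers cleanup
   that would otherwise cancel standby queries, at the cost of possible bloat on
   the primary:

       hot_standby_feedback = on    # sent at most once per wal_receiver_status_interval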
@@ -3352,7 +3352,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-wal-receiver-timeout" xreflabel="wal_receiver_timeout">
<term><varname>wal_receiver_timeout</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_receiver_timeout</> configuration parameter</primary>
+ <primary><varname>wal_receiver_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3363,7 +3363,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
outage.
A value of zero disables the timeout mechanism. This parameter
can only be set in
- the <filename>postgresql.conf</> file or on the server command line.
+ the <filename>postgresql.conf</filename> file or on the server command line.
The default value is 60 seconds.
</para>
</listitem>
@@ -3372,16 +3372,16 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-wal-retrieve-retry-interval" xreflabel="wal_retrieve_retry_interval">
<term><varname>wal_retrieve_retry_interval</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_retrieve_retry_interval</> configuration parameter</primary>
+ <primary><varname>wal_retrieve_retry_interval</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specify how long the standby server should wait when WAL data is not
available from any sources (streaming replication,
- local <filename>pg_wal</> or WAL archive) before retrying to
+ local <filename>pg_wal</filename> or WAL archive) before retrying to
retrieve WAL data. This parameter can only be set in the
- <filename>postgresql.conf</> file or on the server command line.
+ <filename>postgresql.conf</filename> file or on the server command line.
The default value is 5 seconds. Units are milliseconds if not specified.
</para>
<para>
@@ -3420,7 +3420,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-max-logical-replication-workers" xreflabel="max_logical_replication_workers">
<term><varname>max_logical_replication_workers</varname> (<type>int</type>)
<indexterm>
- <primary><varname>max_logical_replication_workers</> configuration parameter</primary>
+ <primary><varname>max_logical_replication_workers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3441,7 +3441,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-max-sync-workers-per-subscription" xreflabel="max_sync_workers_per_subscription">
<term><varname>max_sync_workers_per_subscription</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_sync_workers_per_subscription</> configuration parameter</primary>
+ <primary><varname>max_sync_workers_per_subscription</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3478,7 +3478,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
These configuration parameters provide a crude method of
influencing the query plans chosen by the query optimizer. If
the default plan chosen by the optimizer for a particular query
- is not optimal, a <emphasis>temporary</> solution is to use one
+ is not optimal, a <emphasis>temporary</emphasis> solution is to use one
of these configuration parameters to force the optimizer to
choose a different plan.
Better ways to improve the quality of the
@@ -3499,13 +3499,13 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<primary>bitmap scan</primary>
</indexterm>
<indexterm>
- <primary><varname>enable_bitmapscan</> configuration parameter</primary>
+ <primary><varname>enable_bitmapscan</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Enables or disables the query planner's use of bitmap-scan plan
- types. The default is <literal>on</>.
+ types. The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3513,13 +3513,13 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-gathermerge" xreflabel="enable_gathermerge">
<term><varname>enable_gathermerge</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_gathermerge</> configuration parameter</primary>
+ <primary><varname>enable_gathermerge</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Enables or disables the query planner's use of gather
- merge plan types. The default is <literal>on</>.
+ merge plan types. The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3527,13 +3527,13 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-hashagg" xreflabel="enable_hashagg">
<term><varname>enable_hashagg</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_hashagg</> configuration parameter</primary>
+ <primary><varname>enable_hashagg</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Enables or disables the query planner's use of hashed
- aggregation plan types. The default is <literal>on</>.
+ aggregation plan types. The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3541,13 +3541,13 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-hashjoin" xreflabel="enable_hashjoin">
<term><varname>enable_hashjoin</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_hashjoin</> configuration parameter</primary>
+ <primary><varname>enable_hashjoin</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Enables or disables the query planner's use of hash-join plan
- types. The default is <literal>on</>.
+ types. The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3558,13 +3558,13 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<primary>index scan</primary>
</indexterm>
<indexterm>
- <primary><varname>enable_indexscan</> configuration parameter</primary>
+ <primary><varname>enable_indexscan</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Enables or disables the query planner's use of index-scan plan
- types. The default is <literal>on</>.
+ types. The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3572,14 +3572,14 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-indexonlyscan" xreflabel="enable_indexonlyscan">
<term><varname>enable_indexonlyscan</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_indexonlyscan</> configuration parameter</primary>
+ <primary><varname>enable_indexonlyscan</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Enables or disables the query planner's use of index-only-scan plan
types (see <xref linkend="indexes-index-only-scans">).
- The default is <literal>on</>.
+ The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3587,7 +3587,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-material" xreflabel="enable_material">
<term><varname>enable_material</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_material</> configuration parameter</primary>
+ <primary><varname>enable_material</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3596,7 +3596,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
It is impossible to suppress materialization entirely,
but turning this variable off prevents the planner from inserting
materialize nodes except in cases where it is required for correctness.
- The default is <literal>on</>.
+ The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3604,13 +3604,13 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-mergejoin" xreflabel="enable_mergejoin">
<term><varname>enable_mergejoin</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_mergejoin</> configuration parameter</primary>
+ <primary><varname>enable_mergejoin</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Enables or disables the query planner's use of merge-join plan
- types. The default is <literal>on</>.
+ types. The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3618,7 +3618,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-nestloop" xreflabel="enable_nestloop">
<term><varname>enable_nestloop</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_nestloop</> configuration parameter</primary>
+ <primary><varname>enable_nestloop</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3627,7 +3627,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
plans. It is impossible to suppress nested-loop joins entirely,
but turning this variable off discourages the planner from using
one if there are other methods available. The default is
- <literal>on</>.
+ <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3635,7 +3635,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-partition-wise-join" xreflabel="enable_partition_wise_join">
<term><varname>enable_partition_wise_join</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_partition_wise_join</> configuration parameter</primary>
+ <primary><varname>enable_partition_wise_join</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3647,7 +3647,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
must be of the same data type and have exactly matching sets of child
partitions. Because partition-wise join planning can use significantly
more CPU time and memory during planning, the default is
- <literal>off</>.
+ <literal>off</literal>.
</para>
</listitem>
</varlistentry>
@@ -3658,7 +3658,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<primary>sequential scan</primary>
</indexterm>
<indexterm>
- <primary><varname>enable_seqscan</> configuration parameter</primary>
+ <primary><varname>enable_seqscan</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3667,7 +3667,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
plan types. It is impossible to suppress sequential scans
entirely, but turning this variable off discourages the planner
from using one if there are other methods available. The
- default is <literal>on</>.
+ default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3675,7 +3675,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-sort" xreflabel="enable_sort">
<term><varname>enable_sort</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_sort</> configuration parameter</primary>
+ <primary><varname>enable_sort</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3684,7 +3684,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
steps. It is impossible to suppress explicit sorts entirely,
but turning this variable off discourages the planner from
using one if there are other methods available. The default
- is <literal>on</>.
+ is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -3692,13 +3692,13 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-enable-tidscan" xreflabel="enable_tidscan">
<term><varname>enable_tidscan</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>enable_tidscan</> configuration parameter</primary>
+ <primary><varname>enable_tidscan</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Enables or disables the query planner's use of <acronym>TID</>
- scan plan types. The default is <literal>on</>.
+ Enables or disables the query planner's use of <acronym>TID</acronym>
+ scan plan types. The default is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
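
   For illustration, the planner-method switches above are plain booleans; for
   example, to discourage (but not fully suppress) sequential scans while testing
   index usage:

       enable_seqscan = off
       enable_indexscan = on    # the default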
@@ -3709,12 +3709,12 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<title>Planner Cost Constants</title>
<para>
- The <firstterm>cost</> variables described in this section are measured
+ The <firstterm>cost</firstterm> variables described in this section are measured
on an arbitrary scale. Only their relative values matter, hence
scaling them all up or down by the same factor will result in no change
in the planner's choices. By default, these cost variables are based on
the cost of sequential page fetches; that is,
- <varname>seq_page_cost</> is conventionally set to <literal>1.0</>
+ <varname>seq_page_cost</varname> is conventionally set to <literal>1.0</literal>
and the other cost variables are set with reference to that. But
you can use a different scale if you prefer, such as actual execution
times in milliseconds on a particular machine.
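
   Illustrative only: since only the relative values matter, a common adjustment
   keeps seq_page_cost at its conventional 1.0 and moves random_page_cost toward
   it when random reads are cheap; 1.1 is an arbitrary example:

       seq_page_cost = 1.0       # conventional reference point
       random_page_cost = 1.1    # closer to seq_page_cost makes index scans look relatively cheaper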
@@ -3735,7 +3735,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-seq-page-cost" xreflabel="seq_page_cost">
<term><varname>seq_page_cost</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>seq_page_cost</> configuration parameter</primary>
+ <primary><varname>seq_page_cost</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3752,7 +3752,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-random-page-cost" xreflabel="random_page_cost">
<term><varname>random_page_cost</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>random_page_cost</> configuration parameter</primary>
+ <primary><varname>random_page_cost</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3765,7 +3765,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
</para>
<para>
- Reducing this value relative to <varname>seq_page_cost</>
+ Reducing this value relative to <varname>seq_page_cost</varname>
will cause the system to prefer index scans; raising it will
make index scans look relatively more expensive. You can raise
or lower both values together to change the importance of disk I/O
@@ -3795,8 +3795,8 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<tip>
<para>
- Although the system will let you set <varname>random_page_cost</> to
- less than <varname>seq_page_cost</>, it is not physically sensible
+ Although the system will let you set <varname>random_page_cost</varname> to
+ less than <varname>seq_page_cost</varname>, it is not physically sensible
to do so. However, setting them equal makes sense if the database
is entirely cached in RAM, since in that case there is no penalty
for touching pages out of sequence. Also, in a heavily-cached
@@ -3811,7 +3811,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-cpu-tuple-cost" xreflabel="cpu_tuple_cost">
<term><varname>cpu_tuple_cost</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>cpu_tuple_cost</> configuration parameter</primary>
+ <primary><varname>cpu_tuple_cost</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3826,7 +3826,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-cpu-index-tuple-cost" xreflabel="cpu_index_tuple_cost">
<term><varname>cpu_index_tuple_cost</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>cpu_index_tuple_cost</> configuration parameter</primary>
+ <primary><varname>cpu_index_tuple_cost</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3841,7 +3841,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-cpu-operator-cost" xreflabel="cpu_operator_cost">
<term><varname>cpu_operator_cost</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>cpu_operator_cost</> configuration parameter</primary>
+ <primary><varname>cpu_operator_cost</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3856,7 +3856,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-parallel-setup-cost" xreflabel="parallel_setup_cost">
<term><varname>parallel_setup_cost</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>parallel_setup_cost</> configuration parameter</primary>
+ <primary><varname>parallel_setup_cost</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3871,7 +3871,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-parallel-tuple-cost" xreflabel="parallel_tuple_cost">
<term><varname>parallel_tuple_cost</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>parallel_tuple_cost</> configuration parameter</primary>
+ <primary><varname>parallel_tuple_cost</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3886,7 +3886,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-min-parallel-table-scan-size" xreflabel="min_parallel_table_scan_size">
<term><varname>min_parallel_table_scan_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>min_parallel_table_scan_size</> configuration parameter</primary>
+ <primary><varname>min_parallel_table_scan_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3896,7 +3896,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
the amount of table data scanned is always equal to the size of the
table, but when indexes are used the amount of table data
scanned will normally be less. The default is 8
- megabytes (<literal>8MB</>).
+ megabytes (<literal>8MB</literal>).
</para>
</listitem>
</varlistentry>
@@ -3904,7 +3904,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-min-parallel-index-scan-size" xreflabel="min_parallel_index_scan_size">
<term><varname>min_parallel_index_scan_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>min_parallel_index_scan_size</> configuration parameter</primary>
+ <primary><varname>min_parallel_index_scan_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3913,7 +3913,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
for a parallel scan to be considered. Note that a parallel index scan
typically won't touch the entire index; it is the number of pages
which the planner believes will actually be touched by the scan which
- is relevant. The default is 512 kilobytes (<literal>512kB</>).
+ is relevant. The default is 512 kilobytes (<literal>512kB</literal>).
</para>
</listitem>
</varlistentry>
@@ -3921,7 +3921,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-effective-cache-size" xreflabel="effective_cache_size">
<term><varname>effective_cache_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>effective_cache_size</> configuration parameter</primary>
+ <primary><varname>effective_cache_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3942,7 +3942,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
does it reserve kernel disk cache; it is used only for estimation
purposes. The system also does not assume data remains in
the disk cache between queries. The default is 4 gigabytes
- (<literal>4GB</>).
+ (<literal>4GB</literal>).
</para>
</listitem>
</varlistentry>
@@ -3974,7 +3974,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<see>genetic query optimization</see>
</indexterm>
<indexterm>
- <primary><varname>geqo</> configuration parameter</primary>
+ <primary><varname>geqo</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -3990,14 +3990,14 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-geqo-threshold" xreflabel="geqo_threshold">
<term><varname>geqo_threshold</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>geqo_threshold</> configuration parameter</primary>
+ <primary><varname>geqo_threshold</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Use genetic query optimization to plan queries with at least
- this many <literal>FROM</> items involved. (Note that a
- <literal>FULL OUTER JOIN</> construct counts as only one <literal>FROM</>
+ this many <literal>FROM</literal> items involved. (Note that a
+ <literal>FULL OUTER JOIN</literal> construct counts as only one <literal>FROM</literal>
item.) The default is 12. For simpler queries it is usually best
to use the regular, exhaustive-search planner, but for queries with
many tables the exhaustive search takes too long, often
@@ -4011,7 +4011,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-geqo-effort" xreflabel="geqo_effort">
<term><varname>geqo_effort</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>geqo_effort</> configuration parameter</primary>
+ <primary><varname>geqo_effort</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4037,7 +4037,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-geqo-pool-size" xreflabel="geqo_pool_size">
<term><varname>geqo_pool_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>geqo_pool_size</> configuration parameter</primary>
+ <primary><varname>geqo_pool_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4055,7 +4055,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-geqo-generations" xreflabel="geqo_generations">
<term><varname>geqo_generations</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>geqo_generations</> configuration parameter</primary>
+ <primary><varname>geqo_generations</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4073,7 +4073,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-geqo-selection-bias" xreflabel="geqo_selection_bias">
<term><varname>geqo_selection_bias</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>geqo_selection_bias</> configuration parameter</primary>
+ <primary><varname>geqo_selection_bias</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4088,7 +4088,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-geqo-seed" xreflabel="geqo_seed">
<term><varname>geqo_seed</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>geqo_seed</> configuration parameter</primary>
+ <primary><varname>geqo_seed</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4112,17 +4112,17 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<varlistentry id="guc-default-statistics-target" xreflabel="default_statistics_target">
<term><varname>default_statistics_target</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>default_statistics_target</> configuration parameter</primary>
+ <primary><varname>default_statistics_target</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the default statistics target for table columns without
a column-specific target set via <command>ALTER TABLE
- SET STATISTICS</>. Larger values increase the time needed to
- do <command>ANALYZE</>, but might improve the quality of the
+ SET STATISTICS</command>. Larger values increase the time needed to
+ do <command>ANALYZE</command>, but might improve the quality of the
planner's estimates. The default is 100. For more information
- on the use of statistics by the <productname>PostgreSQL</>
+ on the use of statistics by the <productname>PostgreSQL</productname>
query planner, refer to <xref linkend="planner-stats">.
</para>
</listitem>
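
   Illustrative only; 200 is an arbitrary example value:

       default_statistics_target = 200    # larger values slow ANALYZE but can improve planner estimates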
@@ -4134,26 +4134,26 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
<primary>constraint exclusion</primary>
</indexterm>
<indexterm>
- <primary><varname>constraint_exclusion</> configuration parameter</primary>
+ <primary><varname>constraint_exclusion</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Controls the query planner's use of table constraints to
optimize queries.
- The allowed values of <varname>constraint_exclusion</> are
- <literal>on</> (examine constraints for all tables),
- <literal>off</> (never examine constraints), and
- <literal>partition</> (examine constraints only for inheritance child
- tables and <literal>UNION ALL</> subqueries).
- <literal>partition</> is the default setting.
+ The allowed values of <varname>constraint_exclusion</varname> are
+ <literal>on</literal> (examine constraints for all tables),
+ <literal>off</literal> (never examine constraints), and
+ <literal>partition</literal> (examine constraints only for inheritance child
+ tables and <literal>UNION ALL</literal> subqueries).
+ <literal>partition</literal> is the default setting.
It is often used with inheritance and partitioned tables to
improve performance.
</para>
<para>
When this parameter allows it for a particular table, the planner
- compares query conditions with the table's <literal>CHECK</>
+ compares query conditions with the table's <literal>CHECK</literal>
constraints, and omits scanning tables for which the conditions
contradict the constraints. For example:
@@ -4165,8 +4165,8 @@ CREATE TABLE child2000(check (key between 2000 and 2999)) INHERITS(parent);
SELECT * FROM parent WHERE key = 2400;
</programlisting>
- With constraint exclusion enabled, this <command>SELECT</>
- will not scan <structname>child1000</> at all, improving performance.
+ With constraint exclusion enabled, this <command>SELECT</command>
+ will not scan <structname>child1000</structname> at all, improving performance.
</para>
<para>
@@ -4188,14 +4188,14 @@ SELECT * FROM parent WHERE key = 2400;
<varlistentry id="guc-cursor-tuple-fraction" xreflabel="cursor_tuple_fraction">
<term><varname>cursor_tuple_fraction</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>cursor_tuple_fraction</> configuration parameter</primary>
+ <primary><varname>cursor_tuple_fraction</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the planner's estimate of the fraction of a cursor's rows that
will be retrieved. The default is 0.1. Smaller values of this
- setting bias the planner towards using <quote>fast start</> plans
+ setting bias the planner towards using <quote>fast start</quote> plans
for cursors, which will retrieve the first few rows quickly while
perhaps taking a long time to fetch all rows. Larger values
put more emphasis on the total estimated time. At the maximum
@@ -4209,7 +4209,7 @@ SELECT * FROM parent WHERE key = 2400;
<varlistentry id="guc-from-collapse-limit" xreflabel="from_collapse_limit">
<term><varname>from_collapse_limit</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>from_collapse_limit</> configuration parameter</primary>
+ <primary><varname>from_collapse_limit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4232,14 +4232,14 @@ SELECT * FROM parent WHERE key = 2400;
<varlistentry id="guc-join-collapse-limit" xreflabel="join_collapse_limit">
<term><varname>join_collapse_limit</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>join_collapse_limit</> configuration parameter</primary>
+ <primary><varname>join_collapse_limit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- The planner will rewrite explicit <literal>JOIN</>
- constructs (except <literal>FULL JOIN</>s) into lists of
- <literal>FROM</> items whenever a list of no more than this many items
+ The planner will rewrite explicit <literal>JOIN</literal>
+ constructs (except <literal>FULL JOIN</literal>s) into lists of
+ <literal>FROM</literal> items whenever a list of no more than this many items
would result. Smaller values reduce planning time but might
yield inferior query plans.
</para>
@@ -4248,7 +4248,7 @@ SELECT * FROM parent WHERE key = 2400;
By default, this variable is set the same as
<varname>from_collapse_limit</varname>, which is appropriate
for most uses. Setting it to 1 prevents any reordering of
- explicit <literal>JOIN</>s. Thus, the explicit join order
+ explicit <literal>JOIN</literal>s. Thus, the explicit join order
specified in the query will be the actual order in which the
relations are joined. Because the query planner does not always choose
the optimal join order, advanced users can elect to
@@ -4268,24 +4268,24 @@ SELECT * FROM parent WHERE key = 2400;
<varlistentry id="guc-force-parallel-mode" xreflabel="force_parallel_mode">
<term><varname>force_parallel_mode</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>force_parallel_mode</> configuration parameter</primary>
+ <primary><varname>force_parallel_mode</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Allows the use of parallel queries for testing purposes even in cases
where no performance benefit is expected.
- The allowed values of <varname>force_parallel_mode</> are
- <literal>off</> (use parallel mode only when it is expected to improve
- performance), <literal>on</> (force parallel query for all queries
- for which it is thought to be safe), and <literal>regress</> (like
- <literal>on</>, but with additional behavior changes as explained
+ The allowed values of <varname>force_parallel_mode</varname> are
+ <literal>off</literal> (use parallel mode only when it is expected to improve
+ performance), <literal>on</literal> (force parallel query for all queries
+ for which it is thought to be safe), and <literal>regress</literal> (like
+ <literal>on</literal>, but with additional behavior changes as explained
below).
</para>
<para>
- More specifically, setting this value to <literal>on</> will add
- a <literal>Gather</> node to the top of any query plan for which this
+ More specifically, setting this value to <literal>on</literal> will add
+ a <literal>Gather</literal> node to the top of any query plan for which this
appears to be safe, so that the query runs inside of a parallel worker.
Even when a parallel worker is not available or cannot be used,
operations such as starting a subtransaction that would be prohibited
@@ -4297,15 +4297,15 @@ SELECT * FROM parent WHERE key = 2400;
</para>
<para>
- Setting this value to <literal>regress</> has all of the same effects
- as setting it to <literal>on</> plus some additional effects that are
+ Setting this value to <literal>regress</literal> has all of the same effects
+ as setting it to <literal>on</literal> plus some additional effects that are
intended to facilitate automated regression testing. Normally,
messages from a parallel worker include a context line indicating that,
- but a setting of <literal>regress</> suppresses this line so that the
+ but a setting of <literal>regress</literal> suppresses this line so that the
output is the same as in non-parallel execution. Also,
- the <literal>Gather</> nodes added to plans by this setting are hidden
- in <literal>EXPLAIN</> output so that the output matches what
- would be obtained if this setting were turned <literal>off</>.
+ the <literal>Gather</literal> nodes added to plans by this setting are hidden
+ in <literal>EXPLAIN</literal> output so that the output matches what
+ would be obtained if this setting were turned <literal>off</literal>.
</para>
</listitem>
</varlistentry>
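     A brief illustrative session; the exact plan shape depends on the query and on
     other parallel-query settings, so this is only a sketch:

         SET force_parallel_mode = on;
         EXPLAIN SELECT count(*) FROM pg_class;
         -- When the plan is judged parallel-safe, a Gather node is expected at the
         -- top of the plan, even though no performance benefit is anticipated for
         -- such a small query.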
@@ -4338,7 +4338,7 @@ SELECT * FROM parent WHERE key = 2400;
<varlistentry id="guc-log-destination" xreflabel="log_destination">
<term><varname>log_destination</varname> (<type>string</type>)
<indexterm>
- <primary><varname>log_destination</> configuration parameter</primary>
+ <primary><varname>log_destination</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4351,13 +4351,13 @@ SELECT * FROM parent WHERE key = 2400;
parameter to a list of desired log destinations separated by
commas. The default is to log to <systemitem>stderr</systemitem>
only.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
<para>
- If <systemitem>csvlog</> is included in <varname>log_destination</>,
+ If <systemitem>csvlog</systemitem> is included in <varname>log_destination</varname>,
log entries are output in <quote>comma separated
- value</> (<acronym>CSV</>) format, which is convenient for
+ value</quote> (<acronym>CSV</acronym>) format, which is convenient for
loading logs into programs.
See <xref linkend="runtime-config-logging-csvlog"> for details.
<xref linkend="guc-logging-collector"> must be enabled to generate
@@ -4366,7 +4366,7 @@ SELECT * FROM parent WHERE key = 2400;
<para>
When either <systemitem>stderr</systemitem> or
<systemitem>csvlog</systemitem> are included, the file
- <filename>current_logfiles</> is created to record the location
+ <filename>current_logfiles</filename> is created to record the location
of the log file(s) currently in use by the logging collector and the
associated logging destination. This provides a convenient way to
find the logs currently in use by the instance. Here is an example of
@@ -4378,10 +4378,10 @@ csvlog log/postgresql.csv
<filename>current_logfiles</filename> is recreated when a new log file
is created as an effect of rotation, and
- when <varname>log_destination</> is reloaded. It is removed when
+ when <varname>log_destination</varname> is reloaded. It is removed when
neither <systemitem>stderr</systemitem>
nor <systemitem>csvlog</systemitem> are included
- in <varname>log_destination</>, and when the logging collector is
+ in <varname>log_destination</varname>, and when the logging collector is
disabled.
</para>
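       As a hedged illustration, a postgresql.conf fragment enabling both plain and
       CSV log output (the logging collector is required for csvlog):

           logging_collector = on
           log_destination = 'stderr,csvlog'
           # After a configuration reload (e.g. pg_ctl reload), current_logfiles in
           # the data directory lists the stderr and csvlog files currently in use,
           # as in the example above.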
@@ -4390,9 +4390,9 @@ csvlog log/postgresql.csv
On most Unix systems, you will need to alter the configuration of
your system's <application>syslog</application> daemon in order
to make use of the <systemitem>syslog</systemitem> option for
- <varname>log_destination</>. <productname>PostgreSQL</productname>
+ <varname>log_destination</varname>. <productname>PostgreSQL</productname>
can log to <application>syslog</application> facilities
- <literal>LOCAL0</> through <literal>LOCAL7</> (see <xref
+ <literal>LOCAL0</literal> through <literal>LOCAL7</literal> (see <xref
linkend="guc-syslog-facility">), but the default
<application>syslog</application> configuration on most platforms
will discard all such messages. You will need to add something like:
@@ -4404,7 +4404,7 @@ local0.* /var/log/postgresql
</para>
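    The postgresql.conf side of such a syslog setup might look roughly like this
    (facility and identifier shown are the defaults discussed under syslog_facility
    and syslog_ident below):

        log_destination = 'syslog'
        syslog_facility = 'LOCAL0'   # must match the facility routed by the syslog daemon
        syslog_ident = 'postgres'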
<para>
On Windows, when you use the <literal>eventlog</literal>
- option for <varname>log_destination</>, you should
+ option for <varname>log_destination</varname>, you should
register an event source and its library with the operating
system so that the Windows Event Viewer can display event
log messages cleanly.
@@ -4417,27 +4417,27 @@ local0.* /var/log/postgresql
<varlistentry id="guc-logging-collector" xreflabel="logging_collector">
<term><varname>logging_collector</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>logging_collector</> configuration parameter</primary>
+ <primary><varname>logging_collector</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- This parameter enables the <firstterm>logging collector</>, which
+ This parameter enables the <firstterm>logging collector</firstterm>, which
is a background process that captures log messages
- sent to <systemitem>stderr</> and redirects them into log files.
+ sent to <systemitem>stderr</systemitem> and redirects them into log files.
This approach is often more useful than
- logging to <application>syslog</>, since some types of messages
- might not appear in <application>syslog</> output. (One common
+ logging to <application>syslog</application>, since some types of messages
+ might not appear in <application>syslog</application> output. (One common
example is dynamic-linker failure messages; another is error messages
- produced by scripts such as <varname>archive_command</>.)
+ produced by scripts such as <varname>archive_command</varname>.)
This parameter can only be set at server start.
</para>
<note>
<para>
- It is possible to log to <systemitem>stderr</> without using the
+ It is possible to log to <systemitem>stderr</systemitem> without using the
logging collector; the log messages will just go to wherever the
- server's <systemitem>stderr</> is directed. However, that method is
+ server's <systemitem>stderr</systemitem> is directed. However, that method is
only suitable for low log volumes, since it provides no convenient
way to rotate log files. Also, on some platforms not using the
logging collector can result in lost or garbled log output, because
@@ -4451,7 +4451,7 @@ local0.* /var/log/postgresql
The logging collector is designed to never lose messages. This means
that in case of extremely high load, server processes could be
blocked while trying to send additional log messages when the
- collector has fallen behind. In contrast, <application>syslog</>
+ collector has fallen behind. In contrast, <application>syslog</application>
prefers to drop messages if it cannot write them, which means it
may fail to log some messages in such cases but it will not block
the rest of the system.
@@ -4464,16 +4464,16 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-directory" xreflabel="log_directory">
<term><varname>log_directory</varname> (<type>string</type>)
<indexterm>
- <primary><varname>log_directory</> configuration parameter</primary>
+ <primary><varname>log_directory</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When <varname>logging_collector</> is enabled,
+ When <varname>logging_collector</varname> is enabled,
this parameter determines the directory in which log files will be created.
It can be specified as an absolute path, or relative to the
cluster data directory.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
The default is <literal>log</literal>.
</para>
@@ -4483,7 +4483,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-filename" xreflabel="log_filename">
<term><varname>log_filename</varname> (<type>string</type>)
<indexterm>
- <primary><varname>log_filename</> configuration parameter</primary>
+ <primary><varname>log_filename</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4514,14 +4514,14 @@ local0.* /var/log/postgresql
longer the case.
</para>
<para>
- If CSV-format output is enabled in <varname>log_destination</>,
- <literal>.csv</> will be appended to the timestamped
+ If CSV-format output is enabled in <varname>log_destination</varname>,
+ <literal>.csv</literal> will be appended to the timestamped
log file name to create the file name for CSV-format output.
- (If <varname>log_filename</> ends in <literal>.log</>, the suffix is
+ (If <varname>log_filename</varname> ends in <literal>.log</literal>, the suffix is
replaced instead.)
</para>
<para>
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -4530,7 +4530,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-file-mode" xreflabel="log_file_mode">
<term><varname>log_file_mode</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>log_file_mode</> configuration parameter</primary>
+ <primary><varname>log_file_mode</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4545,9 +4545,9 @@ local0.* /var/log/postgresql
must start with a <literal>0</literal> (zero).)
</para>
<para>
- The default permissions are <literal>0600</>, meaning only the
+ The default permissions are <literal>0600</literal>, meaning only the
server owner can read or write the log files. The other commonly
- useful setting is <literal>0640</>, allowing members of the owner's
+ useful setting is <literal>0640</literal>, allowing members of the owner's
group to read the files. Note however that to make use of such a
setting, you'll need to alter <xref linkend="guc-log-directory"> to
store the files somewhere outside the cluster data directory. In
@@ -4555,7 +4555,7 @@ local0.* /var/log/postgresql
they might contain sensitive data.
</para>
<para>
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
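     A hedged example of the group-readable setup described above; the directory
     path is hypothetical and lies outside the cluster data directory:

         log_directory = '/var/log/postgresql'   # outside the data directory
         log_file_mode = 0640                    # owner read/write, group read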
@@ -4564,7 +4564,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-rotation-age" xreflabel="log_rotation_age">
<term><varname>log_rotation_age</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>log_rotation_age</> configuration parameter</primary>
+ <primary><varname>log_rotation_age</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4574,7 +4574,7 @@ local0.* /var/log/postgresql
After this many minutes have elapsed, a new log file will
be created. Set to zero to disable time-based creation of
new log files.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -4583,7 +4583,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-rotation-size" xreflabel="log_rotation_size">
<term><varname>log_rotation_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>log_rotation_size</> configuration parameter</primary>
+ <primary><varname>log_rotation_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4593,7 +4593,7 @@ local0.* /var/log/postgresql
After this many kilobytes have been emitted into a log file,
a new log file will be created. Set to zero to disable size-based
creation of new log files.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -4602,7 +4602,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-truncate-on-rotation" xreflabel="log_truncate_on_rotation">
<term><varname>log_truncate_on_rotation</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_truncate_on_rotation</> configuration parameter</primary>
+ <primary><varname>log_truncate_on_rotation</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4617,7 +4617,7 @@ local0.* /var/log/postgresql
a <varname>log_filename</varname> like <literal>postgresql-%H.log</literal>
would result in generating twenty-four hourly log files and then
cyclically overwriting them.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
<para>
@@ -4635,7 +4635,7 @@ local0.* /var/log/postgresql
<varname>log_truncate_on_rotation</varname> to <literal>on</literal>,
<varname>log_rotation_age</varname> to <literal>60</literal>, and
<varname>log_rotation_size</varname> to <literal>1000000</literal>.
- Including <literal>%M</> in <varname>log_filename</varname> allows
+ Including <literal>%M</literal> in <varname>log_filename</varname> allows
any size-driven rotations that might occur to select a file name
different from the hour's initial file name.
</para>
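    The hourly-rotation scheme just described corresponds to a postgresql.conf
    fragment roughly like:

        log_filename = 'postgresql-%H.log'   # or 'postgresql-%H%M.log', see above
        log_truncate_on_rotation = on
        log_rotation_age = 60                # minutes
        log_rotation_size = 1000000          # kilobytes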
@@ -4645,21 +4645,21 @@ local0.* /var/log/postgresql
<varlistentry id="guc-syslog-facility" xreflabel="syslog_facility">
<term><varname>syslog_facility</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>syslog_facility</> configuration parameter</primary>
+ <primary><varname>syslog_facility</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When logging to <application>syslog</> is enabled, this parameter
+ When logging to <application>syslog</application> is enabled, this parameter
determines the <application>syslog</application>
<quote>facility</quote> to be used. You can choose
- from <literal>LOCAL0</>, <literal>LOCAL1</>,
- <literal>LOCAL2</>, <literal>LOCAL3</>, <literal>LOCAL4</>,
- <literal>LOCAL5</>, <literal>LOCAL6</>, <literal>LOCAL7</>;
- the default is <literal>LOCAL0</>. See also the
+ from <literal>LOCAL0</literal>, <literal>LOCAL1</literal>,
+ <literal>LOCAL2</literal>, <literal>LOCAL3</literal>, <literal>LOCAL4</literal>,
+ <literal>LOCAL5</literal>, <literal>LOCAL6</literal>, <literal>LOCAL7</literal>;
+ the default is <literal>LOCAL0</literal>. See also the
documentation of your system's
<application>syslog</application> daemon.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -4668,17 +4668,17 @@ local0.* /var/log/postgresql
<varlistentry id="guc-syslog-ident" xreflabel="syslog_ident">
<term><varname>syslog_ident</varname> (<type>string</type>)
<indexterm>
- <primary><varname>syslog_ident</> configuration parameter</primary>
+ <primary><varname>syslog_ident</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When logging to <application>syslog</> is enabled, this parameter
+ When logging to <application>syslog</application> is enabled, this parameter
determines the program name used to identify
<productname>PostgreSQL</productname> messages in
<application>syslog</application> logs. The default is
<literal>postgres</literal>.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -4687,7 +4687,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-syslog-sequence-numbers" xreflabel="syslog_sequence_numbers">
<term><varname>syslog_sequence_numbers</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>syslog_sequence_numbers</> configuration parameter</primary>
+ <primary><varname>syslog_sequence_numbers</varname> configuration parameter</primary>
</indexterm>
</term>
@@ -4706,7 +4706,7 @@ local0.* /var/log/postgresql
</para>
<para>
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -4715,12 +4715,12 @@ local0.* /var/log/postgresql
<varlistentry id="guc-syslog-split-messages" xreflabel="syslog_split_messages">
<term><varname>syslog_split_messages</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>syslog_split_messages</> configuration parameter</primary>
+ <primary><varname>syslog_split_messages</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When logging to <application>syslog</> is enabled, this parameter
+ When logging to <application>syslog</application> is enabled, this parameter
determines how messages are delivered to syslog. When on (the
default), messages are split by lines, and long lines are split so
that they will fit into 1024 bytes, which is a typical size limit for
@@ -4739,7 +4739,7 @@ local0.* /var/log/postgresql
</para>
<para>
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -4748,16 +4748,16 @@ local0.* /var/log/postgresql
<varlistentry id="guc-event-source" xreflabel="event_source">
<term><varname>event_source</varname> (<type>string</type>)
<indexterm>
- <primary><varname>event_source</> configuration parameter</primary>
+ <primary><varname>event_source</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When logging to <application>event log</> is enabled, this parameter
+ When logging to <application>event log</application> is enabled, this parameter
determines the program name used to identify
<productname>PostgreSQL</productname> messages in
the log. The default is <literal>PostgreSQL</literal>.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -4773,21 +4773,21 @@ local0.* /var/log/postgresql
<varlistentry id="guc-client-min-messages" xreflabel="client_min_messages">
<term><varname>client_min_messages</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>client_min_messages</> configuration parameter</primary>
+ <primary><varname>client_min_messages</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Controls which message levels are sent to the client.
- Valid values are <literal>DEBUG5</>,
- <literal>DEBUG4</>, <literal>DEBUG3</>, <literal>DEBUG2</>,
- <literal>DEBUG1</>, <literal>LOG</>, <literal>NOTICE</>,
- <literal>WARNING</>, <literal>ERROR</>, <literal>FATAL</>,
- and <literal>PANIC</>. Each level
+ Valid values are <literal>DEBUG5</literal>,
+ <literal>DEBUG4</literal>, <literal>DEBUG3</literal>, <literal>DEBUG2</literal>,
+ <literal>DEBUG1</literal>, <literal>LOG</literal>, <literal>NOTICE</literal>,
+ <literal>WARNING</literal>, <literal>ERROR</literal>, <literal>FATAL</literal>,
+ and <literal>PANIC</literal>. Each level
includes all the levels that follow it. The later the level,
the fewer messages are sent. The default is
- <literal>NOTICE</>. Note that <literal>LOG</> has a different
- rank here than in <varname>log_min_messages</>.
+ <literal>NOTICE</literal>. Note that <literal>LOG</literal> has a different
+ rank here than in <varname>log_min_messages</varname>.
</para>
</listitem>
</varlistentry>
@@ -4795,21 +4795,21 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-min-messages" xreflabel="log_min_messages">
<term><varname>log_min_messages</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>log_min_messages</> configuration parameter</primary>
+ <primary><varname>log_min_messages</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Controls which message levels are written to the server log.
- Valid values are <literal>DEBUG5</>, <literal>DEBUG4</>,
- <literal>DEBUG3</>, <literal>DEBUG2</>, <literal>DEBUG1</>,
- <literal>INFO</>, <literal>NOTICE</>, <literal>WARNING</>,
- <literal>ERROR</>, <literal>LOG</>, <literal>FATAL</>, and
- <literal>PANIC</>. Each level includes all the levels that
+ Valid values are <literal>DEBUG5</literal>, <literal>DEBUG4</literal>,
+ <literal>DEBUG3</literal>, <literal>DEBUG2</literal>, <literal>DEBUG1</literal>,
+ <literal>INFO</literal>, <literal>NOTICE</literal>, <literal>WARNING</literal>,
+ <literal>ERROR</literal>, <literal>LOG</literal>, <literal>FATAL</literal>, and
+ <literal>PANIC</literal>. Each level includes all the levels that
follow it. The later the level, the fewer messages are sent
- to the log. The default is <literal>WARNING</>. Note that
- <literal>LOG</> has a different rank here than in
- <varname>client_min_messages</>.
+ to the log. The default is <literal>WARNING</literal>. Note that
+ <literal>LOG</literal> has a different rank here than in
+ <varname>client_min_messages</varname>.
Only superusers can change this setting.
</para>
</listitem>
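     A brief hedged illustration of the two settings; note the different rank of
     LOG in each, as described above:

         -- per session (any user):
         SET client_min_messages = warning;   -- suppress NOTICE-level chatter

         -- in postgresql.conf or via ALTER SYSTEM (superuser only):
         -- log_min_messages = error           -- log only ERROR and more severe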
@@ -4818,7 +4818,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-min-error-statement" xreflabel="log_min_error_statement">
<term><varname>log_min_error_statement</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>log_min_error_statement</> configuration parameter</primary>
+ <primary><varname>log_min_error_statement</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4846,7 +4846,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-min-duration-statement" xreflabel="log_min_duration_statement">
<term><varname>log_min_duration_statement</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>log_min_duration_statement</> configuration parameter</primary>
+ <primary><varname>log_min_duration_statement</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -4872,9 +4872,9 @@ local0.* /var/log/postgresql
When using this option together with
<xref linkend="guc-log-statement">,
the text of statements that are logged because of
- <varname>log_statement</> will not be repeated in the
+ <varname>log_statement</varname> will not be repeated in the
duration log message.
- If you are not using <application>syslog</>, it is recommended
+ If you are not using <application>syslog</application>, it is recommended
that you log the PID or session ID using
<xref linkend="guc-log-line-prefix">
so that you can link the statement message to the later
@@ -4888,7 +4888,7 @@ local0.* /var/log/postgresql
<para>
<xref linkend="runtime-config-severity-levels"> explains the message
- severity levels used by <productname>PostgreSQL</>. If logging output
+ severity levels used by <productname>PostgreSQL</productname>. If logging output
is sent to <systemitem>syslog</systemitem> or Windows'
<systemitem>eventlog</systemitem>, the severity levels are translated
as shown in the table.
@@ -4901,73 +4901,73 @@ local0.* /var/log/postgresql
<row>
<entry>Severity</entry>
<entry>Usage</entry>
- <entry><systemitem>syslog</></entry>
- <entry><systemitem>eventlog</></entry>
+ <entry><systemitem>syslog</systemitem></entry>
+ <entry><systemitem>eventlog</systemitem></entry>
</row>
</thead>
<tbody>
<row>
- <entry><literal>DEBUG1..DEBUG5</></entry>
+ <entry><literal>DEBUG1..DEBUG5</literal></entry>
<entry>Provides successively-more-detailed information for use by
developers.</entry>
- <entry><literal>DEBUG</></entry>
- <entry><literal>INFORMATION</></entry>
+ <entry><literal>DEBUG</literal></entry>
+ <entry><literal>INFORMATION</literal></entry>
</row>
<row>
- <entry><literal>INFO</></entry>
+ <entry><literal>INFO</literal></entry>
<entry>Provides information implicitly requested by the user,
- e.g., output from <command>VACUUM VERBOSE</>.</entry>
- <entry><literal>INFO</></entry>
- <entry><literal>INFORMATION</></entry>
+ e.g., output from <command>VACUUM VERBOSE</command>.</entry>
+ <entry><literal>INFO</literal></entry>
+ <entry><literal>INFORMATION</literal></entry>
</row>
<row>
- <entry><literal>NOTICE</></entry>
+ <entry><literal>NOTICE</literal></entry>
<entry>Provides information that might be helpful to users, e.g.,
notice of truncation of long identifiers.</entry>
- <entry><literal>NOTICE</></entry>
- <entry><literal>INFORMATION</></entry>
+ <entry><literal>NOTICE</literal></entry>
+ <entry><literal>INFORMATION</literal></entry>
</row>
<row>
- <entry><literal>WARNING</></entry>
- <entry>Provides warnings of likely problems, e.g., <command>COMMIT</>
+ <entry><literal>WARNING</literal></entry>
+ <entry>Provides warnings of likely problems, e.g., <command>COMMIT</command>
outside a transaction block.</entry>
- <entry><literal>NOTICE</></entry>
- <entry><literal>WARNING</></entry>
+ <entry><literal>NOTICE</literal></entry>
+ <entry><literal>WARNING</literal></entry>
</row>
<row>
- <entry><literal>ERROR</></entry>
+ <entry><literal>ERROR</literal></entry>
<entry>Reports an error that caused the current command to
abort.</entry>
- <entry><literal>WARNING</></entry>
- <entry><literal>ERROR</></entry>
+ <entry><literal>WARNING</literal></entry>
+ <entry><literal>ERROR</literal></entry>
</row>
<row>
- <entry><literal>LOG</></entry>
+ <entry><literal>LOG</literal></entry>
<entry>Reports information of interest to administrators, e.g.,
checkpoint activity.</entry>
- <entry><literal>INFO</></entry>
- <entry><literal>INFORMATION</></entry>
+ <entry><literal>INFO</literal></entry>
+ <entry><literal>INFORMATION</literal></entry>
</row>
<row>
- <entry><literal>FATAL</></entry>
+ <entry><literal>FATAL</literal></entry>
<entry>Reports an error that caused the current session to
abort.</entry>
- <entry><literal>ERR</></entry>
- <entry><literal>ERROR</></entry>
+ <entry><literal>ERR</literal></entry>
+ <entry><literal>ERROR</literal></entry>
</row>
<row>
- <entry><literal>PANIC</></entry>
+ <entry><literal>PANIC</literal></entry>
<entry>Reports an error that caused all database sessions to abort.</entry>
- <entry><literal>CRIT</></entry>
- <entry><literal>ERROR</></entry>
+ <entry><literal>CRIT</literal></entry>
+ <entry><literal>ERROR</literal></entry>
</row>
</tbody>
</tgroup>
@@ -4982,15 +4982,15 @@ local0.* /var/log/postgresql
<varlistentry id="guc-application-name" xreflabel="application_name">
<term><varname>application_name</varname> (<type>string</type>)
<indexterm>
- <primary><varname>application_name</> configuration parameter</primary>
+ <primary><varname>application_name</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
The <varname>application_name</varname> can be any string of less than
- <symbol>NAMEDATALEN</> characters (64 characters in a standard build).
+ <symbol>NAMEDATALEN</symbol> characters (64 characters in a standard build).
It is typically set by an application upon connection to the server.
- The name will be displayed in the <structname>pg_stat_activity</> view
+ The name will be displayed in the <structname>pg_stat_activity</structname> view
and included in CSV log entries. It can also be included in regular
log entries via the <xref linkend="guc-log-line-prefix"> parameter.
Only printable ASCII characters may be used in the
@@ -5003,17 +5003,17 @@ local0.* /var/log/postgresql
<varlistentry>
<term><varname>debug_print_parse</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>debug_print_parse</> configuration parameter</primary>
+ <primary><varname>debug_print_parse</varname> configuration parameter</primary>
</indexterm>
</term>
<term><varname>debug_print_rewritten</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>debug_print_rewritten</> configuration parameter</primary>
+ <primary><varname>debug_print_rewritten</varname> configuration parameter</primary>
</indexterm>
</term>
<term><varname>debug_print_plan</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>debug_print_plan</> configuration parameter</primary>
+ <primary><varname>debug_print_plan</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5021,7 +5021,7 @@ local0.* /var/log/postgresql
These parameters enable various debugging output to be emitted.
When set, they print the resulting parse tree, the query rewriter
output, or the execution plan for each executed query.
- These messages are emitted at <literal>LOG</> message level, so by
+ These messages are emitted at <literal>LOG</literal> message level, so by
default they will appear in the server log but will not be sent to the
client. You can change that by adjusting
<xref linkend="guc-client-min-messages"> and/or
@@ -5034,7 +5034,7 @@ local0.* /var/log/postgresql
<varlistentry>
<term><varname>debug_pretty_print</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>debug_pretty_print</> configuration parameter</primary>
+ <primary><varname>debug_pretty_print</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5043,7 +5043,7 @@ local0.* /var/log/postgresql
produced by <varname>debug_print_parse</varname>,
<varname>debug_print_rewritten</varname>, or
<varname>debug_print_plan</varname>. This results in more readable
- but much longer output than the <quote>compact</> format used when
+ but much longer output than the <quote>compact</quote> format used when
it is off. It is on by default.
</para>
</listitem>
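     A minimal sketch of using these settings interactively; the printed tree is
     verbose and intended for developers:

         SET debug_print_plan = on;
         SET client_min_messages = log;   -- send the LOG-level output to this client
         SELECT 1;
         -- the plan tree for the query is emitted as a LOG message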
@@ -5052,7 +5052,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-checkpoints" xreflabel="log_checkpoints">
<term><varname>log_checkpoints</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_checkpoints</> configuration parameter</primary>
+ <primary><varname>log_checkpoints</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5060,7 +5060,7 @@ local0.* /var/log/postgresql
Causes checkpoints and restartpoints to be logged in the server log.
Some statistics are included in the log messages, including the number
of buffers written and the time spent writing them.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line. The default is off.
</para>
</listitem>
@@ -5069,7 +5069,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-connections" xreflabel="log_connections">
<term><varname>log_connections</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_connections</> configuration parameter</primary>
+ <primary><varname>log_connections</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5078,14 +5078,14 @@ local0.* /var/log/postgresql
as well as successful completion of client authentication.
Only superusers can change this parameter at session start,
and it cannot be changed at all within a session.
- The default is <literal>off</>.
+ The default is <literal>off</literal>.
</para>
<note>
<para>
- Some client programs, like <application>psql</>, attempt
+ Some client programs, like <application>psql</application>, attempt
to connect twice while determining if a password is required, so
- duplicate <quote>connection received</> messages do not
+ duplicate <quote>connection received</quote> messages do not
necessarily indicate a problem.
</para>
</note>
@@ -5095,7 +5095,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-disconnections" xreflabel="log_disconnections">
<term><varname>log_disconnections</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_disconnections</> configuration parameter</primary>
+ <primary><varname>log_disconnections</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5105,7 +5105,7 @@ local0.* /var/log/postgresql
plus the duration of the session.
Only superusers can change this parameter at session start,
and it cannot be changed at all within a session.
- The default is <literal>off</>.
+ The default is <literal>off</literal>.
</para>
</listitem>
</varlistentry>
@@ -5114,13 +5114,13 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-duration" xreflabel="log_duration">
<term><varname>log_duration</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_duration</> configuration parameter</primary>
+ <primary><varname>log_duration</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Causes the duration of every completed statement to be logged.
- The default is <literal>off</>.
+ The default is <literal>off</literal>.
Only superusers can change this setting.
</para>
@@ -5133,10 +5133,10 @@ local0.* /var/log/postgresql
<para>
The difference between setting this option and setting
<xref linkend="guc-log-min-duration-statement"> to zero is that
- exceeding <varname>log_min_duration_statement</> forces the text of
+ exceeding <varname>log_min_duration_statement</varname> forces the text of
the query to be logged, but this option doesn't. Thus, if
- <varname>log_duration</> is <literal>on</> and
- <varname>log_min_duration_statement</> has a positive value, all
+ <varname>log_duration</varname> is <literal>on</literal> and
+ <varname>log_min_duration_statement</varname> has a positive value, all
durations are logged but the query text is included only for
statements exceeding the threshold. This behavior can be useful for
gathering statistics in high-load installations.
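      For example (the threshold is only illustrative), the combination described
      above could be configured as:

          log_duration = on                  # durations for every completed statement
          log_min_duration_statement = 1000  # query text only for statements over 1000 ms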
@@ -5148,18 +5148,18 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-error-verbosity" xreflabel="log_error_verbosity">
<term><varname>log_error_verbosity</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>log_error_verbosity</> configuration parameter</primary>
+ <primary><varname>log_error_verbosity</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Controls the amount of detail written in the server log for each
- message that is logged. Valid values are <literal>TERSE</>,
- <literal>DEFAULT</>, and <literal>VERBOSE</>, each adding more
- fields to displayed messages. <literal>TERSE</> excludes
- the logging of <literal>DETAIL</>, <literal>HINT</>,
- <literal>QUERY</>, and <literal>CONTEXT</> error information.
- <literal>VERBOSE</> output includes the <symbol>SQLSTATE</> error
+ message that is logged. Valid values are <literal>TERSE</literal>,
+ <literal>DEFAULT</literal>, and <literal>VERBOSE</literal>, each adding more
+ fields to displayed messages. <literal>TERSE</literal> excludes
+ the logging of <literal>DETAIL</literal>, <literal>HINT</literal>,
+ <literal>QUERY</literal>, and <literal>CONTEXT</literal> error information.
+ <literal>VERBOSE</literal> output includes the <symbol>SQLSTATE</symbol> error
code (see also <xref linkend="errcodes-appendix">) and the source code file name, function name,
and line number that generated the error.
Only superusers can change this setting.
@@ -5170,7 +5170,7 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-hostname" xreflabel="log_hostname">
<term><varname>log_hostname</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_hostname</> configuration parameter</primary>
+ <primary><varname>log_hostname</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5179,7 +5179,7 @@ local0.* /var/log/postgresql
connecting host. Turning this parameter on causes logging of the
host name as well. Note that depending on your host name resolution
setup this might impose a non-negligible performance penalty.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -5188,14 +5188,14 @@ local0.* /var/log/postgresql
<varlistentry id="guc-log-line-prefix" xreflabel="log_line_prefix">
<term><varname>log_line_prefix</varname> (<type>string</type>)
<indexterm>
- <primary><varname>log_line_prefix</> configuration parameter</primary>
+ <primary><varname>log_line_prefix</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- This is a <function>printf</>-style string that is output at the
+ This is a <function>printf</function>-style string that is output at the
beginning of each log line.
- <literal>%</> characters begin <quote>escape sequences</>
+ <literal>%</literal> characters begin <quote>escape sequences</quote>
that are replaced with status information as outlined below.
Unrecognized escapes are ignored. Other
characters are copied straight to the log line. Some escapes are
@@ -5207,9 +5207,9 @@ local0.* /var/log/postgresql
right with spaces to give it a minimum width, whereas a positive
value will pad on the left. Padding can be useful to aid human
readability in log files.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line. The default is
- <literal>'%m [%p] '</> which logs a time stamp and the process ID.
+ <literal>'%m [%p] '</literal> which logs a time stamp and the process ID.
<informaltable>
<tgroup cols="3">
@@ -5310,19 +5310,19 @@ local0.* /var/log/postgresql
</row>
<row>
<entry><literal>%%</literal></entry>
- <entry>Literal <literal>%</></entry>
+ <entry>Literal <literal>%</literal></entry>
<entry>no</entry>
</row>
</tbody>
</tgroup>
</informaltable>
- The <literal>%c</> escape prints a quasi-unique session identifier,
+ The <literal>%c</literal> escape prints a quasi-unique session identifier,
consisting of two 4-byte hexadecimal numbers (without leading zeros)
separated by a dot. The numbers are the process start time and the
- process ID, so <literal>%c</> can also be used as a space saving way
+ process ID, so <literal>%c</literal> can also be used as a space saving way
of printing those items. For example, to generate the session
- identifier from <literal>pg_stat_activity</>, use this query:
+ identifier from <literal>pg_stat_activity</literal>, use this query:
<programlisting>
SELECT to_hex(trunc(EXTRACT(EPOCH FROM backend_start))::integer) || '.' ||
to_hex(pid)
@@ -5333,7 +5333,7 @@ FROM pg_stat_activity;
<tip>
<para>
- If you set a nonempty value for <varname>log_line_prefix</>,
+ If you set a nonempty value for <varname>log_line_prefix</varname>,
you should usually make its last character be a space, to provide
visual separation from the rest of the log line. A punctuation
character can be used too.
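       A hedged example prefix, a small variation on the default, with the trailing
       space recommended here:

           log_line_prefix = '%m [%p] %q%u@%d '
           # %m time stamp with milliseconds, %p process ID,
           # %q stop here in non-session processes,
           # %u user name, %d database name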
@@ -5342,15 +5342,15 @@ FROM pg_stat_activity;
<tip>
<para>
- <application>Syslog</> produces its own
+ <application>Syslog</application> produces its own
time stamp and process ID information, so you probably do not want to
- include those escapes if you are logging to <application>syslog</>.
+ include those escapes if you are logging to <application>syslog</application>.
</para>
</tip>
<tip>
<para>
- The <literal>%q</> escape is useful when including information that is
+ The <literal>%q</literal> escape is useful when including information that is
only available in session (backend) context like user or database
name. For example:
<programlisting>
@@ -5364,7 +5364,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
<varlistentry id="guc-log-lock-waits" xreflabel="log_lock_waits">
<term><varname>log_lock_waits</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_lock_waits</> configuration parameter</primary>
+ <primary><varname>log_lock_waits</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5372,7 +5372,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
Controls whether a log message is produced when a session waits
longer than <xref linkend="guc-deadlock-timeout"> to acquire a
lock. This is useful in determining if lock waits are causing
- poor performance. The default is <literal>off</>.
+ poor performance. The default is <literal>off</literal>.
Only superusers can change this setting.
</para>
</listitem>
@@ -5381,22 +5381,22 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
<varlistentry id="guc-log-statement" xreflabel="log_statement">
<term><varname>log_statement</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>log_statement</> configuration parameter</primary>
+ <primary><varname>log_statement</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Controls which SQL statements are logged. Valid values are
- <literal>none</> (off), <literal>ddl</>, <literal>mod</>, and
- <literal>all</> (all statements). <literal>ddl</> logs all data definition
- statements, such as <command>CREATE</>, <command>ALTER</>, and
- <command>DROP</> statements. <literal>mod</> logs all
- <literal>ddl</> statements, plus data-modifying statements
- such as <command>INSERT</>,
- <command>UPDATE</>, <command>DELETE</>, <command>TRUNCATE</>,
- and <command>COPY FROM</>.
- <command>PREPARE</>, <command>EXECUTE</>, and
- <command>EXPLAIN ANALYZE</> statements are also logged if their
+ <literal>none</literal> (off), <literal>ddl</literal>, <literal>mod</literal>, and
+ <literal>all</literal> (all statements). <literal>ddl</literal> logs all data definition
+ statements, such as <command>CREATE</command>, <command>ALTER</command>, and
+ <command>DROP</command> statements. <literal>mod</literal> logs all
+ <literal>ddl</literal> statements, plus data-modifying statements
+ such as <command>INSERT</command>,
+ <command>UPDATE</command>, <command>DELETE</command>, <command>TRUNCATE</command>,
+ and <command>COPY FROM</command>.
+ <command>PREPARE</command>, <command>EXECUTE</command>, and
+ <command>EXPLAIN ANALYZE</command> statements are also logged if their
contained command is of an appropriate type. For clients using
extended query protocol, logging occurs when an Execute message
is received, and values of the Bind parameters are included
@@ -5404,20 +5404,20 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
</para>
<para>
- The default is <literal>none</>. Only superusers can change this
+ The default is <literal>none</literal>. Only superusers can change this
setting.
</para>
<note>
<para>
Statements that contain simple syntax errors are not logged
- even by the <varname>log_statement</> = <literal>all</> setting,
+ even by the <varname>log_statement</varname> = <literal>all</literal> setting,
because the log message is emitted only after basic parsing has
been done to determine the statement type. In the case of extended
query protocol, this setting likewise does not log statements that
fail before the Execute phase (i.e., during parse analysis or
- planning). Set <varname>log_min_error_statement</> to
- <literal>ERROR</> (or lower) to log such statements.
+ planning). Set <varname>log_min_error_statement</varname> to
+ <literal>ERROR</literal> (or lower) to log such statements.
</para>
</note>
</listitem>
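     As a sketch, a superuser could enable DDL logging cluster-wide without a
     restart:

         ALTER SYSTEM SET log_statement = 'ddl';
         SELECT pg_reload_conf();
         -- subsequent CREATE/ALTER/DROP statements are then written to the server log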
@@ -5426,14 +5426,14 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
<varlistentry id="guc-log-replication-commands" xreflabel="log_replication_commands">
<term><varname>log_replication_commands</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_replication_commands</> configuration parameter</primary>
+ <primary><varname>log_replication_commands</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Causes each replication command to be logged in the server log.
See <xref linkend="protocol-replication"> for more information about
- replication command. The default value is <literal>off</>.
+      replication commands. The default value is <literal>off</literal>.
Only superusers can change this setting.
</para>
</listitem>
@@ -5442,7 +5442,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
<varlistentry id="guc-log-temp-files" xreflabel="log_temp_files">
<term><varname>log_temp_files</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>log_temp_files</> configuration parameter</primary>
+ <primary><varname>log_temp_files</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5463,7 +5463,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
<varlistentry id="guc-log-timezone" xreflabel="log_timezone">
<term><varname>log_timezone</varname> (<type>string</type>)
<indexterm>
- <primary><varname>log_timezone</> configuration parameter</primary>
+ <primary><varname>log_timezone</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5471,11 +5471,11 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
Sets the time zone used for timestamps written in the server log.
Unlike <xref linkend="guc-timezone">, this value is cluster-wide,
so that all sessions will report timestamps consistently.
- The built-in default is <literal>GMT</>, but that is typically
- overridden in <filename>postgresql.conf</>; <application>initdb</>
+ The built-in default is <literal>GMT</literal>, but that is typically
+ overridden in <filename>postgresql.conf</filename>; <application>initdb</application>
will install a setting there corresponding to its system environment.
See <xref linkend="datatype-timezones"> for more information.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -5487,10 +5487,10 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
<title>Using CSV-Format Log Output</title>
<para>
- Including <literal>csvlog</> in the <varname>log_destination</> list
+ Including <literal>csvlog</literal> in the <varname>log_destination</varname> list
provides a convenient way to import log files into a database table.
This option emits log lines in comma-separated-values
- (<acronym>CSV</>) format,
+ (<acronym>CSV</acronym>) format,
with these columns:
time stamp with milliseconds,
user name,
@@ -5512,10 +5512,10 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
character count of the error position therein,
error context,
user query that led to the error (if any and enabled by
- <varname>log_min_error_statement</>),
+ <varname>log_min_error_statement</varname>),
character count of the error position therein,
location of the error in the PostgreSQL source code
- (if <varname>log_error_verbosity</> is set to <literal>verbose</>),
+ (if <varname>log_error_verbosity</varname> is set to <literal>verbose</literal>),
and application name.
Here is a sample table definition for storing CSV-format log output:
@@ -5551,7 +5551,7 @@ CREATE TABLE postgres_log
</para>
<para>
- To import a log file into this table, use the <command>COPY FROM</>
+ To import a log file into this table, use the <command>COPY FROM</command>
command:
<programlisting>
@@ -5567,7 +5567,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<listitem>
<para>
Set <varname>log_filename</varname> and
- <varname>log_rotation_age</> to provide a consistent,
+ <varname>log_rotation_age</varname> to provide a consistent,
predictable naming scheme for your log files. This lets you
predict what the file name will be and know when an individual log
file is complete and therefore ready to be imported.
@@ -5584,7 +5584,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<listitem>
<para>
- Set <varname>log_truncate_on_rotation</varname> to <literal>on</> so
+ Set <varname>log_truncate_on_rotation</varname> to <literal>on</literal> so
that old log data isn't mixed with the new in the same file.
</para>
</listitem>
@@ -5593,14 +5593,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<para>
The table definition above includes a primary key specification.
This is useful to protect against accidentally importing the same
- information twice. The <command>COPY</> command commits all of the
+ information twice. The <command>COPY</command> command commits all of the
data it imports at one time, so any error will cause the entire
import to fail. If you import a partial log file and later import
the file again when it is complete, the primary key violation will
cause the import to fail. Wait until the log is complete and
closed before importing. This procedure will also protect against
accidentally importing a partial line that hasn't been completely
- written, which would also cause <command>COPY</> to fail.
+ written, which would also cause <command>COPY</command> to fail.
</para>
</listitem>
</orderedlist>
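     A postgresql.conf fragment following the recommendations above might look like
     this; daily files are one reasonable choice, and the file name and rotation
     values are assumptions, not requirements:

         logging_collector = on
         log_destination = 'csvlog'
         log_filename = 'postgresql-%Y-%m-%d'   # .csv is appended for CSV output
         log_rotation_age = 1440                # one file per day
         log_rotation_size = 0                  # assumed: rely on time-based rotation only
         log_truncate_on_rotation = on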
@@ -5613,7 +5613,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<para>
These settings control how process titles of server processes are
modified. Process titles are typically viewed using programs like
- <application>ps</> or, on Windows, <application>Process Explorer</>.
+ <application>ps</application> or, on Windows, <application>Process Explorer</application>.
See <xref linkend="monitoring-ps"> for details.
</para>
@@ -5621,18 +5621,18 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-cluster-name" xreflabel="cluster_name">
<term><varname>cluster_name</varname> (<type>string</type>)
<indexterm>
- <primary><varname>cluster_name</> configuration parameter</primary>
+ <primary><varname>cluster_name</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the cluster name that appears in the process title for all
server processes in this cluster. The name can be any string of less
- than <symbol>NAMEDATALEN</> characters (64 characters in a standard
+ than <symbol>NAMEDATALEN</symbol> characters (64 characters in a standard
build). Only printable ASCII characters may be used in the
<varname>cluster_name</varname> value. Other characters will be
replaced with question marks (<literal>?</literal>). No name is shown
- if this parameter is set to the empty string <literal>''</> (which is
+ if this parameter is set to the empty string <literal>''</literal> (which is
the default). This parameter can only be set at server start.
</para>
</listitem>
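     For example (the name itself is hypothetical):

         cluster_name = 'prod_primary'
         # process titles shown by ps then include the name, roughly like:
         #   postgres: prod_primary: checkpointer ...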
@@ -5641,15 +5641,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-update-process-title" xreflabel="update_process_title">
<term><varname>update_process_title</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>update_process_title</> configuration parameter</primary>
+ <primary><varname>update_process_title</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Enables updating of the process title every time a new SQL command
is received by the server.
- This setting defaults to <literal>on</> on most platforms, but it
- defaults to <literal>off</> on Windows due to that platform's larger
+ This setting defaults to <literal>on</literal> on most platforms, but it
+ defaults to <literal>off</literal> on Windows due to that platform's larger
overhead for updating the process title.
Only superusers can change this setting.
</para>
@@ -5678,7 +5678,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-track-activities" xreflabel="track_activities">
<term><varname>track_activities</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>track_activities</> configuration parameter</primary>
+ <primary><varname>track_activities</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5698,14 +5698,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-track-activity-query-size" xreflabel="track_activity_query_size">
<term><varname>track_activity_query_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>track_activity_query_size</> configuration parameter</primary>
+ <primary><varname>track_activity_query_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the number of bytes reserved to track the currently
executing command for each active session, for the
- <structname>pg_stat_activity</>.<structfield>query</> field.
+ <structname>pg_stat_activity</structname>.<structfield>query</structfield> field.
The default value is 1024. This parameter can only be set at server
start.
</para>
@@ -5715,7 +5715,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-track-counts" xreflabel="track_counts">
<term><varname>track_counts</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>track_counts</> configuration parameter</primary>
+ <primary><varname>track_counts</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5731,7 +5731,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-track-io-timing" xreflabel="track_io_timing">
<term><varname>track_io_timing</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>track_io_timing</> configuration parameter</primary>
+ <primary><varname>track_io_timing</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5743,7 +5743,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
measure the overhead of timing on your system.
I/O timing information is
displayed in <xref linkend="pg-stat-database-view">, in the output of
- <xref linkend="sql-explain"> when the <literal>BUFFERS</> option is
+ <xref linkend="sql-explain"> when the <literal>BUFFERS</literal> option is
used, and by <xref linkend="pgstatstatements">. Only superusers can
change this setting.
</para>
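       A minimal sketch of inspecting I/O timing once the parameter is enabled
       (changing it requires superuser privileges):

           SET track_io_timing = on;
           EXPLAIN (ANALYZE, BUFFERS) SELECT count(*) FROM pg_class;
           -- when blocks are actually read or written, the plan output includes
           -- "I/O Timings" lines; pg_stat_database accumulates the same data in
           -- its blk_read_time and blk_write_time columns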
@@ -5753,7 +5753,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-track-functions" xreflabel="track_functions">
<term><varname>track_functions</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>track_functions</> configuration parameter</primary>
+ <primary><varname>track_functions</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5767,7 +5767,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<note>
<para>
- SQL-language functions that are simple enough to be <quote>inlined</>
+ SQL-language functions that are simple enough to be <quote>inlined</quote>
into the calling query will not be tracked, regardless of this
setting.
</para>
@@ -5778,7 +5778,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-stats-temp-directory" xreflabel="stats_temp_directory">
<term><varname>stats_temp_directory</varname> (<type>string</type>)
<indexterm>
- <primary><varname>stats_temp_directory</> configuration parameter</primary>
+ <primary><varname>stats_temp_directory</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5788,7 +5788,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
is <filename>pg_stat_tmp</filename>. Pointing this at a RAM-based
file system will decrease physical I/O requirements and can lead to
improved performance.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -5804,29 +5804,29 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry>
<term><varname>log_statement_stats</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_statement_stats</> configuration parameter</primary>
+ <primary><varname>log_statement_stats</varname> configuration parameter</primary>
</indexterm>
</term>
<term><varname>log_parser_stats</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_parser_stats</> configuration parameter</primary>
+ <primary><varname>log_parser_stats</varname> configuration parameter</primary>
</indexterm>
</term>
<term><varname>log_planner_stats</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_planner_stats</> configuration parameter</primary>
+ <primary><varname>log_planner_stats</varname> configuration parameter</primary>
</indexterm>
</term>
<term><varname>log_executor_stats</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_executor_stats</> configuration parameter</primary>
+ <primary><varname>log_executor_stats</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
For each query, output performance statistics of the respective
module to the server log. This is a crude profiling
- instrument, similar to the Unix <function>getrusage()</> operating
+ instrument, similar to the Unix <function>getrusage()</function> operating
system facility. <varname>log_statement_stats</varname> reports total
statement statistics, while the others report per-module statistics.
<varname>log_statement_stats</varname> cannot be enabled together with
@@ -5850,7 +5850,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
</indexterm>
<para>
- These settings control the behavior of the <firstterm>autovacuum</>
+ These settings control the behavior of the <firstterm>autovacuum</firstterm>
feature. Refer to <xref linkend="autovacuum"> for more information.
Note that many of these settings can be overridden on a per-table
basis; see <xref linkend="sql-createtable-storage-parameters"
@@ -5862,7 +5862,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum" xreflabel="autovacuum">
<term><varname>autovacuum</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>autovacuum</> configuration parameter</primary>
+ <primary><varname>autovacuum</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5871,7 +5871,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
autovacuum launcher daemon. This is on by default; however,
<xref linkend="guc-track-counts"> must also be enabled for
autovacuum to work.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line; however, autovacuuming can be
disabled for individual tables by changing table storage parameters.
</para>
@@ -5887,7 +5887,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-log-autovacuum-min-duration" xreflabel="log_autovacuum_min_duration">
<term><varname>log_autovacuum_min_duration</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>log_autovacuum_min_duration</> configuration parameter</primary>
+ <primary><varname>log_autovacuum_min_duration</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5902,7 +5902,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
logged if an autovacuum action is skipped due to the existence of a
conflicting lock. Enabling this parameter can be helpful
in tracking autovacuum activity. This parameter can only be set in
- the <filename>postgresql.conf</> file or on the server command line;
+ the <filename>postgresql.conf</filename> file or on the server command line;
but the setting can be overridden for individual tables by
changing table storage parameters.
</para>
@@ -5912,7 +5912,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-max-workers" xreflabel="autovacuum_max_workers">
<term><varname>autovacuum_max_workers</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>autovacuum_max_workers</> configuration parameter</primary>
+ <primary><varname>autovacuum_max_workers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -5927,17 +5927,17 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-naptime" xreflabel="autovacuum_naptime">
<term><varname>autovacuum_naptime</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>autovacuum_naptime</> configuration parameter</primary>
+ <primary><varname>autovacuum_naptime</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the minimum delay between autovacuum runs on any given
database. In each round the daemon examines the
- database and issues <command>VACUUM</> and <command>ANALYZE</> commands
+ database and issues <command>VACUUM</command> and <command>ANALYZE</command> commands
as needed for tables in that database. The delay is measured
- in seconds, and the default is one minute (<literal>1min</>).
- This parameter can only be set in the <filename>postgresql.conf</>
+ in seconds, and the default is one minute (<literal>1min</literal>).
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -5946,15 +5946,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-vacuum-threshold" xreflabel="autovacuum_vacuum_threshold">
<term><varname>autovacuum_vacuum_threshold</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>autovacuum_vacuum_threshold</> configuration parameter</primary>
+ <primary><varname>autovacuum_vacuum_threshold</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the minimum number of updated or deleted tuples needed
- to trigger a <command>VACUUM</> in any one table.
+ to trigger a <command>VACUUM</command> in any one table.
The default is 50 tuples.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line;
but the setting can be overridden for individual tables by
changing table storage parameters.
@@ -5965,15 +5965,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-analyze-threshold" xreflabel="autovacuum_analyze_threshold">
<term><varname>autovacuum_analyze_threshold</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>autovacuum_analyze_threshold</> configuration parameter</primary>
+ <primary><varname>autovacuum_analyze_threshold</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the minimum number of inserted, updated or deleted tuples
- needed to trigger an <command>ANALYZE</> in any one table.
+ needed to trigger an <command>ANALYZE</command> in any one table.
The default is 50 tuples.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line;
but the setting can be overridden for individual tables by
changing table storage parameters.
@@ -5984,16 +5984,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-vacuum-scale-factor" xreflabel="autovacuum_vacuum_scale_factor">
<term><varname>autovacuum_vacuum_scale_factor</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>autovacuum_vacuum_scale_factor</> configuration parameter</primary>
+ <primary><varname>autovacuum_vacuum_scale_factor</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies a fraction of the table size to add to
<varname>autovacuum_vacuum_threshold</varname>
- when deciding whether to trigger a <command>VACUUM</>.
+ when deciding whether to trigger a <command>VACUUM</command>.
The default is 0.2 (20% of table size).
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line;
but the setting can be overridden for individual tables by
changing table storage parameters.
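
For a rough sense of how the threshold and scale factor combine: autovacuum
considers a table for VACUUM once the number of obsolete tuples exceeds
approximately

    autovacuum_vacuum_threshold
        + autovacuum_vacuum_scale_factor * (number of tuples)

so with the defaults above, a table of about 10,000 rows is vacuumed after
roughly 50 + 0.2 * 10,000 = 2,050 tuples have been updated or deleted.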
@@ -6004,16 +6004,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-analyze-scale-factor" xreflabel="autovacuum_analyze_scale_factor">
<term><varname>autovacuum_analyze_scale_factor</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>autovacuum_analyze_scale_factor</> configuration parameter</primary>
+ <primary><varname>autovacuum_analyze_scale_factor</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies a fraction of the table size to add to
<varname>autovacuum_analyze_threshold</varname>
- when deciding whether to trigger an <command>ANALYZE</>.
+ when deciding whether to trigger an <command>ANALYZE</command>.
The default is 0.1 (10% of table size).
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line;
but the setting can be overridden for individual tables by
changing table storage parameters.
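
As a sketch of the per-table overrides mentioned throughout this section
(the table name here is only illustrative), the same settings are available
as storage parameters:

    ALTER TABLE queue_events SET (
        autovacuum_vacuum_scale_factor  = 0.01,
        autovacuum_vacuum_threshold     = 1000,
        autovacuum_analyze_scale_factor = 0.02
    );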
@@ -6024,14 +6024,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-freeze-max-age" xreflabel="autovacuum_freeze_max_age">
<term><varname>autovacuum_freeze_max_age</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>autovacuum_freeze_max_age</> configuration parameter</primary>
+ <primary><varname>autovacuum_freeze_max_age</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the maximum age (in transactions) that a table's
- <structname>pg_class</>.<structfield>relfrozenxid</> field can
- attain before a <command>VACUUM</> operation is forced
+ <structname>pg_class</structname>.<structfield>relfrozenxid</structfield> field can
+ attain before a <command>VACUUM</command> operation is forced
to prevent transaction ID wraparound within the table.
Note that the system will launch autovacuum processes to
prevent wraparound even when autovacuum is otherwise disabled.
@@ -6039,7 +6039,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<para>
Vacuum also allows removal of old files from the
- <filename>pg_xact</> subdirectory, which is why the default
+ <filename>pg_xact</filename> subdirectory, which is why the default
is a relatively low 200 million transactions.
This parameter can only be set at server start, but the setting
can be reduced for individual tables by
@@ -6058,8 +6058,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<listitem>
<para>
Specifies the maximum age (in multixacts) that a table's
- <structname>pg_class</>.<structfield>relminmxid</> field can
- attain before a <command>VACUUM</> operation is forced to
+ <structname>pg_class</structname>.<structfield>relminmxid</structfield> field can
+ attain before a <command>VACUUM</command> operation is forced to
prevent multixact ID wraparound within the table.
Note that the system will launch autovacuum processes to
prevent wraparound even when autovacuum is otherwise disabled.
@@ -6067,7 +6067,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<para>
Vacuuming multixacts also allows removal of old files from the
- <filename>pg_multixact/members</> and <filename>pg_multixact/offsets</>
+ <filename>pg_multixact/members</filename> and <filename>pg_multixact/offsets</filename>
subdirectories, which is why the default is a relatively low
400 million multixacts.
This parameter can only be set at server start, but the setting can
@@ -6080,16 +6080,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-vacuum-cost-delay" xreflabel="autovacuum_vacuum_cost_delay">
<term><varname>autovacuum_vacuum_cost_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>autovacuum_vacuum_cost_delay</> configuration parameter</primary>
+ <primary><varname>autovacuum_vacuum_cost_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the cost delay value that will be used in automatic
- <command>VACUUM</> operations. If -1 is specified, the regular
+ <command>VACUUM</command> operations. If -1 is specified, the regular
<xref linkend="guc-vacuum-cost-delay"> value will be used.
The default value is 20 milliseconds.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line;
but the setting can be overridden for individual tables by
changing table storage parameters.
@@ -6100,19 +6100,19 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-autovacuum-vacuum-cost-limit" xreflabel="autovacuum_vacuum_cost_limit">
<term><varname>autovacuum_vacuum_cost_limit</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>autovacuum_vacuum_cost_limit</> configuration parameter</primary>
+ <primary><varname>autovacuum_vacuum_cost_limit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Specifies the cost limit value that will be used in automatic
- <command>VACUUM</> operations. If -1 is specified (which is the
+ <command>VACUUM</command> operations. If -1 is specified (which is the
default), the regular
<xref linkend="guc-vacuum-cost-limit"> value will be used. Note that
the value is distributed proportionally among the running autovacuum
workers, if there is more than one, so that the sum of the limits for
each worker does not exceed the value of this variable.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line;
but the setting can be overridden for individual tables by
changing table storage parameters.
@@ -6133,9 +6133,9 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-search-path" xreflabel="search_path">
<term><varname>search_path</varname> (<type>string</type>)
<indexterm>
- <primary><varname>search_path</> configuration parameter</primary>
+ <primary><varname>search_path</varname> configuration parameter</primary>
</indexterm>
- <indexterm><primary>path</><secondary>for schemas</></>
+ <indexterm><primary>path</primary><secondary>for schemas</secondary></indexterm>
</term>
<listitem>
<para>
@@ -6151,32 +6151,32 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<para>
The value for <varname>search_path</varname> must be a comma-separated
list of schema names. Any name that is not an existing schema, or is
- a schema for which the user does not have <literal>USAGE</>
+ a schema for which the user does not have <literal>USAGE</literal>
permission, is silently ignored.
</para>
<para>
If one of the list items is the special name
<literal>$user</literal>, then the schema having the name returned by
- <function>SESSION_USER</> is substituted, if there is such a schema
- and the user has <literal>USAGE</> permission for it.
+ <function>SESSION_USER</function> is substituted, if there is such a schema
+ and the user has <literal>USAGE</literal> permission for it.
(If not, <literal>$user</literal> is ignored.)
</para>
<para>
- The system catalog schema, <literal>pg_catalog</>, is always
+ The system catalog schema, <literal>pg_catalog</literal>, is always
searched, whether it is mentioned in the path or not. If it is
mentioned in the path then it will be searched in the specified
- order. If <literal>pg_catalog</> is not in the path then it will
- be searched <emphasis>before</> searching any of the path items.
+ order. If <literal>pg_catalog</literal> is not in the path then it will
+ be searched <emphasis>before</emphasis> searching any of the path items.
</para>
<para>
Likewise, the current session's temporary-table schema,
- <literal>pg_temp_<replaceable>nnn</></>, is always searched if it
+ <literal>pg_temp_<replaceable>nnn</replaceable></literal>, is always searched if it
exists. It can be explicitly listed in the path by using the
- alias <literal>pg_temp</><indexterm><primary>pg_temp</></>. If it is not listed in the path then
- it is searched first (even before <literal>pg_catalog</>). However,
+ alias <literal>pg_temp</literal><indexterm><primary>pg_temp</primary></indexterm>. If it is not listed in the path then
+ it is searched first (even before <literal>pg_catalog</literal>). However,
the temporary schema is only searched for relation (table, view,
sequence, etc) and data type names. It is never searched for
function or operator names.
@@ -6193,7 +6193,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
The default value for this parameter is
<literal>"$user", public</literal>.
This setting supports shared use of a database (where no users
- have private schemas, and all share use of <literal>public</>),
+ have private schemas, and all share use of <literal>public</literal>),
private per-user schemas, and combinations of these. Other
effects can be obtained by altering the default search path
setting, either globally or per-user.
@@ -6202,11 +6202,11 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<para>
The current effective value of the search path can be examined
via the <acronym>SQL</acronym> function
- <function>current_schemas</>
+ <function>current_schemas</function>
(see <xref linkend="functions-info">).
This is not quite the same as
examining the value of <varname>search_path</varname>, since
- <function>current_schemas</> shows how the items
+ <function>current_schemas</function> shows how the items
appearing in <varname>search_path</varname> were resolved.
</para>
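
A minimal session-level sketch (the schema name is illustrative):

    SET search_path TO myschema, public;
    SHOW search_path;             -- the raw setting
    SELECT current_schemas(true); -- the resolved list, including implicit schemas such as pg_catalog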
@@ -6219,20 +6219,20 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-row-security" xreflabel="row_security">
<term><varname>row_security</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>row_security</> configuration parameter</primary>
+ <primary><varname>row_security</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
This variable controls whether to raise an error in lieu of applying a
- row security policy. When set to <literal>on</>, policies apply
- normally. When set to <literal>off</>, queries fail which would
- otherwise apply at least one policy. The default is <literal>on</>.
- Change to <literal>off</> where limited row visibility could cause
- incorrect results; for example, <application>pg_dump</> makes that
+ row security policy. When set to <literal>on</literal>, policies apply
+ normally. When set to <literal>off</literal>, queries fail which would
+ otherwise apply at least one policy. The default is <literal>on</literal>.
+ Change to <literal>off</literal> where limited row visibility could cause
+ incorrect results; for example, <application>pg_dump</application> makes that
change by default. This variable has no effect on roles which bypass
every row security policy, to wit, superusers and roles with
- the <literal>BYPASSRLS</> attribute.
+ the <literal>BYPASSRLS</literal> attribute.
</para>
<para>
@@ -6245,14 +6245,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-default-tablespace" xreflabel="default_tablespace">
<term><varname>default_tablespace</varname> (<type>string</type>)
<indexterm>
- <primary><varname>default_tablespace</> configuration parameter</primary>
+ <primary><varname>default_tablespace</varname> configuration parameter</primary>
</indexterm>
- <indexterm><primary>tablespace</><secondary>default</></>
+ <indexterm><primary>tablespace</primary><secondary>default</secondary></indexterm>
</term>
<listitem>
<para>
This variable specifies the default tablespace in which to create
- objects (tables and indexes) when a <command>CREATE</> command does
+ objects (tables and indexes) when a <command>CREATE</command> command does
not explicitly specify a tablespace.
</para>
@@ -6260,9 +6260,9 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
The value is either the name of a tablespace, or an empty string
to specify using the default tablespace of the current database.
If the value does not match the name of any existing tablespace,
- <productname>PostgreSQL</> will automatically use the default
+ <productname>PostgreSQL</productname> will automatically use the default
tablespace of the current database. If a nondefault tablespace
- is specified, the user must have <literal>CREATE</> privilege
+ is specified, the user must have <literal>CREATE</literal> privilege
for it, or creation attempts will fail.
</para>
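
For example (the tablespace name is illustrative; it must already exist and
the user needs CREATE privilege on it):

    SET default_tablespace = fastspace;
    CREATE TABLE measurements (ts timestamptz, value float8);  -- placed in fastspace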
@@ -6287,38 +6287,38 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-temp-tablespaces" xreflabel="temp_tablespaces">
<term><varname>temp_tablespaces</varname> (<type>string</type>)
<indexterm>
- <primary><varname>temp_tablespaces</> configuration parameter</primary>
+ <primary><varname>temp_tablespaces</varname> configuration parameter</primary>
</indexterm>
- <indexterm><primary>tablespace</><secondary>temporary</></>
+ <indexterm><primary>tablespace</primary><secondary>temporary</secondary></indexterm>
</term>
<listitem>
<para>
This variable specifies tablespaces in which to create temporary
objects (temp tables and indexes on temp tables) when a
- <command>CREATE</> command does not explicitly specify a tablespace.
+ <command>CREATE</command> command does not explicitly specify a tablespace.
Temporary files for purposes such as sorting large data sets
are also created in these tablespaces.
</para>
<para>
The value is a list of names of tablespaces. When there is more than
- one name in the list, <productname>PostgreSQL</> chooses a random
+ one name in the list, <productname>PostgreSQL</productname> chooses a random
member of the list each time a temporary object is to be created;
except that within a transaction, successively created temporary
objects are placed in successive tablespaces from the list.
If the selected element of the list is an empty string,
- <productname>PostgreSQL</> will automatically use the default
+ <productname>PostgreSQL</productname> will automatically use the default
tablespace of the current database instead.
</para>
<para>
- When <varname>temp_tablespaces</> is set interactively, specifying a
+ When <varname>temp_tablespaces</varname> is set interactively, specifying a
nonexistent tablespace is an error, as is specifying a tablespace for
- which the user does not have <literal>CREATE</> privilege. However,
+ which the user does not have <literal>CREATE</literal> privilege. However,
when using a previously set value, nonexistent tablespaces are
ignored, as are tablespaces for which the user lacks
- <literal>CREATE</> privilege. In particular, this rule applies when
- using a value set in <filename>postgresql.conf</>.
+ <literal>CREATE</literal> privilege. In particular, this rule applies when
+ using a value set in <filename>postgresql.conf</filename>.
</para>
<para>
@@ -6336,18 +6336,18 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-check-function-bodies" xreflabel="check_function_bodies">
<term><varname>check_function_bodies</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>check_function_bodies</> configuration parameter</primary>
+ <primary><varname>check_function_bodies</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- This parameter is normally on. When set to <literal>off</>, it
+ This parameter is normally on. When set to <literal>off</literal>, it
disables validation of the function body string during <xref
linkend="sql-createfunction">. Disabling validation avoids side
effects of the validation process and avoids false positives due
to problems such as forward references. Set this parameter
- to <literal>off</> before loading functions on behalf of other
- users; <application>pg_dump</> does so automatically.
+ to <literal>off</literal> before loading functions on behalf of other
+ users; <application>pg_dump</application> does so automatically.
</para>
</listitem>
</varlistentry>
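
A minimal sketch of the intended use, with illustrative object names; with
validation off, the function body may reference objects that do not exist yet:

    SET check_function_bodies = off;
    CREATE FUNCTION report_count() RETURNS bigint
        LANGUAGE sql AS 'SELECT count(*) FROM report_table';  -- report_table need not exist yet
    SET check_function_bodies = on;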
@@ -6359,7 +6359,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<secondary>setting default</secondary>
</indexterm>
<indexterm>
- <primary><varname>default_transaction_isolation</> configuration parameter</primary>
+ <primary><varname>default_transaction_isolation</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6386,14 +6386,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<secondary>setting default</secondary>
</indexterm>
<indexterm>
- <primary><varname>default_transaction_read_only</> configuration parameter</primary>
+ <primary><varname>default_transaction_read_only</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
A read-only SQL transaction cannot alter non-temporary tables.
This parameter controls the default read-only status of each new
- transaction. The default is <literal>off</> (read/write).
+ transaction. The default is <literal>off</literal> (read/write).
</para>
<para>
@@ -6409,12 +6409,12 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<secondary>setting default</secondary>
</indexterm>
<indexterm>
- <primary><varname>default_transaction_deferrable</> configuration parameter</primary>
+ <primary><varname>default_transaction_deferrable</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When running at the <literal>serializable</> isolation level,
+ When running at the <literal>serializable</literal> isolation level,
a deferrable read-only SQL transaction may be delayed before
it is allowed to proceed. However, once it begins executing
it does not incur any of the overhead required to ensure
@@ -6427,7 +6427,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
This parameter controls the default deferrable status of each
new transaction. It currently has no effect on read-write
transactions or those operating at isolation levels lower
- than <literal>serializable</>. The default is <literal>off</>.
+ than <literal>serializable</literal>. The default is <literal>off</literal>.
</para>
<para>
@@ -6440,7 +6440,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-session-replication-role" xreflabel="session_replication_role">
<term><varname>session_replication_role</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>session_replication_role</> configuration parameter</primary>
+ <primary><varname>session_replication_role</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6448,8 +6448,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
Controls firing of replication-related triggers and rules for the
current session. Setting this variable requires
superuser privilege and results in discarding any previously cached
- query plans. Possible values are <literal>origin</> (the default),
- <literal>replica</> and <literal>local</>.
+ query plans. Possible values are <literal>origin</literal> (the default),
+ <literal>replica</literal> and <literal>local</literal>.
See <xref linkend="sql-altertable"> for
more information.
</para>
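
A sketch of typical use during a replication-style data load (requires
superuser; ordinary triggers will typically not fire while in replica mode):

    SET session_replication_role = replica;
    -- ... bulk-load rows that were already validated on the origin ...
    SET session_replication_role = DEFAULT;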
@@ -6459,21 +6459,21 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-statement-timeout" xreflabel="statement_timeout">
<term><varname>statement_timeout</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>statement_timeout</> configuration parameter</primary>
+ <primary><varname>statement_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Abort any statement that takes more than the specified number of
milliseconds, starting from the time the command arrives at the server
- from the client. If <varname>log_min_error_statement</> is set to
- <literal>ERROR</> or lower, the statement that timed out will also be
+ from the client. If <varname>log_min_error_statement</varname> is set to
+ <literal>ERROR</literal> or lower, the statement that timed out will also be
logged. A value of zero (the default) turns this off.
</para>
<para>
- Setting <varname>statement_timeout</> in
- <filename>postgresql.conf</> is not recommended because it would
+ Setting <varname>statement_timeout</varname> in
+ <filename>postgresql.conf</filename> is not recommended because it would
affect all sessions.
</para>
</listitem>
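
Rather than a global setting, a scoped form such as the following is usually
preferable (database name and values are illustrative):

    ALTER DATABASE reporting SET statement_timeout = '60s';  -- default for new sessions in that database
    SET statement_timeout = '5min';                          -- or per session, for an ad-hoc query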
@@ -6482,7 +6482,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-lock-timeout" xreflabel="lock_timeout">
<term><varname>lock_timeout</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>lock_timeout</> configuration parameter</primary>
+ <primary><varname>lock_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6491,24 +6491,24 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
milliseconds while attempting to acquire a lock on a table, index,
row, or other database object. The time limit applies separately to
each lock acquisition attempt. The limit applies both to explicit
- locking requests (such as <command>LOCK TABLE</>, or <command>SELECT
- FOR UPDATE</> without <literal>NOWAIT</>) and to implicitly-acquired
- locks. If <varname>log_min_error_statement</> is set to
- <literal>ERROR</> or lower, the statement that timed out will be
+ locking requests (such as <command>LOCK TABLE</command>, or <command>SELECT
+ FOR UPDATE</command> without <literal>NOWAIT</literal>) and to implicitly-acquired
+ locks. If <varname>log_min_error_statement</varname> is set to
+ <literal>ERROR</literal> or lower, the statement that timed out will be
logged. A value of zero (the default) turns this off.
</para>
<para>
- Unlike <varname>statement_timeout</>, this timeout can only occur
- while waiting for locks. Note that if <varname>statement_timeout</>
- is nonzero, it is rather pointless to set <varname>lock_timeout</> to
+ Unlike <varname>statement_timeout</varname>, this timeout can only occur
+ while waiting for locks. Note that if <varname>statement_timeout</varname>
+ is nonzero, it is rather pointless to set <varname>lock_timeout</varname> to
the same or larger value, since the statement timeout would always
trigger first.
</para>
<para>
- Setting <varname>lock_timeout</> in
- <filename>postgresql.conf</> is not recommended because it would
+ Setting <varname>lock_timeout</varname> in
+ <filename>postgresql.conf</filename> is not recommended because it would
affect all sessions.
</para>
</listitem>
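
A session-level sketch that keeps the two timeouts consistent with the note
above (lock_timeout smaller than statement_timeout; values are illustrative):

    SET lock_timeout = '2s';        -- give up quickly when a lock cannot be acquired
    SET statement_timeout = '30s';  -- still bound total statement run time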
@@ -6517,7 +6517,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-idle-in-transaction-session-timeout" xreflabel="idle_in_transaction_session_timeout">
<term><varname>idle_in_transaction_session_timeout</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>idle_in_transaction_session_timeout</> configuration parameter</primary>
+ <primary><varname>idle_in_transaction_session_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6537,21 +6537,21 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-vacuum-freeze-table-age" xreflabel="vacuum_freeze_table_age">
<term><varname>vacuum_freeze_table_age</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_freeze_table_age</> configuration parameter</primary>
+ <primary><varname>vacuum_freeze_table_age</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- <command>VACUUM</> performs an aggressive scan if the table's
- <structname>pg_class</>.<structfield>relfrozenxid</> field has reached
+ <command>VACUUM</command> performs an aggressive scan if the table's
+ <structname>pg_class</structname>.<structfield>relfrozenxid</structfield> field has reached
the age specified by this setting. An aggressive scan differs from
- a regular <command>VACUUM</> in that it visits every page that might
+ a regular <command>VACUUM</command> in that it visits every page that might
contain unfrozen XIDs or MXIDs, not just those that might contain dead
tuples. The default is 150 million transactions. Although users can
- set this value anywhere from zero to two billions, <command>VACUUM</>
+ set this value anywhere from zero to two billion, <command>VACUUM</command>
will silently limit the effective value to 95% of
<xref linkend="guc-autovacuum-freeze-max-age">, so that a
- periodical manual <command>VACUUM</> has a chance to run before an
+ periodical manual <command>VACUUM</command> has a chance to run before an
anti-wraparound autovacuum is launched for the table. For more
information see
<xref linkend="vacuum-for-wraparound">.
@@ -6562,17 +6562,17 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-vacuum-freeze-min-age" xreflabel="vacuum_freeze_min_age">
<term><varname>vacuum_freeze_min_age</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_freeze_min_age</> configuration parameter</primary>
+ <primary><varname>vacuum_freeze_min_age</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Specifies the cutoff age (in transactions) that <command>VACUUM</>
+ Specifies the cutoff age (in transactions) that <command>VACUUM</command>
should use to decide whether to freeze row versions
while scanning a table.
The default is 50 million transactions. Although
users can set this value anywhere from zero to one billion,
- <command>VACUUM</> will silently limit the effective value to half
+ <command>VACUUM</command> will silently limit the effective value to half
the value of <xref linkend="guc-autovacuum-freeze-max-age">, so
that there is not an unreasonably short time between forced
autovacuums. For more information see <xref
@@ -6584,21 +6584,21 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-vacuum-multixact-freeze-table-age" xreflabel="vacuum_multixact_freeze_table_age">
<term><varname>vacuum_multixact_freeze_table_age</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_multixact_freeze_table_age</> configuration parameter</primary>
+ <primary><varname>vacuum_multixact_freeze_table_age</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- <command>VACUUM</> performs an aggressive scan if the table's
- <structname>pg_class</>.<structfield>relminmxid</> field has reached
+ <command>VACUUM</command> performs an aggressive scan if the table's
+ <structname>pg_class</structname>.<structfield>relminmxid</structfield> field has reached
the age specified by this setting. An aggressive scan differs from
- a regular <command>VACUUM</> in that it visits every page that might
+ a regular <command>VACUUM</command> in that it visits every page that might
contain unfrozen XIDs or MXIDs, not just those that might contain dead
tuples. The default is 150 million multixacts.
Although users can set this value anywhere from zero to two billion,
- <command>VACUUM</> will silently limit the effective value to 95% of
+ <command>VACUUM</command> will silently limit the effective value to 95% of
<xref linkend="guc-autovacuum-multixact-freeze-max-age">, so that a
- periodical manual <command>VACUUM</> has a chance to run before an
+ periodical manual <command>VACUUM</command> has a chance to run before an
anti-wraparound autovacuum is launched for the table.
For more information see <xref linkend="vacuum-for-multixact-wraparound">.
</para>
@@ -6608,17 +6608,17 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-vacuum-multixact-freeze-min-age" xreflabel="vacuum_multixact_freeze_min_age">
<term><varname>vacuum_multixact_freeze_min_age</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>vacuum_multixact_freeze_min_age</> configuration parameter</primary>
+ <primary><varname>vacuum_multixact_freeze_min_age</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Specifies the cutoff age (in multixacts) that <command>VACUUM</>
+ Specifies the cutoff age (in multixacts) that <command>VACUUM</command>
should use to decide whether to replace multixact IDs with a newer
transaction ID or multixact ID while scanning a table. The default
is 5 million multixacts.
Although users can set this value anywhere from zero to one billion,
- <command>VACUUM</> will silently limit the effective value to half
+ <command>VACUUM</command> will silently limit the effective value to half
the value of <xref linkend="guc-autovacuum-multixact-freeze-max-age">,
so that there is not an unreasonably short time between forced
autovacuums.
@@ -6630,7 +6630,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-bytea-output" xreflabel="bytea_output">
<term><varname>bytea_output</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>bytea_output</> configuration parameter</primary>
+ <primary><varname>bytea_output</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6648,7 +6648,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-xmlbinary" xreflabel="xmlbinary">
<term><varname>xmlbinary</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>xmlbinary</> configuration parameter</primary>
+ <primary><varname>xmlbinary</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6676,10 +6676,10 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-xmloption" xreflabel="xmloption">
<term><varname>xmloption</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>xmloption</> configuration parameter</primary>
+ <primary><varname>xmloption</varname> configuration parameter</primary>
</indexterm>
<indexterm>
- <primary><varname>SET XML OPTION</></primary>
+ <primary><varname>SET XML OPTION</varname></primary>
</indexterm>
<indexterm>
<primary>XML option</primary>
@@ -6709,16 +6709,16 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-gin-pending-list-limit" xreflabel="gin_pending_list_limit">
<term><varname>gin_pending_list_limit</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>gin_pending_list_limit</> configuration parameter</primary>
+ <primary><varname>gin_pending_list_limit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the maximum size of the GIN pending list which is used
- when <literal>fastupdate</> is enabled. If the list grows
+ when <literal>fastupdate</literal> is enabled. If the list grows
larger than this maximum size, it is cleaned up by moving
the entries in it to the main GIN data structure in bulk.
- The default is four megabytes (<literal>4MB</>). This setting
+ The default is four megabytes (<literal>4MB</literal>). This setting
can be overridden for individual GIN indexes by changing
index storage parameters.
See <xref linkend="gin-fast-update"> and <xref linkend="gin-tips">
@@ -6737,7 +6737,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-datestyle" xreflabel="DateStyle">
<term><varname>DateStyle</varname> (<type>string</type>)
<indexterm>
- <primary><varname>DateStyle</> configuration parameter</primary>
+ <primary><varname>DateStyle</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6745,16 +6745,16 @@ SET XML OPTION { DOCUMENT | CONTENT };
Sets the display format for date and time values, as well as the
rules for interpreting ambiguous date input values. For
historical reasons, this variable contains two independent
- components: the output format specification (<literal>ISO</>,
- <literal>Postgres</>, <literal>SQL</>, or <literal>German</>)
+ components: the output format specification (<literal>ISO</literal>,
+ <literal>Postgres</literal>, <literal>SQL</literal>, or <literal>German</literal>)
and the input/output specification for year/month/day ordering
- (<literal>DMY</>, <literal>MDY</>, or <literal>YMD</>). These
- can be set separately or together. The keywords <literal>Euro</>
- and <literal>European</> are synonyms for <literal>DMY</>; the
- keywords <literal>US</>, <literal>NonEuro</>, and
- <literal>NonEuropean</> are synonyms for <literal>MDY</>. See
+ (<literal>DMY</literal>, <literal>MDY</literal>, or <literal>YMD</literal>). These
+ can be set separately or together. The keywords <literal>Euro</literal>
+ and <literal>European</literal> are synonyms for <literal>DMY</literal>; the
+ keywords <literal>US</literal>, <literal>NonEuro</literal>, and
+ <literal>NonEuropean</literal> are synonyms for <literal>MDY</literal>. See
<xref linkend="datatype-datetime"> for more information. The
- built-in default is <literal>ISO, MDY</>, but
+ built-in default is <literal>ISO, MDY</literal>, but
<application>initdb</application> will initialize the
configuration file with a setting that corresponds to the
behavior of the chosen <varname>lc_time</varname> locale.
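
For example, changing only the input ordering component:

    SET DateStyle = 'ISO, DMY';
    SELECT '13/02/2024'::date;   -- read as 13 February 2024 under DMY ordering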
@@ -6765,28 +6765,28 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-intervalstyle" xreflabel="IntervalStyle">
<term><varname>IntervalStyle</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>IntervalStyle</> configuration parameter</primary>
+ <primary><varname>IntervalStyle</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Sets the display format for interval values.
- The value <literal>sql_standard</> will produce
+ The value <literal>sql_standard</literal> will produce
output matching <acronym>SQL</acronym> standard interval literals.
- The value <literal>postgres</> (which is the default) will produce
- output matching <productname>PostgreSQL</> releases prior to 8.4
+ The value <literal>postgres</literal> (which is the default) will produce
+ output matching <productname>PostgreSQL</productname> releases prior to 8.4
when the <xref linkend="guc-datestyle">
- parameter was set to <literal>ISO</>.
- The value <literal>postgres_verbose</> will produce output
- matching <productname>PostgreSQL</> releases prior to 8.4
- when the <varname>DateStyle</>
- parameter was set to non-<literal>ISO</> output.
- The value <literal>iso_8601</> will produce output matching the time
- interval <quote>format with designators</> defined in section
+ parameter was set to <literal>ISO</literal>.
+ The value <literal>postgres_verbose</literal> will produce output
+ matching <productname>PostgreSQL</productname> releases prior to 8.4
+ when the <varname>DateStyle</varname>
+ parameter was set to non-<literal>ISO</literal> output.
+ The value <literal>iso_8601</literal> will produce output matching the time
+ interval <quote>format with designators</quote> defined in section
4.4.3.2 of ISO 8601.
</para>
<para>
- The <varname>IntervalStyle</> parameter also affects the
+ The <varname>IntervalStyle</varname> parameter also affects the
interpretation of ambiguous interval input. See
<xref linkend="datatype-interval-input"> for more information.
</para>
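
For example:

    SET IntervalStyle = iso_8601;
    SELECT interval '3 days 4 hours';  -- displayed as P3DT4H, the ISO 8601 format with designators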
@@ -6796,15 +6796,15 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-timezone" xreflabel="TimeZone">
<term><varname>TimeZone</varname> (<type>string</type>)
<indexterm>
- <primary><varname>TimeZone</> configuration parameter</primary>
+ <primary><varname>TimeZone</varname> configuration parameter</primary>
</indexterm>
- <indexterm><primary>time zone</></>
+ <indexterm><primary>time zone</primary></indexterm>
</term>
<listitem>
<para>
Sets the time zone for displaying and interpreting time stamps.
- The built-in default is <literal>GMT</>, but that is typically
- overridden in <filename>postgresql.conf</>; <application>initdb</>
+ The built-in default is <literal>GMT</literal>, but that is typically
+ overridden in <filename>postgresql.conf</filename>; <application>initdb</application>
will install a setting there corresponding to its system environment.
See <xref linkend="datatype-timezones"> for more information.
</para>
@@ -6814,14 +6814,14 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-timezone-abbreviations" xreflabel="timezone_abbreviations">
<term><varname>timezone_abbreviations</varname> (<type>string</type>)
<indexterm>
- <primary><varname>timezone_abbreviations</> configuration parameter</primary>
+ <primary><varname>timezone_abbreviations</varname> configuration parameter</primary>
</indexterm>
- <indexterm><primary>time zone names</></>
+ <indexterm><primary>time zone names</primary></indexterm>
</term>
<listitem>
<para>
Sets the collection of time zone abbreviations that will be accepted
- by the server for datetime input. The default is <literal>'Default'</>,
+ by the server for datetime input. The default is <literal>'Default'</literal>,
which is a collection that works in most of the world; there are
also <literal>'Australia'</literal> and <literal>'India'</literal>,
and other collections can be defined for a particular installation.
@@ -6840,15 +6840,15 @@ SET XML OPTION { DOCUMENT | CONTENT };
<secondary>display</secondary>
</indexterm>
<indexterm>
- <primary><varname>extra_float_digits</> configuration parameter</primary>
+ <primary><varname>extra_float_digits</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
This parameter adjusts the number of digits displayed for
- floating-point values, including <type>float4</>, <type>float8</>,
+ floating-point values, including <type>float4</type>, <type>float8</type>,
and geometric data types. The parameter value is added to the
- standard number of digits (<literal>FLT_DIG</> or <literal>DBL_DIG</>
+ standard number of digits (<literal>FLT_DIG</literal> or <literal>DBL_DIG</literal>
as appropriate). The value can be set as high as 3, to include
partially-significant digits; this is especially useful for dumping
float data that needs to be restored exactly. Or it can be set
@@ -6861,9 +6861,9 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-client-encoding" xreflabel="client_encoding">
<term><varname>client_encoding</varname> (<type>string</type>)
<indexterm>
- <primary><varname>client_encoding</> configuration parameter</primary>
+ <primary><varname>client_encoding</varname> configuration parameter</primary>
</indexterm>
- <indexterm><primary>character set</></>
+ <indexterm><primary>character set</primary></indexterm>
</term>
<listitem>
<para>
@@ -6878,7 +6878,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-lc-messages" xreflabel="lc_messages">
<term><varname>lc_messages</varname> (<type>string</type>)
<indexterm>
- <primary><varname>lc_messages</> configuration parameter</primary>
+ <primary><varname>lc_messages</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6910,7 +6910,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-lc-monetary" xreflabel="lc_monetary">
<term><varname>lc_monetary</varname> (<type>string</type>)
<indexterm>
- <primary><varname>lc_monetary</> configuration parameter</primary>
+ <primary><varname>lc_monetary</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6929,7 +6929,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-lc-numeric" xreflabel="lc_numeric">
<term><varname>lc_numeric</varname> (<type>string</type>)
<indexterm>
- <primary><varname>lc_numeric</> configuration parameter</primary>
+ <primary><varname>lc_numeric</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6948,7 +6948,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-lc-time" xreflabel="lc_time">
<term><varname>lc_time</varname> (<type>string</type>)
<indexterm>
- <primary><varname>lc_time</> configuration parameter</primary>
+ <primary><varname>lc_time</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6967,7 +6967,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-default-text-search-config" xreflabel="default_text_search_config">
<term><varname>default_text_search_config</varname> (<type>string</type>)
<indexterm>
- <primary><varname>default_text_search_config</> configuration parameter</primary>
+ <primary><varname>default_text_search_config</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -6976,7 +6976,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
of the text search functions that do not have an explicit argument
specifying the configuration.
See <xref linkend="textsearch"> for further information.
- The built-in default is <literal>pg_catalog.simple</>, but
+ The built-in default is <literal>pg_catalog.simple</literal>, but
<application>initdb</application> will initialize the
configuration file with a setting that corresponds to the
chosen <varname>lc_ctype</varname> locale, if a configuration
@@ -6997,8 +6997,8 @@ SET XML OPTION { DOCUMENT | CONTENT };
server, in order to load additional functionality or achieve performance
benefits. For example, a setting of
<literal>'$libdir/mylib'</literal> would cause
- <literal>mylib.so</> (or on some platforms,
- <literal>mylib.sl</>) to be preloaded from the installation's standard
+ <literal>mylib.so</literal> (or on some platforms,
+ <literal>mylib.sl</literal>) to be preloaded from the installation's standard
library directory. The differences between the settings are when they
take effect and what privileges are required to change them.
</para>
@@ -7007,14 +7007,14 @@ SET XML OPTION { DOCUMENT | CONTENT };
<productname>PostgreSQL</productname> procedural language libraries can
be preloaded in this way, typically by using the
syntax <literal>'$libdir/plXXX'</literal> where
- <literal>XXX</literal> is <literal>pgsql</>, <literal>perl</>,
- <literal>tcl</>, or <literal>python</>.
+ <literal>XXX</literal> is <literal>pgsql</literal>, <literal>perl</literal>,
+ <literal>tcl</literal>, or <literal>python</literal>.
</para>
<para>
Only shared libraries specifically intended to be used with PostgreSQL
can be loaded this way. Every PostgreSQL-supported library has
- a <quote>magic block</> that is checked to guarantee compatibility. For
+ a <quote>magic block</quote> that is checked to guarantee compatibility. For
this reason, non-PostgreSQL libraries cannot be loaded in this way. You
might be able to use operating-system facilities such
as <envar>LD_PRELOAD</envar> for that.
@@ -7029,10 +7029,10 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-local-preload-libraries" xreflabel="local_preload_libraries">
<term><varname>local_preload_libraries</varname> (<type>string</type>)
<indexterm>
- <primary><varname>local_preload_libraries</> configuration parameter</primary>
+ <primary><varname>local_preload_libraries</varname> configuration parameter</primary>
</indexterm>
<indexterm>
- <primary><filename>$libdir/plugins</></primary>
+ <primary><filename>$libdir/plugins</filename></primary>
</indexterm>
</term>
<listitem>
@@ -7051,10 +7051,10 @@ SET XML OPTION { DOCUMENT | CONTENT };
<para>
This option can be set by any user. Because of that, the libraries
that can be loaded are restricted to those appearing in the
- <filename>plugins</> subdirectory of the installation's
+ <filename>plugins</filename> subdirectory of the installation's
standard library directory. (It is the database administrator's
- responsibility to ensure that only <quote>safe</> libraries
- are installed there.) Entries in <varname>local_preload_libraries</>
+ responsibility to ensure that only <quote>safe</quote> libraries
+ are installed there.) Entries in <varname>local_preload_libraries</varname>
can specify this directory explicitly, for example
<literal>$libdir/plugins/mylib</literal>, or just specify
the library name &mdash; <literal>mylib</literal> would have
@@ -7064,11 +7064,11 @@ SET XML OPTION { DOCUMENT | CONTENT };
<para>
The intent of this feature is to allow unprivileged users to load
debugging or performance-measurement libraries into specific sessions
- without requiring an explicit <command>LOAD</> command. To that end,
+ without requiring an explicit <command>LOAD</command> command. To that end,
it would be typical to set this parameter using
the <envar>PGOPTIONS</envar> environment variable on the client or by
using
- <command>ALTER ROLE SET</>.
+ <command>ALTER ROLE SET</command>.
</para>
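
A sketch of the per-role form (role and library names are illustrative, and
the library is assumed to have been installed in the plugins subdirectory):

    ALTER ROLE dev_user SET local_preload_libraries = 'my_profiler';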
<para>
@@ -7083,7 +7083,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-session-preload-libraries" xreflabel="session_preload_libraries">
<term><varname>session_preload_libraries</varname> (<type>string</type>)
<indexterm>
- <primary><varname>session_preload_libraries</> configuration parameter</primary>
+ <primary><varname>session_preload_libraries</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7104,10 +7104,10 @@ SET XML OPTION { DOCUMENT | CONTENT };
The intent of this feature is to allow debugging or
performance-measurement libraries to be loaded into specific sessions
without an explicit
- <command>LOAD</> command being given. For
+ <command>LOAD</command> command being given. For
example, <xref linkend="auto-explain"> could be enabled for all
sessions under a given user name by setting this parameter
- with <command>ALTER ROLE SET</>. Also, this parameter can be changed
+ with <command>ALTER ROLE SET</command>. Also, this parameter can be changed
without restarting the server (but changes only take effect when a new
session is started), so it is easier to add new modules this way, even
if they should apply to all sessions.
@@ -7125,7 +7125,7 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-shared-preload-libraries" xreflabel="shared_preload_libraries">
<term><varname>shared_preload_libraries</varname> (<type>string</type>)
<indexterm>
- <primary><varname>shared_preload_libraries</> configuration parameter</primary>
+ <primary><varname>shared_preload_libraries</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7182,9 +7182,9 @@ SET XML OPTION { DOCUMENT | CONTENT };
<varlistentry id="guc-dynamic-library-path" xreflabel="dynamic_library_path">
<term><varname>dynamic_library_path</varname> (<type>string</type>)
<indexterm>
- <primary><varname>dynamic_library_path</> configuration parameter</primary>
+ <primary><varname>dynamic_library_path</varname> configuration parameter</primary>
</indexterm>
- <indexterm><primary>dynamic loading</></>
+ <indexterm><primary>dynamic loading</primary></indexterm>
</term>
<listitem>
<para>
@@ -7236,7 +7236,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-gin-fuzzy-search-limit" xreflabel="gin_fuzzy_search_limit">
<term><varname>gin_fuzzy_search_limit</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>gin_fuzzy_search_limit</> configuration parameter</primary>
+ <primary><varname>gin_fuzzy_search_limit</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7267,7 +7267,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<secondary>deadlock</secondary>
</indexterm>
<indexterm>
- <primary><varname>deadlock_timeout</> configuration parameter</primary>
+ <primary><varname>deadlock_timeout</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7280,7 +7280,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
just wait on the lock for a while before checking for a
deadlock. Increasing this value reduces the amount of time
wasted in needless deadlock checks, but slows down reporting of
- real deadlock errors. The default is one second (<literal>1s</>),
+ real deadlock errors. The default is one second (<literal>1s</literal>),
which is probably about the smallest value you would want in
practice. On a heavily loaded server you might want to raise it.
Ideally the setting should exceed your typical transaction time,
@@ -7302,7 +7302,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-max-locks-per-transaction" xreflabel="max_locks_per_transaction">
<term><varname>max_locks_per_transaction</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_locks_per_transaction</> configuration parameter</primary>
+ <primary><varname>max_locks_per_transaction</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7315,7 +7315,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
any one time. This parameter controls the average number of object
locks allocated for each transaction; individual transactions
can lock more objects as long as the locks of all transactions
- fit in the lock table. This is <emphasis>not</> the number of
+ fit in the lock table. This is <emphasis>not</emphasis> the number of
rows that can be locked; that value is unlimited. The default,
64, has historically proven sufficient, but you might need to
raise this value if you have queries that touch many different
@@ -7334,7 +7334,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-max-pred-locks-per-transaction" xreflabel="max_pred_locks_per_transaction">
<term><varname>max_pred_locks_per_transaction</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_pred_locks_per_transaction</> configuration parameter</primary>
+ <primary><varname>max_pred_locks_per_transaction</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7347,7 +7347,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
any one time. This parameter controls the average number of object
locks allocated for each transaction; individual transactions
can lock more objects as long as the locks of all transactions
- fit in the lock table. This is <emphasis>not</> the number of
+ fit in the lock table. This is <emphasis>not</emphasis> the number of
rows that can be locked; that value is unlimited. The default,
64, has generally been sufficient in testing, but you might need to
raise this value if you have clients that touch many different
@@ -7360,7 +7360,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-max-pred-locks-per-relation" xreflabel="max_pred_locks_per_relation">
<term><varname>max_pred_locks_per_relation</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_pred_locks_per_relation</> configuration parameter</primary>
+ <primary><varname>max_pred_locks_per_relation</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7371,8 +7371,8 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
limit, while negative values
mean <xref linkend="guc-max-pred-locks-per-transaction"> divided by
the absolute value of this setting. The default is -2, which keeps
- the behavior from previous versions of <productname>PostgreSQL</>.
- This parameter can only be set in the <filename>postgresql.conf</>
+ the behavior from previous versions of <productname>PostgreSQL</productname>.
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -7381,7 +7381,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-max-pred-locks-per-page" xreflabel="max_pred_locks_per_page">
<term><varname>max_pred_locks_per_page</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_pred_locks_per_page</> configuration parameter</primary>
+ <primary><varname>max_pred_locks_per_page</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7389,7 +7389,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
This controls how many rows on a single page can be predicate-locked
before the lock is promoted to covering the whole page. The default
is 2. This parameter can only be set in
- the <filename>postgresql.conf</> file or on the server command line.
+ the <filename>postgresql.conf</filename> file or on the server command line.
</para>
</listitem>
</varlistentry>
@@ -7408,62 +7408,62 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-array-nulls" xreflabel="array_nulls">
<term><varname>array_nulls</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>array_nulls</> configuration parameter</primary>
+ <primary><varname>array_nulls</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
This controls whether the array input parser recognizes
- unquoted <literal>NULL</> as specifying a null array element.
- By default, this is <literal>on</>, allowing array values containing
- null values to be entered. However, <productname>PostgreSQL</> versions
+ unquoted <literal>NULL</literal> as specifying a null array element.
+ By default, this is <literal>on</literal>, allowing array values containing
+ null values to be entered. However, <productname>PostgreSQL</productname> versions
before 8.2 did not support null values in arrays, and therefore would
- treat <literal>NULL</> as specifying a normal array element with
- the string value <quote>NULL</>. For backward compatibility with
+ treat <literal>NULL</literal> as specifying a normal array element with
+ the string value <quote>NULL</quote>. For backward compatibility with
applications that require the old behavior, this variable can be
- turned <literal>off</>.
+ turned <literal>off</literal>.
</para>
<para>
Note that it is possible to create array values containing null values
- even when this variable is <literal>off</>.
+ even when this variable is <literal>off</literal>.
</para>
</listitem>
</varlistentry>
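A minimal SQL sketch of the behavior described above (the array literal is arbitrary):

SET array_nulls = on;
SELECT '{apple,NULL,cherry}'::text[];   -- second element is a null value
SET array_nulls = off;
SELECT '{apple,NULL,cherry}'::text[];   -- second element is the four-character string 'NULL'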
<varlistentry id="guc-backslash-quote" xreflabel="backslash_quote">
<term><varname>backslash_quote</varname> (<type>enum</type>)
- <indexterm><primary>strings</><secondary>backslash quotes</></>
+ <indexterm><primary>strings</primary><secondary>backslash quotes</secondary></indexterm>
<indexterm>
- <primary><varname>backslash_quote</> configuration parameter</primary>
+ <primary><varname>backslash_quote</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
This controls whether a quote mark can be represented by
- <literal>\'</> in a string literal. The preferred, SQL-standard way
- to represent a quote mark is by doubling it (<literal>''</>) but
- <productname>PostgreSQL</> has historically also accepted
- <literal>\'</>. However, use of <literal>\'</> creates security risks
+ <literal>\'</literal> in a string literal. The preferred, SQL-standard way
+ to represent a quote mark is by doubling it (<literal>''</literal>) but
+ <productname>PostgreSQL</productname> has historically also accepted
+ <literal>\'</literal>. However, use of <literal>\'</literal> creates security risks
because in some client character set encodings, there are multibyte
characters in which the last byte is numerically equivalent to ASCII
- <literal>\</>. If client-side code does escaping incorrectly then a
+ <literal>\</literal>. If client-side code does escaping incorrectly then a
SQL-injection attack is possible. This risk can be prevented by
making the server reject queries in which a quote mark appears to be
escaped by a backslash.
- The allowed values of <varname>backslash_quote</> are
- <literal>on</> (allow <literal>\'</> always),
- <literal>off</> (reject always), and
- <literal>safe_encoding</> (allow only if client encoding does not
- allow ASCII <literal>\</> within a multibyte character).
- <literal>safe_encoding</> is the default setting.
+ The allowed values of <varname>backslash_quote</varname> are
+ <literal>on</literal> (allow <literal>\'</literal> always),
+ <literal>off</literal> (reject always), and
+ <literal>safe_encoding</literal> (allow only if client encoding does not
+ allow ASCII <literal>\</literal> within a multibyte character).
+ <literal>safe_encoding</literal> is the default setting.
</para>
<para>
- Note that in a standard-conforming string literal, <literal>\</> just
- means <literal>\</> anyway. This parameter only affects the handling of
+ Note that in a standard-conforming string literal, <literal>\</literal> just
+ means <literal>\</literal> anyway. This parameter only affects the handling of
non-standard-conforming literals, including
- escape string syntax (<literal>E'...'</>).
+ escape string syntax (<literal>E'...'</literal>).
</para>
</listitem>
</varlistentry>
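A short sketch of the two ways to embed a quote mark discussed above (the string value is arbitrary):

SELECT 'O''Reilly';    -- SQL-standard doubled quote; always accepted
SELECT E'O\'Reilly';   -- backslash-escaped quote; accepted only when backslash_quote permits it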
@@ -7471,7 +7471,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-default-with-oids" xreflabel="default_with_oids">
<term><varname>default_with_oids</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>default_with_oids</> configuration parameter</primary>
+ <primary><varname>default_with_oids</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7481,9 +7481,9 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
newly-created tables, if neither <literal>WITH OIDS</literal>
nor <literal>WITHOUT OIDS</literal> is specified. It also
determines whether OIDs will be included in tables created by
- <command>SELECT INTO</command>. The parameter is <literal>off</>
- by default; in <productname>PostgreSQL</> 8.0 and earlier, it
- was <literal>on</> by default.
+ <command>SELECT INTO</command>. The parameter is <literal>off</literal>
+ by default; in <productname>PostgreSQL</productname> 8.0 and earlier, it
+ was <literal>on</literal> by default.
</para>
<para>
@@ -7499,21 +7499,21 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-escape-string-warning" xreflabel="escape_string_warning">
<term><varname>escape_string_warning</varname> (<type>boolean</type>)
- <indexterm><primary>strings</><secondary>escape warning</></>
+ <indexterm><primary>strings</primary><secondary>escape warning</secondary></indexterm>
<indexterm>
- <primary><varname>escape_string_warning</> configuration parameter</primary>
+ <primary><varname>escape_string_warning</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When on, a warning is issued if a backslash (<literal>\</>)
- appears in an ordinary string literal (<literal>'...'</>
+ When on, a warning is issued if a backslash (<literal>\</literal>)
+ appears in an ordinary string literal (<literal>'...'</literal>
syntax) and <varname>standard_conforming_strings</varname> is off.
- The default is <literal>on</>.
+ The default is <literal>on</literal>.
</para>
<para>
Applications that wish to use backslash as escape should be
- modified to use escape string syntax (<literal>E'...'</>),
+ modified to use escape string syntax (<literal>E'...'</literal>),
because the default behavior of ordinary strings is now to treat
backslash as an ordinary character, per SQL standard. This variable
can be enabled to help locate code that needs to be changed.
@@ -7524,22 +7524,22 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-lo-compat-privileges" xreflabel="lo_compat_privileges">
<term><varname>lo_compat_privileges</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>lo_compat_privileges</> configuration parameter</primary>
+ <primary><varname>lo_compat_privileges</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- In <productname>PostgreSQL</> releases prior to 9.0, large objects
+ In <productname>PostgreSQL</productname> releases prior to 9.0, large objects
did not have access privileges and were, therefore, always readable
- and writable by all users. Setting this variable to <literal>on</>
+ and writable by all users. Setting this variable to <literal>on</literal>
disables the new privilege checks, for compatibility with prior
- releases. The default is <literal>off</>.
+ releases. The default is <literal>off</literal>.
Only superusers can change this setting.
</para>
<para>
Setting this variable does not disable all security checks related to
large objects &mdash; only those for which the default behavior has
- changed in <productname>PostgreSQL</> 9.0.
+ changed in <productname>PostgreSQL</productname> 9.0.
For example, <literal>lo_import()</literal> and
<literal>lo_export()</literal> need superuser privileges regardless
of this setting.
@@ -7550,18 +7550,18 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-operator-precedence-warning" xreflabel="operator_precedence_warning">
<term><varname>operator_precedence_warning</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>operator_precedence_warning</> configuration parameter</primary>
+ <primary><varname>operator_precedence_warning</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
When on, the parser will emit a warning for any construct that might
- have changed meanings since <productname>PostgreSQL</> 9.4 as a result
+ have changed meanings since <productname>PostgreSQL</productname> 9.4 as a result
of changes in operator precedence. This is useful for auditing
applications to see if precedence changes have broken anything; but it
is not meant to be kept turned on in production, since it will warn
about some perfectly valid, standard-compliant SQL code.
- The default is <literal>off</>.
+ The default is <literal>off</literal>.
</para>
<para>
@@ -7573,15 +7573,15 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-quote-all-identifiers" xreflabel="quote-all-identifiers">
<term><varname>quote_all_identifiers</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>quote_all_identifiers</> configuration parameter</primary>
+ <primary><varname>quote_all_identifiers</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
When the database generates SQL, force all identifiers to be quoted,
even if they are not (currently) keywords. This will affect the
- output of <command>EXPLAIN</> as well as the results of functions
- like <function>pg_get_viewdef</>. See also the
+ output of <command>EXPLAIN</command> as well as the results of functions
+ like <function>pg_get_viewdef</function>. See also the
<option>--quote-all-identifiers</option> option of
<xref linkend="app-pgdump"> and <xref linkend="app-pg-dumpall">.
</para>
@@ -7590,22 +7590,22 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-standard-conforming-strings" xreflabel="standard_conforming_strings">
<term><varname>standard_conforming_strings</varname> (<type>boolean</type>)
- <indexterm><primary>strings</><secondary>standard conforming</></>
+ <indexterm><primary>strings</primary><secondary>standard conforming</secondary></indexterm>
<indexterm>
- <primary><varname>standard_conforming_strings</> configuration parameter</primary>
+ <primary><varname>standard_conforming_strings</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
This controls whether ordinary string literals
- (<literal>'...'</>) treat backslashes literally, as specified in
+ (<literal>'...'</literal>) treat backslashes literally, as specified in
the SQL standard.
Beginning in <productname>PostgreSQL</productname> 9.1, the default is
- <literal>on</> (prior releases defaulted to <literal>off</>).
+ <literal>on</literal> (prior releases defaulted to <literal>off</literal>).
Applications can check this
parameter to determine how string literals will be processed.
The presence of this parameter can also be taken as an indication
- that the escape string syntax (<literal>E'...'</>) is supported.
+ that the escape string syntax (<literal>E'...'</literal>) is supported.
Escape string syntax (<xref linkend="sql-syntax-strings-escape">)
should be used if an application desires
backslashes to be treated as escape characters.
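A minimal sketch of the difference, assuming the default setting on a current server:

SET standard_conforming_strings = on;
SELECT 'a\nb';    -- backslash is an ordinary character: a, \, n, b
SELECT E'a\nb';   -- escape-string syntax: a, newline, b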
@@ -7616,7 +7616,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-synchronize-seqscans" xreflabel="synchronize_seqscans">
<term><varname>synchronize_seqscans</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>synchronize_seqscans</> configuration parameter</primary>
+ <primary><varname>synchronize_seqscans</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7625,13 +7625,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
other, so that concurrent scans read the same block at about the
same time and hence share the I/O workload. When this is enabled,
a scan might start in the middle of the table and then <quote>wrap
- around</> the end to cover all rows, so as to synchronize with the
+ around</quote> the end to cover all rows, so as to synchronize with the
activity of scans already in progress. This can result in
unpredictable changes in the row ordering returned by queries that
- have no <literal>ORDER BY</> clause. Setting this parameter to
- <literal>off</> ensures the pre-8.3 behavior in which a sequential
+ have no <literal>ORDER BY</literal> clause. Setting this parameter to
+ <literal>off</literal> ensures the pre-8.3 behavior in which a sequential
scan always starts from the beginning of the table. The default
- is <literal>on</>.
+ is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
@@ -7645,31 +7645,31 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-transform-null-equals" xreflabel="transform_null_equals">
<term><varname>transform_null_equals</varname> (<type>boolean</type>)
- <indexterm><primary>IS NULL</></>
+ <indexterm><primary>IS NULL</primary></indexterm>
<indexterm>
- <primary><varname>transform_null_equals</> configuration parameter</primary>
+ <primary><varname>transform_null_equals</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When on, expressions of the form <literal><replaceable>expr</> =
+ When on, expressions of the form <literal><replaceable>expr</replaceable> =
NULL</literal> (or <literal>NULL =
- <replaceable>expr</></literal>) are treated as
- <literal><replaceable>expr</> IS NULL</literal>, that is, they
- return true if <replaceable>expr</> evaluates to the null value,
+ <replaceable>expr</replaceable></literal>) are treated as
+ <literal><replaceable>expr</replaceable> IS NULL</literal>, that is, they
+ return true if <replaceable>expr</replaceable> evaluates to the null value,
and false otherwise. The correct SQL-spec-compliant behavior of
- <literal><replaceable>expr</> = NULL</literal> is to always
+ <literal><replaceable>expr</replaceable> = NULL</literal> is to always
return null (unknown). Therefore this parameter defaults to
- <literal>off</>.
+ <literal>off</literal>.
</para>
<para>
However, filtered forms in <productname>Microsoft
Access</productname> generate queries that appear to use
- <literal><replaceable>expr</> = NULL</literal> to test for
+ <literal><replaceable>expr</replaceable> = NULL</literal> to test for
null values, so if you use that interface to access the database you
might want to turn this option on. Since expressions of the
- form <literal><replaceable>expr</> = NULL</literal> always
+ form <literal><replaceable>expr</replaceable> = NULL</literal> always
return the null value (using the SQL standard interpretation), they are not
very useful and do not appear often in normal applications so
this option does little harm in practice. But new users are
@@ -7678,7 +7678,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
</para>
<para>
- Note that this option only affects the exact form <literal>= NULL</>,
+ Note that this option only affects the exact form <literal>= NULL</literal>,
not other comparison operators or other expressions
that are computationally equivalent to some expression
involving the equals operator (such as <literal>IN</literal>).
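A minimal sketch of the behavior described above:

-- With the default transform_null_equals = off:
SELECT 1 WHERE NULL = NULL;    -- comparison yields null, so no row is returned
SELECT 1 WHERE NULL IS NULL;   -- SQL-compliant test; returns one row
-- After SET transform_null_equals = on, the first query is treated like the second.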
@@ -7703,7 +7703,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-exit-on-error" xreflabel="exit_on_error">
<term><varname>exit_on_error</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>exit_on_error</> configuration parameter</primary>
+ <primary><varname>exit_on_error</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7718,16 +7718,16 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-restart-after-crash" xreflabel="restart_after_crash">
<term><varname>restart_after_crash</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>restart_after_crash</> configuration parameter</primary>
+ <primary><varname>restart_after_crash</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- When set to true, which is the default, <productname>PostgreSQL</>
+ When set to true, which is the default, <productname>PostgreSQL</productname>
will automatically reinitialize after a backend crash. Leaving this
value set to true is normally the best way to maximize the availability
of the database. However, in some circumstances, such as when
- <productname>PostgreSQL</> is being invoked by clusterware, it may be
+ <productname>PostgreSQL</productname> is being invoked by clusterware, it may be
useful to disable the restart so that the clusterware can gain
control and take any actions it deems appropriate.
</para>
@@ -7742,10 +7742,10 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<title>Preset Options</title>
<para>
- The following <quote>parameters</> are read-only, and are determined
+ The following <quote>parameters</quote> are read-only, and are determined
when <productname>PostgreSQL</productname> is compiled or when it is
installed. As such, they have been excluded from the sample
- <filename>postgresql.conf</> file. These options report
+ <filename>postgresql.conf</filename> file. These options report
various aspects of <productname>PostgreSQL</productname> behavior
that might be of interest to certain applications, particularly
administrative front-ends.
@@ -7756,13 +7756,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-block-size" xreflabel="block_size">
<term><varname>block_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>block_size</> configuration parameter</primary>
+ <primary><varname>block_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the size of a disk block. It is determined by the value
- of <literal>BLCKSZ</> when building the server. The default
+ of <literal>BLCKSZ</literal> when building the server. The default
value is 8192 bytes. The meaning of some configuration
variables (such as <xref linkend="guc-shared-buffers">) is
influenced by <varname>block_size</varname>. See <xref
@@ -7774,7 +7774,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-data-checksums" xreflabel="data_checksums">
<term><varname>data_checksums</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>data_checksums</> configuration parameter</primary>
+ <primary><varname>data_checksums</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7788,7 +7788,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-debug-assertions" xreflabel="debug_assertions">
<term><varname>debug_assertions</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>debug_assertions</> configuration parameter</primary>
+ <primary><varname>debug_assertions</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7808,13 +7808,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-integer-datetimes" xreflabel="integer_datetimes">
<term><varname>integer_datetimes</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>integer_datetimes</> configuration parameter</primary>
+ <primary><varname>integer_datetimes</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
- Reports whether <productname>PostgreSQL</> was built with support for
- 64-bit-integer dates and times. As of <productname>PostgreSQL</> 10,
+ Reports whether <productname>PostgreSQL</productname> was built with support for
+ 64-bit-integer dates and times. As of <productname>PostgreSQL</productname> 10,
this is always <literal>on</literal>.
</para>
</listitem>
@@ -7823,7 +7823,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-lc-collate" xreflabel="lc_collate">
<term><varname>lc_collate</varname> (<type>string</type>)
<indexterm>
- <primary><varname>lc_collate</> configuration parameter</primary>
+ <primary><varname>lc_collate</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7838,7 +7838,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-lc-ctype" xreflabel="lc_ctype">
<term><varname>lc_ctype</varname> (<type>string</type>)
<indexterm>
- <primary><varname>lc_ctype</> configuration parameter</primary>
+ <primary><varname>lc_ctype</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -7855,13 +7855,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-max-function-args" xreflabel="max_function_args">
<term><varname>max_function_args</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_function_args</> configuration parameter</primary>
+ <primary><varname>max_function_args</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the maximum number of function arguments. It is determined by
- the value of <literal>FUNC_MAX_ARGS</> when building the server. The
+ the value of <literal>FUNC_MAX_ARGS</literal> when building the server. The
default value is 100 arguments.
</para>
</listitem>
@@ -7870,14 +7870,14 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-max-identifier-length" xreflabel="max_identifier_length">
<term><varname>max_identifier_length</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_identifier_length</> configuration parameter</primary>
+ <primary><varname>max_identifier_length</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the maximum identifier length. It is determined as one
- less than the value of <literal>NAMEDATALEN</> when building
- the server. The default value of <literal>NAMEDATALEN</> is
+ less than the value of <literal>NAMEDATALEN</literal> when building
+ the server. The default value of <literal>NAMEDATALEN</literal> is
64; therefore the default
<varname>max_identifier_length</varname> is 63 bytes, which
can be less than 63 characters when using multibyte encodings.
@@ -7888,13 +7888,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-max-index-keys" xreflabel="max_index_keys">
<term><varname>max_index_keys</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>max_index_keys</> configuration parameter</primary>
+ <primary><varname>max_index_keys</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the maximum number of index keys. It is determined by
- the value of <literal>INDEX_MAX_KEYS</> when building the server. The
+ the value of <literal>INDEX_MAX_KEYS</literal> when building the server. The
default value is 32 keys.
</para>
</listitem>
@@ -7903,16 +7903,16 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-segment-size" xreflabel="segment_size">
<term><varname>segment_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>segment_size</> configuration parameter</primary>
+ <primary><varname>segment_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the number of blocks (pages) that can be stored within a file
- segment. It is determined by the value of <literal>RELSEG_SIZE</>
+ segment. It is determined by the value of <literal>RELSEG_SIZE</literal>
when building the server. The maximum size of a segment file in bytes
- is equal to <varname>segment_size</> multiplied by
- <varname>block_size</>; by default this is 1GB.
+ is equal to <varname>segment_size</varname> multiplied by
+ <varname>block_size</varname>; by default this is 1GB.
</para>
</listitem>
</varlistentry>
@@ -7920,9 +7920,9 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-server-encoding" xreflabel="server_encoding">
<term><varname>server_encoding</varname> (<type>string</type>)
<indexterm>
- <primary><varname>server_encoding</> configuration parameter</primary>
+ <primary><varname>server_encoding</varname> configuration parameter</primary>
</indexterm>
- <indexterm><primary>character set</></>
+ <indexterm><primary>character set</primary></indexterm>
</term>
<listitem>
<para>
@@ -7937,13 +7937,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-server-version" xreflabel="server_version">
<term><varname>server_version</varname> (<type>string</type>)
<indexterm>
- <primary><varname>server_version</> configuration parameter</primary>
+ <primary><varname>server_version</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the version number of the server. It is determined by the
- value of <literal>PG_VERSION</> when building the server.
+ value of <literal>PG_VERSION</literal> when building the server.
</para>
</listitem>
</varlistentry>
@@ -7951,13 +7951,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-server-version-num" xreflabel="server_version_num">
<term><varname>server_version_num</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>server_version_num</> configuration parameter</primary>
+ <primary><varname>server_version_num</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the version number of the server as an integer. It is determined
- by the value of <literal>PG_VERSION_NUM</> when building the server.
+ by the value of <literal>PG_VERSION_NUM</literal> when building the server.
</para>
</listitem>
</varlistentry>
@@ -7965,13 +7965,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-wal-block-size" xreflabel="wal_block_size">
<term><varname>wal_block_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_block_size</> configuration parameter</primary>
+ <primary><varname>wal_block_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the size of a WAL disk block. It is determined by the value
- of <literal>XLOG_BLCKSZ</> when building the server. The default value
+ of <literal>XLOG_BLCKSZ</literal> when building the server. The default value
is 8192 bytes.
</para>
</listitem>
@@ -7980,14 +7980,14 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-wal-segment-size" xreflabel="wal_segment_size">
<term><varname>wal_segment_size</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>wal_segment_size</> configuration parameter</primary>
+ <primary><varname>wal_segment_size</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Reports the number of blocks (pages) in a WAL segment file.
The total size of a WAL segment file in bytes is equal to
- <varname>wal_segment_size</> multiplied by <varname>wal_block_size</>;
+ <varname>wal_segment_size</varname> multiplied by <varname>wal_block_size</varname>;
by default this is 16MB. See <xref linkend="wal-configuration"> for
more information.
</para>
@@ -8010,12 +8010,12 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<para>
Custom options have two-part names: an extension name, then a dot, then
the parameter name proper, much like qualified names in SQL. An example
- is <literal>plpgsql.variable_conflict</>.
+ is <literal>plpgsql.variable_conflict</literal>.
</para>
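For example, a session might set a real extension parameter or a placeholder for a module that has not been loaded yet (the second parameter name is purely hypothetical):

SET plpgsql.variable_conflict = use_column;
SET myext.trace_level = '2';   -- placeholder until the hypothetical myext module is loaded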
<para>
Because custom options may need to be set in processes that have not
- loaded the relevant extension module, <productname>PostgreSQL</>
+ loaded the relevant extension module, <productname>PostgreSQL</productname>
will accept a setting for any two-part parameter name. Such variables
are treated as placeholders and have no function until the module that
defines them is loaded. When an extension module is loaded, it will add
@@ -8034,7 +8034,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
to assist with recovery of severely damaged databases. There
should be no reason to use them on a production database.
As such, they have been excluded from the sample
- <filename>postgresql.conf</> file. Note that many of these
+ <filename>postgresql.conf</filename> file. Note that many of these
parameters require special source compilation flags to work at all.
</para>
@@ -8073,7 +8073,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-post-auth-delay" xreflabel="post_auth_delay">
<term><varname>post_auth_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>post_auth_delay</> configuration parameter</primary>
+ <primary><varname>post_auth_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8090,7 +8090,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-pre-auth-delay" xreflabel="pre_auth_delay">
<term><varname>pre_auth_delay</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>pre_auth_delay</> configuration parameter</primary>
+ <primary><varname>pre_auth_delay</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8100,7 +8100,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
authentication procedure. This is intended to give developers an
opportunity to attach to the server process with a debugger to
trace down misbehavior in authentication.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -8109,7 +8109,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-trace-notify" xreflabel="trace_notify">
<term><varname>trace_notify</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>trace_notify</> configuration parameter</primary>
+ <primary><varname>trace_notify</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8127,7 +8127,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-trace-recovery-messages" xreflabel="trace_recovery_messages">
<term><varname>trace_recovery_messages</varname> (<type>enum</type>)
<indexterm>
- <primary><varname>trace_recovery_messages</> configuration parameter</primary>
+ <primary><varname>trace_recovery_messages</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8136,15 +8136,15 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
would not be logged. This parameter allows the user to override the
normal setting of <xref linkend="guc-log-min-messages">, but only for
specific messages. This is intended for use in debugging Hot Standby.
- Valid values are <literal>DEBUG5</>, <literal>DEBUG4</>,
- <literal>DEBUG3</>, <literal>DEBUG2</>, <literal>DEBUG1</>, and
- <literal>LOG</>. The default, <literal>LOG</>, does not affect
+ Valid values are <literal>DEBUG5</literal>, <literal>DEBUG4</literal>,
+ <literal>DEBUG3</literal>, <literal>DEBUG2</literal>, <literal>DEBUG1</literal>, and
+ <literal>LOG</literal>. The default, <literal>LOG</literal>, does not affect
logging decisions at all. The other values cause recovery-related
debug messages of that priority or higher to be logged as though they
- had <literal>LOG</> priority; for common settings of
- <varname>log_min_messages</> this results in unconditionally sending
+ had <literal>LOG</literal> priority; for common settings of
+ <varname>log_min_messages</varname> this results in unconditionally sending
them to the server log.
- This parameter can only be set in the <filename>postgresql.conf</>
+ This parameter can only be set in the <filename>postgresql.conf</filename>
file or on the server command line.
</para>
</listitem>
@@ -8153,7 +8153,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry id="guc-trace-sort" xreflabel="trace_sort">
<term><varname>trace_sort</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>trace_sort</> configuration parameter</primary>
+ <primary><varname>trace_sort</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8169,7 +8169,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<varlistentry>
<term><varname>trace_locks</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>trace_locks</> configuration parameter</primary>
+ <primary><varname>trace_locks</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8210,7 +8210,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry>
<term><varname>trace_lwlocks</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>trace_lwlocks</> configuration parameter</primary>
+ <primary><varname>trace_lwlocks</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8230,7 +8230,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry>
<term><varname>trace_userlocks</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>trace_userlocks</> configuration parameter</primary>
+ <primary><varname>trace_userlocks</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8249,7 +8249,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry>
<term><varname>trace_lock_oidmin</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>trace_lock_oidmin</> configuration parameter</primary>
+ <primary><varname>trace_lock_oidmin</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8268,7 +8268,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry>
<term><varname>trace_lock_table</varname> (<type>integer</type>)
<indexterm>
- <primary><varname>trace_lock_table</> configuration parameter</primary>
+ <primary><varname>trace_lock_table</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8286,7 +8286,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry>
<term><varname>debug_deadlocks</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>debug_deadlocks</> configuration parameter</primary>
+ <primary><varname>debug_deadlocks</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8305,7 +8305,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry>
<term><varname>log_btree_build_stats</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_btree_build_stats</> configuration parameter</primary>
+ <primary><varname>log_btree_build_stats</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8324,7 +8324,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry id="guc-wal-consistency-checking" xreflabel="wal_consistency_checking">
<term><varname>wal_consistency_checking</varname> (<type>string</type>)
<indexterm>
- <primary><varname>wal_consistency_checking</> configuration parameter</primary>
+ <primary><varname>wal_consistency_checking</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8344,10 +8344,10 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
the feature. It can be set to <literal>all</literal> to check all
records, or to a comma-separated list of resource managers to check
only records originating from those resource managers. Currently,
- the supported resource managers are <literal>heap</>,
- <literal>heap2</>, <literal>btree</>, <literal>hash</>,
- <literal>gin</>, <literal>gist</>, <literal>sequence</>,
- <literal>spgist</>, <literal>brin</>, and <literal>generic</>. Only
+ the supported resource managers are <literal>heap</literal>,
+ <literal>heap2</literal>, <literal>btree</literal>, <literal>hash</literal>,
+ <literal>gin</literal>, <literal>gist</literal>, <literal>sequence</literal>,
+ <literal>spgist</literal>, <literal>brin</literal>, and <literal>generic</literal>. Only
superusers can change this setting.
</para>
</listitem>
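For instance, a superuser could enable checking for a couple of resource managers only (a sketch; any of the names listed above may be combined):

ALTER SYSTEM SET wal_consistency_checking = 'btree,gin';
SELECT pg_reload_conf();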
@@ -8356,7 +8356,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry id="guc-wal-debug" xreflabel="wal_debug">
<term><varname>wal_debug</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>wal_debug</> configuration parameter</primary>
+ <primary><varname>wal_debug</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8372,7 +8372,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry id="guc-ignore-checksum-failure" xreflabel="ignore_checksum_failure">
<term><varname>ignore_checksum_failure</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>ignore_checksum_failure</> configuration parameter</primary>
+ <primary><varname>ignore_checksum_failure</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8381,15 +8381,15 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
</para>
<para>
Detection of a checksum failure during a read normally causes
- <productname>PostgreSQL</> to report an error, aborting the current
- transaction. Setting <varname>ignore_checksum_failure</> to on causes
+ <productname>PostgreSQL</productname> to report an error, aborting the current
+ transaction. Setting <varname>ignore_checksum_failure</varname> to on causes
the system to ignore the failure (but still report a warning), and
continue processing. This behavior may <emphasis>cause crashes, propagate
- or hide corruption, or other serious problems</>. However, it may allow
+ or hide corruption, or other serious problems</emphasis>. However, it may allow
you to get past the error and retrieve undamaged tuples that might still be
present in the table if the block header is still sane. If the header is
corrupt an error will be reported even if this option is enabled. The
- default setting is <literal>off</>, and it can only be changed by a superuser.
+ default setting is <literal>off</literal>, and it can only be changed by a superuser.
</para>
</listitem>
</varlistentry>
@@ -8397,16 +8397,16 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<varlistentry id="guc-zero-damaged-pages" xreflabel="zero_damaged_pages">
<term><varname>zero_damaged_pages</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>zero_damaged_pages</> configuration parameter</primary>
+ <primary><varname>zero_damaged_pages</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Detection of a damaged page header normally causes
- <productname>PostgreSQL</> to report an error, aborting the current
- transaction. Setting <varname>zero_damaged_pages</> to on causes
+ <productname>PostgreSQL</productname> to report an error, aborting the current
+ transaction. Setting <varname>zero_damaged_pages</varname> to on causes
the system to instead report a warning, zero out the damaged
- page in memory, and continue processing. This behavior <emphasis>will destroy data</>,
+ page in memory, and continue processing. This behavior <emphasis>will destroy data</emphasis>,
namely all the rows on the damaged page. However, it does allow you to get
past the error and retrieve rows from any undamaged pages that might
be present in the table. It is useful for recovering data if
@@ -8415,7 +8415,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
data from the damaged pages of a table. Zeroed-out pages are not
forced to disk so it is recommended to recreate the table or
the index before turning this parameter off again. The
- default setting is <literal>off</>, and it can only be changed
+ default setting is <literal>off</literal>, and it can only be changed
by a superuser.
</para>
</listitem>
@@ -8447,15 +8447,15 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<tbody>
<row>
<entry><option>-B <replaceable>x</replaceable></option></entry>
- <entry><literal>shared_buffers = <replaceable>x</replaceable></></entry>
+ <entry><literal>shared_buffers = <replaceable>x</replaceable></literal></entry>
</row>
<row>
<entry><option>-d <replaceable>x</replaceable></option></entry>
- <entry><literal>log_min_messages = DEBUG<replaceable>x</replaceable></></entry>
+ <entry><literal>log_min_messages = DEBUG<replaceable>x</replaceable></literal></entry>
</row>
<row>
<entry><option>-e</option></entry>
- <entry><literal>datestyle = euro</></entry>
+ <entry><literal>datestyle = euro</literal></entry>
</row>
<row>
<entry>
@@ -8464,69 +8464,69 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
<option>-fs</option>, <option>-ft</option>
</entry>
<entry>
- <literal>enable_bitmapscan = off</>,
- <literal>enable_hashjoin = off</>,
- <literal>enable_indexscan = off</>,
- <literal>enable_mergejoin = off</>,
- <literal>enable_nestloop = off</>,
- <literal>enable_indexonlyscan = off</>,
- <literal>enable_seqscan = off</>,
- <literal>enable_tidscan = off</>
+ <literal>enable_bitmapscan = off</literal>,
+ <literal>enable_hashjoin = off</literal>,
+ <literal>enable_indexscan = off</literal>,
+ <literal>enable_mergejoin = off</literal>,
+ <literal>enable_nestloop = off</literal>,
+ <literal>enable_indexonlyscan = off</literal>,
+ <literal>enable_seqscan = off</literal>,
+ <literal>enable_tidscan = off</literal>
</entry>
</row>
<row>
<entry><option>-F</option></entry>
- <entry><literal>fsync = off</></entry>
+ <entry><literal>fsync = off</literal></entry>
</row>
<row>
<entry><option>-h <replaceable>x</replaceable></option></entry>
- <entry><literal>listen_addresses = <replaceable>x</replaceable></></entry>
+ <entry><literal>listen_addresses = <replaceable>x</replaceable></literal></entry>
</row>
<row>
<entry><option>-i</option></entry>
- <entry><literal>listen_addresses = '*'</></entry>
+ <entry><literal>listen_addresses = '*'</literal></entry>
</row>
<row>
<entry><option>-k <replaceable>x</replaceable></option></entry>
- <entry><literal>unix_socket_directories = <replaceable>x</replaceable></></entry>
+ <entry><literal>unix_socket_directories = <replaceable>x</replaceable></literal></entry>
</row>
<row>
<entry><option>-l</option></entry>
- <entry><literal>ssl = on</></entry>
+ <entry><literal>ssl = on</literal></entry>
</row>
<row>
<entry><option>-N <replaceable>x</replaceable></option></entry>
- <entry><literal>max_connections = <replaceable>x</replaceable></></entry>
+ <entry><literal>max_connections = <replaceable>x</replaceable></literal></entry>
</row>
<row>
<entry><option>-O</option></entry>
- <entry><literal>allow_system_table_mods = on</></entry>
+ <entry><literal>allow_system_table_mods = on</literal></entry>
</row>
<row>
<entry><option>-p <replaceable>x</replaceable></option></entry>
- <entry><literal>port = <replaceable>x</replaceable></></entry>
+ <entry><literal>port = <replaceable>x</replaceable></literal></entry>
</row>
<row>
<entry><option>-P</option></entry>
- <entry><literal>ignore_system_indexes = on</></entry>
+ <entry><literal>ignore_system_indexes = on</literal></entry>
</row>
<row>
<entry><option>-s</option></entry>
- <entry><literal>log_statement_stats = on</></entry>
+ <entry><literal>log_statement_stats = on</literal></entry>
</row>
<row>
<entry><option>-S <replaceable>x</replaceable></option></entry>
- <entry><literal>work_mem = <replaceable>x</replaceable></></entry>
+ <entry><literal>work_mem = <replaceable>x</replaceable></literal></entry>
</row>
<row>
<entry><option>-tpa</option>, <option>-tpl</option>, <option>-te</option></entry>
- <entry><literal>log_parser_stats = on</>,
- <literal>log_planner_stats = on</>,
- <literal>log_executor_stats = on</></entry>
+ <entry><literal>log_parser_stats = on</literal>,
+ <literal>log_planner_stats = on</literal>,
+ <literal>log_executor_stats = on</literal></entry>
</row>
<row>
<entry><option>-W <replaceable>x</replaceable></option></entry>
- <entry><literal>post_auth_delay = <replaceable>x</replaceable></></entry>
+ <entry><literal>post_auth_delay = <replaceable>x</replaceable></literal></entry>
</row>
</tbody>
</tgroup>
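To illustrate the mapping in the table, starting the server as shown on the first line below would be equivalent to the postgresql.conf settings that follow (the values are arbitrary):

postgres -B 16384 -N 200 -p 5433

shared_buffers  = 16384
max_connections = 200
port            = 5433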
diff --git a/doc/src/sgml/contrib-spi.sgml b/doc/src/sgml/contrib-spi.sgml
index 3287c18d27..32c7105cf6 100644
--- a/doc/src/sgml/contrib-spi.sgml
+++ b/doc/src/sgml/contrib-spi.sgml
@@ -9,7 +9,7 @@
</indexterm>
<para>
- The <application>spi</> module provides several workable examples
+ The <application>spi</application> module provides several workable examples
of using SPI and triggers. While these functions are of some value in
their own right, they are even more useful as examples to modify for
your own purposes. The functions are general enough to be used
@@ -26,15 +26,15 @@
<title>refint &mdash; Functions for Implementing Referential Integrity</title>
<para>
- <function>check_primary_key()</> and
- <function>check_foreign_key()</> are used to check foreign key constraints.
+ <function>check_primary_key()</function> and
+ <function>check_foreign_key()</function> are used to check foreign key constraints.
(This functionality is long since superseded by the built-in foreign
key mechanism, of course, but the module is still useful as an example.)
</para>
<para>
- <function>check_primary_key()</> checks the referencing table.
- To use, create a <literal>BEFORE INSERT OR UPDATE</> trigger using this
+ <function>check_primary_key()</function> checks the referencing table.
+ To use, create a <literal>BEFORE INSERT OR UPDATE</literal> trigger using this
function on a table referencing another table. Specify as the trigger
arguments: the referencing table's column name(s) which form the foreign
key, the referenced table name, and the column names in the referenced table
@@ -43,14 +43,14 @@
</para>
<para>
- <function>check_foreign_key()</> checks the referenced table.
- To use, create a <literal>BEFORE DELETE OR UPDATE</> trigger using this
+ <function>check_foreign_key()</function> checks the referenced table.
+ To use, create a <literal>BEFORE DELETE OR UPDATE</literal> trigger using this
function on a table referenced by other table(s). Specify as the trigger
arguments: the number of referencing tables for which the function has to
perform checking, the action if a referencing key is found
- (<literal>cascade</> &mdash; to delete the referencing row,
- <literal>restrict</> &mdash; to abort transaction if referencing keys
- exist, <literal>setnull</> &mdash; to set referencing key fields to null),
+ (<literal>cascade</literal> &mdash; to delete the referencing row,
+ <literal>restrict</literal> &mdash; to abort transaction if referencing keys
+ exist, <literal>setnull</literal> &mdash; to set referencing key fields to null),
the triggered table's column names which form the primary/unique key, then
the referencing table name and column names (repeated for as many
referencing tables as were specified by first argument). Note that the
@@ -59,7 +59,7 @@
</para>
<para>
- There are examples in <filename>refint.example</>.
+ There are examples in <filename>refint.example</filename>.
</para>
</sect2>
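A hedged sketch of wiring up both triggers, assuming hypothetical tables orders(customer_id) referencing customers(id); the shipped refint.example contains the authoritative version:

CREATE TRIGGER orders_check_fk
    BEFORE INSERT OR UPDATE ON orders FOR EACH ROW
    EXECUTE PROCEDURE check_primary_key('customer_id', 'customers', 'id');

CREATE TRIGGER customers_check_refs
    BEFORE DELETE OR UPDATE ON customers FOR EACH ROW
    EXECUTE PROCEDURE check_foreign_key(1, 'cascade', 'id', 'orders', 'customer_id');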
@@ -67,10 +67,10 @@
<title>timetravel &mdash; Functions for Implementing Time Travel</title>
<para>
- Long ago, <productname>PostgreSQL</> had a built-in time travel feature
+ Long ago, <productname>PostgreSQL</productname> had a built-in time travel feature
that kept the insert and delete times for each tuple. This can be
emulated using these functions. To use these functions,
- you must add to a table two columns of <type>abstime</> type to store
+ you must add to a table two columns of <type>abstime</type> type to store
the date when a tuple was inserted (start_date) and changed/deleted
(stop_date):
@@ -89,7 +89,7 @@ CREATE TABLE mytab (
<para>
When a new row is inserted, start_date should normally be set to
- current time, and stop_date to <literal>infinity</>. The trigger
+ current time, and stop_date to <literal>infinity</literal>. The trigger
will automatically substitute these values if the inserted data
contains nulls in these columns. Generally, inserting explicit
non-null data in these columns should only be done when re-loading
@@ -97,7 +97,7 @@ CREATE TABLE mytab (
</para>
<para>
- Tuples with stop_date equal to <literal>infinity</> are <quote>valid
+ Tuples with stop_date equal to <literal>infinity</literal> are <quote>valid
now</quote>, and can be modified. Tuples with a finite stop_date cannot
be modified anymore &mdash; the trigger will prevent it. (If you need
to do that, you can turn off time travel as shown below.)
@@ -107,7 +107,7 @@ CREATE TABLE mytab (
For a modifiable row, on update only the stop_date in the tuple being
updated will be changed (to current time) and a new tuple with the modified
data will be inserted. Start_date in this new tuple will be set to current
- time and stop_date to <literal>infinity</>.
+ time and stop_date to <literal>infinity</literal>.
</para>
<para>
@@ -117,29 +117,29 @@ CREATE TABLE mytab (
<para>
To query for tuples <quote>valid now</quote>, include
- <literal>stop_date = 'infinity'</> in the query's WHERE condition.
+ <literal>stop_date = 'infinity'</literal> in the query's WHERE condition.
(You might wish to incorporate that in a view.) Similarly, you can
query for tuples valid at any past time with suitable conditions on
start_date and stop_date.
</para>
<para>
- <function>timetravel()</> is the general trigger function that supports
- this behavior. Create a <literal>BEFORE INSERT OR UPDATE OR DELETE</>
+ <function>timetravel()</function> is the general trigger function that supports
+ this behavior. Create a <literal>BEFORE INSERT OR UPDATE OR DELETE</literal>
trigger using this function on each time-traveled table. Specify two
trigger arguments: the actual
names of the start_date and stop_date columns.
Optionally, you can specify one to three more arguments, which must refer
- to columns of type <type>text</>. The trigger will store the name of
+ to columns of type <type>text</type>. The trigger will store the name of
the current user into the first of these columns during INSERT, the
second column during UPDATE, and the third during DELETE.
</para>
<para>
- <function>set_timetravel()</> allows you to turn time-travel on or off for
+ <function>set_timetravel()</function> allows you to turn time-travel on or off for
a table.
- <literal>set_timetravel('mytab', 1)</> will turn TT ON for table <literal>mytab</>.
- <literal>set_timetravel('mytab', 0)</> will turn TT OFF for table <literal>mytab</>.
+ <literal>set_timetravel('mytab', 1)</literal> will turn TT ON for table <literal>mytab</literal>.
+ <literal>set_timetravel('mytab', 0)</literal> will turn TT OFF for table <literal>mytab</literal>.
In both cases the old status is reported. While TT is off, you can modify
the start_date and stop_date columns freely. Note that the on/off status
is local to the current database session &mdash; fresh sessions will
@@ -147,12 +147,12 @@ CREATE TABLE mytab (
</para>
<para>
- <function>get_timetravel()</> returns the TT state for a table without
+ <function>get_timetravel()</function> returns the TT state for a table without
changing it.
</para>
<para>
- There is an example in <filename>timetravel.example</>.
+ There is an example in <filename>timetravel.example</filename>.
</para>
</sect2>
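A minimal sketch for the mytab table above, along the lines of timetravel.example:

CREATE TRIGGER mytab_timetravel
    BEFORE INSERT OR UPDATE OR DELETE ON mytab FOR EACH ROW
    EXECUTE PROCEDURE timetravel(start_date, stop_date);

SELECT set_timetravel('mytab', 0);   -- temporarily turn time travel off
SELECT get_timetravel('mytab');      -- report the current on/off state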
@@ -160,17 +160,17 @@ CREATE TABLE mytab (
<title>autoinc &mdash; Functions for Autoincrementing Fields</title>
<para>
- <function>autoinc()</> is a trigger that stores the next value of
+ <function>autoinc()</function> is a trigger that stores the next value of
a sequence into an integer field. This has some overlap with the
- built-in <quote>serial column</> feature, but it is not the same:
- <function>autoinc()</> will override attempts to substitute a
+ built-in <quote>serial column</quote> feature, but it is not the same:
+ <function>autoinc()</function> will override attempts to substitute a
different field value during inserts, and optionally it can be
used to increment the field during updates, too.
</para>
<para>
- To use, create a <literal>BEFORE INSERT</> (or optionally <literal>BEFORE
- INSERT OR UPDATE</>) trigger using this function. Specify two
+ To use, create a <literal>BEFORE INSERT</literal> (or optionally <literal>BEFORE
+ INSERT OR UPDATE</literal>) trigger using this function. Specify two
trigger arguments: the name of the integer column to be modified,
and the name of the sequence object that will supply values.
(Actually, you can specify any number of pairs of such names, if
@@ -178,7 +178,7 @@ CREATE TABLE mytab (
</para>
<para>
- There is an example in <filename>autoinc.example</>.
+ There is an example in <filename>autoinc.example</filename>.
</para>
</sect2>
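A hedged sketch, assuming a table mytab with an integer column id and a sequence created for it; autoinc.example has the full version:

CREATE SEQUENCE mytab_id_seq;
CREATE TRIGGER mytab_autoinc
    BEFORE INSERT ON mytab FOR EACH ROW
    EXECUTE PROCEDURE autoinc(id, mytab_id_seq);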
@@ -187,19 +187,19 @@ CREATE TABLE mytab (
<title>insert_username &mdash; Functions for Tracking Who Changed a Table</title>
<para>
- <function>insert_username()</> is a trigger that stores the current
+ <function>insert_username()</function> is a trigger that stores the current
user's name into a text field. This can be useful for tracking
who last modified a particular row within a table.
</para>
<para>
- To use, create a <literal>BEFORE INSERT</> and/or <literal>UPDATE</>
+ To use, create a <literal>BEFORE INSERT</literal> and/or <literal>UPDATE</literal>
trigger using this function. Specify a single trigger
argument: the name of the text column to be modified.
</para>
<para>
- There is an example in <filename>insert_username.example</>.
+ There is an example in <filename>insert_username.example</filename>.
</para>
</sect2>
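A sketch assuming mytab has a text column last_user; insert_username.example contains the shipped version:

CREATE TRIGGER mytab_insert_username
    BEFORE INSERT OR UPDATE ON mytab FOR EACH ROW
    EXECUTE PROCEDURE insert_username(last_user);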
@@ -208,21 +208,21 @@ CREATE TABLE mytab (
<title>moddatetime &mdash; Functions for Tracking Last Modification Time</title>
<para>
- <function>moddatetime()</> is a trigger that stores the current
- time into a <type>timestamp</> field. This can be useful for tracking
+ <function>moddatetime()</function> is a trigger that stores the current
+ time into a <type>timestamp</type> field. This can be useful for tracking
the last modification time of a particular row within a table.
</para>
<para>
- To use, create a <literal>BEFORE UPDATE</>
+ To use, create a <literal>BEFORE UPDATE</literal>
trigger using this function. Specify a single trigger
argument: the name of the column to be modified.
- The column must be of type <type>timestamp</> or <type>timestamp with
- time zone</>.
+ The column must be of type <type>timestamp</type> or <type>timestamp with
+ time zone</type>.
</para>
<para>
- There is an example in <filename>moddatetime.example</>.
+ There is an example in <filename>moddatetime.example</filename>.
</para>
</sect2>
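A sketch assuming mytab has a timestamp column last_changed; moddatetime.example is the authoritative example:

CREATE TRIGGER mytab_moddatetime
    BEFORE UPDATE ON mytab FOR EACH ROW
    EXECUTE PROCEDURE moddatetime(last_changed);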
diff --git a/doc/src/sgml/contrib.sgml b/doc/src/sgml/contrib.sgml
index f32b8a81a2..7dd203e9cd 100644
--- a/doc/src/sgml/contrib.sgml
+++ b/doc/src/sgml/contrib.sgml
@@ -6,7 +6,7 @@
<para>
This appendix and the next one contain information regarding the modules that
can be found in the <literal>contrib</literal> directory of the
- <productname>PostgreSQL</> distribution.
+ <productname>PostgreSQL</productname> distribution.
These include porting tools, analysis utilities,
and plug-in features that are not part of the core PostgreSQL system,
mainly because they address a limited audience or are too experimental
@@ -41,54 +41,54 @@
<screen>
<userinput>make installcheck</userinput>
</screen>
- once you have a <productname>PostgreSQL</> server running.
+ once you have a <productname>PostgreSQL</productname> server running.
</para>
<para>
- If you are using a pre-packaged version of <productname>PostgreSQL</>,
+ If you are using a pre-packaged version of <productname>PostgreSQL</productname>,
these modules are typically made available as a separate subpackage,
- such as <literal>postgresql-contrib</>.
+ such as <literal>postgresql-contrib</literal>.
</para>
<para>
Many modules supply new user-defined functions, operators, or types.
To make use of one of these modules, after you have installed the code
you need to register the new SQL objects in the database system.
- In <productname>PostgreSQL</> 9.1 and later, this is done by executing
+ In <productname>PostgreSQL</productname> 9.1 and later, this is done by executing
a <xref linkend="sql-createextension"> command. In a fresh database,
you can simply do
<programlisting>
-CREATE EXTENSION <replaceable>module_name</>;
+CREATE EXTENSION <replaceable>module_name</replaceable>;
</programlisting>
This command must be run by a database superuser. This registers the
new SQL objects in the current database only, so you need to run this
command in each database that you want
the module's facilities to be available in. Alternatively, run it in
- database <literal>template1</> so that the extension will be copied into
+ database <literal>template1</literal> so that the extension will be copied into
subsequently-created databases by default.
</para>
<para>
Many modules allow you to install their objects in a schema of your
choice. To do that, add <literal>SCHEMA
- <replaceable>schema_name</></literal> to the <command>CREATE EXTENSION</>
+ <replaceable>schema_name</replaceable></literal> to the <command>CREATE EXTENSION</command>
command. By default, the objects will be placed in your current creation
- target schema, typically <literal>public</>.
+ target schema, typically <literal>public</literal>.
</para>
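  <para>
   For instance, with the relocatable <filename>hstore</filename> module and a
   hypothetical schema named <literal>addons</literal>:
<programlisting>
CREATE SCHEMA addons;
CREATE EXTENSION hstore SCHEMA addons;
</programlisting>
  </para>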
<para>
If your database was brought forward by dump and reload from a pre-9.1
- version of <productname>PostgreSQL</>, and you had been using the pre-9.1
+ version of <productname>PostgreSQL</productname>, and you had been using the pre-9.1
version of the module in it, you should instead do
<programlisting>
-CREATE EXTENSION <replaceable>module_name</> FROM unpackaged;
+CREATE EXTENSION <replaceable>module_name</replaceable> FROM unpackaged;
</programlisting>
This will update the pre-9.1 objects of the module into a proper
- <firstterm>extension</> object. Future updates to the module will be
+ <firstterm>extension</firstterm> object. Future updates to the module will be
managed by <xref linkend="sql-alterextension">.
For more information about extension updates, see
<xref linkend="extend-extensions">.
@@ -163,7 +163,7 @@ pages.
<para>
This appendix and the previous one contain information regarding the modules that
can be found in the <literal>contrib</literal> directory of the
- <productname>PostgreSQL</> distribution. See <xref linkend="contrib"> for
+ <productname>PostgreSQL</productname> distribution. See <xref linkend="contrib"> for
more information about the <literal>contrib</literal> section in general and
server extensions and plug-ins found in <literal>contrib</literal>
specifically.
diff --git a/doc/src/sgml/cube.sgml b/doc/src/sgml/cube.sgml
index 1ffc40f1a5..46d8e4eb8f 100644
--- a/doc/src/sgml/cube.sgml
+++ b/doc/src/sgml/cube.sgml
@@ -8,7 +8,7 @@
</indexterm>
<para>
- This module implements a data type <type>cube</> for
+ This module implements a data type <type>cube</type> for
representing multidimensional cubes.
</para>
@@ -17,8 +17,8 @@
<para>
<xref linkend="cube-repr-table"> shows the valid external
- representations for the <type>cube</>
- type. <replaceable>x</>, <replaceable>y</>, etc. denote
+ representations for the <type>cube</type>
+ type. <replaceable>x</replaceable>, <replaceable>y</replaceable>, etc. denote
floating-point numbers.
</para>
@@ -34,43 +34,43 @@
<tbody>
<row>
- <entry><literal><replaceable>x</></literal></entry>
+ <entry><literal><replaceable>x</replaceable></literal></entry>
<entry>A one-dimensional point
(or, zero-length one-dimensional interval)
</entry>
</row>
<row>
- <entry><literal>(<replaceable>x</>)</literal></entry>
+ <entry><literal>(<replaceable>x</replaceable>)</literal></entry>
<entry>Same as above</entry>
</row>
<row>
- <entry><literal><replaceable>x1</>,<replaceable>x2</>,...,<replaceable>xn</></literal></entry>
+ <entry><literal><replaceable>x1</replaceable>,<replaceable>x2</replaceable>,...,<replaceable>xn</replaceable></literal></entry>
<entry>A point in n-dimensional space, represented internally as a
zero-volume cube
</entry>
</row>
<row>
- <entry><literal>(<replaceable>x1</>,<replaceable>x2</>,...,<replaceable>xn</>)</literal></entry>
+ <entry><literal>(<replaceable>x1</replaceable>,<replaceable>x2</replaceable>,...,<replaceable>xn</replaceable>)</literal></entry>
<entry>Same as above</entry>
</row>
<row>
- <entry><literal>(<replaceable>x</>),(<replaceable>y</>)</literal></entry>
- <entry>A one-dimensional interval starting at <replaceable>x</> and ending at <replaceable>y</> or vice versa; the
+ <entry><literal>(<replaceable>x</replaceable>),(<replaceable>y</replaceable>)</literal></entry>
+ <entry>A one-dimensional interval starting at <replaceable>x</replaceable> and ending at <replaceable>y</replaceable> or vice versa; the
order does not matter
</entry>
</row>
<row>
- <entry><literal>[(<replaceable>x</>),(<replaceable>y</>)]</literal></entry>
+ <entry><literal>[(<replaceable>x</replaceable>),(<replaceable>y</replaceable>)]</literal></entry>
<entry>Same as above</entry>
</row>
<row>
- <entry><literal>(<replaceable>x1</>,...,<replaceable>xn</>),(<replaceable>y1</>,...,<replaceable>yn</>)</literal></entry>
+ <entry><literal>(<replaceable>x1</replaceable>,...,<replaceable>xn</replaceable>),(<replaceable>y1</replaceable>,...,<replaceable>yn</replaceable>)</literal></entry>
<entry>An n-dimensional cube represented by a pair of its diagonally
opposite corners
</entry>
</row>
<row>
- <entry><literal>[(<replaceable>x1</>,...,<replaceable>xn</>),(<replaceable>y1</>,...,<replaceable>yn</>)]</literal></entry>
+ <entry><literal>[(<replaceable>x1</replaceable>,...,<replaceable>xn</replaceable>),(<replaceable>y1</replaceable>,...,<replaceable>yn</replaceable>)]</literal></entry>
<entry>Same as above</entry>
</row>
</tbody>
@@ -79,17 +79,17 @@
<para>
It does not matter which order the opposite corners of a cube are
- entered in. The <type>cube</> functions
+ entered in. The <type>cube</type> functions
automatically swap values if needed to create a uniform
- <quote>lower left &mdash; upper right</> internal representation.
- When the corners coincide, <type>cube</> stores only one corner
- along with an <quote>is point</> flag to avoid wasting space.
+ <quote>lower left &mdash; upper right</quote> internal representation.
+ When the corners coincide, <type>cube</type> stores only one corner
+ along with an <quote>is point</quote> flag to avoid wasting space.
</para>
<para>
White space is ignored on input, so
- <literal>[(<replaceable>x</>),(<replaceable>y</>)]</literal> is the same as
- <literal>[ ( <replaceable>x</> ), ( <replaceable>y</> ) ]</literal>.
+ <literal>[(<replaceable>x</replaceable>),(<replaceable>y</replaceable>)]</literal> is the same as
+ <literal>[ ( <replaceable>x</replaceable> ), ( <replaceable>y</replaceable> ) ]</literal>.
</para>
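  <para>
   As a quick sanity check of the input syntax, a comparison such as the
   following can be expected to return true:
<programlisting>
SELECT '[(1,2),(3,4)]'::cube = '[ ( 1, 2 ), ( 3, 4 ) ]'::cube;
</programlisting>
  </para>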
</sect2>
@@ -107,7 +107,7 @@
<para>
<xref linkend="cube-operators-table"> shows the operators provided for
- type <type>cube</>.
+ type <type>cube</type>.
</para>
<table id="cube-operators-table">
@@ -123,91 +123,91 @@
<tbody>
<row>
- <entry><literal>a = b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a = b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cubes a and b are identical.</entry>
</row>
<row>
- <entry><literal>a &amp;&amp; b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a &amp;&amp; b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cubes a and b overlap.</entry>
</row>
<row>
- <entry><literal>a @&gt; b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a @&gt; b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cube a contains the cube b.</entry>
</row>
<row>
- <entry><literal>a &lt;@ b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a &lt;@ b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cube a is contained in the cube b.</entry>
</row>
<row>
- <entry><literal>a &lt; b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a &lt; b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cube a is less than the cube b.</entry>
</row>
<row>
- <entry><literal>a &lt;= b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a &lt;= b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cube a is less than or equal to the cube b.</entry>
</row>
<row>
- <entry><literal>a &gt; b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a &gt; b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cube a is greater than the cube b.</entry>
</row>
<row>
- <entry><literal>a &gt;= b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a &gt;= b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cube a is greater than or equal to the cube b.</entry>
</row>
<row>
- <entry><literal>a &lt;&gt; b</></entry>
- <entry><type>boolean</></entry>
+ <entry><literal>a &lt;&gt; b</literal></entry>
+ <entry><type>boolean</type></entry>
<entry>The cube a is not equal to the cube b.</entry>
</row>
<row>
- <entry><literal>a -&gt; n</></entry>
- <entry><type>float8</></entry>
- <entry>Get <replaceable>n</>-th coordinate of cube (counting from 1).</entry>
+ <entry><literal>a -&gt; n</literal></entry>
+ <entry><type>float8</type></entry>
+ <entry>Get <replaceable>n</replaceable>-th coordinate of cube (counting from 1).</entry>
</row>
<row>
- <entry><literal>a ~&gt; n</></entry>
- <entry><type>float8</></entry>
+ <entry><literal>a ~&gt; n</literal></entry>
+ <entry><type>float8</type></entry>
<entry>
- Get <replaceable>n</>-th coordinate in <quote>normalized</> cube
+ Get <replaceable>n</replaceable>-th coordinate in <quote>normalized</quote> cube
representation, in which the coordinates have been rearranged into
- the form <quote>lower left &mdash; upper right</>; that is, the
+ the form <quote>lower left &mdash; upper right</quote>; that is, the
smaller endpoint along each dimension appears first.
</entry>
</row>
<row>
- <entry><literal>a &lt;-&gt; b</></entry>
- <entry><type>float8</></entry>
+ <entry><literal>a &lt;-&gt; b</literal></entry>
+ <entry><type>float8</type></entry>
<entry>Euclidean distance between a and b.</entry>
</row>
<row>
- <entry><literal>a &lt;#&gt; b</></entry>
- <entry><type>float8</></entry>
+ <entry><literal>a &lt;#&gt; b</literal></entry>
+ <entry><type>float8</type></entry>
<entry>Taxicab (L-1 metric) distance between a and b.</entry>
</row>
<row>
- <entry><literal>a &lt;=&gt; b</></entry>
- <entry><type>float8</></entry>
+ <entry><literal>a &lt;=&gt; b</literal></entry>
+ <entry><type>float8</type></entry>
<entry>Chebyshev (L-inf metric) distance between a and b.</entry>
</row>
@@ -216,35 +216,35 @@
</table>
<para>
- (Before PostgreSQL 8.2, the containment operators <literal>@&gt;</> and <literal>&lt;@</> were
- respectively called <literal>@</> and <literal>~</>. These names are still available, but are
+ (Before PostgreSQL 8.2, the containment operators <literal>@&gt;</literal> and <literal>&lt;@</literal> were
+ respectively called <literal>@</literal> and <literal>~</literal>. These names are still available, but are
deprecated and will eventually be retired. Notice that the old names
are reversed from the convention formerly followed by the core geometric
data types!)
</para>
<para>
- The scalar ordering operators (<literal>&lt;</>, <literal>&gt;=</>, etc)
+ The scalar ordering operators (<literal>&lt;</literal>, <literal>&gt;=</literal>, etc)
do not make a lot of sense for any practical purpose but sorting. These
operators first compare the first coordinates, and if those are equal,
compare the second coordinates, etc. They exist mainly to support the
- b-tree index operator class for <type>cube</>, which can be useful for
- example if you would like a UNIQUE constraint on a <type>cube</> column.
+ b-tree index operator class for <type>cube</type>, which can be useful for
+ example if you would like a UNIQUE constraint on a <type>cube</type> column.
</para>
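  <para>
   For instance (with an invented table name), a unique constraint on a
   <type>cube</type> column relies on that b-tree operator class:
<programlisting>
CREATE TABLE boxes (b cube);
ALTER TABLE boxes ADD CONSTRAINT boxes_b_key UNIQUE (b);
</programlisting>
  </para>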
<para>
- The <filename>cube</> module also provides a GiST index operator class for
- <type>cube</> values.
- A <type>cube</> GiST index can be used to search for values using the
- <literal>=</>, <literal>&amp;&amp;</>, <literal>@&gt;</>, and
- <literal>&lt;@</> operators in <literal>WHERE</> clauses.
+ The <filename>cube</filename> module also provides a GiST index operator class for
+ <type>cube</type> values.
+ A <type>cube</type> GiST index can be used to search for values using the
+ <literal>=</literal>, <literal>&amp;&amp;</literal>, <literal>@&gt;</literal>, and
+ <literal>&lt;@</literal> operators in <literal>WHERE</literal> clauses.
</para>
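  <para>
   Continuing the invented example above, such an index and a containment
   search might look like this:
<programlisting>
CREATE INDEX boxes_b_idx ON boxes USING gist (b);
SELECT * FROM boxes WHERE b &lt;@ '(0,0),(10,10)'::cube;
</programlisting>
  </para>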
<para>
- In addition, a <type>cube</> GiST index can be used to find nearest
+ In addition, a <type>cube</type> GiST index can be used to find nearest
neighbors using the metric operators
- <literal>&lt;-&gt;</>, <literal>&lt;#&gt;</>, and
- <literal>&lt;=&gt;</> in <literal>ORDER BY</> clauses.
+ <literal>&lt;-&gt;</literal>, <literal>&lt;#&gt;</literal>, and
+ <literal>&lt;=&gt;</literal> in <literal>ORDER BY</literal> clauses.
For example, the nearest neighbor of the 3-D point (0.5, 0.5, 0.5)
could be found efficiently with:
<programlisting>
@@ -253,7 +253,7 @@ SELECT c FROM test ORDER BY c &lt;-&gt; cube(array[0.5,0.5,0.5]) LIMIT 1;
</para>
<para>
- The <literal>~&gt;</> operator can also be used in this way to
+ The <literal>~&gt;</literal> operator can also be used in this way to
efficiently retrieve the first few values sorted by a selected coordinate.
For example, to get the first few cubes ordered by the first coordinate
(lower left corner) ascending one could use the following query:
@@ -365,7 +365,7 @@ SELECT c FROM test ORDER BY c ~&gt; 3 DESC LIMIT 5;
<row>
<entry><literal>cube_ll_coord(cube, integer)</literal></entry>
<entry><type>float8</type></entry>
- <entry>Returns the <replaceable>n</>-th coordinate value for the lower
+ <entry>Returns the <replaceable>n</replaceable>-th coordinate value for the lower
left corner of the cube.
</entry>
<entry>
@@ -376,7 +376,7 @@ SELECT c FROM test ORDER BY c ~&gt; 3 DESC LIMIT 5;
<row>
<entry><literal>cube_ur_coord(cube, integer)</literal></entry>
<entry><type>float8</type></entry>
- <entry>Returns the <replaceable>n</>-th coordinate value for the
+ <entry>Returns the <replaceable>n</replaceable>-th coordinate value for the
upper right corner of the cube.
</entry>
<entry>
@@ -412,9 +412,9 @@ SELECT c FROM test ORDER BY c ~&gt; 3 DESC LIMIT 5;
desired.
</entry>
<entry>
- <literal>cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[2]) == '(3),(7)'</>
+ <literal>cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[2]) == '(3),(7)'</literal>
<literal>cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]) ==
- '(5,3,1,1),(8,7,6,6)'</>
+ '(5,3,1,1),(8,7,6,6)'</literal>
</entry>
</row>
@@ -440,24 +440,24 @@ SELECT c FROM test ORDER BY c ~&gt; 3 DESC LIMIT 5;
<entry><literal>cube_enlarge(c cube, r double, n integer)</literal></entry>
<entry><type>cube</type></entry>
<entry>Increases the size of the cube by the specified
- radius <replaceable>r</> in at least <replaceable>n</> dimensions.
+ radius <replaceable>r</replaceable> in at least <replaceable>n</replaceable> dimensions.
If the radius is negative the cube is shrunk instead.
- All defined dimensions are changed by the radius <replaceable>r</>.
- Lower-left coordinates are decreased by <replaceable>r</> and
- upper-right coordinates are increased by <replaceable>r</>. If a
+ All defined dimensions are changed by the radius <replaceable>r</replaceable>.
+ Lower-left coordinates are decreased by <replaceable>r</replaceable> and
+ upper-right coordinates are increased by <replaceable>r</replaceable>. If a
lower-left coordinate is increased to more than the corresponding
- upper-right coordinate (this can only happen when <replaceable>r</>
+ upper-right coordinate (this can only happen when <replaceable>r</replaceable>
&lt; 0) then both coordinates are set to their average.
- If <replaceable>n</> is greater than the number of defined dimensions
- and the cube is being enlarged (<replaceable>r</> &gt; 0), then extra
- dimensions are added to make <replaceable>n</> altogether;
+ If <replaceable>n</replaceable> is greater than the number of defined dimensions
+ and the cube is being enlarged (<replaceable>r</replaceable> &gt; 0), then extra
+ dimensions are added to make <replaceable>n</replaceable> altogether;
0 is used as the initial value for the extra coordinates.
This function is useful for creating bounding boxes around a point for
searching for nearby points.
</entry>
<entry>
<literal>cube_enlarge('(1,2),(3,4)', 0.5, 3) ==
- '(0.5,1.5,-0.5),(3.5,4.5,0.5)'</>
+ '(0.5,1.5,-0.5),(3.5,4.5,0.5)'</literal>
</entry>
</row>
</tbody>
@@ -523,13 +523,13 @@ t
<title>Notes</title>
<para>
- For examples of usage, see the regression test <filename>sql/cube.sql</>.
+ For examples of usage, see the regression test <filename>sql/cube.sql</filename>.
</para>
<para>
To make it harder for people to break things, there
is a limit of 100 on the number of dimensions of cubes. This is set
- in <filename>cubedata.h</> if you need something bigger.
+ in <filename>cubedata.h</filename> if you need something bigger.
</para>
</sect2>
diff --git a/doc/src/sgml/custom-scan.sgml b/doc/src/sgml/custom-scan.sgml
index 9d1ca7bfe1..a46641674f 100644
--- a/doc/src/sgml/custom-scan.sgml
+++ b/doc/src/sgml/custom-scan.sgml
@@ -9,9 +9,9 @@
</indexterm>
<para>
- <productname>PostgreSQL</> supports a set of experimental facilities which
+ <productname>PostgreSQL</productname> supports a set of experimental facilities which
are intended to allow extension modules to add new scan types to the system.
- Unlike a <link linkend="fdwhandler">foreign data wrapper</>, which is only
+ Unlike a <link linkend="fdwhandler">foreign data wrapper</link>, which is only
responsible for knowing how to scan its own foreign tables, a custom scan
provider can provide an alternative method of scanning any relation in the
system. Typically, the motivation for writing a custom scan provider will
@@ -51,9 +51,9 @@ extern PGDLLIMPORT set_rel_pathlist_hook_type set_rel_pathlist_hook;
<para>
Although this hook function can be used to examine, modify, or remove
paths generated by the core system, a custom scan provider will typically
- confine itself to generating <structname>CustomPath</> objects and adding
- them to <literal>rel</> using <function>add_path</>. The custom scan
- provider is responsible for initializing the <structname>CustomPath</>
+ confine itself to generating <structname>CustomPath</structname> objects and adding
+ them to <literal>rel</literal> using <function>add_path</function>. The custom scan
+ provider is responsible for initializing the <structname>CustomPath</structname>
object, which is declared like this:
<programlisting>
typedef struct CustomPath
@@ -68,22 +68,22 @@ typedef struct CustomPath
</para>
<para>
- <structfield>path</> must be initialized as for any other path, including
+ <structfield>path</structfield> must be initialized as for any other path, including
the row-count estimate, start and total cost, and sort ordering provided
- by this path. <structfield>flags</> is a bit mask, which should include
- <literal>CUSTOMPATH_SUPPORT_BACKWARD_SCAN</> if the custom path can support
- a backward scan and <literal>CUSTOMPATH_SUPPORT_MARK_RESTORE</> if it
+ by this path. <structfield>flags</structfield> is a bit mask, which should include
+ <literal>CUSTOMPATH_SUPPORT_BACKWARD_SCAN</literal> if the custom path can support
+ a backward scan and <literal>CUSTOMPATH_SUPPORT_MARK_RESTORE</literal> if it
can support mark and restore. Both capabilities are optional.
- An optional <structfield>custom_paths</> is a list of <structname>Path</>
+ An optional <structfield>custom_paths</structfield> is a list of <structname>Path</structname>
nodes used by this custom-path node; these will be transformed into
- <structname>Plan</> nodes by planner.
- <structfield>custom_private</> can be used to store the custom path's
+ <structname>Plan</structname> nodes by the planner.
+ <structfield>custom_private</structfield> can be used to store the custom path's
private data. Private data should be stored in a form that can be handled
- by <literal>nodeToString</>, so that debugging routines that attempt to
- print the custom path will work as designed. <structfield>methods</> must
+ by <literal>nodeToString</literal>, so that debugging routines that attempt to
+ print the custom path will work as designed. <structfield>methods</structfield> must
point to a (usually statically allocated) object implementing the required
custom path methods, of which there is currently only one. The
- <structfield>LibraryName</> and <structfield>SymbolName</> fields must also
+ <structfield>LibraryName</structfield> and <structfield>SymbolName</structfield> fields must also
be initialized so that the dynamic loader can resolve them to locate the
method table.
</para>
@@ -93,7 +93,7 @@ typedef struct CustomPath
relations, such a path must produce the same output as would normally be
produced by the join it replaces. To do this, the join provider should
set the following hook, and then within the hook function,
- create <structname>CustomPath</> path(s) for the join relation.
+ create <structname>CustomPath</structname> path(s) for the join relation.
<programlisting>
typedef void (*set_join_pathlist_hook_type) (PlannerInfo *root,
RelOptInfo *joinrel,
@@ -122,7 +122,7 @@ Plan *(*PlanCustomPath) (PlannerInfo *root,
List *custom_plans);
</programlisting>
Convert a custom path to a finished plan. The return value will generally
- be a <literal>CustomScan</> object, which the callback must allocate and
+ be a <literal>CustomScan</literal> object, which the callback must allocate and
initialize. See <xref linkend="custom-scan-plan"> for more details.
</para>
</sect2>
@@ -150,45 +150,45 @@ typedef struct CustomScan
</para>
<para>
- <structfield>scan</> must be initialized as for any other scan, including
+ <structfield>scan</structfield> must be initialized as for any other scan, including
estimated costs, target lists, qualifications, and so on.
- <structfield>flags</> is a bit mask with the same meaning as in
- <structname>CustomPath</>.
- <structfield>custom_plans</> can be used to store child
- <structname>Plan</> nodes.
- <structfield>custom_exprs</> should be used to
+ <structfield>flags</structfield> is a bit mask with the same meaning as in
+ <structname>CustomPath</structname>.
+ <structfield>custom_plans</structfield> can be used to store child
+ <structname>Plan</structname> nodes.
+ <structfield>custom_exprs</structfield> should be used to
store expression trees that will need to be fixed up by
- <filename>setrefs.c</> and <filename>subselect.c</>, while
- <structfield>custom_private</> should be used to store other private data
+ <filename>setrefs.c</filename> and <filename>subselect.c</filename>, while
+ <structfield>custom_private</structfield> should be used to store other private data
that is only used by the custom scan provider itself.
- <structfield>custom_scan_tlist</> can be NIL when scanning a base
+ <structfield>custom_scan_tlist</structfield> can be NIL when scanning a base
relation, indicating that the custom scan returns scan tuples that match
the base relation's row type. Otherwise it is a target list describing
- the actual scan tuples. <structfield>custom_scan_tlist</> must be
+ the actual scan tuples. <structfield>custom_scan_tlist</structfield> must be
provided for joins, and could be provided for scans if the custom scan
provider can compute some non-Var expressions.
- <structfield>custom_relids</> is set by the core code to the set of
+ <structfield>custom_relids</structfield> is set by the core code to the set of
relations (range table indexes) that this scan node handles; except when
this scan is replacing a join, it will have only one member.
- <structfield>methods</> must point to a (usually statically allocated)
+ <structfield>methods</structfield> must point to a (usually statically allocated)
object implementing the required custom scan methods, which are further
detailed below.
</para>
<para>
- When a <structname>CustomScan</> scans a single relation,
- <structfield>scan.scanrelid</> must be the range table index of the table
- to be scanned. When it replaces a join, <structfield>scan.scanrelid</>
+ When a <structname>CustomScan</structname> scans a single relation,
+ <structfield>scan.scanrelid</structfield> must be the range table index of the table
+ to be scanned. When it replaces a join, <structfield>scan.scanrelid</structfield>
should be zero.
</para>
<para>
- Plan trees must be able to be duplicated using <function>copyObject</>,
- so all the data stored within the <quote>custom</> fields must consist of
+ Plan trees must be able to be duplicated using <function>copyObject</function>,
+ so all the data stored within the <quote>custom</quote> fields must consist of
nodes that that function can handle. Furthermore, custom scan providers
cannot substitute a larger structure that embeds
- a <structname>CustomScan</> for the structure itself, as would be possible
- for a <structname>CustomPath</> or <structname>CustomScanState</>.
+ a <structname>CustomScan</structname> for the structure itself, as would be possible
+ for a <structname>CustomPath</structname> or <structname>CustomScanState</structname>.
</para>
<sect2 id="custom-scan-plan-callbacks">
@@ -197,14 +197,14 @@ typedef struct CustomScan
<programlisting>
Node *(*CreateCustomScanState) (CustomScan *cscan);
</programlisting>
- Allocate a <structname>CustomScanState</> for this
- <structname>CustomScan</>. The actual allocation will often be larger than
- required for an ordinary <structname>CustomScanState</>, because many
+ Allocate a <structname>CustomScanState</structname> for this
+ <structname>CustomScan</structname>. The actual allocation will often be larger than
+ required for an ordinary <structname>CustomScanState</structname>, because many
providers will wish to embed that as the first field of a larger structure.
- The value returned must have the node tag and <structfield>methods</>
+ The value returned must have the node tag and <structfield>methods</structfield>
set appropriately, but other fields should be left as zeroes at this
- stage; after <function>ExecInitCustomScan</> performs basic initialization,
- the <function>BeginCustomScan</> callback will be invoked to give the
+ stage; after <function>ExecInitCustomScan</function> performs basic initialization,
+ the <function>BeginCustomScan</function> callback will be invoked to give the
custom scan provider a chance to do whatever else is needed.
</para>
</sect2>
@@ -214,8 +214,8 @@ Node *(*CreateCustomScanState) (CustomScan *cscan);
<title>Executing Custom Scans</title>
<para>
- When a <structfield>CustomScan</> is executed, its execution state is
- represented by a <structfield>CustomScanState</>, which is declared as
+ When a <structfield>CustomScan</structfield> is executed, its execution state is
+ represented by a <structfield>CustomScanState</structfield>, which is declared as
follows:
<programlisting>
typedef struct CustomScanState
@@ -228,15 +228,15 @@ typedef struct CustomScanState
</para>
<para>
- <structfield>ss</> is initialized as for any other scan state,
+ <structfield>ss</structfield> is initialized as for any other scan state,
except that if the scan is for a join rather than a base relation,
- <literal>ss.ss_currentRelation</> is left NULL.
- <structfield>flags</> is a bit mask with the same meaning as in
- <structname>CustomPath</> and <structname>CustomScan</>.
- <structfield>methods</> must point to a (usually statically allocated)
+ <literal>ss.ss_currentRelation</literal> is left NULL.
+ <structfield>flags</structfield> is a bit mask with the same meaning as in
+ <structname>CustomPath</structname> and <structname>CustomScan</structname>.
+ <structfield>methods</structfield> must point to a (usually statically allocated)
object implementing the required custom scan state methods, which are
- further detailed below. Typically, a <structname>CustomScanState</>, which
- need not support <function>copyObject</>, will actually be a larger
+ further detailed below. Typically, a <structname>CustomScanState</structname>, which
+ need not support <function>copyObject</function>, will actually be a larger
structure embedding the above as its first member.
</para>
@@ -249,8 +249,8 @@ void (*BeginCustomScan) (CustomScanState *node,
EState *estate,
int eflags);
</programlisting>
- Complete initialization of the supplied <structname>CustomScanState</>.
- Standard fields have been initialized by <function>ExecInitCustomScan</>,
+ Complete initialization of the supplied <structname>CustomScanState</structname>.
+ Standard fields have been initialized by <function>ExecInitCustomScan</function>,
but any private fields should be initialized here.
</para>
@@ -259,16 +259,16 @@ void (*BeginCustomScan) (CustomScanState *node,
TupleTableSlot *(*ExecCustomScan) (CustomScanState *node);
</programlisting>
Fetch the next scan tuple. If any tuples remain, it should fill
- <literal>ps_ResultTupleSlot</> with the next tuple in the current scan
+ <literal>ps_ResultTupleSlot</literal> with the next tuple in the current scan
direction, and then return the tuple slot. If not,
- <literal>NULL</> or an empty slot should be returned.
+ <literal>NULL</literal> or an empty slot should be returned.
</para>
<para>
<programlisting>
void (*EndCustomScan) (CustomScanState *node);
</programlisting>
- Clean up any private data associated with the <literal>CustomScanState</>.
+ Clean up any private data associated with the <literal>CustomScanState</literal>.
This method is required, but it does not need to do anything if there is
no associated data or it will be cleaned up automatically.
</para>
@@ -286,9 +286,9 @@ void (*ReScanCustomScan) (CustomScanState *node);
void (*MarkPosCustomScan) (CustomScanState *node);
</programlisting>
Save the current scan position so that it can subsequently be restored
- by the <function>RestrPosCustomScan</> callback. This callback is
+ by the <function>RestrPosCustomScan</function> callback. This callback is
optional, and need only be supplied if the
- <literal>CUSTOMPATH_SUPPORT_MARK_RESTORE</> flag is set.
+ <literal>CUSTOMPATH_SUPPORT_MARK_RESTORE</literal> flag is set.
</para>
<para>
@@ -296,9 +296,9 @@ void (*MarkPosCustomScan) (CustomScanState *node);
void (*RestrPosCustomScan) (CustomScanState *node);
</programlisting>
Restore the previous scan position as saved by the
- <function>MarkPosCustomScan</> callback. This callback is optional,
+ <function>MarkPosCustomScan</function> callback. This callback is optional,
and need only be supplied if the
- <literal>CUSTOMPATH_SUPPORT_MARK_RESTORE</> flag is set.
+ <literal>CUSTOMPATH_SUPPORT_MARK_RESTORE</literal> flag is set.
</para>
<para>
@@ -320,8 +320,8 @@ void (*InitializeDSMCustomScan) (CustomScanState *node,
void *coordinate);
</programlisting>
Initialize the dynamic shared memory that will be required for parallel
- operation. <literal>coordinate</> points to a shared memory area of
- size equal to the return value of <function>EstimateDSMCustomScan</>.
+ operation. <literal>coordinate</literal> points to a shared memory area of
+ size equal to the return value of <function>EstimateDSMCustomScan</function>.
This callback is optional, and need only be supplied if this custom
scan provider supports parallel execution.
</para>
@@ -337,9 +337,9 @@ void (*ReInitializeDSMCustomScan) (CustomScanState *node,
This callback is optional, and need only be supplied if this custom
scan provider supports parallel execution.
Recommended practice is that this callback reset only shared state,
- while the <function>ReScanCustomScan</> callback resets only local
+ while the <function>ReScanCustomScan</function> callback resets only local
state. Currently, this callback will be called
- before <function>ReScanCustomScan</>, but it's best not to rely on
+ before <function>ReScanCustomScan</function>, but it's best not to rely on
that ordering.
</para>
@@ -350,7 +350,7 @@ void (*InitializeWorkerCustomScan) (CustomScanState *node,
void *coordinate);
</programlisting>
Initialize a parallel worker's local state based on the shared state
- set up by the leader during <function>InitializeDSMCustomScan</>.
+ set up by the leader during <function>InitializeDSMCustomScan</function>.
This callback is optional, and need only be supplied if this custom
scan provider supports parallel execution.
</para>
@@ -361,7 +361,7 @@ void (*ShutdownCustomScan) (CustomScanState *node);
</programlisting>
Release resources when it is anticipated the node will not be executed
to completion. This is not called in all cases; sometimes,
- <literal>EndCustomScan</> may be called without this function having
+ <literal>EndCustomScan</literal> may be called without this function having
been called first. Since the DSM segment used by parallel query is
destroyed just after this callback is invoked, custom scan providers that
wish to take some action before the DSM segment goes away should implement
@@ -374,9 +374,9 @@ void (*ExplainCustomScan) (CustomScanState *node,
List *ancestors,
ExplainState *es);
</programlisting>
- Output additional information for <command>EXPLAIN</> of a custom-scan
+ Output additional information for <command>EXPLAIN</command> of a custom-scan
plan node. This callback is optional. Common data stored in the
- <structname>ScanState</>, such as the target list and scan relation, will
+ <structname>ScanState</structname>, such as the target list and scan relation, will
be shown even without this callback, but the callback allows the display
of additional, private state.
</para>
diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml
index 512756df4a..6a15f9030c 100644
--- a/doc/src/sgml/datatype.sgml
+++ b/doc/src/sgml/datatype.sgml
@@ -79,7 +79,7 @@
<row>
<entry><type>bytea</type></entry>
<entry></entry>
- <entry>binary data (<quote>byte array</>)</entry>
+ <entry>binary data (<quote>byte array</quote>)</entry>
</row>
<row>
@@ -354,45 +354,45 @@
<tbody>
<row>
- <entry><type>smallint</></entry>
+ <entry><type>smallint</type></entry>
<entry>2 bytes</entry>
<entry>small-range integer</entry>
<entry>-32768 to +32767</entry>
</row>
<row>
- <entry><type>integer</></entry>
+ <entry><type>integer</type></entry>
<entry>4 bytes</entry>
<entry>typical choice for integer</entry>
<entry>-2147483648 to +2147483647</entry>
</row>
<row>
- <entry><type>bigint</></entry>
+ <entry><type>bigint</type></entry>
<entry>8 bytes</entry>
<entry>large-range integer</entry>
<entry>-9223372036854775808 to +9223372036854775807</entry>
</row>
<row>
- <entry><type>decimal</></entry>
+ <entry><type>decimal</type></entry>
<entry>variable</entry>
<entry>user-specified precision, exact</entry>
<entry>up to 131072 digits before the decimal point; up to 16383 digits after the decimal point</entry>
</row>
<row>
- <entry><type>numeric</></entry>
+ <entry><type>numeric</type></entry>
<entry>variable</entry>
<entry>user-specified precision, exact</entry>
<entry>up to 131072 digits before the decimal point; up to 16383 digits after the decimal point</entry>
</row>
<row>
- <entry><type>real</></entry>
+ <entry><type>real</type></entry>
<entry>4 bytes</entry>
<entry>variable-precision, inexact</entry>
<entry>6 decimal digits precision</entry>
</row>
<row>
- <entry><type>double precision</></entry>
+ <entry><type>double precision</type></entry>
<entry>8 bytes</entry>
<entry>variable-precision, inexact</entry>
<entry>15 decimal digits precision</entry>
@@ -406,7 +406,7 @@
</row>
<row>
- <entry><type>serial</></entry>
+ <entry><type>serial</type></entry>
<entry>4 bytes</entry>
<entry>autoincrementing integer</entry>
<entry>1 to 2147483647</entry>
@@ -574,9 +574,9 @@ NUMERIC
<para>
Numeric values are physically stored without any extra leading or
trailing zeroes. Thus, the declared precision and scale of a column
- are maximums, not fixed allocations. (In this sense the <type>numeric</>
- type is more akin to <type>varchar(<replaceable>n</>)</type>
- than to <type>char(<replaceable>n</>)</type>.) The actual storage
+ are maximums, not fixed allocations. (In this sense the <type>numeric</type>
+ type is more akin to <type>varchar(<replaceable>n</replaceable>)</type>
+ than to <type>char(<replaceable>n</replaceable>)</type>.) The actual storage
requirement is two bytes for each group of four decimal digits,
plus three to eight bytes overhead.
</para>
@@ -593,22 +593,22 @@ NUMERIC
<para>
In addition to ordinary numeric values, the <type>numeric</type>
- type allows the special value <literal>NaN</>, meaning
- <quote>not-a-number</quote>. Any operation on <literal>NaN</>
- yields another <literal>NaN</>. When writing this value
+ type allows the special value <literal>NaN</literal>, meaning
+ <quote>not-a-number</quote>. Any operation on <literal>NaN</literal>
+ yields another <literal>NaN</literal>. When writing this value
as a constant in an SQL command, you must put quotes around it,
- for example <literal>UPDATE table SET x = 'NaN'</>. On input,
- the string <literal>NaN</> is recognized in a case-insensitive manner.
+ for example <literal>UPDATE table SET x = 'NaN'</literal>. On input,
+ the string <literal>NaN</literal> is recognized in a case-insensitive manner.
</para>
<note>
<para>
- In most implementations of the <quote>not-a-number</> concept,
- <literal>NaN</> is not considered equal to any other numeric
- value (including <literal>NaN</>). In order to allow
- <type>numeric</> values to be sorted and used in tree-based
- indexes, <productname>PostgreSQL</> treats <literal>NaN</>
- values as equal, and greater than all non-<literal>NaN</>
+ In most implementations of the <quote>not-a-number</quote> concept,
+ <literal>NaN</literal> is not considered equal to any other numeric
+ value (including <literal>NaN</literal>). In order to allow
+ <type>numeric</type> values to be sorted and used in tree-based
+ indexes, <productname>PostgreSQL</productname> treats <literal>NaN</literal>
+ values as equal, and greater than all non-<literal>NaN</literal>
values.
</para>
</note>
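  <para>
   For example, both of the following comparisons can be expected to
   return true:
<programlisting>
SELECT 'NaN'::numeric &gt; 1000000, 'NaN'::numeric = 'NaN'::numeric;
</programlisting>
  </para>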
@@ -756,18 +756,18 @@ FROM generate_series(-3.5, 3.5, 1) as x;
floating-point arithmetic does not follow IEEE 754, these values
will probably not work as expected.) When writing these values
as constants in an SQL command, you must put quotes around them,
- for example <literal>UPDATE table SET x = '-Infinity'</>. On input,
+ for example <literal>UPDATE table SET x = '-Infinity'</literal>. On input,
these strings are recognized in a case-insensitive manner.
</para>
<note>
<para>
- IEEE754 specifies that <literal>NaN</> should not compare equal
- to any other floating-point value (including <literal>NaN</>).
+ IEEE 754 specifies that <literal>NaN</literal> should not compare equal
+ to any other floating-point value (including <literal>NaN</literal>).
In order to allow floating-point values to be sorted and used
- in tree-based indexes, <productname>PostgreSQL</> treats
- <literal>NaN</> values as equal, and greater than all
- non-<literal>NaN</> values.
+ in tree-based indexes, <productname>PostgreSQL</productname> treats
+ <literal>NaN</literal> values as equal, and greater than all
+ non-<literal>NaN</literal> values.
</para>
</note>
@@ -776,7 +776,7 @@ FROM generate_series(-3.5, 3.5, 1) as x;
notations <type>float</type> and
<type>float(<replaceable>p</replaceable>)</type> for specifying
inexact numeric types. Here, <replaceable>p</replaceable> specifies
- the minimum acceptable precision in <emphasis>binary</> digits.
+ the minimum acceptable precision in <emphasis>binary</emphasis> digits.
<productname>PostgreSQL</productname> accepts
<type>float(1)</type> to <type>float(24)</type> as selecting the
<type>real</type> type, while
@@ -870,12 +870,12 @@ ALTER SEQUENCE <replaceable class="parameter">tablename</replaceable>_<replaceab
</programlisting>
Thus, we have created an integer column and arranged for its default
- values to be assigned from a sequence generator. A <literal>NOT NULL</>
+ values to be assigned from a sequence generator. A <literal>NOT NULL</literal>
constraint is applied to ensure that a null value cannot be
inserted. (In most cases you would also want to attach a
- <literal>UNIQUE</> or <literal>PRIMARY KEY</> constraint to prevent
+ <literal>UNIQUE</literal> or <literal>PRIMARY KEY</literal> constraint to prevent
duplicate values from being inserted by accident, but this is
- not automatic.) Lastly, the sequence is marked as <quote>owned by</>
+ not automatic.) Lastly, the sequence is marked as <quote>owned by</quote>
the column, so that it will be dropped if the column or table is dropped.
</para>
@@ -908,7 +908,7 @@ ALTER SEQUENCE <replaceable class="parameter">tablename</replaceable>_<replaceab
names <type>bigserial</type> and <type>serial8</type> work
the same way, except that they create a <type>bigint</type>
column. <type>bigserial</type> should be used if you anticipate
- the use of more than 2<superscript>31</> identifiers over the
+ the use of more than 2<superscript>31</superscript> identifiers over the
lifetime of the table. The type names <type>smallserial</type> and
<type>serial2</type> also work the same way, except that they
create a <type>smallint</type> column.
@@ -962,9 +962,9 @@ ALTER SEQUENCE <replaceable class="parameter">tablename</replaceable>_<replaceab
<para>
Since the output of this data type is locale-sensitive, it might not
- work to load <type>money</> data into a database that has a different
- setting of <varname>lc_monetary</>. To avoid problems, before
- restoring a dump into a new database make sure <varname>lc_monetary</> has
+ work to load <type>money</type> data into a database that has a different
+ setting of <varname>lc_monetary</varname>. To avoid problems, before
+ restoring a dump into a new database make sure <varname>lc_monetary</varname> has
the same or equivalent value as in the database that was dumped.
</para>
@@ -994,7 +994,7 @@ SELECT '52093.89'::money::numeric::float8;
Division of a <type>money</type> value by an integer value is performed
with truncation of the fractional part towards zero. To get a rounded
result, divide by a floating-point value, or cast the <type>money</type>
- value to <type>numeric</> before dividing and back to <type>money</type>
+ value to <type>numeric</type> before dividing and back to <type>money</type>
afterwards. (The latter is preferable to avoid risking precision loss.)
When a <type>money</type> value is divided by another <type>money</type>
value, the result is <type>double precision</type> (i.e., a pure number,
@@ -1047,11 +1047,11 @@ SELECT '52093.89'::money::numeric::float8;
</thead>
<tbody>
<row>
- <entry><type>character varying(<replaceable>n</>)</type>, <type>varchar(<replaceable>n</>)</type></entry>
+ <entry><type>character varying(<replaceable>n</replaceable>)</type>, <type>varchar(<replaceable>n</replaceable>)</type></entry>
<entry>variable-length with limit</entry>
</row>
<row>
- <entry><type>character(<replaceable>n</>)</type>, <type>char(<replaceable>n</>)</type></entry>
+ <entry><type>character(<replaceable>n</replaceable>)</type>, <type>char(<replaceable>n</replaceable>)</type></entry>
<entry>fixed-length, blank padded</entry>
</row>
<row>
@@ -1070,10 +1070,10 @@ SELECT '52093.89'::money::numeric::float8;
<para>
<acronym>SQL</acronym> defines two primary character types:
- <type>character varying(<replaceable>n</>)</type> and
- <type>character(<replaceable>n</>)</type>, where <replaceable>n</>
+ <type>character varying(<replaceable>n</replaceable>)</type> and
+ <type>character(<replaceable>n</replaceable>)</type>, where <replaceable>n</replaceable>
is a positive integer. Both of these types can store strings up to
- <replaceable>n</> characters (not bytes) in length. An attempt to store a
+ <replaceable>n</replaceable> characters (not bytes) in length. An attempt to store a
longer string into a column of these types will result in an
error, unless the excess characters are all spaces, in which case
the string will be truncated to the maximum length. (This somewhat
@@ -1087,22 +1087,22 @@ SELECT '52093.89'::money::numeric::float8;
<para>
If one explicitly casts a value to <type>character
- varying(<replaceable>n</>)</type> or
- <type>character(<replaceable>n</>)</type>, then an over-length
- value will be truncated to <replaceable>n</> characters without
+ varying(<replaceable>n</replaceable>)</type> or
+ <type>character(<replaceable>n</replaceable>)</type>, then an over-length
+ value will be truncated to <replaceable>n</replaceable> characters without
raising an error. (This too is required by the
<acronym>SQL</acronym> standard.)
</para>
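  <para>
   For example, the explicit cast below simply discards the excess
   characters and yields <literal>abc</literal> without raising an error:
<programlisting>
SELECT 'abcdef'::varchar(3);
</programlisting>
  </para>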
<para>
- The notations <type>varchar(<replaceable>n</>)</type> and
- <type>char(<replaceable>n</>)</type> are aliases for <type>character
- varying(<replaceable>n</>)</type> and
- <type>character(<replaceable>n</>)</type>, respectively.
+ The notations <type>varchar(<replaceable>n</replaceable>)</type> and
+ <type>char(<replaceable>n</replaceable>)</type> are aliases for <type>character
+ varying(<replaceable>n</replaceable>)</type> and
+ <type>character(<replaceable>n</replaceable>)</type>, respectively.
<type>character</type> without length specifier is equivalent to
<type>character(1)</type>. If <type>character varying</type> is used
without length specifier, the type accepts strings of any size. The
- latter is a <productname>PostgreSQL</> extension.
+ latter is a <productname>PostgreSQL</productname> extension.
</para>
<para>
@@ -1115,19 +1115,19 @@ SELECT '52093.89'::money::numeric::float8;
<para>
Values of type <type>character</type> are physically padded
- with spaces to the specified width <replaceable>n</>, and are
+ with spaces to the specified width <replaceable>n</replaceable>, and are
stored and displayed that way. However, trailing spaces are treated as
semantically insignificant and disregarded when comparing two values
of type <type>character</type>. In collations where whitespace
is significant, this behavior can produce unexpected results;
for example <command>SELECT 'a '::CHAR(2) collate "C" &lt;
- E'a\n'::CHAR(2)</command> returns true, even though <literal>C</>
+ E'a\n'::CHAR(2)</command> returns true, even though <literal>C</literal>
locale would consider a space to be greater than a newline.
Trailing spaces are removed when converting a <type>character</type> value
to one of the other string types. Note that trailing spaces
- <emphasis>are</> semantically significant in
+ <emphasis>are</emphasis> semantically significant in
<type>character varying</type> and <type>text</type> values, and
- when using pattern matching, that is <literal>LIKE</> and
+ when using pattern matching, that is <literal>LIKE</literal> and
regular expressions.
</para>
@@ -1140,7 +1140,7 @@ SELECT '52093.89'::money::numeric::float8;
stored in background tables so that they do not interfere with rapid
access to shorter column values. In any case, the longest
possible character string that can be stored is about 1 GB. (The
- maximum value that will be allowed for <replaceable>n</> in the data
+ maximum value that will be allowed for <replaceable>n</replaceable> in the data
type declaration is less than that. It wouldn't be useful to
change this because with multibyte character encodings the number of
characters and bytes can be quite different. If you desire to
@@ -1155,10 +1155,10 @@ SELECT '52093.89'::money::numeric::float8;
apart from increased storage space when using the blank-padded
type, and a few extra CPU cycles to check the length when storing into
a length-constrained column. While
- <type>character(<replaceable>n</>)</type> has performance
+ <type>character(<replaceable>n</replaceable>)</type> has performance
advantages in some other database systems, there is no such advantage in
<productname>PostgreSQL</productname>; in fact
- <type>character(<replaceable>n</>)</type> is usually the slowest of
+ <type>character(<replaceable>n</replaceable>)</type> is usually the slowest of
the three because of its additional storage costs. In most situations
<type>text</type> or <type>character varying</type> should be used
instead.
@@ -1220,7 +1220,7 @@ SELECT b, char_length(b) FROM test2;
in the internal system catalogs and is not intended for use by the general user. Its
length is currently defined as 64 bytes (63 usable characters plus
terminator) but should be referenced using the constant
- <symbol>NAMEDATALEN</symbol> in <literal>C</> source code.
+ <symbol>NAMEDATALEN</symbol> in <literal>C</literal> source code.
The length is set at compile time (and
is therefore adjustable for special uses); the default maximum
length might change in a future release. The type <type>"char"</type>
@@ -1304,7 +1304,7 @@ SELECT b, char_length(b) FROM test2;
Second, operations on binary strings process the actual bytes,
whereas the processing of character strings depends on locale settings.
In short, binary strings are appropriate for storing data that the
- programmer thinks of as <quote>raw bytes</>, whereas character
+ programmer thinks of as <quote>raw bytes</quote>, whereas character
strings are appropriate for storing text.
</para>
@@ -1328,10 +1328,10 @@ SELECT b, char_length(b) FROM test2;
</para>
<sect2>
- <title><type>bytea</> Hex Format</title>
+ <title><type>bytea</type> Hex Format</title>
<para>
- The <quote>hex</> format encodes binary data as 2 hexadecimal digits
+ The <quote>hex</quote> format encodes binary data as 2 hexadecimal digits
per byte, most significant nibble first. The entire string is
preceded by the sequence <literal>\x</literal> (to distinguish it
from the escape format). In some contexts, the initial backslash may
@@ -1355,7 +1355,7 @@ SELECT E'\\xDEADBEEF';
</sect2>
<sect2>
- <title><type>bytea</> Escape Format</title>
+ <title><type>bytea</type> Escape Format</title>
<para>
The <quote>escape</quote> format is the traditional
@@ -1390,7 +1390,7 @@ SELECT E'\\xDEADBEEF';
</para>
<table id="datatype-binary-sqlesc">
- <title><type>bytea</> Literal Escaped Octets</title>
+ <title><type>bytea</type> Literal Escaped Octets</title>
<tgroup cols="5">
<thead>
<row>
@@ -1430,7 +1430,7 @@ SELECT E'\\xDEADBEEF';
<row>
<entry>0 to 31 and 127 to 255</entry>
<entry><quote>non-printable</quote> octets</entry>
- <entry><literal>E'\\<replaceable>xxx'</></literal> (octal value)</entry>
+ <entry><literal>E'\\<replaceable>xxx'</replaceable></literal> (octal value)</entry>
<entry><literal>SELECT E'\\001'::bytea;</literal></entry>
<entry><literal>\001</literal></entry>
</row>
@@ -1481,7 +1481,7 @@ SELECT E'\\xDEADBEEF';
</para>
<table id="datatype-binary-resesc">
- <title><type>bytea</> Output Escaped Octets</title>
+ <title><type>bytea</type> Output Escaped Octets</title>
<tgroup cols="5">
<thead>
<row>
@@ -1506,7 +1506,7 @@ SELECT E'\\xDEADBEEF';
<row>
<entry>0 to 31 and 127 to 255</entry>
<entry><quote>non-printable</quote> octets</entry>
- <entry><literal>\<replaceable>xxx</></literal> (octal value)</entry>
+ <entry><literal>\<replaceable>xxx</replaceable></literal> (octal value)</entry>
<entry><literal>SELECT E'\\001'::bytea;</literal></entry>
<entry><literal>\001</literal></entry>
</row>
@@ -1524,7 +1524,7 @@ SELECT E'\\xDEADBEEF';
</table>
<para>
- Depending on the front end to <productname>PostgreSQL</> you use,
+ Depending on the front end to <productname>PostgreSQL</productname> you use,
you might have additional work to do in terms of escaping and
unescaping <type>bytea</type> strings. For example, you might also
have to escape line feeds and carriage returns if your interface
@@ -1685,7 +1685,7 @@ MINUTE TO SECOND
</literallayout>
Note that if both <replaceable>fields</replaceable> and
<replaceable>p</replaceable> are specified, the
- <replaceable>fields</replaceable> must include <literal>SECOND</>,
+ <replaceable>fields</replaceable> must include <literal>SECOND</literal>,
since the precision applies only to the seconds.
</para>
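  <para>
   For example (the table and column names are arbitrary), a declaration
   combining a field list with a precision could be written as:
<programlisting>
CREATE TABLE events (duration interval MINUTE TO SECOND(2));
</programlisting>
  </para>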
@@ -1717,9 +1717,9 @@ MINUTE TO SECOND
For some formats, ordering of day, month, and year in date input is
ambiguous and there is support for specifying the expected
ordering of these fields. Set the <xref linkend="guc-datestyle"> parameter
- to <literal>MDY</> to select month-day-year interpretation,
- <literal>DMY</> to select day-month-year interpretation, or
- <literal>YMD</> to select year-month-day interpretation.
+ to <literal>MDY</literal> to select month-day-year interpretation,
+ <literal>DMY</literal> to select day-month-year interpretation, or
+ <literal>YMD</literal> to select year-month-day interpretation.
</para>
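  <para>
   For example, with day-first interpretation the otherwise ambiguous input
   below would be read as the first of August:
<programlisting>
SET datestyle TO 'ISO, DMY';
SELECT '1/8/1999'::date;   -- expected result: 1999-08-01
</programlisting>
  </para>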
<para>
@@ -1784,19 +1784,19 @@ MINUTE TO SECOND
</row>
<row>
<entry>1/8/1999</entry>
- <entry>January 8 in <literal>MDY</> mode;
- August 1 in <literal>DMY</> mode</entry>
+ <entry>January 8 in <literal>MDY</literal> mode;
+ August 1 in <literal>DMY</literal> mode</entry>
</row>
<row>
<entry>1/18/1999</entry>
- <entry>January 18 in <literal>MDY</> mode;
+ <entry>January 18 in <literal>MDY</literal> mode;
rejected in other modes</entry>
</row>
<row>
<entry>01/02/03</entry>
- <entry>January 2, 2003 in <literal>MDY</> mode;
- February 1, 2003 in <literal>DMY</> mode;
- February 3, 2001 in <literal>YMD</> mode
+ <entry>January 2, 2003 in <literal>MDY</literal> mode;
+ February 1, 2003 in <literal>DMY</literal> mode;
+ February 3, 2001 in <literal>YMD</literal> mode
</entry>
</row>
<row>
@@ -1813,15 +1813,15 @@ MINUTE TO SECOND
</row>
<row>
<entry>99-Jan-08</entry>
- <entry>January 8 in <literal>YMD</> mode, else error</entry>
+ <entry>January 8 in <literal>YMD</literal> mode, else error</entry>
</row>
<row>
<entry>08-Jan-99</entry>
- <entry>January 8, except error in <literal>YMD</> mode</entry>
+ <entry>January 8, except error in <literal>YMD</literal> mode</entry>
</row>
<row>
<entry>Jan-08-99</entry>
- <entry>January 8, except error in <literal>YMD</> mode</entry>
+ <entry>January 8, except error in <literal>YMD</literal> mode</entry>
</row>
<row>
<entry>19990108</entry>
@@ -2070,20 +2070,20 @@ January 8 04:05:06 1999 PST
For <type>timestamp with time zone</type>, the internally stored
value is always in UTC (Universal
Coordinated Time, traditionally known as Greenwich Mean Time,
- <acronym>GMT</>). An input value that has an explicit
+ <acronym>GMT</acronym>). An input value that has an explicit
time zone specified is converted to UTC using the appropriate offset
for that time zone. If no time zone is stated in the input string,
then it is assumed to be in the time zone indicated by the system's
<xref linkend="guc-timezone"> parameter, and is converted to UTC using the
- offset for the <varname>timezone</> zone.
+ offset for the <varname>timezone</varname> zone.
</para>
<para>
When a <type>timestamp with time
zone</type> value is output, it is always converted from UTC to the
- current <varname>timezone</> zone, and displayed as local time in that
+ current <varname>timezone</varname> zone, and displayed as local time in that
zone. To see the time in another time zone, either change
- <varname>timezone</> or use the <literal>AT TIME ZONE</> construct
+ <varname>timezone</varname> or use the <literal>AT TIME ZONE</literal> construct
(see <xref linkend="functions-datetime-zoneconvert">).
</para>
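  <para>
   For instance, assuming the session's <varname>timezone</varname> is set to
   <literal>UTC</literal>, one would expect something like:
<programlisting>
SET timezone = 'UTC';
SELECT TIMESTAMP WITH TIME ZONE '2017-10-09 12:00:00+02';
-- 2017-10-09 10:00:00+00
SELECT TIMESTAMP WITH TIME ZONE '2017-10-09 12:00:00+02' AT TIME ZONE 'Europe/Berlin';
-- 2017-10-09 12:00:00
</programlisting>
  </para>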
@@ -2091,8 +2091,8 @@ January 8 04:05:06 1999 PST
Conversions between <type>timestamp without time zone</type> and
<type>timestamp with time zone</type> normally assume that the
<type>timestamp without time zone</type> value should be taken or given
- as <varname>timezone</> local time. A different time zone can
- be specified for the conversion using <literal>AT TIME ZONE</>.
+ as <varname>timezone</varname> local time. A different time zone can
+ be specified for the conversion using <literal>AT TIME ZONE</literal>.
</para>
</sect3>
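
   [Editorial sketch of the UTC storage and AT TIME ZONE behavior described above; assumes the session's timezone is America/New_York.]

SET timezone = 'America/New_York';
SELECT '2014-06-04 16:00:00+00'::timestamptz;
-- stored as UTC, displayed as 2014-06-04 12:00:00-04 in the session zone
SELECT '2014-06-04 12:00:00'::timestamptz AT TIME ZONE 'UTC';
-- yields the corresponding timestamp without time zone, 2014-06-04 16:00:00
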
@@ -2117,7 +2117,7 @@ January 8 04:05:06 1999 PST
are specially represented inside the system and will be displayed
unchanged; but the others are simply notational shorthands
that will be converted to ordinary date/time values when read.
- (In particular, <literal>now</> and related strings are converted
+ (In particular, <literal>now</literal> and related strings are converted
to a specific time value as soon as they are read.)
All of these values need to be enclosed in single quotes when used
as constants in SQL commands.
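
   [Illustrative only, not from the patch: once quoted, the special strings are ordinary input values.]

SELECT 'epoch'::timestamptz;               -- 1970-01-01 00:00:00+00
SELECT 'today'::date, 'infinity'::timestamp;
SELECT 'now'::timestamptz;                 -- resolved to a concrete instant as soon as it is read
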
@@ -2187,7 +2187,7 @@ January 8 04:05:06 1999 PST
<literal>LOCALTIMESTAMP</literal>. The latter four accept an
optional subsecond precision specification. (See <xref
linkend="functions-datetime-current">.) Note that these are
- SQL functions and are <emphasis>not</> recognized in data input strings.
+ SQL functions and are <emphasis>not</emphasis> recognized in data input strings.
</para>
</sect3>
@@ -2211,8 +2211,8 @@ January 8 04:05:06 1999 PST
<para>
The output format of the date/time types can be set to one of the four
styles ISO 8601,
- <acronym>SQL</acronym> (Ingres), traditional <productname>POSTGRES</>
- (Unix <application>date</> format), or
+ <acronym>SQL</acronym> (Ingres), traditional <productname>POSTGRES</productname>
+ (Unix <application>date</application> format), or
German. The default
is the <acronym>ISO</acronym> format. (The
<acronym>SQL</acronym> standard requires the use of the ISO 8601
@@ -2222,7 +2222,7 @@ January 8 04:05:06 1999 PST
output style. The output of the <type>date</type> and
<type>time</type> types is generally only the date or time part
in accordance with the given examples. However, the
- <productname>POSTGRES</> style outputs date-only values in
+ <productname>POSTGRES</productname> style outputs date-only values in
<acronym>ISO</acronym> format.
</para>
@@ -2263,9 +2263,9 @@ January 8 04:05:06 1999 PST
<note>
<para>
- ISO 8601 specifies the use of uppercase letter <literal>T</> to separate
- the date and time. <productname>PostgreSQL</> accepts that format on
- input, but on output it uses a space rather than <literal>T</>, as shown
+ ISO 8601 specifies the use of uppercase letter <literal>T</literal> to separate
+ the date and time. <productname>PostgreSQL</productname> accepts that format on
+ input, but on output it uses a space rather than <literal>T</literal>, as shown
above. This is for readability and for consistency with RFC 3339 as
well as some other database systems.
</para>
@@ -2292,17 +2292,17 @@ January 8 04:05:06 1999 PST
</thead>
<tbody>
<row>
- <entry><literal>SQL, DMY</></entry>
+ <entry><literal>SQL, DMY</literal></entry>
<entry><replaceable>day</replaceable>/<replaceable>month</replaceable>/<replaceable>year</replaceable></entry>
<entry><literal>17/12/1997 15:37:16.00 CET</literal></entry>
</row>
<row>
- <entry><literal>SQL, MDY</></entry>
+ <entry><literal>SQL, MDY</literal></entry>
<entry><replaceable>month</replaceable>/<replaceable>day</replaceable>/<replaceable>year</replaceable></entry>
<entry><literal>12/17/1997 07:37:16.00 PST</literal></entry>
</row>
<row>
- <entry><literal>Postgres, DMY</></entry>
+ <entry><literal>Postgres, DMY</literal></entry>
<entry><replaceable>day</replaceable>/<replaceable>month</replaceable>/<replaceable>year</replaceable></entry>
<entry><literal>Wed 17 Dec 07:37:16 1997 PST</literal></entry>
</row>
@@ -2368,7 +2368,7 @@ January 8 04:05:06 1999 PST
<listitem>
<para>
The default time zone is specified as a constant numeric offset
- from <acronym>UTC</>. It is therefore impossible to adapt to
+ from <acronym>UTC</acronym>. It is therefore impossible to adapt to
daylight-saving time when doing date/time arithmetic across
<acronym>DST</acronym> boundaries.
</para>
@@ -2380,7 +2380,7 @@ January 8 04:05:06 1999 PST
<para>
To address these difficulties, we recommend using date/time types
that contain both date and time when using time zones. We
- do <emphasis>not</> recommend using the type <type>time with
+ do <emphasis>not</emphasis> recommend using the type <type>time with
time zone</type> (though it is supported by
<productname>PostgreSQL</productname> for legacy applications and
for compliance with the <acronym>SQL</acronym> standard).
@@ -2401,7 +2401,7 @@ January 8 04:05:06 1999 PST
<itemizedlist>
<listitem>
<para>
- A full time zone name, for example <literal>America/New_York</>.
+ A full time zone name, for example <literal>America/New_York</literal>.
The recognized time zone names are listed in the
<literal>pg_timezone_names</literal> view (see <xref
linkend="view-pg-timezone-names">).
@@ -2412,16 +2412,16 @@ January 8 04:05:06 1999 PST
</listitem>
<listitem>
<para>
- A time zone abbreviation, for example <literal>PST</>. Such a
+ A time zone abbreviation, for example <literal>PST</literal>. Such a
specification merely defines a particular offset from UTC, in
contrast to full time zone names which can imply a set of daylight
savings transition-date rules as well. The recognized abbreviations
- are listed in the <literal>pg_timezone_abbrevs</> view (see <xref
+ are listed in the <literal>pg_timezone_abbrevs</literal> view (see <xref
linkend="view-pg-timezone-abbrevs">). You cannot set the
configuration parameters <xref linkend="guc-timezone"> or
<xref linkend="guc-log-timezone"> to a time
zone abbreviation, but you can use abbreviations in
- date/time input values and with the <literal>AT TIME ZONE</>
+ date/time input values and with the <literal>AT TIME ZONE</literal>
operator.
</para>
</listitem>
@@ -2429,25 +2429,25 @@ January 8 04:05:06 1999 PST
<para>
In addition to the timezone names and abbreviations,
<productname>PostgreSQL</productname> will accept POSIX-style time zone
- specifications of the form <replaceable>STD</><replaceable>offset</> or
- <replaceable>STD</><replaceable>offset</><replaceable>DST</>, where
- <replaceable>STD</> is a zone abbreviation, <replaceable>offset</> is a
- numeric offset in hours west from UTC, and <replaceable>DST</> is an
+ specifications of the form <replaceable>STD</replaceable><replaceable>offset</replaceable> or
+ <replaceable>STD</replaceable><replaceable>offset</replaceable><replaceable>DST</replaceable>, where
+ <replaceable>STD</replaceable> is a zone abbreviation, <replaceable>offset</replaceable> is a
+ numeric offset in hours west from UTC, and <replaceable>DST</replaceable> is an
optional daylight-savings zone abbreviation, assumed to stand for one
- hour ahead of the given offset. For example, if <literal>EST5EDT</>
+ hour ahead of the given offset. For example, if <literal>EST5EDT</literal>
were not already a recognized zone name, it would be accepted and would
be functionally equivalent to United States East Coast time. In this
syntax, a zone abbreviation can be a string of letters, or an
- arbitrary string surrounded by angle brackets (<literal>&lt;&gt;</>).
+ arbitrary string surrounded by angle brackets (<literal>&lt;&gt;</literal>).
When a daylight-savings zone abbreviation is present,
it is assumed to be used
according to the same daylight-savings transition rules used in the
- IANA time zone database's <filename>posixrules</> entry.
+ IANA time zone database's <filename>posixrules</filename> entry.
In a standard <productname>PostgreSQL</productname> installation,
- <filename>posixrules</> is the same as <literal>US/Eastern</>, so
+ <filename>posixrules</filename> is the same as <literal>US/Eastern</literal>, so
that POSIX-style time zone specifications follow USA daylight-savings
rules. If needed, you can adjust this behavior by replacing the
- <filename>posixrules</> file.
+ <filename>posixrules</filename> file.
</para>
</listitem>
</itemizedlist>
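
   [A minimal sketch of a POSIX-style specification; editorial example, not part of the patch.]

SET timezone = 'EST5EDT';                 -- zone abbreviation, hours west of UTC, DST abbreviation
SELECT '2014-06-04 12:00'::timestamptz;   -- interpreted with US-style daylight-savings rules
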
@@ -2456,10 +2456,10 @@ January 8 04:05:06 1999 PST
and full names: abbreviations represent a specific offset from UTC,
whereas many of the full names imply a local daylight-savings time
rule, and so have two possible UTC offsets. As an example,
- <literal>2014-06-04 12:00 America/New_York</> represents noon local
+ <literal>2014-06-04 12:00 America/New_York</literal> represents noon local
time in New York, which for this particular date was Eastern Daylight
- Time (UTC-4). So <literal>2014-06-04 12:00 EDT</> specifies that
- same time instant. But <literal>2014-06-04 12:00 EST</> specifies
+ Time (UTC-4). So <literal>2014-06-04 12:00 EDT</literal> specifies that
+ same time instant. But <literal>2014-06-04 12:00 EST</literal> specifies
noon Eastern Standard Time (UTC-5), regardless of whether daylight
savings was nominally in effect on that date.
</para>
@@ -2467,10 +2467,10 @@ January 8 04:05:06 1999 PST
<para>
To complicate matters, some jurisdictions have used the same timezone
abbreviation to mean different UTC offsets at different times; for
- example, in Moscow <literal>MSK</> has meant UTC+3 in some years and
- UTC+4 in others. <application>PostgreSQL</> interprets such
+ example, in Moscow <literal>MSK</literal> has meant UTC+3 in some years and
+ UTC+4 in others. <application>PostgreSQL</application> interprets such
abbreviations according to whatever they meant (or had most recently
- meant) on the specified date; but, as with the <literal>EST</> example
+ meant) on the specified date; but, as with the <literal>EST</literal> example
above, this is not necessarily the same as local civil time on that date.
</para>
@@ -2478,18 +2478,18 @@ January 8 04:05:06 1999 PST
One should be wary that the POSIX-style time zone feature can
lead to silently accepting bogus input, since there is no check on the
reasonableness of the zone abbreviations. For example, <literal>SET
- TIMEZONE TO FOOBAR0</> will work, leaving the system effectively using
+ TIMEZONE TO FOOBAR0</literal> will work, leaving the system effectively using
a rather peculiar abbreviation for UTC.
Another issue to keep in mind is that in POSIX time zone names,
- positive offsets are used for locations <emphasis>west</> of Greenwich.
+ positive offsets are used for locations <emphasis>west</emphasis> of Greenwich.
Everywhere else, <productname>PostgreSQL</productname> follows the
- ISO-8601 convention that positive timezone offsets are <emphasis>east</>
+ ISO-8601 convention that positive timezone offsets are <emphasis>east</emphasis>
of Greenwich.
</para>
<para>
In all cases, timezone names and abbreviations are recognized
- case-insensitively. (This is a change from <productname>PostgreSQL</>
+ case-insensitively. (This is a change from <productname>PostgreSQL</productname>
versions prior to 8.2, which were case-sensitive in some contexts but
not others.)
</para>
@@ -2497,14 +2497,14 @@ January 8 04:05:06 1999 PST
<para>
Neither timezone names nor abbreviations are hard-wired into the server;
they are obtained from configuration files stored under
- <filename>.../share/timezone/</> and <filename>.../share/timezonesets/</>
+ <filename>.../share/timezone/</filename> and <filename>.../share/timezonesets/</filename>
of the installation directory
(see <xref linkend="datetime-config-files">).
</para>
<para>
The <xref linkend="guc-timezone"> configuration parameter can
- be set in the file <filename>postgresql.conf</>, or in any of the
+ be set in the file <filename>postgresql.conf</filename>, or in any of the
other standard ways described in <xref linkend="runtime-config">.
There are also some special ways to set it:
@@ -2513,7 +2513,7 @@ January 8 04:05:06 1999 PST
<para>
The <acronym>SQL</acronym> command <command>SET TIME ZONE</command>
sets the time zone for the session. This is an alternative spelling
- of <command>SET TIMEZONE TO</> with a more SQL-spec-compatible syntax.
+ of <command>SET TIMEZONE TO</command> with a more SQL-spec-compatible syntax.
</para>
</listitem>
@@ -2541,52 +2541,52 @@ January 8 04:05:06 1999 PST
verbose syntax:
<synopsis>
-<optional>@</> <replaceable>quantity</> <replaceable>unit</> <optional><replaceable>quantity</> <replaceable>unit</>...</> <optional><replaceable>direction</></optional>
+<optional>@</optional> <replaceable>quantity</replaceable> <replaceable>unit</replaceable> <optional><replaceable>quantity</replaceable> <replaceable>unit</replaceable>...</optional> <optional><replaceable>direction</replaceable></optional>
</synopsis>
- where <replaceable>quantity</> is a number (possibly signed);
- <replaceable>unit</> is <literal>microsecond</literal>,
+ where <replaceable>quantity</replaceable> is a number (possibly signed);
+ <replaceable>unit</replaceable> is <literal>microsecond</literal>,
<literal>millisecond</literal>, <literal>second</literal>,
<literal>minute</literal>, <literal>hour</literal>, <literal>day</literal>,
<literal>week</literal>, <literal>month</literal>, <literal>year</literal>,
<literal>decade</literal>, <literal>century</literal>, <literal>millennium</literal>,
or abbreviations or plurals of these units;
- <replaceable>direction</> can be <literal>ago</literal> or
- empty. The at sign (<literal>@</>) is optional noise. The amounts
+ <replaceable>direction</replaceable> can be <literal>ago</literal> or
+ empty. The at sign (<literal>@</literal>) is optional noise. The amounts
of the different units are implicitly added with appropriate
sign accounting. <literal>ago</literal> negates all the fields.
This syntax is also used for interval output, if
<xref linkend="guc-intervalstyle"> is set to
- <literal>postgres_verbose</>.
+ <literal>postgres_verbose</literal>.
</para>
<para>
Quantities of days, hours, minutes, and seconds can be specified without
- explicit unit markings. For example, <literal>'1 12:59:10'</> is read
- the same as <literal>'1 day 12 hours 59 min 10 sec'</>. Also,
+ explicit unit markings. For example, <literal>'1 12:59:10'</literal> is read
+ the same as <literal>'1 day 12 hours 59 min 10 sec'</literal>. Also,
a combination of years and months can be specified with a dash;
- for example <literal>'200-10'</> is read the same as <literal>'200 years
- 10 months'</>. (These shorter forms are in fact the only ones allowed
+ for example <literal>'200-10'</literal> is read the same as <literal>'200 years
+ 10 months'</literal>. (These shorter forms are in fact the only ones allowed
by the <acronym>SQL</acronym> standard, and are used for output when
- <varname>IntervalStyle</> is set to <literal>sql_standard</literal>.)
+ <varname>IntervalStyle</varname> is set to <literal>sql_standard</literal>.)
</para>
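
   [Editorial sketch of the shorthand forms, not part of the patch.]

SELECT INTERVAL '1 12:59:10';   -- same as '1 day 12 hours 59 min 10 sec'
SELECT INTERVAL '200-10';       -- same as '200 years 10 months'
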
<para>
Interval values can also be written as ISO 8601 time intervals, using
- either the <quote>format with designators</> of the standard's section
- 4.4.3.2 or the <quote>alternative format</> of section 4.4.3.3. The
+ either the <quote>format with designators</quote> of the standard's section
+ 4.4.3.2 or the <quote>alternative format</quote> of section 4.4.3.3. The
format with designators looks like this:
<synopsis>
-P <replaceable>quantity</> <replaceable>unit</> <optional> <replaceable>quantity</> <replaceable>unit</> ...</optional> <optional> T <optional> <replaceable>quantity</> <replaceable>unit</> ...</optional></optional>
+P <replaceable>quantity</replaceable> <replaceable>unit</replaceable> <optional> <replaceable>quantity</replaceable> <replaceable>unit</replaceable> ...</optional> <optional> T <optional> <replaceable>quantity</replaceable> <replaceable>unit</replaceable> ...</optional></optional>
</synopsis>
- The string must start with a <literal>P</>, and may include a
- <literal>T</> that introduces the time-of-day units. The
+ The string must start with a <literal>P</literal>, and may include a
+ <literal>T</literal> that introduces the time-of-day units. The
available unit abbreviations are given in <xref
linkend="datatype-interval-iso8601-units">. Units may be
omitted, and may be specified in any order, but units smaller than
- a day must appear after <literal>T</>. In particular, the meaning of
- <literal>M</> depends on whether it is before or after
- <literal>T</>.
+ a day must appear after <literal>T</literal>. In particular, the meaning of
+ <literal>M</literal> depends on whether it is before or after
+ <literal>T</literal>.
</para>
<table id="datatype-interval-iso8601-units">
@@ -2634,51 +2634,51 @@ P <replaceable>quantity</> <replaceable>unit</> <optional> <replaceable>quantity
<para>
In the alternative format:
<synopsis>
-P <optional> <replaceable>years</>-<replaceable>months</>-<replaceable>days</> </optional> <optional> T <replaceable>hours</>:<replaceable>minutes</>:<replaceable>seconds</> </optional>
+P <optional> <replaceable>years</replaceable>-<replaceable>months</replaceable>-<replaceable>days</replaceable> </optional> <optional> T <replaceable>hours</replaceable>:<replaceable>minutes</replaceable>:<replaceable>seconds</replaceable> </optional>
</synopsis>
the string must begin with <literal>P</literal>, and a
- <literal>T</> separates the date and time parts of the interval.
+ <literal>T</literal> separates the date and time parts of the interval.
The values are given as numbers similar to ISO 8601 dates.
</para>
<para>
- When writing an interval constant with a <replaceable>fields</>
+ When writing an interval constant with a <replaceable>fields</replaceable>
specification, or when assigning a string to an interval column that was
- defined with a <replaceable>fields</> specification, the interpretation of
- unmarked quantities depends on the <replaceable>fields</>. For
- example <literal>INTERVAL '1' YEAR</> is read as 1 year, whereas
- <literal>INTERVAL '1'</> means 1 second. Also, field values
- <quote>to the right</> of the least significant field allowed by the
- <replaceable>fields</> specification are silently discarded. For
- example, writing <literal>INTERVAL '1 day 2:03:04' HOUR TO MINUTE</>
+ defined with a <replaceable>fields</replaceable> specification, the interpretation of
+ unmarked quantities depends on the <replaceable>fields</replaceable>. For
+ example <literal>INTERVAL '1' YEAR</literal> is read as 1 year, whereas
+ <literal>INTERVAL '1'</literal> means 1 second. Also, field values
+ <quote>to the right</quote> of the least significant field allowed by the
+ <replaceable>fields</replaceable> specification are silently discarded. For
+ example, writing <literal>INTERVAL '1 day 2:03:04' HOUR TO MINUTE</literal>
results in dropping the seconds field, but not the day field.
</para>
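
   [For instance, as an editorial sketch:]

SELECT INTERVAL '1' YEAR;                         -- 1 year
SELECT INTERVAL '1';                              -- 1 second
SELECT INTERVAL '1 day 2:03:04' HOUR TO MINUTE;   -- 1 day 02:03:00, seconds silently dropped
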
<para>
- According to the <acronym>SQL</> standard all fields of an interval
+ According to the <acronym>SQL</acronym> standard all fields of an interval
value must have the same sign, so a leading negative sign applies to all
fields; for example the negative sign in the interval literal
- <literal>'-1 2:03:04'</> applies to both the days and hour/minute/second
- parts. <productname>PostgreSQL</> allows the fields to have different
+ <literal>'-1 2:03:04'</literal> applies to both the days and hour/minute/second
+ parts. <productname>PostgreSQL</productname> allows the fields to have different
signs, and traditionally treats each field in the textual representation
as independently signed, so that the hour/minute/second part is
- considered positive in this example. If <varname>IntervalStyle</> is
+ considered positive in this example. If <varname>IntervalStyle</varname> is
set to <literal>sql_standard</literal> then a leading sign is considered
to apply to all fields (but only if no additional signs appear).
- Otherwise the traditional <productname>PostgreSQL</> interpretation is
+ Otherwise the traditional <productname>PostgreSQL</productname> interpretation is
used. To avoid ambiguity, it's recommended to attach an explicit sign
to each field if any field is negative.
</para>
<para>
- Internally <type>interval</> values are stored as months, days,
+ Internally <type>interval</type> values are stored as months, days,
and seconds. This is done because the number of days in a month
varies, and a day can have 23 or 25 hours if a daylight savings
time adjustment is involved. The months and days fields are integers
while the seconds field can store fractions. Because intervals are
- usually created from constant strings or <type>timestamp</> subtraction,
+ usually created from constant strings or <type>timestamp</type> subtraction,
this storage method works well in most cases. Functions
- <function>justify_days</> and <function>justify_hours</> are
+ <function>justify_days</function> and <function>justify_hours</function> are
available for adjusting days and hours that overflow their normal
ranges.
</para>
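
   [A sketch of the adjustment functions mentioned above.]

SELECT justify_hours(INTERVAL '27 hours');   -- 1 day 03:00:00
SELECT justify_days(INTERVAL '35 days');     -- 1 mon 5 days
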
@@ -2686,18 +2686,18 @@ P <optional> <replaceable>years</>-<replaceable>months</>-<replaceable>days</> <
<para>
In the verbose input format, and in some fields of the more compact
input formats, field values can have fractional parts; for example
- <literal>'1.5 week'</> or <literal>'01:02:03.45'</>. Such input is
+ <literal>'1.5 week'</literal> or <literal>'01:02:03.45'</literal>. Such input is
converted to the appropriate number of months, days, and seconds
for storage. When this would result in a fractional number of
months or days, the fraction is added to the lower-order fields
using the conversion factors 1 month = 30 days and 1 day = 24 hours.
- For example, <literal>'1.5 month'</> becomes 1 month and 15 days.
+ For example, <literal>'1.5 month'</literal> becomes 1 month and 15 days.
Only seconds will ever be shown as fractional on output.
</para>
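
   [For example, as a sketch:]

SELECT INTERVAL '1.5 month';   -- 1 mon 15 days
SELECT INTERVAL '1.5 week';    -- 10 days 12:00:00
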
<para>
<xref linkend="datatype-interval-input-examples"> shows some examples
- of valid <type>interval</> input.
+ of valid <type>interval</type> input.
</para>
<table id="datatype-interval-input-examples">
@@ -2724,11 +2724,11 @@ P <optional> <replaceable>years</>-<replaceable>months</>-<replaceable>days</> <
</row>
<row>
<entry>P1Y2M3DT4H5M6S</entry>
- <entry>ISO 8601 <quote>format with designators</>: same meaning as above</entry>
+ <entry>ISO 8601 <quote>format with designators</quote>: same meaning as above</entry>
</row>
<row>
<entry>P0001-02-03T04:05:06</entry>
- <entry>ISO 8601 <quote>alternative format</>: same meaning as above</entry>
+ <entry>ISO 8601 <quote>alternative format</quote>: same meaning as above</entry>
</row>
</tbody>
</tgroup>
@@ -2747,16 +2747,16 @@ P <optional> <replaceable>years</>-<replaceable>months</>-<replaceable>days</> <
<para>
The output format of the interval type can be set to one of the
- four styles <literal>sql_standard</>, <literal>postgres</>,
- <literal>postgres_verbose</>, or <literal>iso_8601</>,
+ four styles <literal>sql_standard</literal>, <literal>postgres</literal>,
+ <literal>postgres_verbose</literal>, or <literal>iso_8601</literal>,
using the command <literal>SET intervalstyle</literal>.
- The default is the <literal>postgres</> format.
+ The default is the <literal>postgres</literal> format.
<xref linkend="interval-style-output-table"> shows examples of each
output style.
</para>
<para>
- The <literal>sql_standard</> style produces output that conforms to
+ The <literal>sql_standard</literal> style produces output that conforms to
the SQL standard's specification for interval literal strings, if
the interval value meets the standard's restrictions (either year-month
only or day-time only, with no mixing of positive
@@ -2766,20 +2766,20 @@ P <optional> <replaceable>years</>-<replaceable>months</>-<replaceable>days</> <
</para>
<para>
- The output of the <literal>postgres</> style matches the output of
- <productname>PostgreSQL</> releases prior to 8.4 when the
- <xref linkend="guc-datestyle"> parameter was set to <literal>ISO</>.
+ The output of the <literal>postgres</literal> style matches the output of
+ <productname>PostgreSQL</productname> releases prior to 8.4 when the
+ <xref linkend="guc-datestyle"> parameter was set to <literal>ISO</literal>.
</para>
<para>
- The output of the <literal>postgres_verbose</> style matches the output of
- <productname>PostgreSQL</> releases prior to 8.4 when the
- <varname>DateStyle</> parameter was set to non-<literal>ISO</> output.
+ The output of the <literal>postgres_verbose</literal> style matches the output of
+ <productname>PostgreSQL</productname> releases prior to 8.4 when the
+ <varname>DateStyle</varname> parameter was set to non-<literal>ISO</literal> output.
</para>
<para>
- The output of the <literal>iso_8601</> style matches the <quote>format
- with designators</> described in section 4.4.3.2 of the
+ The output of the <literal>iso_8601</literal> style matches the <quote>format
+ with designators</quote> described in section 4.4.3.2 of the
ISO 8601 standard.
</para>
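
   [A brief sketch of switching styles in a psql session; not part of the patch.]

SET intervalstyle = 'iso_8601';
SELECT INTERVAL '1 year 2 months 3 days 4 hours 5 minutes 6 seconds';  -- P1Y2M3DT4H5M6S
SET intervalstyle = 'sql_standard';
SELECT INTERVAL '1 year 2 months';                                     -- 1-2
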
@@ -2796,25 +2796,25 @@ P <optional> <replaceable>years</>-<replaceable>months</>-<replaceable>days</> <
</thead>
<tbody>
<row>
- <entry><literal>sql_standard</></entry>
+ <entry><literal>sql_standard</literal></entry>
<entry>1-2</entry>
<entry>3 4:05:06</entry>
<entry>-1-2 +3 -4:05:06</entry>
</row>
<row>
- <entry><literal>postgres</></entry>
+ <entry><literal>postgres</literal></entry>
<entry>1 year 2 mons</entry>
<entry>3 days 04:05:06</entry>
<entry>-1 year -2 mons +3 days -04:05:06</entry>
</row>
<row>
- <entry><literal>postgres_verbose</></entry>
+ <entry><literal>postgres_verbose</literal></entry>
<entry>@ 1 year 2 mons</entry>
<entry>@ 3 days 4 hours 5 mins 6 secs</entry>
<entry>@ 1 year 2 mons -3 days 4 hours 5 mins 6 secs ago</entry>
</row>
<row>
- <entry><literal>iso_8601</></entry>
+ <entry><literal>iso_8601</literal></entry>
<entry>P1Y2M</entry>
<entry>P3DT4H5M6S</entry>
<entry>P-1Y-2M3DT-4H-5M-6S</entry>
@@ -3178,7 +3178,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
<replaceable>x</replaceable> , <replaceable>y</replaceable>
</synopsis>
- where <replaceable>x</> and <replaceable>y</> are the respective
+ where <replaceable>x</replaceable> and <replaceable>y</replaceable> are the respective
coordinates, as floating-point numbers.
</para>
@@ -3196,8 +3196,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
<para>
Lines are represented by the linear
- equation <replaceable>A</>x + <replaceable>B</>y + <replaceable>C</> = 0,
- where <replaceable>A</> and <replaceable>B</> are not both zero. Values
+ equation <replaceable>A</replaceable>x + <replaceable>B</replaceable>y + <replaceable>C</replaceable> = 0,
+ where <replaceable>A</replaceable> and <replaceable>B</replaceable> are not both zero. Values
of type <type>line</type> are input and output in the following form:
<synopsis>
{ <replaceable>A</replaceable>, <replaceable>B</replaceable>, <replaceable>C</replaceable> }
@@ -3324,8 +3324,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
</synopsis>
where the points are the end points of the line segments
- comprising the path. Square brackets (<literal>[]</>) indicate
- an open path, while parentheses (<literal>()</>) indicate a
+ comprising the path. Square brackets (<literal>[]</literal>) indicate
+ an open path, while parentheses (<literal>()</literal>) indicate a
closed path. When the outermost parentheses are omitted, as
in the third through fifth syntaxes, a closed path is assumed.
</para>
@@ -3388,7 +3388,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
</synopsis>
where
- <literal>(<replaceable>x</replaceable>,<replaceable>y</replaceable>)</>
+ <literal>(<replaceable>x</replaceable>,<replaceable>y</replaceable>)</literal>
is the center point and <replaceable>r</replaceable> is the radius of the
circle.
</para>
@@ -3409,7 +3409,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
</indexterm>
<para>
- <productname>PostgreSQL</> offers data types to store IPv4, IPv6, and MAC
+ <productname>PostgreSQL</productname> offers data types to store IPv4, IPv6, and MAC
addresses, as shown in <xref linkend="datatype-net-types-table">. It
is better to use these types instead of plain text types to store
network addresses, because
@@ -3503,7 +3503,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
</sect2>
<sect2 id="datatype-cidr">
- <title><type>cidr</></title>
+ <title><type>cidr</type></title>
<indexterm>
<primary>cidr</primary>
@@ -3514,11 +3514,11 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
Input and output formats follow Classless Internet Domain Routing
conventions.
The format for specifying networks is <replaceable
- class="parameter">address/y</> where <replaceable
- class="parameter">address</> is the network represented as an
+ class="parameter">address/y</replaceable> where <replaceable
+ class="parameter">address</replaceable> is the network represented as an
IPv4 or IPv6 address, and <replaceable
- class="parameter">y</> is the number of bits in the netmask. If
- <replaceable class="parameter">y</> is omitted, it is calculated
+ class="parameter">y</replaceable> is the number of bits in the netmask. If
+ <replaceable class="parameter">y</replaceable> is omitted, it is calculated
using assumptions from the older classful network numbering system, except
it will be at least large enough to include all of the octets
written in the input. It is an error to specify a network address
@@ -3530,7 +3530,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
</para>
<table id="datatype-net-cidr-table">
- <title><type>cidr</> Type Input Examples</title>
+ <title><type>cidr</type> Type Input Examples</title>
<tgroup cols="3">
<thead>
<row>
@@ -3639,8 +3639,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
<tip>
<para>
If you do not like the output format for <type>inet</type> or
- <type>cidr</type> values, try the functions <function>host</>,
- <function>text</>, and <function>abbrev</>.
+ <type>cidr</type> values, try the functions <function>host</function>,
+ <function>text</function>, and <function>abbrev</function>.
</para>
</tip>
</sect2>
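
   [Illustrative calls, with results as documented for these functions elsewhere in the manual.]

SELECT host(inet '192.168.1.5/24');   -- 192.168.1.5
SELECT text(inet '192.168.1.5');      -- 192.168.1.5/32
SELECT abbrev(cidr '10.1.0.0/16');    -- 10.1/16
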
@@ -3658,24 +3658,24 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
</indexterm>
<para>
- The <type>macaddr</> type stores MAC addresses, known for example
+ The <type>macaddr</type> type stores MAC addresses, known for example
from Ethernet card hardware addresses (although MAC addresses are
used for other purposes as well). Input is accepted in the
following formats:
<simplelist>
- <member><literal>'08:00:2b:01:02:03'</></member>
- <member><literal>'08-00-2b-01-02-03'</></member>
- <member><literal>'08002b:010203'</></member>
- <member><literal>'08002b-010203'</></member>
- <member><literal>'0800.2b01.0203'</></member>
- <member><literal>'0800-2b01-0203'</></member>
- <member><literal>'08002b010203'</></member>
+ <member><literal>'08:00:2b:01:02:03'</literal></member>
+ <member><literal>'08-00-2b-01-02-03'</literal></member>
+ <member><literal>'08002b:010203'</literal></member>
+ <member><literal>'08002b-010203'</literal></member>
+ <member><literal>'0800.2b01.0203'</literal></member>
+ <member><literal>'0800-2b01-0203'</literal></member>
+ <member><literal>'08002b010203'</literal></member>
</simplelist>
These examples would all specify the same address. Upper and
lower case is accepted for the digits
- <literal>a</> through <literal>f</>. Output is always in the
+ <literal>a</literal> through <literal>f</literal>. Output is always in the
first of the forms shown.
</para>
@@ -3708,7 +3708,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
</indexterm>
<para>
- The <type>macaddr8</> type stores MAC addresses in EUI-64
+ The <type>macaddr8</type> type stores MAC addresses in EUI-64
format, known for example from Ethernet card hardware addresses
(although MAC addresses are used for other purposes as well).
This type can accept both 6 and 8 byte length MAC addresses
@@ -3718,31 +3718,31 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
Note that IPv6 uses a modified EUI-64 format where the 7th bit
should be set to one after the conversion from EUI-48. The
- function <function>macaddr8_set7bit</> is provided to make this
+ function <function>macaddr8_set7bit</function> is provided to make this
change.
Generally speaking, any input which is comprised of pairs of hex
digits (on byte boundaries), optionally separated consistently by
- one of <literal>':'</>, <literal>'-'</> or <literal>'.'</>, is
+ one of <literal>':'</literal>, <literal>'-'</literal> or <literal>'.'</literal>, is
accepted. The number of hex digits must be either 16 (8 bytes) or
12 (6 bytes). Leading and trailing whitespace is ignored.
The following are examples of input formats that are accepted:
<simplelist>
- <member><literal>'08:00:2b:01:02:03:04:05'</></member>
- <member><literal>'08-00-2b-01-02-03-04-05'</></member>
- <member><literal>'08002b:0102030405'</></member>
- <member><literal>'08002b-0102030405'</></member>
- <member><literal>'0800.2b01.0203.0405'</></member>
- <member><literal>'0800-2b01-0203-0405'</></member>
- <member><literal>'08002b01:02030405'</></member>
- <member><literal>'08002b0102030405'</></member>
+ <member><literal>'08:00:2b:01:02:03:04:05'</literal></member>
+ <member><literal>'08-00-2b-01-02-03-04-05'</literal></member>
+ <member><literal>'08002b:0102030405'</literal></member>
+ <member><literal>'08002b-0102030405'</literal></member>
+ <member><literal>'0800.2b01.0203.0405'</literal></member>
+ <member><literal>'0800-2b01-0203-0405'</literal></member>
+ <member><literal>'08002b01:02030405'</literal></member>
+ <member><literal>'08002b0102030405'</literal></member>
</simplelist>
These examples would all specify the same address. Upper and
lower case is accepted for the digits
- <literal>a</> through <literal>f</>. Output is always in the
+ <literal>a</literal> through <literal>f</literal>. Output is always in the
first of the forms shown.
The last six input formats that are mentioned above are not part
@@ -3750,7 +3750,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
To convert a traditional 48 bit MAC address in EUI-48 format to
modified EUI-64 format to be included as the host portion of an
- IPv6 address, use <function>macaddr8_set7bit</> as shown:
+ IPv6 address, use <function>macaddr8_set7bit</function> as shown:
<programlisting>
SELECT macaddr8_set7bit('08:00:2b:01:02:03');
@@ -3798,12 +3798,12 @@ SELECT macaddr8_set7bit('08:00:2b:01:02:03');
<note>
<para>
If one explicitly casts a bit-string value to
- <type>bit(<replaceable>n</>)</type>, it will be truncated or
- zero-padded on the right to be exactly <replaceable>n</> bits,
+ <type>bit(<replaceable>n</replaceable>)</type>, it will be truncated or
+ zero-padded on the right to be exactly <replaceable>n</replaceable> bits,
without raising an error. Similarly,
if one explicitly casts a bit-string value to
- <type>bit varying(<replaceable>n</>)</type>, it will be truncated
- on the right if it is more than <replaceable>n</> bits.
+ <type>bit varying(<replaceable>n</replaceable>)</type>, it will be truncated
+ on the right if it is more than <replaceable>n</replaceable> bits.
</para>
</note>
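
   [Editorial sketch of the casting behavior described in the note.]

SELECT B'101'::bit(5);                -- 10100, zero-padded on the right
SELECT B'10101010'::bit(5);           -- 10101, truncated without error
SELECT B'10101010'::bit varying(5);   -- 10101
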
@@ -3860,8 +3860,8 @@ SELECT * FROM test;
<para>
<productname>PostgreSQL</productname> provides two data types that
are designed to support full text search, which is the activity of
- searching through a collection of natural-language <firstterm>documents</>
- to locate those that best match a <firstterm>query</>.
+ searching through a collection of natural-language <firstterm>documents</firstterm>
+ to locate those that best match a <firstterm>query</firstterm>.
The <type>tsvector</type> type represents a document in a form optimized
for text search; the <type>tsquery</type> type similarly represents
a text query.
@@ -3879,8 +3879,8 @@ SELECT * FROM test;
<para>
A <type>tsvector</type> value is a sorted list of distinct
- <firstterm>lexemes</>, which are words that have been
- <firstterm>normalized</> to merge different variants of the same word
+ <firstterm>lexemes</firstterm>, which are words that have been
+ <firstterm>normalized</firstterm> to merge different variants of the same word
(see <xref linkend="textsearch"> for details). Sorting and
duplicate-elimination are done automatically during input, as shown in
this example:
@@ -3913,7 +3913,7 @@ SELECT $$the lexeme 'Joe''s' contains a quote$$::tsvector;
'Joe''s' 'a' 'contains' 'lexeme' 'quote' 'the'
</programlisting>
- Optionally, integer <firstterm>positions</>
+ Optionally, integer <firstterm>positions</firstterm>
can be attached to lexemes:
<programlisting>
@@ -3932,7 +3932,7 @@ SELECT 'a:1 fat:2 cat:3 sat:4 on:5 a:6 mat:7 and:8 ate:9 a:10 fat:11 rat:12'::ts
<para>
Lexemes that have positions can further be labeled with a
- <firstterm>weight</>, which can be <literal>A</literal>,
+ <firstterm>weight</firstterm>, which can be <literal>A</literal>,
<literal>B</literal>, <literal>C</literal>, or <literal>D</literal>.
<literal>D</literal> is the default and hence is not shown on output:
@@ -3965,7 +3965,7 @@ SELECT 'The Fat Rats'::tsvector;
For most English-text-searching applications the above words would
be considered non-normalized, but <type>tsvector</type> doesn't care.
Raw document text should usually be passed through
- <function>to_tsvector</> to normalize the words appropriately
+ <function>to_tsvector</function> to normalize the words appropriately
for searching:
<programlisting>
@@ -3991,17 +3991,17 @@ SELECT to_tsvector('english', 'The Fat Rats');
A <type>tsquery</type> value stores lexemes that are to be
searched for, and can combine them using the Boolean operators
<literal>&amp;</literal> (AND), <literal>|</literal> (OR), and
- <literal>!</> (NOT), as well as the phrase search operator
- <literal>&lt;-&gt;</> (FOLLOWED BY). There is also a variant
- <literal>&lt;<replaceable>N</>&gt;</literal> of the FOLLOWED BY
- operator, where <replaceable>N</> is an integer constant that
+ <literal>!</literal> (NOT), as well as the phrase search operator
+ <literal>&lt;-&gt;</literal> (FOLLOWED BY). There is also a variant
+ <literal>&lt;<replaceable>N</replaceable>&gt;</literal> of the FOLLOWED BY
+ operator, where <replaceable>N</replaceable> is an integer constant that
specifies the distance between the two lexemes being searched
- for. <literal>&lt;-&gt;</> is equivalent to <literal>&lt;1&gt;</>.
+ for. <literal>&lt;-&gt;</literal> is equivalent to <literal>&lt;1&gt;</literal>.
</para>
<para>
Parentheses can be used to enforce grouping of these operators.
- In the absence of parentheses, <literal>!</> (NOT) binds most tightly,
+ In the absence of parentheses, <literal>!</literal> (NOT) binds most tightly,
<literal>&lt;-&gt;</literal> (FOLLOWED BY) next most tightly, then
<literal>&amp;</literal> (AND), with <literal>|</literal> (OR) binding
the least tightly.
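
   [An editorial sketch of the operators and their grouping; not part of the patch.]

SELECT 'fat & rat | cat'::tsquery;     -- grouped as ('fat' & 'rat') | 'cat'
SELECT 'fat & (rat | cat)'::tsquery;   -- parentheses override the default precedence
SELECT to_tsvector('english', 'The fat cats sat on the mat')
       @@ to_tsquery('english', 'fat <-> cat');   -- true: 'cat' directly follows 'fat'
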
@@ -4031,7 +4031,7 @@ SELECT 'fat &amp; rat &amp; ! cat'::tsquery;
<para>
Optionally, lexemes in a <type>tsquery</type> can be labeled with
one or more weight letters, which restricts them to match only
- <type>tsvector</> lexemes with one of those weights:
+ <type>tsvector</type> lexemes with one of those weights:
<programlisting>
SELECT 'fat:ab &amp; cat'::tsquery;
@@ -4042,7 +4042,7 @@ SELECT 'fat:ab &amp; cat'::tsquery;
</para>
<para>
- Also, lexemes in a <type>tsquery</type> can be labeled with <literal>*</>
+ Also, lexemes in a <type>tsquery</type> can be labeled with <literal>*</literal>
to specify prefix matching:
<programlisting>
SELECT 'super:*'::tsquery;
@@ -4050,15 +4050,15 @@ SELECT 'super:*'::tsquery;
-----------
'super':*
</programlisting>
- This query will match any word in a <type>tsvector</> that begins
- with <quote>super</>.
+ This query will match any word in a <type>tsvector</type> that begins
+ with <quote>super</quote>.
</para>
<para>
Quoting rules for lexemes are the same as described previously for
- lexemes in <type>tsvector</>; and, as with <type>tsvector</>,
+ lexemes in <type>tsvector</type>; and, as with <type>tsvector</type>,
any required normalization of words must be done before converting
- to the <type>tsquery</> type. The <function>to_tsquery</>
+ to the <type>tsquery</type> type. The <function>to_tsquery</function>
function is convenient for performing such normalization:
<programlisting>
@@ -4068,7 +4068,7 @@ SELECT to_tsquery('Fat:ab &amp; Cats');
'fat':AB &amp; 'cat'
</programlisting>
- Note that <function>to_tsquery</> will process prefixes in the same way
+ Note that <function>to_tsquery</function> will process prefixes in the same way
as other words, which means this comparison returns true:
<programlisting>
@@ -4077,14 +4077,14 @@ SELECT to_tsvector( 'postgraduate' ) @@ to_tsquery( 'postgres:*' );
----------
t
</programlisting>
- because <literal>postgres</> gets stemmed to <literal>postgr</>:
+ because <literal>postgres</literal> gets stemmed to <literal>postgr</literal>:
<programlisting>
SELECT to_tsvector( 'postgraduate' ), to_tsquery( 'postgres:*' );
to_tsvector | to_tsquery
---------------+------------
'postgradu':1 | 'postgr':*
</programlisting>
- which will match the stemmed form of <literal>postgraduate</>.
+ which will match the stemmed form of <literal>postgraduate</literal>.
</para>
</sect2>
@@ -4150,7 +4150,7 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
</sect1>
<sect1 id="datatype-xml">
- <title><acronym>XML</> Type</title>
+ <title><acronym>XML</acronym> Type</title>
<indexterm zone="datatype-xml">
<primary>XML</primary>
@@ -4163,7 +4163,7 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
functions to perform type-safe operations on it; see <xref
linkend="functions-xml">. Use of this data type requires the
installation to have been built with <command>configure
- --with-libxml</>.
+ --with-libxml</command>.
</para>
<para>
@@ -4311,7 +4311,7 @@ SET xmloption TO { DOCUMENT | CONTENT };
<para>
Some XML-related functions may not work at all on non-ASCII data
when the server encoding is not UTF-8. This is known to be an
- issue for <function>xmltable()</> and <function>xpath()</> in particular.
+ issue for <function>xmltable()</function> and <function>xpath()</function> in particular.
</para>
</caution>
</sect2>
@@ -4421,17 +4421,17 @@ SET xmloption TO { DOCUMENT | CONTENT };
system tables. OIDs are not added to user-created tables, unless
<literal>WITH OIDS</literal> is specified when the table is
created, or the <xref linkend="guc-default-with-oids">
- configuration variable is enabled. Type <type>oid</> represents
+ configuration variable is enabled. Type <type>oid</type> represents
an object identifier. There are also several alias types for
- <type>oid</>: <type>regproc</>, <type>regprocedure</>,
- <type>regoper</>, <type>regoperator</>, <type>regclass</>,
- <type>regtype</>, <type>regrole</>, <type>regnamespace</>,
- <type>regconfig</>, and <type>regdictionary</>.
+ <type>oid</type>: <type>regproc</type>, <type>regprocedure</type>,
+ <type>regoper</type>, <type>regoperator</type>, <type>regclass</type>,
+ <type>regtype</type>, <type>regrole</type>, <type>regnamespace</type>,
+ <type>regconfig</type>, and <type>regdictionary</type>.
<xref linkend="datatype-oid-table"> shows an overview.
</para>
<para>
- The <type>oid</> type is currently implemented as an unsigned
+ The <type>oid</type> type is currently implemented as an unsigned
four-byte integer. Therefore, it is not large enough to provide
database-wide uniqueness in large databases, or even in large
individual tables. So, using a user-created table's OID column as
@@ -4440,7 +4440,7 @@ SET xmloption TO { DOCUMENT | CONTENT };
</para>
<para>
- The <type>oid</> type itself has few operations beyond comparison.
+ The <type>oid</type> type itself has few operations beyond comparison.
It can be cast to integer, however, and then manipulated using the
standard integer operators. (Beware of possible
signed-versus-unsigned confusion if you do this.)
@@ -4450,10 +4450,10 @@ SET xmloption TO { DOCUMENT | CONTENT };
The OID alias types have no operations of their own except
for specialized input and output routines. These routines are able
to accept and display symbolic names for system objects, rather than
- the raw numeric value that type <type>oid</> would use. The alias
+ the raw numeric value that type <type>oid</type> would use. The alias
types allow simplified lookup of OID values for objects. For example,
- to examine the <structname>pg_attribute</> rows related to a table
- <literal>mytable</>, one could write:
+ to examine the <structname>pg_attribute</structname> rows related to a table
+ <literal>mytable</literal>, one could write:
<programlisting>
SELECT * FROM pg_attribute WHERE attrelid = 'mytable'::regclass;
</programlisting>
@@ -4465,11 +4465,11 @@ SELECT * FROM pg_attribute
While that doesn't look all that bad by itself, it's still oversimplified.
A far more complicated sub-select would be needed to
select the right OID if there are multiple tables named
- <literal>mytable</> in different schemas.
- The <type>regclass</> input converter handles the table lookup according
- to the schema path setting, and so it does the <quote>right thing</>
+ <literal>mytable</literal> in different schemas.
+ The <type>regclass</type> input converter handles the table lookup according
+ to the schema path setting, and so it does the <quote>right thing</quote>
automatically. Similarly, casting a table's OID to
- <type>regclass</> is handy for symbolic display of a numeric OID.
+ <type>regclass</type> is handy for symbolic display of a numeric OID.
</para>
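
   [A hedged sketch of the symbolic lookup and display, not from the patch.]

SELECT 'pg_class'::regclass::oid;   -- the numeric OID behind the symbolic name
SELECT attrelid::regclass, attname
  FROM pg_attribute
 WHERE attrelid = 'pg_attribute'::regclass
 LIMIT 3;                           -- OIDs displayed as table names on output
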
<table id="datatype-oid-table">
@@ -4487,80 +4487,80 @@ SELECT * FROM pg_attribute
<tbody>
<row>
- <entry><type>oid</></entry>
+ <entry><type>oid</type></entry>
<entry>any</entry>
<entry>numeric object identifier</entry>
- <entry><literal>564182</></entry>
+ <entry><literal>564182</literal></entry>
</row>
<row>
- <entry><type>regproc</></entry>
- <entry><structname>pg_proc</></entry>
+ <entry><type>regproc</type></entry>
+ <entry><structname>pg_proc</structname></entry>
<entry>function name</entry>
- <entry><literal>sum</></entry>
+ <entry><literal>sum</literal></entry>
</row>
<row>
- <entry><type>regprocedure</></entry>
- <entry><structname>pg_proc</></entry>
+ <entry><type>regprocedure</type></entry>
+ <entry><structname>pg_proc</structname></entry>
<entry>function with argument types</entry>
- <entry><literal>sum(int4)</></entry>
+ <entry><literal>sum(int4)</literal></entry>
</row>
<row>
- <entry><type>regoper</></entry>
- <entry><structname>pg_operator</></entry>
+ <entry><type>regoper</type></entry>
+ <entry><structname>pg_operator</structname></entry>
<entry>operator name</entry>
- <entry><literal>+</></entry>
+ <entry><literal>+</literal></entry>
</row>
<row>
- <entry><type>regoperator</></entry>
- <entry><structname>pg_operator</></entry>
+ <entry><type>regoperator</type></entry>
+ <entry><structname>pg_operator</structname></entry>
<entry>operator with argument types</entry>
- <entry><literal>*(integer,integer)</> or <literal>-(NONE,integer)</></entry>
+ <entry><literal>*(integer,integer)</literal> or <literal>-(NONE,integer)</literal></entry>
</row>
<row>
- <entry><type>regclass</></entry>
- <entry><structname>pg_class</></entry>
+ <entry><type>regclass</type></entry>
+ <entry><structname>pg_class</structname></entry>
<entry>relation name</entry>
- <entry><literal>pg_type</></entry>
+ <entry><literal>pg_type</literal></entry>
</row>
<row>
- <entry><type>regtype</></entry>
- <entry><structname>pg_type</></entry>
+ <entry><type>regtype</type></entry>
+ <entry><structname>pg_type</structname></entry>
<entry>data type name</entry>
- <entry><literal>integer</></entry>
+ <entry><literal>integer</literal></entry>
</row>
<row>
- <entry><type>regrole</></entry>
- <entry><structname>pg_authid</></entry>
+ <entry><type>regrole</type></entry>
+ <entry><structname>pg_authid</structname></entry>
<entry>role name</entry>
- <entry><literal>smithee</></entry>
+ <entry><literal>smithee</literal></entry>
</row>
<row>
- <entry><type>regnamespace</></entry>
- <entry><structname>pg_namespace</></entry>
+ <entry><type>regnamespace</type></entry>
+ <entry><structname>pg_namespace</structname></entry>
<entry>namespace name</entry>
- <entry><literal>pg_catalog</></entry>
+ <entry><literal>pg_catalog</literal></entry>
</row>
<row>
- <entry><type>regconfig</></entry>
- <entry><structname>pg_ts_config</></entry>
+ <entry><type>regconfig</type></entry>
+ <entry><structname>pg_ts_config</structname></entry>
<entry>text search configuration</entry>
- <entry><literal>english</></entry>
+ <entry><literal>english</literal></entry>
</row>
<row>
- <entry><type>regdictionary</></entry>
- <entry><structname>pg_ts_dict</></entry>
+ <entry><type>regdictionary</type></entry>
+ <entry><structname>pg_ts_dict</structname></entry>
<entry>text search dictionary</entry>
- <entry><literal>simple</></entry>
+ <entry><literal>simple</literal></entry>
</row>
</tbody>
</tgroup>
@@ -4571,11 +4571,11 @@ SELECT * FROM pg_attribute
schema-qualified names, and will
display schema-qualified names on output if the object would not
be found in the current search path without being qualified.
- The <type>regproc</> and <type>regoper</> alias types will only
+ The <type>regproc</type> and <type>regoper</type> alias types will only
accept input names that are unique (not overloaded), so they are
- of limited use; for most uses <type>regprocedure</> or
- <type>regoperator</> are more appropriate. For <type>regoperator</>,
- unary operators are identified by writing <literal>NONE</> for the unused
+ of limited use; for most uses <type>regprocedure</type> or
+ <type>regoperator</type> are more appropriate. For <type>regoperator</type>,
+ unary operators are identified by writing <literal>NONE</literal> for the unused
operand.
</para>
@@ -4585,12 +4585,12 @@ SELECT * FROM pg_attribute
constant of one of these types appears in a stored expression
(such as a column default expression or view), it creates a dependency
on the referenced object. For example, if a column has a default
- expression <literal>nextval('my_seq'::regclass)</>,
+ expression <literal>nextval('my_seq'::regclass)</literal>,
<productname>PostgreSQL</productname>
understands that the default expression depends on the sequence
- <literal>my_seq</>; the system will not let the sequence be dropped
+ <literal>my_seq</literal>; the system will not let the sequence be dropped
without first removing the default expression.
- <type>regrole</> is the only exception for the property. Constants of this
+ <type>regrole</type> is the only exception for the property. Constants of this
type are not allowed in such expressions.
</para>
@@ -4603,21 +4603,21 @@ SELECT * FROM pg_attribute
</note>
<para>
- Another identifier type used by the system is <type>xid</>, or transaction
- (abbreviated <abbrev>xact</>) identifier. This is the data type of the system columns
- <structfield>xmin</> and <structfield>xmax</>. Transaction identifiers are 32-bit quantities.
+ Another identifier type used by the system is <type>xid</type>, or transaction
+ (abbreviated <abbrev>xact</abbrev>) identifier. This is the data type of the system columns
+ <structfield>xmin</structfield> and <structfield>xmax</structfield>. Transaction identifiers are 32-bit quantities.
</para>
<para>
- A third identifier type used by the system is <type>cid</>, or
+ A third identifier type used by the system is <type>cid</type>, or
command identifier. This is the data type of the system columns
- <structfield>cmin</> and <structfield>cmax</>. Command identifiers are also 32-bit quantities.
+ <structfield>cmin</structfield> and <structfield>cmax</structfield>. Command identifiers are also 32-bit quantities.
</para>
<para>
- A final identifier type used by the system is <type>tid</>, or tuple
+ A final identifier type used by the system is <type>tid</type>, or tuple
identifier (row identifier). This is the data type of the system column
- <structfield>ctid</>. A tuple ID is a pair
+ <structfield>ctid</structfield>. A tuple ID is a pair
(block number, tuple index within block) that identifies the
physical location of the row within its table.
</para>
@@ -4646,7 +4646,7 @@ SELECT * FROM pg_attribute
Internally, an LSN is a 64-bit integer, representing a byte position in
the write-ahead log stream. It is printed as two hexadecimal numbers of
up to 8 digits each, separated by a slash; for example,
- <literal>16/B374D848</>. The <type>pg_lsn</type> type supports the
+ <literal>16/B374D848</literal>. The <type>pg_lsn</type> type supports the
standard comparison operators, like <literal>=</literal> and
<literal>&gt;</literal>. Two LSNs can be subtracted using the
<literal>-</literal> operator; the result is the number of bytes separating
@@ -4736,7 +4736,7 @@ SELECT * FROM pg_attribute
<para>
The <productname>PostgreSQL</productname> type system contains a
number of special-purpose entries that are collectively called
- <firstterm>pseudo-types</>. A pseudo-type cannot be used as a
+ <firstterm>pseudo-types</firstterm>. A pseudo-type cannot be used as a
column data type, but it can be used to declare a function's
argument or result type. Each of the available pseudo-types is
useful in situations where a function's behavior does not
@@ -4758,106 +4758,106 @@ SELECT * FROM pg_attribute
<tbody>
<row>
- <entry><type>any</></entry>
+ <entry><type>any</type></entry>
<entry>Indicates that a function accepts any input data type.</entry>
</row>
<row>
- <entry><type>anyelement</></entry>
+ <entry><type>anyelement</type></entry>
<entry>Indicates that a function accepts any data type
(see <xref linkend="extend-types-polymorphic">).</entry>
</row>
<row>
- <entry><type>anyarray</></entry>
+ <entry><type>anyarray</type></entry>
<entry>Indicates that a function accepts any array data type
(see <xref linkend="extend-types-polymorphic">).</entry>
</row>
<row>
- <entry><type>anynonarray</></entry>
+ <entry><type>anynonarray</type></entry>
<entry>Indicates that a function accepts any non-array data type
(see <xref linkend="extend-types-polymorphic">).</entry>
</row>
<row>
- <entry><type>anyenum</></entry>
+ <entry><type>anyenum</type></entry>
<entry>Indicates that a function accepts any enum data type
(see <xref linkend="extend-types-polymorphic"> and
<xref linkend="datatype-enum">).</entry>
</row>
<row>
- <entry><type>anyrange</></entry>
+ <entry><type>anyrange</type></entry>
<entry>Indicates that a function accepts any range data type
(see <xref linkend="extend-types-polymorphic"> and
<xref linkend="rangetypes">).</entry>
</row>
<row>
- <entry><type>cstring</></entry>
+ <entry><type>cstring</type></entry>
<entry>Indicates that a function accepts or returns a null-terminated C string.</entry>
</row>
<row>
- <entry><type>internal</></entry>
+ <entry><type>internal</type></entry>
<entry>Indicates that a function accepts or returns a server-internal
data type.</entry>
</row>
<row>
- <entry><type>language_handler</></entry>
- <entry>A procedural language call handler is declared to return <type>language_handler</>.</entry>
+ <entry><type>language_handler</type></entry>
+ <entry>A procedural language call handler is declared to return <type>language_handler</type>.</entry>
</row>
<row>
- <entry><type>fdw_handler</></entry>
- <entry>A foreign-data wrapper handler is declared to return <type>fdw_handler</>.</entry>
+ <entry><type>fdw_handler</type></entry>
+ <entry>A foreign-data wrapper handler is declared to return <type>fdw_handler</type>.</entry>
</row>
<row>
- <entry><type>index_am_handler</></entry>
- <entry>An index access method handler is declared to return <type>index_am_handler</>.</entry>
+ <entry><type>index_am_handler</type></entry>
+ <entry>An index access method handler is declared to return <type>index_am_handler</type>.</entry>
</row>
<row>
- <entry><type>tsm_handler</></entry>
- <entry>A tablesample method handler is declared to return <type>tsm_handler</>.</entry>
+ <entry><type>tsm_handler</type></entry>
+ <entry>A tablesample method handler is declared to return <type>tsm_handler</type>.</entry>
</row>
<row>
- <entry><type>record</></entry>
+ <entry><type>record</type></entry>
<entry>Identifies a function taking or returning an unspecified row type.</entry>
</row>
<row>
- <entry><type>trigger</></entry>
- <entry>A trigger function is declared to return <type>trigger.</></entry>
+ <entry><type>trigger</type></entry>
+ <entry>A trigger function is declared to return <type>trigger.</type></entry>
</row>
<row>
- <entry><type>event_trigger</></entry>
- <entry>An event trigger function is declared to return <type>event_trigger.</></entry>
+ <entry><type>event_trigger</type></entry>
+ <entry>An event trigger function is declared to return <type>event_trigger.</type></entry>
</row>
<row>
- <entry><type>pg_ddl_command</></entry>
+ <entry><type>pg_ddl_command</type></entry>
<entry>Identifies a representation of DDL commands that is available to event triggers.</entry>
</row>
<row>
- <entry><type>void</></entry>
+ <entry><type>void</type></entry>
<entry>Indicates that a function returns no value.</entry>
</row>
<row>
- <entry><type>unknown</></entry>
+ <entry><type>unknown</type></entry>
<entry>Identifies a not-yet-resolved type, e.g. of an undecorated
string literal.</entry>
</row>
<row>
- <entry><type>opaque</></entry>
+ <entry><type>opaque</type></entry>
<entry>An obsolete type name that formerly served many of the above
purposes.</entry>
</row>
@@ -4876,24 +4876,24 @@ SELECT * FROM pg_attribute
Functions coded in procedural languages can use pseudo-types only as
allowed by their implementation languages. At present most procedural
languages forbid use of a pseudo-type as an argument type, and allow
- only <type>void</> and <type>record</> as a result type (plus
- <type>trigger</> or <type>event_trigger</> when the function is used
+ only <type>void</type> and <type>record</type> as a result type (plus
+ <type>trigger</type> or <type>event_trigger</type> when the function is used
as a trigger or event trigger). Some also
- support polymorphic functions using the types <type>anyelement</>,
- <type>anyarray</>, <type>anynonarray</>, <type>anyenum</>, and
- <type>anyrange</>.
+ support polymorphic functions using the types <type>anyelement</type>,
+ <type>anyarray</type>, <type>anynonarray</type>, <type>anyenum</type>, and
+ <type>anyrange</type>.
</para>
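As a sketch of how a polymorphic pseudo-type is used in practice (the function name and calls are illustrative assumptions), a PL/pgSQL function declared with anyelement might look like:
<programlisting>
-- Illustrative sketch: a polymorphic function using the anyelement pseudo-type.
CREATE FUNCTION coalesce_or_default(val anyelement, fallback anyelement)
RETURNS anyelement AS $$
BEGIN
    RETURN COALESCE(val, fallback);
END;
$$ LANGUAGE plpgsql;

SELECT coalesce_or_default(NULL::integer, 42);   -- 42
SELECT coalesce_or_default('abc'::text, 'zzz');  -- abc
</programlisting>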
<para>
- The <type>internal</> pseudo-type is used to declare functions
+ The <type>internal</type> pseudo-type is used to declare functions
that are meant only to be called internally by the database
system, and not by direct invocation in an <acronym>SQL</acronym>
- query. If a function has at least one <type>internal</>-type
+ query. If a function has at least one <type>internal</type>-type
argument then it cannot be called from <acronym>SQL</acronym>. To
preserve the type safety of this restriction it is important to
follow this coding rule: do not create any function that is
- declared to return <type>internal</> unless it has at least one
- <type>internal</> argument.
+ declared to return <type>internal</type> unless it has at least one
+ <type>internal</type> argument.
</para>
</sect1>
diff --git a/doc/src/sgml/datetime.sgml b/doc/src/sgml/datetime.sgml
index ef9139f9e3..a533bbf8d2 100644
--- a/doc/src/sgml/datetime.sgml
+++ b/doc/src/sgml/datetime.sgml
@@ -37,18 +37,18 @@
<substeps>
<step>
<para>
- If the numeric token contains a colon (<literal>:</>), this is
+ If the numeric token contains a colon (<literal>:</literal>), this is
a time string. Include all subsequent digits and colons.
</para>
</step>
<step>
<para>
- If the numeric token contains a dash (<literal>-</>), slash
- (<literal>/</>), or two or more dots (<literal>.</>), this is
+ If the numeric token contains a dash (<literal>-</literal>), slash
+ (<literal>/</literal>), or two or more dots (<literal>.</literal>), this is
a date string which might have a text month. If a date token has
already been seen, it is instead interpreted as a time zone
- name (e.g., <literal>America/New_York</>).
+ name (e.g., <literal>America/New_York</literal>).
</para>
</step>
@@ -63,8 +63,8 @@
<step>
<para>
- If the token starts with a plus (<literal>+</>) or minus
- (<literal>-</>), then it is either a numeric time zone or a special
+ If the token starts with a plus (<literal>+</literal>) or minus
+ (<literal>-</literal>), then it is either a numeric time zone or a special
field.
</para>
</step>
@@ -114,7 +114,7 @@
and if no other date fields have been previously read, then interpret
as a <quote>concatenated date</quote> (e.g.,
<literal>19990118</literal> or <literal>990118</literal>).
- The interpretation is <literal>YYYYMMDD</> or <literal>YYMMDD</>.
+ The interpretation is <literal>YYYYMMDD</literal> or <literal>YYMMDD</literal>.
</para>
</step>
@@ -128,7 +128,7 @@
<step>
<para>
If four or six digits and a year has already been read, then
- interpret as a time (<literal>HHMM</> or <literal>HHMMSS</>).
+ interpret as a time (<literal>HHMM</literal> or <literal>HHMMSS</literal>).
</para>
</step>
@@ -143,7 +143,7 @@
<step>
<para>
Otherwise the date field ordering is assumed to follow the
- <varname>DateStyle</> setting: mm-dd-yy, dd-mm-yy, or yy-mm-dd.
+ <varname>DateStyle</varname> setting: mm-dd-yy, dd-mm-yy, or yy-mm-dd.
Throw an error if a month or day field is found to be out of range.
</para>
</step>
@@ -167,7 +167,7 @@
<tip>
<para>
Gregorian years AD 1-99 can be entered by using 4 digits with leading
- zeros (e.g., <literal>0099</> is AD 99).
+ zeros (e.g., <literal>0099</literal> is AD 99).
</para>
</tip>
</para>
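As a worked illustration of these rules (results assume a default DateStyle such as ISO, MDY):
<programlisting>
-- A colon marks a time string; dashes or slashes mark a date string.
SELECT time '18:30:05';    -- 18:30:05
SELECT date '1/18/1999';   -- 1999-01-18 under MDY field ordering
-- Six or eight digits, with no date field read yet, form a concatenated date.
SELECT date '19990118';    -- 1999-01-18 (YYYYMMDD)
-- Gregorian years AD 1-99 require four digits with leading zeros.
SELECT date '0099-06-01';  -- 0099-06-01 (AD 99)
</programlisting>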
@@ -317,7 +317,7 @@
<entry>Ignored</entry>
</row>
<row>
- <entry><literal>JULIAN</>, <literal>JD</>, <literal>J</></entry>
+ <entry><literal>JULIAN</literal>, <literal>JD</literal>, <literal>J</literal></entry>
<entry>Next field is Julian Date</entry>
</row>
<row>
@@ -354,23 +354,23 @@
can be altered by any database user, the possible values for it
are under the control of the database administrator &mdash; they
are in fact names of configuration files stored in
- <filename>.../share/timezonesets/</> of the installation directory.
+ <filename>.../share/timezonesets/</filename> of the installation directory.
By adding or altering files in that directory, the administrator
can set local policy for timezone abbreviations.
</para>
<para>
- <varname>timezone_abbreviations</> can be set to any file name
- found in <filename>.../share/timezonesets/</>, if the file's name
+ <varname>timezone_abbreviations</varname> can be set to any file name
+ found in <filename>.../share/timezonesets/</filename>, if the file's name
is entirely alphabetic. (The prohibition against non-alphabetic
- characters in <varname>timezone_abbreviations</> prevents reading
+ characters in <varname>timezone_abbreviations</varname> prevents reading
files outside the intended directory, as well as reading editor
backup files and other extraneous files.)
</para>
<para>
A timezone abbreviation file can contain blank lines and comments
- beginning with <literal>#</>. Non-comment lines must have one of
+ beginning with <literal>#</literal>. Non-comment lines must have one of
these formats:
<synopsis>
@@ -388,12 +388,12 @@
the equivalent offset in seconds from UTC, positive being east from
Greenwich and negative being west. For example, -18000 would be five
hours west of Greenwich, or North American east coast standard time.
- <literal>D</> indicates that the zone name represents local
+ <literal>D</literal> indicates that the zone name represents local
daylight-savings time rather than standard time.
</para>
<para>
- Alternatively, a <replaceable>time_zone_name</> can be given, referencing
+ Alternatively, a <replaceable>time_zone_name</replaceable> can be given, referencing
a zone name defined in the IANA timezone database. The zone's definition
is consulted to see whether the abbreviation is or has been in use in
that zone, and if so, the appropriate meaning is used &mdash; that is,
@@ -417,34 +417,34 @@
</tip>
<para>
- The <literal>@INCLUDE</> syntax allows inclusion of another file in the
- <filename>.../share/timezonesets/</> directory. Inclusion can be nested,
+ The <literal>@INCLUDE</literal> syntax allows inclusion of another file in the
+ <filename>.../share/timezonesets/</filename> directory. Inclusion can be nested,
to a limited depth.
</para>
<para>
- The <literal>@OVERRIDE</> syntax indicates that subsequent entries in the
+ The <literal>@OVERRIDE</literal> syntax indicates that subsequent entries in the
file can override previous entries (typically, entries obtained from
included files). Without this, conflicting definitions of the same
timezone abbreviation are considered an error.
</para>
<para>
- In an unmodified installation, the file <filename>Default</> contains
+ In an unmodified installation, the file <filename>Default</filename> contains
all the non-conflicting time zone abbreviations for most of the world.
- Additional files <filename>Australia</> and <filename>India</> are
+ Additional files <filename>Australia</filename> and <filename>India</filename> are
provided for those regions: these files first include the
- <literal>Default</> file and then add or modify abbreviations as needed.
+ <literal>Default</literal> file and then add or modify abbreviations as needed.
</para>
<para>
For reference purposes, a standard installation also contains files
- <filename>Africa.txt</>, <filename>America.txt</>, etc, containing
+ <filename>Africa.txt</filename>, <filename>America.txt</filename>, etc, containing
information about every time zone abbreviation known to be in use
according to the IANA timezone database. The zone name
definitions found in these files can be copied and pasted into a custom
configuration file as needed. Note that these files cannot be directly
- referenced as <varname>timezone_abbreviations</> settings, because of
+ referenced as <varname>timezone_abbreviations</varname> settings, because of
the dot embedded in their names.
</para>
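A hypothetical custom file, following the formats above, might include the Default set and then add entries (the file name, abbreviations, and offsets are illustrative):
<programlisting>
# .../share/timezonesets/MyAbbrevs  (hypothetical)
@INCLUDE Default
@OVERRIDE
NST  -12600    # Newfoundland Standard Time, 3.5 hours west of Greenwich
NDT   -9000 D  # Newfoundland Daylight Time
</programlisting>
It could then be activated with SET timezone_abbreviations = 'MyAbbrevs';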
@@ -460,16 +460,16 @@
<para>
Time zone abbreviations defined in the configuration file override
non-timezone meanings built into <productname>PostgreSQL</productname>.
- For example, the <filename>Australia</> configuration file defines
- <literal>SAT</> (for South Australian Standard Time). When this
- file is active, <literal>SAT</> will not be recognized as an abbreviation
+ For example, the <filename>Australia</filename> configuration file defines
+ <literal>SAT</literal> (for South Australian Standard Time). When this
+ file is active, <literal>SAT</literal> will not be recognized as an abbreviation
for Saturday.
</para>
</caution>
<caution>
<para>
- If you modify files in <filename>.../share/timezonesets/</>,
+ If you modify files in <filename>.../share/timezonesets/</filename>,
it is up to you to make backups &mdash; a normal database dump
will not include this directory.
</para>
@@ -492,10 +492,10 @@
<quote>datetime literal</quote>, the <quote>datetime
values</quote> are constrained by the natural rules for dates and
times according to the Gregorian calendar</quote>.
- <productname>PostgreSQL</> follows the SQL
+ <productname>PostgreSQL</productname> follows the SQL
standard's lead by counting dates exclusively in the Gregorian
calendar, even for years before that calendar was in use.
- This rule is known as the <firstterm>proleptic Gregorian calendar</>.
+ This rule is known as the <firstterm>proleptic Gregorian calendar</firstterm>.
</para>
<para>
@@ -569,7 +569,7 @@ $ <userinput>cal 9 1752</userinput>
dominions, not other places.
Since it would be difficult and confusing to try to track the actual
calendars that were in use in various places at various times,
- <productname>PostgreSQL</> does not try, but rather follows the Gregorian
+ <productname>PostgreSQL</productname> does not try, but rather follows the Gregorian
calendar rules for all dates, even though this method is not historically
accurate.
</para>
@@ -597,7 +597,7 @@ $ <userinput>cal 9 1752</userinput>
and probably takes its name from Scaliger's father,
the Italian scholar Julius Caesar Scaliger (1484-1558).
In the Julian Date system, each day has a sequential number, starting
- from JD 0 (which is sometimes called <emphasis>the</> Julian Date).
+ from JD 0 (which is sometimes called <emphasis>the</emphasis> Julian Date).
JD 0 corresponds to 1 January 4713 BC in the Julian calendar, or
24 November 4714 BC in the Gregorian calendar. Julian Date counting
is most often used by astronomers for labeling their nightly observations,
@@ -607,10 +607,10 @@ $ <userinput>cal 9 1752</userinput>
</para>
<para>
- Although <productname>PostgreSQL</> supports Julian Date notation for
+ Although <productname>PostgreSQL</productname> supports Julian Date notation for
input and output of dates (and also uses Julian dates for some internal
datetime calculations), it does not observe the nicety of having dates
- run from noon to noon. <productname>PostgreSQL</> treats a Julian Date
+ run from noon to noon. <productname>PostgreSQL</productname> treats a Julian Date
as running from midnight to midnight.
</para>
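As a brief illustration, Julian Date notation can be used directly in date input, and to_char can produce it:
<programlisting>
SELECT date 'J2451187';                  -- 1999-01-08
SELECT to_char(date '1999-01-08', 'J');  -- 2451187
</programlisting>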
diff --git a/doc/src/sgml/dblink.sgml b/doc/src/sgml/dblink.sgml
index f19c6b19f5..1f17d3ad2d 100644
--- a/doc/src/sgml/dblink.sgml
+++ b/doc/src/sgml/dblink.sgml
@@ -8,8 +8,8 @@
</indexterm>
<para>
- <filename>dblink</> is a module that supports connections to
- other <productname>PostgreSQL</> databases from within a database
+ <filename>dblink</filename> is a module that supports connections to
+ other <productname>PostgreSQL</productname> databases from within a database
session.
</para>
@@ -44,9 +44,9 @@ dblink_connect(text connname, text connstr) returns text
<title>Description</title>
<para>
- <function>dblink_connect()</> establishes a connection to a remote
- <productname>PostgreSQL</> database. The server and database to
- be contacted are identified through a standard <application>libpq</>
+ <function>dblink_connect()</function> establishes a connection to a remote
+ <productname>PostgreSQL</productname> database. The server and database to
+ be contacted are identified through a standard <application>libpq</application>
connection string. Optionally, a name can be assigned to the
connection. Multiple named connections can be open at once, but
only one unnamed connection is permitted at a time. The connection
@@ -81,9 +81,9 @@ dblink_connect(text connname, text connstr) returns text
<varlistentry>
<term><parameter>connstr</parameter></term>
<listitem>
- <para><application>libpq</>-style connection info string, for example
+ <para><application>libpq</application>-style connection info string, for example
<literal>hostaddr=127.0.0.1 port=5432 dbname=mydb user=postgres
- password=mypasswd</>.
+ password=mypasswd</literal>.
For details see <xref linkend="libpq-connstring">.
Alternatively, the name of a foreign server.
</para>
@@ -96,7 +96,7 @@ dblink_connect(text connname, text connstr) returns text
<title>Return Value</title>
<para>
- Returns status, which is always <literal>OK</> (since any error
+ Returns status, which is always <literal>OK</literal> (since any error
causes the function to throw an error instead of returning).
</para>
</refsect1>
@@ -105,15 +105,15 @@ dblink_connect(text connname, text connstr) returns text
<title>Notes</title>
<para>
- Only superusers may use <function>dblink_connect</> to create
+ Only superusers may use <function>dblink_connect</function> to create
non-password-authenticated connections. If non-superusers need this
- capability, use <function>dblink_connect_u</> instead.
+ capability, use <function>dblink_connect_u</function> instead.
</para>
<para>
It is unwise to choose connection names that contain equal signs,
as this opens a risk of confusion with connection info strings
- in other <filename>dblink</> functions.
+ in other <filename>dblink</filename> functions.
</para>
</refsect1>
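A minimal usage sketch (the connection name and parameters are placeholders):
<programlisting>
SELECT dblink_connect('myconn',
       'hostaddr=127.0.0.1 port=5432 dbname=mydb user=postgres password=mypasswd');
-- dblink_connect
-- ----------------
--  OK
</programlisting>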
@@ -208,8 +208,8 @@ dblink_connect_u(text connname, text connstr) returns text
<title>Description</title>
<para>
- <function>dblink_connect_u()</> is identical to
- <function>dblink_connect()</>, except that it will allow non-superusers
+ <function>dblink_connect_u()</function> is identical to
+ <function>dblink_connect()</function>, except that it will allow non-superusers
to connect using any authentication method.
</para>
@@ -217,24 +217,24 @@ dblink_connect_u(text connname, text connstr) returns text
If the remote server selects an authentication method that does not
involve a password, then impersonation and subsequent escalation of
privileges can occur, because the session will appear to have
- originated from the user as which the local <productname>PostgreSQL</>
+ originated from the user as which the local <productname>PostgreSQL</productname>
server runs. Also, even if the remote server does demand a password,
it is possible for the password to be supplied from the server
- environment, such as a <filename>~/.pgpass</> file belonging to the
+ environment, such as a <filename>~/.pgpass</filename> file belonging to the
server's user. This opens not only a risk of impersonation, but the
possibility of exposing a password to an untrustworthy remote server.
- Therefore, <function>dblink_connect_u()</> is initially
- installed with all privileges revoked from <literal>PUBLIC</>,
+ Therefore, <function>dblink_connect_u()</function> is initially
+ installed with all privileges revoked from <literal>PUBLIC</literal>,
making it un-callable except by superusers. In some situations
- it may be appropriate to grant <literal>EXECUTE</> permission for
- <function>dblink_connect_u()</> to specific users who are considered
+ it may be appropriate to grant <literal>EXECUTE</literal> permission for
+ <function>dblink_connect_u()</function> to specific users who are considered
trustworthy, but this should be done with care. It is also recommended
- that any <filename>~/.pgpass</> file belonging to the server's user
- <emphasis>not</> contain any records specifying a wildcard host name.
+ that any <filename>~/.pgpass</filename> file belonging to the server's user
+ <emphasis>not</emphasis> contain any records specifying a wildcard host name.
</para>
<para>
- For further details see <function>dblink_connect()</>.
+ For further details see <function>dblink_connect()</function>.
</para>
</refsect1>
</refentry>
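Where selective access is wanted, a sketch along these lines could be used (the role name is an assumption; both the one- and two-argument forms exist):
<programlisting>
-- Grant the unrestricted connect function only to a trusted role.
GRANT EXECUTE ON FUNCTION dblink_connect_u(text) TO admin_tools;
GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO admin_tools;
</programlisting>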
@@ -265,8 +265,8 @@ dblink_disconnect(text connname) returns text
<title>Description</title>
<para>
- <function>dblink_disconnect()</> closes a connection previously opened
- by <function>dblink_connect()</>. The form with no arguments closes
+ <function>dblink_disconnect()</function> closes a connection previously opened
+ by <function>dblink_connect()</function>. The form with no arguments closes
an unnamed connection.
</para>
</refsect1>
@@ -290,7 +290,7 @@ dblink_disconnect(text connname) returns text
<title>Return Value</title>
<para>
- Returns status, which is always <literal>OK</> (since any error
+ Returns status, which is always <literal>OK</literal> (since any error
causes the function to throw an error instead of returning).
</para>
</refsect1>
@@ -341,15 +341,15 @@ dblink(text sql [, bool fail_on_error]) returns setof record
<title>Description</title>
<para>
- <function>dblink</> executes a query (usually a <command>SELECT</>,
+ <function>dblink</function> executes a query (usually a <command>SELECT</command>,
but it can be any SQL statement that returns rows) in a remote database.
</para>
<para>
- When two <type>text</> arguments are given, the first one is first
+ When two <type>text</type> arguments are given, the first one is first
looked up as a persistent connection's name; if found, the command
is executed on that connection. If not found, the first argument
- is treated as a connection info string as for <function>dblink_connect</>,
+ is treated as a connection info string as for <function>dblink_connect</function>,
and the indicated connection is made just for the duration of this command.
</para>
</refsect1>
@@ -373,7 +373,7 @@ dblink(text sql [, bool fail_on_error]) returns setof record
<listitem>
<para>
A connection info string, as previously described for
- <function>dblink_connect</>.
+ <function>dblink_connect</function>.
</para>
</listitem>
</varlistentry>
@@ -383,7 +383,7 @@ dblink(text sql [, bool fail_on_error]) returns setof record
<listitem>
<para>
The SQL query that you wish to execute in the remote database,
- for example <literal>select * from foo</>.
+ for example <literal>select * from foo</literal>.
</para>
</listitem>
</varlistentry>
@@ -407,11 +407,11 @@ dblink(text sql [, bool fail_on_error]) returns setof record
<para>
The function returns the row(s) produced by the query. Since
- <function>dblink</> can be used with any query, it is declared
- to return <type>record</>, rather than specifying any particular
+ <function>dblink</function> can be used with any query, it is declared
+ to return <type>record</type>, rather than specifying any particular
set of columns. This means that you must specify the expected
set of columns in the calling query &mdash; otherwise
- <productname>PostgreSQL</> would not know what to expect.
+ <productname>PostgreSQL</productname> would not know what to expect.
Here is an example:
<programlisting>
@@ -421,20 +421,20 @@ SELECT *
WHERE proname LIKE 'bytea%';
</programlisting>
- The <quote>alias</> part of the <literal>FROM</> clause must
+ The <quote>alias</quote> part of the <literal>FROM</literal> clause must
specify the column names and types that the function will return.
(Specifying column names in an alias is actually standard SQL
- syntax, but specifying column types is a <productname>PostgreSQL</>
+ syntax, but specifying column types is a <productname>PostgreSQL</productname>
extension.) This allows the system to understand what
- <literal>*</> should expand to, and what <structname>proname</>
- in the <literal>WHERE</> clause refers to, in advance of trying
+ <literal>*</literal> should expand to, and what <structname>proname</structname>
+ in the <literal>WHERE</literal> clause refers to, in advance of trying
to execute the function. At run time, an error will be thrown
if the actual query result from the remote database does not
- have the same number of columns shown in the <literal>FROM</> clause.
- The column names need not match, however, and <function>dblink</>
+ have the same number of columns shown in the <literal>FROM</literal> clause.
+ The column names need not match, however, and <function>dblink</function>
does not insist on exact type matches either. It will succeed
so long as the returned data strings are valid input for the
- column type declared in the <literal>FROM</> clause.
+ column type declared in the <literal>FROM</literal> clause.
</para>
</refsect1>
@@ -442,7 +442,7 @@ SELECT *
<title>Notes</title>
<para>
- A convenient way to use <function>dblink</> with predetermined
+ A convenient way to use <function>dblink</function> with predetermined
queries is to create a view.
This allows the column type information to be buried in the view,
instead of having to spell it out in every query. For example,
@@ -559,15 +559,15 @@ dblink_exec(text sql [, bool fail_on_error]) returns text
<title>Description</title>
<para>
- <function>dblink_exec</> executes a command (that is, any SQL statement
+ <function>dblink_exec</function> executes a command (that is, any SQL statement
that doesn't return rows) in a remote database.
</para>
<para>
- When two <type>text</> arguments are given, the first one is first
+ When two <type>text</type> arguments are given, the first one is first
looked up as a persistent connection's name; if found, the command
is executed on that connection. If not found, the first argument
- is treated as a connection info string as for <function>dblink_connect</>,
+ is treated as a connection info string as for <function>dblink_connect</function>,
and the indicated connection is made just for the duration of this command.
</para>
</refsect1>
@@ -591,7 +591,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text
<listitem>
<para>
A connection info string, as previously described for
- <function>dblink_connect</>.
+ <function>dblink_connect</function>.
</para>
</listitem>
</varlistentry>
@@ -602,7 +602,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text
<para>
The SQL command that you wish to execute in the remote database,
for example
- <literal>insert into foo values(0,'a','{"a0","b0","c0"}')</>.
+ <literal>insert into foo values(0,'a','{"a0","b0","c0"}')</literal>.
</para>
</listitem>
</varlistentry>
@@ -614,7 +614,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text
If true (the default when omitted) then an error thrown on the
remote side of the connection causes an error to also be thrown
locally. If false, the remote error is locally reported as a NOTICE,
- and the function's return value is set to <literal>ERROR</>.
+ and the function's return value is set to <literal>ERROR</literal>.
</para>
</listitem>
</varlistentry>
@@ -625,7 +625,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text
<title>Return Value</title>
<para>
- Returns status, either the command's status string or <literal>ERROR</>.
+ Returns status, either the command's status string or <literal>ERROR</literal>.
</para>
</refsect1>
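An illustrative call, assuming an open connection named 'myconn' and a remote table foo (dollar quoting avoids escaping the nested quotes):
<programlisting>
SELECT dblink_exec('myconn', $$insert into foo values(0,'a','{"a0","b0","c0"}')$$);
-- dblink_exec
-- -------------
--  INSERT 0 1
</programlisting>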
@@ -695,9 +695,9 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret
<title>Description</title>
<para>
- <function>dblink_open()</> opens a cursor in a remote database.
+ <function>dblink_open()</function> opens a cursor in a remote database.
The cursor can subsequently be manipulated with
- <function>dblink_fetch()</> and <function>dblink_close()</>.
+ <function>dblink_fetch()</function> and <function>dblink_close()</function>.
</para>
</refsect1>
@@ -728,8 +728,8 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret
<term><parameter>sql</parameter></term>
<listitem>
<para>
- The <command>SELECT</> statement that you wish to execute in the remote
- database, for example <literal>select * from pg_class</>.
+ The <command>SELECT</command> statement that you wish to execute in the remote
+ database, for example <literal>select * from pg_class</literal>.
</para>
</listitem>
</varlistentry>
@@ -741,7 +741,7 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret
If true (the default when omitted) then an error thrown on the
remote side of the connection causes an error to also be thrown
locally. If false, the remote error is locally reported as a NOTICE,
- and the function's return value is set to <literal>ERROR</>.
+ and the function's return value is set to <literal>ERROR</literal>.
</para>
</listitem>
</varlistentry>
@@ -752,7 +752,7 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret
<title>Return Value</title>
<para>
- Returns status, either <literal>OK</> or <literal>ERROR</>.
+ Returns status, either <literal>OK</literal> or <literal>ERROR</literal>.
</para>
</refsect1>
@@ -761,16 +761,16 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret
<para>
Since a cursor can only persist within a transaction,
- <function>dblink_open</> starts an explicit transaction block
- (<command>BEGIN</>) on the remote side, if the remote side was
+ <function>dblink_open</function> starts an explicit transaction block
+ (<command>BEGIN</command>) on the remote side, if the remote side was
not already within a transaction. This transaction will be
- closed again when the matching <function>dblink_close</> is
+ closed again when the matching <function>dblink_close</function> is
executed. Note that if
- you use <function>dblink_exec</> to change data between
- <function>dblink_open</> and <function>dblink_close</>,
- and then an error occurs or you use <function>dblink_disconnect</> before
- <function>dblink_close</>, your change <emphasis>will be
- lost</> because the transaction will be aborted.
+ you use <function>dblink_exec</function> to change data between
+ <function>dblink_open</function> and <function>dblink_close</function>,
+ and then an error occurs or you use <function>dblink_disconnect</function> before
+ <function>dblink_close</function>, your change <emphasis>will be
+ lost</emphasis> because the transaction will be aborted.
</para>
</refsect1>
@@ -819,8 +819,8 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error])
<title>Description</title>
<para>
- <function>dblink_fetch</> fetches rows from a cursor previously
- established by <function>dblink_open</>.
+ <function>dblink_fetch</function> fetches rows from a cursor previously
+ established by <function>dblink_open</function>.
</para>
</refsect1>
@@ -851,7 +851,7 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error])
<term><parameter>howmany</parameter></term>
<listitem>
<para>
- The maximum number of rows to retrieve. The next <parameter>howmany</>
+ The maximum number of rows to retrieve. The next <parameter>howmany</parameter>
rows are fetched, starting at the current cursor position, moving
forward. Once the cursor has reached its end, no more rows are produced.
</para>
@@ -878,7 +878,7 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error])
<para>
The function returns the row(s) fetched from the cursor. To use this
function, you will need to specify the expected set of columns,
- as previously discussed for <function>dblink</>.
+ as previously discussed for <function>dblink</function>.
</para>
</refsect1>
@@ -887,11 +887,11 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error])
<para>
On a mismatch between the number of return columns specified in the
- <literal>FROM</> clause, and the actual number of columns returned by the
+ <literal>FROM</literal> clause, and the actual number of columns returned by the
remote cursor, an error will be thrown. In this event, the remote cursor
is still advanced by as many rows as it would have been if the error had
not occurred. The same is true for any other error occurring in the local
- query after the remote <command>FETCH</> has been done.
+ query after the remote <command>FETCH</command> has been done.
</para>
</refsect1>
@@ -972,8 +972,8 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text
<title>Description</title>
<para>
- <function>dblink_close</> closes a cursor previously opened with
- <function>dblink_open</>.
+ <function>dblink_close</function> closes a cursor previously opened with
+ <function>dblink_open</function>.
</para>
</refsect1>
@@ -1007,7 +1007,7 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text
If true (the default when omitted) then an error thrown on the
remote side of the connection causes an error to also be thrown
locally. If false, the remote error is locally reported as a NOTICE,
- and the function's return value is set to <literal>ERROR</>.
+ and the function's return value is set to <literal>ERROR</literal>.
</para>
</listitem>
</varlistentry>
@@ -1018,7 +1018,7 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text
<title>Return Value</title>
<para>
- Returns status, either <literal>OK</> or <literal>ERROR</>.
+ Returns status, either <literal>OK</literal> or <literal>ERROR</literal>.
</para>
</refsect1>
@@ -1026,9 +1026,9 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text
<title>Notes</title>
<para>
- If <function>dblink_open</> started an explicit transaction block,
+ If <function>dblink_open</function> started an explicit transaction block,
and this is the last remaining open cursor in this connection,
- <function>dblink_close</> will issue the matching <command>COMMIT</>.
+ <function>dblink_close</function> will issue the matching <command>COMMIT</command>.
</para>
</refsect1>
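Taken together, the cursor functions are typically used in a sequence like this sketch (assumes an open connection named 'myconn'; the column definition list is required, as for dblink):
<programlisting>
SELECT dblink_open('myconn', 'cur', 'select proname, prosrc from pg_proc');
SELECT * FROM dblink_fetch('myconn', 'cur', 5) AS t(proname name, prosrc text);
SELECT * FROM dblink_fetch('myconn', 'cur', 5) AS t(proname name, prosrc text);
SELECT dblink_close('myconn', 'cur');
</programlisting>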
@@ -1082,8 +1082,8 @@ dblink_get_connections() returns text[]
<title>Description</title>
<para>
- <function>dblink_get_connections</> returns an array of the names
- of all open named <filename>dblink</> connections.
+ <function>dblink_get_connections</function> returns an array of the names
+ of all open named <filename>dblink</filename> connections.
</para>
</refsect1>
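For example (connection names are placeholders):
<programlisting>
SELECT dblink_get_connections();
-- dblink_get_connections
-- ------------------------
--  {myconn,otherconn}
</programlisting>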
@@ -1127,7 +1127,7 @@ dblink_error_message(text connname) returns text
<title>Description</title>
<para>
- <function>dblink_error_message</> fetches the most recent remote
+ <function>dblink_error_message</function> fetches the most recent remote
error message for a given connection.
</para>
</refsect1>
@@ -1190,7 +1190,7 @@ dblink_send_query(text connname, text sql) returns int
<title>Description</title>
<para>
- <function>dblink_send_query</> sends a query to be executed
+ <function>dblink_send_query</function> sends a query to be executed
asynchronously, that is, without immediately waiting for the result.
There must not be an async query already in progress on the
connection.
@@ -1198,10 +1198,10 @@ dblink_send_query(text connname, text sql) returns int
<para>
After successfully dispatching an async query, completion status
- can be checked with <function>dblink_is_busy</>, and the results
- are ultimately collected with <function>dblink_get_result</>.
+ can be checked with <function>dblink_is_busy</function>, and the results
+ are ultimately collected with <function>dblink_get_result</function>.
It is also possible to attempt to cancel an active async query
- using <function>dblink_cancel_query</>.
+ using <function>dblink_cancel_query</function>.
</para>
</refsect1>
@@ -1223,7 +1223,7 @@ dblink_send_query(text connname, text sql) returns int
<listitem>
<para>
The SQL statement that you wish to execute in the remote database,
- for example <literal>select * from pg_class</>.
+ for example <literal>select * from pg_class</literal>.
</para>
</listitem>
</varlistentry>
@@ -1272,7 +1272,7 @@ dblink_is_busy(text connname) returns int
<title>Description</title>
<para>
- <function>dblink_is_busy</> tests whether an async query is in progress.
+ <function>dblink_is_busy</function> tests whether an async query is in progress.
</para>
</refsect1>
@@ -1297,7 +1297,7 @@ dblink_is_busy(text connname) returns int
<para>
Returns 1 if connection is busy, 0 if it is not busy.
If this function returns 0, it is guaranteed that
- <function>dblink_get_result</> will not block.
+ <function>dblink_get_result</function> will not block.
</para>
</refsect1>
@@ -1336,10 +1336,10 @@ dblink_get_notify(text connname) returns setof (notify_name text, be_pid int, ex
<title>Description</title>
<para>
- <function>dblink_get_notify</> retrieves notifications on either
+ <function>dblink_get_notify</function> retrieves notifications on either
the unnamed connection, or on a named connection if specified.
- To receive notifications via dblink, <function>LISTEN</> must
- first be issued, using <function>dblink_exec</>.
+ To receive notifications via dblink, <function>LISTEN</function> must
+ first be issued, using <function>dblink_exec</function>.
For details see <xref linkend="sql-listen"> and <xref linkend="sql-notify">.
</para>
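A sketch of the sequence (channel and connection names are placeholders):
<programlisting>
SELECT dblink_exec('myconn', 'LISTEN my_channel');
SELECT * FROM dblink_get_notify('myconn');
--  notify_name | be_pid | extra
-- -------------+--------+-------
-- (rows appear after NOTIFY my_channel is issued on the remote server)
</programlisting>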
@@ -1417,9 +1417,9 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record
<title>Description</title>
<para>
- <function>dblink_get_result</> collects the results of an
- asynchronous query previously sent with <function>dblink_send_query</>.
- If the query is not already completed, <function>dblink_get_result</>
+ <function>dblink_get_result</function> collects the results of an
+ asynchronous query previously sent with <function>dblink_send_query</function>.
+ If the query is not already completed, <function>dblink_get_result</function>
will wait until it is.
</para>
</refsect1>
@@ -1458,14 +1458,14 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record
For an async query (that is, a SQL statement returning rows),
the function returns the row(s) produced by the query. To use this
function, you will need to specify the expected set of columns,
- as previously discussed for <function>dblink</>.
+ as previously discussed for <function>dblink</function>.
</para>
<para>
For an async command (that is, a SQL statement not returning rows),
the function returns a single row with a single text column containing
the command's status string. It is still necessary to specify that
- the result will have a single text column in the calling <literal>FROM</>
+ the result will have a single text column in the calling <literal>FROM</literal>
clause.
</para>
</refsect1>
@@ -1474,22 +1474,22 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record
<title>Notes</title>
<para>
- This function <emphasis>must</> be called if
- <function>dblink_send_query</> returned 1.
+ This function <emphasis>must</emphasis> be called if
+ <function>dblink_send_query</function> returned 1.
It must be called once for each query
sent, and one additional time to obtain an empty set result,
before the connection can be used again.
</para>
<para>
- When using <function>dblink_send_query</> and
- <function>dblink_get_result</>, <application>dblink</> fetches the entire
+ When using <function>dblink_send_query</function> and
+ <function>dblink_get_result</function>, <application>dblink</application> fetches the entire
remote query result before returning any of it to the local query
processor. If the query returns a large number of rows, this can result
in transient memory bloat in the local session. It may be better to open
- such a query as a cursor with <function>dblink_open</> and then fetch a
+ such a query as a cursor with <function>dblink_open</function> and then fetch a
manageable number of rows at a time. Alternatively, use plain
- <function>dblink()</>, which avoids memory bloat by spooling large result
+ <function>dblink()</function>, which avoids memory bloat by spooling large result
sets to disk.
</para>
</refsect1>
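A sketch of the asynchronous workflow (connection name and query are placeholders; note the column definition list and the extra call that drains the result before the connection can be reused):
<programlisting>
SELECT dblink_send_query('myconn', 'select relname, relkind from pg_class');  -- 1 if dispatched
SELECT dblink_is_busy('myconn');                                              -- poll until 0
SELECT * FROM dblink_get_result('myconn') AS t(relname name, relkind "char");
SELECT * FROM dblink_get_result('myconn') AS t(relname name, relkind "char"); -- empty set
</programlisting>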
@@ -1581,13 +1581,13 @@ dblink_cancel_query(text connname) returns text
<title>Description</title>
<para>
- <function>dblink_cancel_query</> attempts to cancel any query that
+ <function>dblink_cancel_query</function> attempts to cancel any query that
is in progress on the named connection. Note that this is not
certain to succeed (since, for example, the remote query might
already have finished). A cancel request simply improves the
odds that the query will fail soon. You must still complete the
normal query protocol, for example by calling
- <function>dblink_get_result</>.
+ <function>dblink_get_result</function>.
</para>
</refsect1>
@@ -1610,7 +1610,7 @@ dblink_cancel_query(text connname) returns text
<title>Return Value</title>
<para>
- Returns <literal>OK</> if the cancel request has been sent, or
+ Returns <literal>OK</literal> if the cancel request has been sent, or
the text of an error message on failure.
</para>
</refsect1>
@@ -1651,7 +1651,7 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results
<title>Description</title>
<para>
- <function>dblink_get_pkey</> provides information about the primary
+ <function>dblink_get_pkey</function> provides information about the primary
key of a relation in the local database. This is sometimes useful
in generating queries to be sent to remote databases.
</para>
@@ -1665,10 +1665,10 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results
<term><parameter>relname</parameter></term>
<listitem>
<para>
- Name of a local relation, for example <literal>foo</> or
- <literal>myschema.mytab</>. Include double quotes if the
+ Name of a local relation, for example <literal>foo</literal> or
+ <literal>myschema.mytab</literal>. Include double quotes if the
name is mixed-case or contains special characters, for
- example <literal>"FooBar"</>; without quotes, the string
+ example <literal>"FooBar"</literal>; without quotes, the string
will be folded to lower case.
</para>
</listitem>
@@ -1687,7 +1687,7 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results
CREATE TYPE dblink_pkey_results AS (position int, colname text);
</programlisting>
- The <literal>position</> column simply runs from 1 to <replaceable>N</>;
+ The <literal>position</literal> column simply runs from 1 to <replaceable>N</replaceable>;
it is the number of the field within the primary key, not the number
within the table's columns.
</para>
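For example (the table is a placeholder with a single-column primary key):
<programlisting>
SELECT * FROM dblink_get_pkey('mytab');
--  position | colname
-- ----------+---------
--         1 | id
</programlisting>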
@@ -1748,10 +1748,10 @@ dblink_build_sql_insert(text relname,
<title>Description</title>
<para>
- <function>dblink_build_sql_insert</> can be useful in doing selective
+ <function>dblink_build_sql_insert</function> can be useful in doing selective
replication of a local table to a remote database. It selects a row
from the local table based on primary key, and then builds a SQL
- <command>INSERT</> command that will duplicate that row, but with
+ <command>INSERT</command> command that will duplicate that row, but with
the primary key values replaced by the values in the last argument.
(To make an exact copy of the row, just specify the same values for
the last two arguments.)
@@ -1766,10 +1766,10 @@ dblink_build_sql_insert(text relname,
<term><parameter>relname</parameter></term>
<listitem>
<para>
- Name of a local relation, for example <literal>foo</> or
- <literal>myschema.mytab</>. Include double quotes if the
+ Name of a local relation, for example <literal>foo</literal> or
+ <literal>myschema.mytab</literal>. Include double quotes if the
name is mixed-case or contains special characters, for
- example <literal>"FooBar"</>; without quotes, the string
+ example <literal>"FooBar"</literal>; without quotes, the string
will be folded to lower case.
</para>
</listitem>
@@ -1780,7 +1780,7 @@ dblink_build_sql_insert(text relname,
<listitem>
<para>
Attribute numbers (1-based) of the primary key fields,
- for example <literal>1 2</>.
+ for example <literal>1 2</literal>.
</para>
</listitem>
</varlistentry>
@@ -1811,7 +1811,7 @@ dblink_build_sql_insert(text relname,
<listitem>
<para>
Values of the primary key fields to be placed in the resulting
- <command>INSERT</> command. Each field is represented in text form.
+ <command>INSERT</command> command. Each field is represented in text form.
</para>
</listitem>
</varlistentry>
@@ -1828,10 +1828,10 @@ dblink_build_sql_insert(text relname,
<title>Notes</title>
<para>
- As of <productname>PostgreSQL</> 9.0, the attribute numbers in
+ As of <productname>PostgreSQL</productname> 9.0, the attribute numbers in
<parameter>primary_key_attnums</parameter> are interpreted as logical
column numbers, corresponding to the column's position in
- <literal>SELECT * FROM relname</>. Previous versions interpreted the
+ <literal>SELECT * FROM relname</literal>. Previous versions interpreted the
numbers as physical column positions. There is a difference if any
column(s) to the left of the indicated column have been dropped during
the lifetime of the table.
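An illustrative call (table and key values are placeholders): select the row of foo whose two-column primary key is (1, 'a') and build an INSERT with the key replaced by (1, 'b'):
<programlisting>
SELECT dblink_build_sql_insert('foo', '1 2', 2, '{"1", "a"}', '{"1", "b"}');
</programlisting>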
@@ -1881,9 +1881,9 @@ dblink_build_sql_delete(text relname,
<title>Description</title>
<para>
- <function>dblink_build_sql_delete</> can be useful in doing selective
+ <function>dblink_build_sql_delete</function> can be useful in doing selective
replication of a local table to a remote database. It builds a SQL
- <command>DELETE</> command that will delete the row with the given
+ <command>DELETE</command> command that will delete the row with the given
primary key values.
</para>
</refsect1>
@@ -1896,10 +1896,10 @@ dblink_build_sql_delete(text relname,
<term><parameter>relname</parameter></term>
<listitem>
<para>
- Name of a local relation, for example <literal>foo</> or
- <literal>myschema.mytab</>. Include double quotes if the
+ Name of a local relation, for example <literal>foo</literal> or
+ <literal>myschema.mytab</literal>. Include double quotes if the
name is mixed-case or contains special characters, for
- example <literal>"FooBar"</>; without quotes, the string
+ example <literal>"FooBar"</literal>; without quotes, the string
will be folded to lower case.
</para>
</listitem>
@@ -1910,7 +1910,7 @@ dblink_build_sql_delete(text relname,
<listitem>
<para>
Attribute numbers (1-based) of the primary key fields,
- for example <literal>1 2</>.
+ for example <literal>1 2</literal>.
</para>
</listitem>
</varlistentry>
@@ -1929,7 +1929,7 @@ dblink_build_sql_delete(text relname,
<listitem>
<para>
Values of the primary key fields to be used in the resulting
- <command>DELETE</> command. Each field is represented in text form.
+ <command>DELETE</command> command. Each field is represented in text form.
</para>
</listitem>
</varlistentry>
@@ -1946,10 +1946,10 @@ dblink_build_sql_delete(text relname,
<title>Notes</title>
<para>
- As of <productname>PostgreSQL</> 9.0, the attribute numbers in
+ As of <productname>PostgreSQL</productname> 9.0, the attribute numbers in
<parameter>primary_key_attnums</parameter> are interpreted as logical
column numbers, corresponding to the column's position in
- <literal>SELECT * FROM relname</>. Previous versions interpreted the
+ <literal>SELECT * FROM relname</literal>. Previous versions interpreted the
numbers as physical column positions. There is a difference if any
column(s) to the left of the indicated column have been dropped during
the lifetime of the table.
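For example (placeholders as above; the quoted name shows the mixed-case handling described under relname):
<programlisting>
SELECT dblink_build_sql_delete('"MyFoo"', '1 2', 2, '{"1", "b"}');
</programlisting>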
@@ -2000,15 +2000,15 @@ dblink_build_sql_update(text relname,
<title>Description</title>
<para>
- <function>dblink_build_sql_update</> can be useful in doing selective
+ <function>dblink_build_sql_update</function> can be useful in doing selective
replication of a local table to a remote database. It selects a row
from the local table based on primary key, and then builds a SQL
- <command>UPDATE</> command that will duplicate that row, but with
+ <command>UPDATE</command> command that will duplicate that row, but with
the primary key values replaced by the values in the last argument.
(To make an exact copy of the row, just specify the same values for
- the last two arguments.) The <command>UPDATE</> command always assigns
+ the last two arguments.) The <command>UPDATE</command> command always assigns
all fields of the row &mdash; the main difference between this and
- <function>dblink_build_sql_insert</> is that it's assumed that
+ <function>dblink_build_sql_insert</function> is that it's assumed that
the target row already exists in the remote table.
</para>
</refsect1>
@@ -2021,10 +2021,10 @@ dblink_build_sql_update(text relname,
<term><parameter>relname</parameter></term>
<listitem>
<para>
- Name of a local relation, for example <literal>foo</> or
- <literal>myschema.mytab</>. Include double quotes if the
+ Name of a local relation, for example <literal>foo</literal> or
+ <literal>myschema.mytab</literal>. Include double quotes if the
name is mixed-case or contains special characters, for
- example <literal>"FooBar"</>; without quotes, the string
+ example <literal>"FooBar"</literal>; without quotes, the string
will be folded to lower case.
</para>
</listitem>
@@ -2035,7 +2035,7 @@ dblink_build_sql_update(text relname,
<listitem>
<para>
Attribute numbers (1-based) of the primary key fields,
- for example <literal>1 2</>.
+ for example <literal>1 2</literal>.
</para>
</listitem>
</varlistentry>
@@ -2066,7 +2066,7 @@ dblink_build_sql_update(text relname,
<listitem>
<para>
Values of the primary key fields to be placed in the resulting
- <command>UPDATE</> command. Each field is represented in text form.
+ <command>UPDATE</command> command. Each field is represented in text form.
</para>
</listitem>
</varlistentry>
@@ -2083,10 +2083,10 @@ dblink_build_sql_update(text relname,
<title>Notes</title>
<para>
- As of <productname>PostgreSQL</> 9.0, the attribute numbers in
+ As of <productname>PostgreSQL</productname> 9.0, the attribute numbers in
<parameter>primary_key_attnums</parameter> are interpreted as logical
column numbers, corresponding to the column's position in
- <literal>SELECT * FROM relname</>. Previous versions interpreted the
+ <literal>SELECT * FROM relname</literal>. Previous versions interpreted the
numbers as physical column positions. There is a difference if any
column(s) to the left of the indicated column have been dropped during
the lifetime of the table.
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index b05a9c2150..817db92af2 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -149,7 +149,7 @@ DROP TABLE products;
Nevertheless, it is common in SQL script files to unconditionally
try to drop each table before creating it, ignoring any error
messages, so that the script works whether or not the table exists.
- (If you like, you can use the <literal>DROP TABLE IF EXISTS</> variant
+ (If you like, you can use the <literal>DROP TABLE IF EXISTS</literal> variant
to avoid the error messages, but this is not standard SQL.)
</para>
@@ -207,9 +207,9 @@ CREATE TABLE products (
The default value can be an expression, which will be
evaluated whenever the default value is inserted
(<emphasis>not</emphasis> when the table is created). A common example
- is for a <type>timestamp</type> column to have a default of <literal>CURRENT_TIMESTAMP</>,
+ is for a <type>timestamp</type> column to have a default of <literal>CURRENT_TIMESTAMP</literal>,
so that it gets set to the time of row insertion. Another common
- example is generating a <quote>serial number</> for each row.
+ example is generating a <quote>serial number</quote> for each row.
In <productname>PostgreSQL</productname> this is typically done by
something like:
<programlisting>
@@ -218,8 +218,8 @@ CREATE TABLE products (
...
);
</programlisting>
- where the <literal>nextval()</> function supplies successive values
- from a <firstterm>sequence object</> (see <xref
+ where the <literal>nextval()</literal> function supplies successive values
+ from a <firstterm>sequence object</firstterm> (see <xref
linkend="functions-sequence">). This arrangement is sufficiently common
that there's a special shorthand for it:
<programlisting>
@@ -228,7 +228,7 @@ CREATE TABLE products (
...
);
</programlisting>
- The <literal>SERIAL</> shorthand is discussed further in <xref
+ The <literal>SERIAL</literal> shorthand is discussed further in <xref
linkend="datatype-serial">.
</para>
</sect1>
@@ -385,7 +385,7 @@ CREATE TABLE products (
CHECK (price &gt; 0),
discounted_price numeric,
CHECK (discounted_price &gt; 0),
- <emphasis>CONSTRAINT valid_discount</> CHECK (price &gt; discounted_price)
+ <emphasis>CONSTRAINT valid_discount</emphasis> CHECK (price &gt; discounted_price)
);
</programlisting>
</para>
@@ -623,7 +623,7 @@ CREATE TABLE example (
<para>
Adding a primary key will automatically create a unique B-tree index
on the column or group of columns listed in the primary key, and will
- force the column(s) to be marked <literal>NOT NULL</>.
+ force the column(s) to be marked <literal>NOT NULL</literal>.
</para>
<para>
@@ -828,7 +828,7 @@ CREATE TABLE order_items (
(The essential difference between these two choices is that
<literal>NO ACTION</literal> allows the check to be deferred until
later in the transaction, whereas <literal>RESTRICT</literal> does not.)
- <literal>CASCADE</> specifies that when a referenced row is deleted,
+ <literal>CASCADE</literal> specifies that when a referenced row is deleted,
row(s) referencing it should be automatically deleted as well.
There are two other options:
<literal>SET NULL</literal> and <literal>SET DEFAULT</literal>.
@@ -845,19 +845,19 @@ CREATE TABLE order_items (
Analogous to <literal>ON DELETE</literal> there is also
<literal>ON UPDATE</literal> which is invoked when a referenced
column is changed (updated). The possible actions are the same.
- In this case, <literal>CASCADE</> means that the updated values of the
+ In this case, <literal>CASCADE</literal> means that the updated values of the
referenced column(s) should be copied into the referencing row(s).
</para>
<para>
Normally, a referencing row need not satisfy the foreign key constraint
- if any of its referencing columns are null. If <literal>MATCH FULL</>
+ if any of its referencing columns are null. If <literal>MATCH FULL</literal>
is added to the foreign key declaration, a referencing row escapes
satisfying the constraint only if all its referencing columns are null
(so a mix of null and non-null values is guaranteed to fail a
- <literal>MATCH FULL</> constraint). If you don't want referencing rows
+ <literal>MATCH FULL</literal> constraint). If you don't want referencing rows
to be able to avoid satisfying the foreign key constraint, declare the
- referencing column(s) as <literal>NOT NULL</>.
+ referencing column(s) as <literal>NOT NULL</literal>.
</para>
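To illustrate (table and column names are invented for this sketch):
<programlisting>
CREATE TABLE parent (
    a integer,
    b integer,
    PRIMARY KEY (a, b)
);
CREATE TABLE child (
    a integer,
    b integer,
    FOREIGN KEY (a, b) REFERENCES parent (a, b) MATCH FULL
);
INSERT INTO child VALUES (NULL, NULL);  -- allowed: all referencing columns are null
INSERT INTO child VALUES (1, NULL);     -- fails under MATCH FULL
</programlisting>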
<para>
@@ -909,7 +909,7 @@ CREATE TABLE circles (
<para>
See also <link linkend="SQL-CREATETABLE-EXCLUDE"><command>CREATE
- TABLE ... CONSTRAINT ... EXCLUDE</></link> for details.
+ TABLE ... CONSTRAINT ... EXCLUDE</command></link> for details.
</para>
<para>
@@ -923,7 +923,7 @@ CREATE TABLE circles (
<title>System Columns</title>
<para>
- Every table has several <firstterm>system columns</> that are
+ Every table has several <firstterm>system columns</firstterm> that are
implicitly defined by the system. Therefore, these names cannot be
used as names of user-defined columns. (Note that these
restrictions are separate from whether the name is a key word or
@@ -939,7 +939,7 @@ CREATE TABLE circles (
<variablelist>
<varlistentry>
- <term><structfield>oid</></term>
+ <term><structfield>oid</structfield></term>
<listitem>
<para>
<indexterm>
@@ -957,7 +957,7 @@ CREATE TABLE circles (
</varlistentry>
<varlistentry>
- <term><structfield>tableoid</></term>
+ <term><structfield>tableoid</structfield></term>
<listitem>
<indexterm>
<primary>tableoid</primary>
@@ -976,7 +976,7 @@ CREATE TABLE circles (
</varlistentry>
<varlistentry>
- <term><structfield>xmin</></term>
+ <term><structfield>xmin</structfield></term>
<listitem>
<indexterm>
<primary>xmin</primary>
@@ -992,7 +992,7 @@ CREATE TABLE circles (
</varlistentry>
<varlistentry>
- <term><structfield>cmin</></term>
+ <term><structfield>cmin</structfield></term>
<listitem>
<indexterm>
<primary>cmin</primary>
@@ -1006,7 +1006,7 @@ CREATE TABLE circles (
</varlistentry>
<varlistentry>
- <term><structfield>xmax</></term>
+ <term><structfield>xmax</structfield></term>
<listitem>
<indexterm>
<primary>xmax</primary>
@@ -1023,7 +1023,7 @@ CREATE TABLE circles (
</varlistentry>
<varlistentry>
- <term><structfield>cmax</></term>
+ <term><structfield>cmax</structfield></term>
<listitem>
<indexterm>
<primary>cmax</primary>
@@ -1036,7 +1036,7 @@ CREATE TABLE circles (
</varlistentry>
<varlistentry>
- <term><structfield>ctid</></term>
+ <term><structfield>ctid</structfield></term>
<listitem>
<indexterm>
<primary>ctid</primary>
@@ -1047,7 +1047,7 @@ CREATE TABLE circles (
although the <structfield>ctid</structfield> can be used to
locate the row version very quickly, a row's
<structfield>ctid</structfield> will change if it is
- updated or moved by <command>VACUUM FULL</>. Therefore
+ updated or moved by <command>VACUUM FULL</command>. Therefore
<structfield>ctid</structfield> is useless as a long-term row
identifier. The OID, or even better a user-defined serial
number, should be used to identify logical rows.
@@ -1074,7 +1074,7 @@ CREATE TABLE circles (
a unique constraint (or unique index) exists, the system takes
care not to generate an OID matching an already-existing row.
(Of course, this is only possible if the table contains fewer
- than 2<superscript>32</> (4 billion) rows, and in practice the
+ than 2<superscript>32</superscript> (4 billion) rows, and in practice the
table size had better be much less than that, or performance
might suffer.)
</para>
@@ -1082,7 +1082,7 @@ CREATE TABLE circles (
<listitem>
<para>
OIDs should never be assumed to be unique across tables; use
- the combination of <structfield>tableoid</> and row OID if you
+ the combination of <structfield>tableoid</structfield> and row OID if you
need a database-wide identifier.
</para>
</listitem>
@@ -1090,7 +1090,7 @@ CREATE TABLE circles (
<para>
Of course, the tables in question must be created <literal>WITH
OIDS</literal>. As of <productname>PostgreSQL</productname> 8.1,
- <literal>WITHOUT OIDS</> is the default.
+ <literal>WITHOUT OIDS</literal> is the default.
</para>
</listitem>
</itemizedlist>
@@ -1107,7 +1107,7 @@ CREATE TABLE circles (
<para>
Command identifiers are also 32-bit quantities. This creates a hard limit
- of 2<superscript>32</> (4 billion) <acronym>SQL</acronym> commands
+ of 2<superscript>32</superscript> (4 billion) <acronym>SQL</acronym> commands
within a single transaction. In practice this limit is not a
problem &mdash; note that the limit is on the number of
<acronym>SQL</acronym> commands, not the number of rows processed.
@@ -1186,7 +1186,7 @@ CREATE TABLE circles (
ALTER TABLE products ADD COLUMN description text;
</programlisting>
The new column is initially filled with whatever default
- value is given (null if you don't specify a <literal>DEFAULT</> clause).
+ value is given (null if you don't specify a <literal>DEFAULT</literal> clause).
</para>
<para>
@@ -1196,9 +1196,9 @@ ALTER TABLE products ADD COLUMN description text;
ALTER TABLE products ADD COLUMN description text CHECK (description &lt;&gt; '');
</programlisting>
In fact all the options that can be applied to a column description
- in <command>CREATE TABLE</> can be used here. Keep in mind however
+ in <command>CREATE TABLE</command> can be used here. Keep in mind however
that the default value must satisfy the given constraints, or the
- <literal>ADD</> will fail. Alternatively, you can add
+ <literal>ADD</literal> will fail. Alternatively, you can add
constraints later (see below) after you've filled in the new column
correctly.
</para>
@@ -1210,7 +1210,7 @@ ALTER TABLE products ADD COLUMN description text CHECK (description &lt;&gt; '')
specified, <productname>PostgreSQL</productname> is able to avoid
the physical update. So if you intend to fill the column with
mostly nondefault values, it's best to add the column with no default,
- insert the correct values using <command>UPDATE</>, and then add any
+ insert the correct values using <command>UPDATE</command>, and then add any
desired default as described below.
</para>
</tip>
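    A sketch of that suggested sequence, reusing the products table from the
    surrounding examples (the filler expression is made up):
<programlisting>
ALTER TABLE products ADD COLUMN description text;   -- no default, so no physical rewrite
UPDATE products SET description = 'product #' || product_no;
ALTER TABLE products ALTER COLUMN description SET DEFAULT 'none';
</programlisting>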
@@ -1234,7 +1234,7 @@ ALTER TABLE products DROP COLUMN description;
foreign key constraint of another table,
<productname>PostgreSQL</productname> will not silently drop that
constraint. You can authorize dropping everything that depends on
- the column by adding <literal>CASCADE</>:
+ the column by adding <literal>CASCADE</literal>:
<programlisting>
ALTER TABLE products DROP COLUMN description CASCADE;
</programlisting>
@@ -1290,13 +1290,13 @@ ALTER TABLE products ALTER COLUMN product_no SET NOT NULL;
<programlisting>
ALTER TABLE products DROP CONSTRAINT some_name;
</programlisting>
- (If you are dealing with a generated constraint name like <literal>$2</>,
+ (If you are dealing with a generated constraint name like <literal>$2</literal>,
don't forget that you'll need to double-quote it to make it a valid
identifier.)
</para>
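    For example, with a generated name such as the one mentioned above:
<programlisting>
ALTER TABLE products DROP CONSTRAINT "$2";
</programlisting>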
<para>
- As with dropping a column, you need to add <literal>CASCADE</> if you
+ As with dropping a column, you need to add <literal>CASCADE</literal> if you
want to drop a constraint that something else depends on. An example
is that a foreign key constraint depends on a unique or primary key
constraint on the referenced column(s).
@@ -1326,7 +1326,7 @@ ALTER TABLE products ALTER COLUMN product_no DROP NOT NULL;
ALTER TABLE products ALTER COLUMN price SET DEFAULT 7.77;
</programlisting>
Note that this doesn't affect any existing rows in the table, it
- just changes the default for future <command>INSERT</> commands.
+ just changes the default for future <command>INSERT</command> commands.
</para>
<para>
@@ -1356,12 +1356,12 @@ ALTER TABLE products ALTER COLUMN price TYPE numeric(10,2);
</programlisting>
This will succeed only if each existing entry in the column can be
converted to the new type by an implicit cast. If a more complex
- conversion is needed, you can add a <literal>USING</> clause that
+ conversion is needed, you can add a <literal>USING</literal> clause that
specifies how to compute the new values from the old.
</para>
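    A sketch of such a conversion, assuming a hypothetical integer column
    that stores prices in cents:
<programlisting>
ALTER TABLE products
    ALTER COLUMN price_cents TYPE numeric(10,2)
    USING price_cents / 100.0;
</programlisting>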
<para>
- <productname>PostgreSQL</> will attempt to convert the column's
+ <productname>PostgreSQL</productname> will attempt to convert the column's
default value (if any) to the new type, as well as any constraints
that involve the column. But these conversions might fail, or might
produce surprising results. It's often best to drop any constraints
@@ -1437,11 +1437,11 @@ ALTER TABLE products RENAME TO items;
</para>
<para>
- There are different kinds of privileges: <literal>SELECT</>,
- <literal>INSERT</>, <literal>UPDATE</>, <literal>DELETE</>,
- <literal>TRUNCATE</>, <literal>REFERENCES</>, <literal>TRIGGER</>,
- <literal>CREATE</>, <literal>CONNECT</>, <literal>TEMPORARY</>,
- <literal>EXECUTE</>, and <literal>USAGE</>.
+ There are different kinds of privileges: <literal>SELECT</literal>,
+ <literal>INSERT</literal>, <literal>UPDATE</literal>, <literal>DELETE</literal>,
+ <literal>TRUNCATE</literal>, <literal>REFERENCES</literal>, <literal>TRIGGER</literal>,
+ <literal>CREATE</literal>, <literal>CONNECT</literal>, <literal>TEMPORARY</literal>,
+ <literal>EXECUTE</literal>, and <literal>USAGE</literal>.
The privileges applicable to a particular
object vary depending on the object's type (table, function, etc).
For complete information on the different types of privileges
@@ -1480,7 +1480,7 @@ GRANT UPDATE ON accounts TO joe;
<para>
The special <quote>role</quote> name <literal>PUBLIC</literal> can
be used to grant a privilege to every role on the system. Also,
- <quote>group</> roles can be set up to help manage privileges when
+ <quote>group</quote> roles can be set up to help manage privileges when
there are many users of a database &mdash; for details see
<xref linkend="user-manag">.
</para>
@@ -1492,7 +1492,7 @@ GRANT UPDATE ON accounts TO joe;
REVOKE ALL ON accounts FROM PUBLIC;
</programlisting>
The special privileges of the object owner (i.e., the right to do
- <command>DROP</>, <command>GRANT</>, <command>REVOKE</>, etc.)
+ <command>DROP</command>, <command>GRANT</command>, <command>REVOKE</command>, etc.)
are always implicit in being the owner,
and cannot be granted or revoked. But the object owner can choose
to revoke their own ordinary privileges, for example to make a
@@ -1502,7 +1502,7 @@ REVOKE ALL ON accounts FROM PUBLIC;
<para>
Ordinarily, only the object's owner (or a superuser) can grant or
revoke privileges on an object. However, it is possible to grant a
- privilege <quote>with grant option</>, which gives the recipient
+ privilege <quote>with grant option</quote>, which gives the recipient
the right to grant it in turn to others. If the grant option is
subsequently revoked then all who received the privilege from that
recipient (directly or through a chain of grants) will lose the
@@ -1525,10 +1525,10 @@ REVOKE ALL ON accounts FROM PUBLIC;
<para>
In addition to the SQL-standard <link linkend="ddl-priv">privilege
system</link> available through <xref linkend="sql-grant">,
- tables can have <firstterm>row security policies</> that restrict,
+ tables can have <firstterm>row security policies</firstterm> that restrict,
on a per-user basis, which rows can be returned by normal queries
or inserted, updated, or deleted by data modification commands.
- This feature is also known as <firstterm>Row-Level Security</>.
+ This feature is also known as <firstterm>Row-Level Security</firstterm>.
By default, tables do not have any policies, so that if a user has
access privileges to a table according to the SQL privilege system,
all rows within it are equally available for querying or updating.
@@ -1537,20 +1537,20 @@ REVOKE ALL ON accounts FROM PUBLIC;
<para>
When row security is enabled on a table (with
<link linkend="sql-altertable">ALTER TABLE ... ENABLE ROW LEVEL
- SECURITY</>), all normal access to the table for selecting rows or
+ SECURITY</link>), all normal access to the table for selecting rows or
modifying rows must be allowed by a row security policy. (However, the
table's owner is typically not subject to row security policies.) If no
policy exists for the table, a default-deny policy is used, meaning that
no rows are visible or can be modified. Operations that apply to the
- whole table, such as <command>TRUNCATE</> and <literal>REFERENCES</>,
+ whole table, such as <command>TRUNCATE</command> and <literal>REFERENCES</literal>,
are not subject to row security.
</para>
<para>
Row security policies can be specific to commands, or to roles, or to
both. A policy can be specified to apply to <literal>ALL</literal>
- commands, or to <literal>SELECT</>, <literal>INSERT</>, <literal>UPDATE</>,
- or <literal>DELETE</>. Multiple roles can be assigned to a given
+ commands, or to <literal>SELECT</literal>, <literal>INSERT</literal>, <literal>UPDATE</literal>,
+ or <literal>DELETE</literal>. Multiple roles can be assigned to a given
policy, and normal role membership and inheritance rules apply.
</para>
@@ -1562,7 +1562,7 @@ REVOKE ALL ON accounts FROM PUBLIC;
rule are <literal>leakproof</literal> functions, which are guaranteed to
not leak information; the optimizer may choose to apply such functions
ahead of the row-security check.) Rows for which the expression does
- not return <literal>true</> will not be processed. Separate expressions
+ not return <literal>true</literal> will not be processed. Separate expressions
may be specified to provide independent control over the rows which are
visible and the rows which are allowed to be modified. Policy
expressions are run as part of the query and with the privileges of the
@@ -1571,11 +1571,11 @@ REVOKE ALL ON accounts FROM PUBLIC;
</para>
<para>
- Superusers and roles with the <literal>BYPASSRLS</> attribute always
+ Superusers and roles with the <literal>BYPASSRLS</literal> attribute always
bypass the row security system when accessing a table. Table owners
normally bypass row security as well, though a table owner can choose to
be subject to row security with <link linkend="sql-altertable">ALTER
- TABLE ... FORCE ROW LEVEL SECURITY</>.
+ TABLE ... FORCE ROW LEVEL SECURITY</link>.
</para>
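    For instance, a minimal sketch that makes even the table's owner subject
    to its policies, reusing the accounts table from earlier examples:
<programlisting>
ALTER TABLE accounts ENABLE ROW LEVEL SECURITY;
ALTER TABLE accounts FORCE ROW LEVEL SECURITY;
</programlisting>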
<para>
@@ -1609,8 +1609,8 @@ REVOKE ALL ON accounts FROM PUBLIC;
<para>
As a simple example, here is how to create a policy on
- the <literal>account</> relation to allow only members of
- the <literal>managers</> role to access rows, and only rows of their
+ the <literal>account</literal> relation to allow only members of
+ the <literal>managers</literal> role to access rows, and only rows of their
accounts:
</para>
@@ -1627,7 +1627,7 @@ CREATE POLICY account_managers ON accounts TO managers
If no role is specified, or the special user name
<literal>PUBLIC</literal> is used, then the policy applies to all
users on the system. To allow all users to access their own row in
- a <literal>users</> table, a simple policy can be used:
+ a <literal>users</literal> table, a simple policy can be used:
</para>
<programlisting>
@@ -1637,9 +1637,9 @@ CREATE POLICY user_policy ON users
<para>
To use a different policy for rows that are being added to the table
- compared to those rows that are visible, the <literal>WITH CHECK</>
+ compared to those rows that are visible, the <literal>WITH CHECK</literal>
clause can be used. This policy would allow all users to view all rows
- in the <literal>users</> table, but only modify their own:
+ in the <literal>users</literal> table, but only modify their own:
</para>
<programlisting>
@@ -1649,7 +1649,7 @@ CREATE POLICY user_policy ON users
</programlisting>
<para>
- Row security can also be disabled with the <command>ALTER TABLE</>
+ Row security can also be disabled with the <command>ALTER TABLE</command>
command. Disabling row security does not remove any policies that are
defined on the table; they are simply ignored. Then all rows in the
table are visible and modifiable, subject to the standard SQL privileges
@@ -1658,7 +1658,7 @@ CREATE POLICY user_policy ON users
<para>
Below is a larger example of how this feature can be used in production
- environments. The table <literal>passwd</> emulates a Unix password
+ environments. The table <literal>passwd</literal> emulates a Unix password
file:
</para>
@@ -1820,7 +1820,7 @@ UPDATE 0
Referential integrity checks, such as unique or primary key constraints
and foreign key references, always bypass row security to ensure that
data integrity is maintained. Care must be taken when developing
- schemas and row level policies to avoid <quote>covert channel</> leaks of
+ schemas and row level policies to avoid <quote>covert channel</quote> leaks of
information through such referential integrity checks.
</para>
@@ -1830,7 +1830,7 @@ UPDATE 0
disastrous if row security silently caused some rows to be omitted
from the backup. In such a situation, you can set the
<xref linkend="guc-row-security"> configuration parameter
- to <literal>off</>. This does not in itself bypass row security;
+ to <literal>off</literal>. This does not in itself bypass row security;
what it does is throw an error if any query's results would get filtered
by a policy. The reason for the error can then be investigated and
fixed.
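    A one-line sketch of that setting for the current session:
<programlisting>
SET row_security = off;   -- error out rather than silently filter rows
</programlisting>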
@@ -1842,7 +1842,7 @@ UPDATE 0
best-performing case; when possible, it's best to design row security
applications to work this way. If it is necessary to consult other rows
or other tables to make a policy decision, that can be accomplished using
- sub-<command>SELECT</>s, or functions that contain <command>SELECT</>s,
+ sub-<command>SELECT</command>s, or functions that contain <command>SELECT</command>s,
in the policy expressions. Be aware however that such accesses can
create race conditions that could allow information leakage if care is
not taken. As an example, consider the following table design:
@@ -1896,8 +1896,8 @@ GRANT ALL ON information TO public;
</programlisting>
<para>
- Now suppose that <literal>alice</> wishes to change the <quote>slightly
- secret</> information, but decides that <literal>mallory</> should not
+ Now suppose that <literal>alice</literal> wishes to change the <quote>slightly
+ secret</quote> information, but decides that <literal>mallory</literal> should not
be trusted with the new content of that row, so she does:
</para>
@@ -1909,36 +1909,36 @@ COMMIT;
</programlisting>
<para>
- That looks safe; there is no window wherein <literal>mallory</> should be
- able to see the <quote>secret from mallory</> string. However, there is
- a race condition here. If <literal>mallory</> is concurrently doing,
+ That looks safe; there is no window wherein <literal>mallory</literal> should be
+ able to see the <quote>secret from mallory</quote> string. However, there is
+ a race condition here. If <literal>mallory</literal> is concurrently doing,
say,
<programlisting>
SELECT * FROM information WHERE group_id = 2 FOR UPDATE;
</programlisting>
- and her transaction is in <literal>READ COMMITTED</> mode, it is possible
- for her to see <quote>secret from mallory</>. That happens if her
- transaction reaches the <structname>information</> row just
- after <literal>alice</>'s does. It blocks waiting
- for <literal>alice</>'s transaction to commit, then fetches the updated
- row contents thanks to the <literal>FOR UPDATE</> clause. However, it
- does <emphasis>not</> fetch an updated row for the
- implicit <command>SELECT</> from <structname>users</>, because that
- sub-<command>SELECT</> did not have <literal>FOR UPDATE</>; instead
- the <structname>users</> row is read with the snapshot taken at the start
+ and her transaction is in <literal>READ COMMITTED</literal> mode, it is possible
+ for her to see <quote>secret from mallory</quote>. That happens if her
+ transaction reaches the <structname>information</structname> row just
+ after <literal>alice</literal>'s does. It blocks waiting
+ for <literal>alice</literal>'s transaction to commit, then fetches the updated
+ row contents thanks to the <literal>FOR UPDATE</literal> clause. However, it
+ does <emphasis>not</emphasis> fetch an updated row for the
+ implicit <command>SELECT</command> from <structname>users</structname>, because that
+ sub-<command>SELECT</command> did not have <literal>FOR UPDATE</literal>; instead
+ the <structname>users</structname> row is read with the snapshot taken at the start
of the query. Therefore, the policy expression tests the old value
- of <literal>mallory</>'s privilege level and allows her to see the
+ of <literal>mallory</literal>'s privilege level and allows her to see the
updated row.
</para>
<para>
There are several ways around this problem. One simple answer is to use
- <literal>SELECT ... FOR SHARE</> in sub-<command>SELECT</>s in row
- security policies. However, that requires granting <literal>UPDATE</>
- privilege on the referenced table (here <structname>users</>) to the
+ <literal>SELECT ... FOR SHARE</literal> in sub-<command>SELECT</command>s in row
+ security policies. However, that requires granting <literal>UPDATE</literal>
+ privilege on the referenced table (here <structname>users</structname>) to the
affected users, which might be undesirable. (But another row security
policy could be applied to prevent them from actually exercising that
- privilege; or the sub-<command>SELECT</> could be embedded into a security
+ privilege; or the sub-<command>SELECT</command> could be embedded into a security
definer function.) Also, heavy concurrent use of row share locks on the
referenced table could pose a performance problem, especially if updates
of it are frequent. Another solution, practical if updates of the
@@ -1977,19 +1977,19 @@ SELECT * FROM information WHERE group_id = 2 FOR UPDATE;
<para>
Users of a cluster do not necessarily have the privilege to access every
database in the cluster. Sharing of user names means that there
- cannot be different users named, say, <literal>joe</> in two databases
+ cannot be different users named, say, <literal>joe</literal> in two databases
in the same cluster; but the system can be configured to allow
- <literal>joe</> access to only some of the databases.
+ <literal>joe</literal> access to only some of the databases.
</para>
</note>
<para>
- A database contains one or more named <firstterm>schemas</>, which
+ A database contains one or more named <firstterm>schemas</firstterm>, which
in turn contain tables. Schemas also contain other kinds of named
objects, including data types, functions, and operators. The same
object name can be used in different schemas without conflict; for
- example, both <literal>schema1</> and <literal>myschema</> can
- contain tables named <literal>mytable</>. Unlike databases,
+ example, both <literal>schema1</literal> and <literal>myschema</literal> can
+ contain tables named <literal>mytable</literal>. Unlike databases,
schemas are not rigidly separated: a user can access objects in any
of the schemas in the database they are connected to, if they have
privileges to do so.
@@ -2053,10 +2053,10 @@ CREATE SCHEMA myschema;
<para>
To create or access objects in a schema, write a
- <firstterm>qualified name</> consisting of the schema name and
+ <firstterm>qualified name</firstterm> consisting of the schema name and
table name separated by a dot:
<synopsis>
-<replaceable>schema</><literal>.</><replaceable>table</>
+<replaceable>schema</replaceable><literal>.</literal><replaceable>table</replaceable>
</synopsis>
This works anywhere a table name is expected, including the table
modification commands and the data access commands discussed in
@@ -2068,10 +2068,10 @@ CREATE SCHEMA myschema;
<para>
Actually, the even more general syntax
<synopsis>
-<replaceable>database</><literal>.</><replaceable>schema</><literal>.</><replaceable>table</>
+<replaceable>database</replaceable><literal>.</literal><replaceable>schema</replaceable><literal>.</literal><replaceable>table</replaceable>
</synopsis>
can be used too, but at present this is just for <foreignphrase>pro
- forma</> compliance with the SQL standard. If you write a database name,
+ forma</foreignphrase> compliance with the SQL standard. If you write a database name,
it must be the same as the database you are connected to.
</para>
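    For example, assuming the session is connected to a database named mydb,
    both of these refer to the same table:
<programlisting>
SELECT * FROM mydb.myschema.mytable;   -- allowed only because mydb is the current database
SELECT * FROM myschema.mytable;
</programlisting>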
@@ -2116,7 +2116,7 @@ CREATE SCHEMA <replaceable>schema_name</replaceable> AUTHORIZATION <replaceable>
</para>
<para>
- Schema names beginning with <literal>pg_</> are reserved for
+ Schema names beginning with <literal>pg_</literal> are reserved for
system purposes and cannot be created by users.
</para>
</sect2>
@@ -2163,9 +2163,9 @@ CREATE TABLE public.products ( ... );
<para>
Qualified names are tedious to write, and it's often best not to
wire a particular schema name into applications anyway. Therefore
- tables are often referred to by <firstterm>unqualified names</>,
+ tables are often referred to by <firstterm>unqualified names</firstterm>,
which consist of just the table name. The system determines which table
- is meant by following a <firstterm>search path</>, which is a list
+ is meant by following a <firstterm>search path</firstterm>, which is a list
of schemas to look in. The first matching table in the search path
is taken to be the one wanted. If there is no match in the search
path, an error is reported, even if matching table names exist
@@ -2180,7 +2180,7 @@ CREATE TABLE public.products ( ... );
<para>
The first schema named in the search path is called the current schema.
Aside from being the first schema searched, it is also the schema in
- which new tables will be created if the <command>CREATE TABLE</>
+ which new tables will be created if the <command>CREATE TABLE</command>
command does not specify a schema name.
</para>
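    A quick sketch of inspecting the path and of a table landing in the
    current schema (schema and table names reused from the examples above):
<programlisting>
SHOW search_path;                    -- e.g. "$user", public
SET search_path TO myschema, public;
CREATE TABLE mytable (id integer);   -- created in myschema, the current schema
</programlisting>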
@@ -2253,7 +2253,7 @@ SET search_path TO myschema;
need to write a qualified operator name in an expression, there is a
special provision: you must write
<synopsis>
-<literal>OPERATOR(</><replaceable>schema</><literal>.</><replaceable>operator</><literal>)</>
+<literal>OPERATOR(</literal><replaceable>schema</replaceable><literal>.</literal><replaceable>operator</replaceable><literal>)</literal>
</synopsis>
This is needed to avoid syntactic ambiguity. An example is:
<programlisting>
@@ -2310,28 +2310,28 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC;
</indexterm>
<para>
- In addition to <literal>public</> and user-created schemas, each
- database contains a <literal>pg_catalog</> schema, which contains
+ In addition to <literal>public</literal> and user-created schemas, each
+ database contains a <literal>pg_catalog</literal> schema, which contains
the system tables and all the built-in data types, functions, and
- operators. <literal>pg_catalog</> is always effectively part of
+ operators. <literal>pg_catalog</literal> is always effectively part of
the search path. If it is not named explicitly in the path then
- it is implicitly searched <emphasis>before</> searching the path's
+ it is implicitly searched <emphasis>before</emphasis> searching the path's
schemas. This ensures that built-in names will always be
findable. However, you can explicitly place
- <literal>pg_catalog</> at the end of your search path if you
+ <literal>pg_catalog</literal> at the end of your search path if you
prefer to have user-defined names override built-in names.
</para>
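    For example, to have user-defined names take precedence over built-in ones:
<programlisting>
SET search_path TO myschema, public, pg_catalog;
</programlisting>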
<para>
- Since system table names begin with <literal>pg_</>, it is best to
+ Since system table names begin with <literal>pg_</literal>, it is best to
avoid such names to ensure that you won't suffer a conflict if some
future version defines a system table named the same as your
table. (With the default search path, an unqualified reference to
your table name would then be resolved as the system table instead.)
System tables will continue to follow the convention of having
- names beginning with <literal>pg_</>, so that they will not
+ names beginning with <literal>pg_</literal>, so that they will not
conflict with unqualified user-table names so long as users avoid
- the <literal>pg_</> prefix.
+ the <literal>pg_</literal> prefix.
</para>
</sect2>
@@ -2397,15 +2397,15 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC;
implements only the basic schema support specified in the
standard. Therefore, many users consider qualified names to
really consist of
- <literal><replaceable>user_name</>.<replaceable>table_name</></literal>.
+ <literal><replaceable>user_name</replaceable>.<replaceable>table_name</replaceable></literal>.
This is how <productname>PostgreSQL</productname> will effectively
behave if you create a per-user schema for every user.
</para>
<para>
- Also, there is no concept of a <literal>public</> schema in the
+ Also, there is no concept of a <literal>public</literal> schema in the
SQL standard. For maximum conformance to the standard, you should
- not use (perhaps even remove) the <literal>public</> schema.
+ not use (perhaps even remove) the <literal>public</literal> schema.
</para>
<para>
@@ -2461,9 +2461,9 @@ CREATE TABLE capitals (
) INHERITS (cities);
</programlisting>
- In this case, the <structname>capitals</> table <firstterm>inherits</>
- all the columns of its parent table, <structname>cities</>. State
- capitals also have an extra column, <structfield>state</>, that shows
+ In this case, the <structname>capitals</structname> table <firstterm>inherits</firstterm>
+ all the columns of its parent table, <structname>cities</structname>. State
+ capitals also have an extra column, <structfield>state</structfield>, that shows
their state.
</para>
@@ -2521,7 +2521,7 @@ SELECT name, altitude
</para>
<para>
- You can also write the table name with a trailing <literal>*</>
+ You can also write the table name with a trailing <literal>*</literal>
to explicitly specify that descendant tables are included:
<programlisting>
@@ -2530,7 +2530,7 @@ SELECT name, altitude
WHERE altitude &gt; 500;
</programlisting>
- Writing <literal>*</> is not necessary, since this behavior is always
+ Writing <literal>*</literal> is not necessary, since this behavior is always
the default. However, this syntax is still supported for
compatibility with older releases where the default could be changed.
</para>
@@ -2559,7 +2559,7 @@ WHERE c.altitude &gt; 500;
(If you try to reproduce this example, you will probably get
different numeric OIDs.) By doing a join with
- <structname>pg_class</> you can see the actual table names:
+ <structname>pg_class</structname> you can see the actual table names:
<programlisting>
SELECT p.relname, c.name, c.altitude
@@ -2579,7 +2579,7 @@ WHERE c.altitude &gt; 500 AND c.tableoid = p.oid;
</para>
<para>
- Another way to get the same effect is to use the <type>regclass</>
+ Another way to get the same effect is to use the <type>regclass</type>
alias type, which will print the table OID symbolically:
<programlisting>
@@ -2603,15 +2603,15 @@ VALUES ('Albany', NULL, NULL, 'NY');
<command>INSERT</command> always inserts into exactly the table
specified. In some cases it is possible to redirect the insertion
using a rule (see <xref linkend="rules">). However that does not
- help for the above case because the <structname>cities</> table
- does not contain the column <structfield>state</>, and so the
+ help for the above case because the <structname>cities</structname> table
+ does not contain the column <structfield>state</structfield>, and so the
command will be rejected before the rule can be applied.
</para>
<para>
All check constraints and not-null constraints on a parent table are
automatically inherited by its children, unless explicitly specified
- otherwise with <literal>NO INHERIT</> clauses. Other types of constraints
+ otherwise with <literal>NO INHERIT</literal> clauses. Other types of constraints
(unique, primary key, and foreign key constraints) are not inherited.
</para>
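    A sketch of a parent-table constraint that children will not inherit
    (the constraint itself is made up):
<programlisting>
ALTER TABLE cities ADD CONSTRAINT city_has_name
    CHECK (name &lt;&gt; '') NO INHERIT;
</programlisting>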
@@ -2620,7 +2620,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
the union of the columns defined by the parent tables. Any columns
declared in the child table's definition are added to these. If the
same column name appears in multiple parent tables, or in both a parent
- table and the child's definition, then these columns are <quote>merged</>
+ table and the child's definition, then these columns are <quote>merged</quote>
so that there is only one such column in the child table. To be merged,
columns must have the same data types, else an error is raised.
Inheritable check constraints and not-null constraints are merged in a
@@ -2632,7 +2632,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
<para>
Table inheritance is typically established when the child table is
- created, using the <literal>INHERITS</> clause of the
+ created, using the <literal>INHERITS</literal> clause of the
<xref linkend="sql-createtable">
statement.
Alternatively, a table which is already defined in a compatible way can
@@ -2642,7 +2642,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
the same names and types as the columns of the parent. It must also include
check constraints with the same names and check expressions as those of the
parent. Similarly an inheritance link can be removed from a child using the
- <literal>NO INHERIT</literal> variant of <command>ALTER TABLE</>.
+ <literal>NO INHERIT</literal> variant of <command>ALTER TABLE</command>.
Dynamically adding and removing inheritance links like this can be useful
when the inheritance relationship is being used for table
partitioning (see <xref linkend="ddl-partitioning">).
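    A sketch of removing and re-establishing such a link for the tables used above:
<programlisting>
ALTER TABLE capitals NO INHERIT cities;   -- detach the child
ALTER TABLE capitals INHERIT cities;      -- re-attach it; columns and constraints must match
</programlisting>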
@@ -2680,10 +2680,10 @@ VALUES ('Albany', NULL, NULL, 'NY');
<para>
Inherited queries perform access permission checks on the parent table
- only. Thus, for example, granting <literal>UPDATE</> permission on
- the <structname>cities</> table implies permission to update rows in
+ only. Thus, for example, granting <literal>UPDATE</literal> permission on
+ the <structname>cities</structname> table implies permission to update rows in
the <structname>capitals</structname> table as well, when they are
- accessed through <structname>cities</>. This preserves the appearance
+ accessed through <structname>cities</structname>. This preserves the appearance
that the data is (also) in the parent table. But
the <structname>capitals</structname> table could not be updated directly
without an additional grant. In a similar way, the parent table's row
@@ -2732,33 +2732,33 @@ VALUES ('Albany', NULL, NULL, 'NY');
<itemizedlist>
<listitem>
<para>
- If we declared <structname>cities</>.<structfield>name</> to be
- <literal>UNIQUE</> or a <literal>PRIMARY KEY</>, this would not stop the
- <structname>capitals</> table from having rows with names duplicating
- rows in <structname>cities</>. And those duplicate rows would by
- default show up in queries from <structname>cities</>. In fact, by
- default <structname>capitals</> would have no unique constraint at all,
+ If we declared <structname>cities</structname>.<structfield>name</structfield> to be
+ <literal>UNIQUE</literal> or a <literal>PRIMARY KEY</literal>, this would not stop the
+ <structname>capitals</structname> table from having rows with names duplicating
+ rows in <structname>cities</structname>. And those duplicate rows would by
+ default show up in queries from <structname>cities</structname>. In fact, by
+ default <structname>capitals</structname> would have no unique constraint at all,
and so could contain multiple rows with the same name.
- You could add a unique constraint to <structname>capitals</>, but this
- would not prevent duplication compared to <structname>cities</>.
+ You could add a unique constraint to <structname>capitals</structname>, but this
+ would not prevent duplication compared to <structname>cities</structname>.
</para>
</listitem>
<listitem>
<para>
Similarly, if we were to specify that
- <structname>cities</>.<structfield>name</> <literal>REFERENCES</> some
+ <structname>cities</structname>.<structfield>name</structfield> <literal>REFERENCES</literal> some
other table, this constraint would not automatically propagate to
- <structname>capitals</>. In this case you could work around it by
- manually adding the same <literal>REFERENCES</> constraint to
- <structname>capitals</>.
+ <structname>capitals</structname>. In this case you could work around it by
+ manually adding the same <literal>REFERENCES</literal> constraint to
+ <structname>capitals</structname>.
</para>
</listitem>
<listitem>
<para>
Specifying that another table's column <literal>REFERENCES
- cities(name)</> would allow the other table to contain city names, but
+ cities(name)</literal> would allow the other table to contain city names, but
not capital names. There is no good workaround for this case.
</para>
</listitem>
@@ -2825,10 +2825,10 @@ VALUES ('Albany', NULL, NULL, 'NY');
<para>
Bulk loads and deletes can be accomplished by adding or removing
partitions, if that requirement is planned into the partitioning design.
- Doing <command>ALTER TABLE DETACH PARTITION</> or dropping an individual
- partition using <command>DROP TABLE</> is far faster than a bulk
+ Doing <command>ALTER TABLE DETACH PARTITION</command> or dropping an individual
+ partition using <command>DROP TABLE</command> is far faster than a bulk
operation. These commands also entirely avoid the
- <command>VACUUM</command> overhead caused by a bulk <command>DELETE</>.
+ <command>VACUUM</command> overhead caused by a bulk <command>DELETE</command>.
</para>
</listitem>
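    For example, removing an obsolete month wholesale (the measurement tables
    are the running example used further down in this section):
<programlisting>
ALTER TABLE measurement DETACH PARTITION measurement_y2006m02;
DROP TABLE measurement_y2006m02;   -- or archive it first, e.g. with COPY or pg_dump
</programlisting>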
@@ -2921,7 +2921,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
containing data as a partition of a partitioned table, or remove a
partition from a partitioned table turning it into a standalone table;
see <xref linkend="sql-altertable"> to learn more about the
- <command>ATTACH PARTITION</> and <command>DETACH PARTITION</>
+ <command>ATTACH PARTITION</command> and <command>DETACH PARTITION</command>
sub-commands.
</para>
@@ -2968,9 +2968,9 @@ VALUES ('Albany', NULL, NULL, 'NY');
<para>
Partitions cannot have columns that are not present in the parent. It
is neither possible to specify columns when creating partitions with
- <command>CREATE TABLE</> nor is it possible to add columns to
- partitions after-the-fact using <command>ALTER TABLE</>. Tables may be
- added as a partition with <command>ALTER TABLE ... ATTACH PARTITION</>
+ <command>CREATE TABLE</command> nor is it possible to add columns to
+ partitions after-the-fact using <command>ALTER TABLE</command>. Tables may be
+ added as a partition with <command>ALTER TABLE ... ATTACH PARTITION</command>
only if their columns exactly match the parent, including any
<literal>oid</literal> column.
</para>
@@ -3049,7 +3049,7 @@ CREATE TABLE measurement (
accessing the partitioned table will have to scan fewer partitions if
the conditions involve some or all of these columns.
For example, consider a table range partitioned using columns
- <structfield>lastname</> and <structfield>firstname</> (in that order)
+ <structfield>lastname</structfield> and <structfield>firstname</structfield> (in that order)
as the partition key.
</para>
</listitem>
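    A minimal sketch of such a two-column range partition key (table and
    column names are made up):
<programlisting>
CREATE TABLE people (
    lastname   text,
    firstname  text
) PARTITION BY RANGE (lastname, firstname);
</programlisting>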
@@ -3067,7 +3067,7 @@ CREATE TABLE measurement (
<para>
Partitions thus created are in every way normal
- <productname>PostgreSQL</>
+ <productname>PostgreSQL</productname>
tables (or, possibly, foreign tables). It is possible to specify a
tablespace and storage parameters for each partition separately.
</para>
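    For instance, a sketch of a partition given its own storage parameter and
    tablespace (fast_ssd is a hypothetical tablespace):
<programlisting>
CREATE TABLE measurement_y2006m03 PARTITION OF measurement
    FOR VALUES FROM ('2006-03-01') TO ('2006-04-01')
    WITH (fillfactor = 90)
    TABLESPACE fast_ssd;
</programlisting>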
@@ -3111,12 +3111,12 @@ CREATE TABLE measurement_y2006m02 PARTITION OF measurement
PARTITION BY RANGE (peaktemp);
</programlisting>
- After creating partitions of <structname>measurement_y2006m02</>,
- any data inserted into <structname>measurement</> that is mapped to
- <structname>measurement_y2006m02</> (or data that is directly inserted
- into <structname>measurement_y2006m02</>, provided it satisfies its
+ After creating partitions of <structname>measurement_y2006m02</structname>,
+ any data inserted into <structname>measurement</structname> that is mapped to
+ <structname>measurement_y2006m02</structname> (or data that is directly inserted
+ into <structname>measurement_y2006m02</structname>, provided it satisfies its
partition constraint) will be further redirected to one of its
- partitions based on the <structfield>peaktemp</> column. The partition
+ partitions based on the <structfield>peaktemp</structfield> column. The partition
key specified may overlap with the parent's partition key, although
care should be taken when specifying the bounds of a sub-partition
such that the set of data it accepts constitutes a subset of what
@@ -3147,7 +3147,7 @@ CREATE INDEX ON measurement_y2008m01 (logdate);
<listitem>
<para>
Ensure that the <xref linkend="guc-constraint-exclusion">
- configuration parameter is not disabled in <filename>postgresql.conf</>.
+ configuration parameter is not disabled in <filename>postgresql.conf</filename>.
If it is, queries will not be optimized as desired.
</para>
</listitem>
@@ -3197,7 +3197,7 @@ ALTER TABLE measurement DETACH PARTITION measurement_y2006m02;
This allows further operations to be performed on the data before
it is dropped. For example, this is often a useful time to back up
- the data using <command>COPY</>, <application>pg_dump</>, or
+ the data using <command>COPY</command>, <application>pg_dump</application>, or
similar tools. It might also be a useful time to aggregate data
into smaller formats, perform other data manipulations, or run
reports.
@@ -3236,14 +3236,14 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
</para>
<para>
- Before running the <command>ATTACH PARTITION</> command, it is
- recommended to create a <literal>CHECK</> constraint on the table to
+ Before running the <command>ATTACH PARTITION</command> command, it is
+ recommended to create a <literal>CHECK</literal> constraint on the table to
be attached describing the desired partition constraint. That way,
the system will be able to skip the scan to validate the implicit
partition constraint. Without such a constraint, the table will be
scanned to validate the partition constraint while holding an
<literal>ACCESS EXCLUSIVE</literal> lock on the parent table.
- One may then drop the constraint after <command>ATTACH PARTITION</>
+ One may then drop the constraint after <command>ATTACH PARTITION</command>
is finished, because it is no longer necessary.
</para>
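     A sketch of that recommendation for the partition attached above (the
     constraint name, and the bounds assumed here, are illustrative):
<programlisting>
ALTER TABLE measurement_y2008m02 ADD CONSTRAINT y2008m02_check
    CHECK (logdate &gt;= DATE '2008-02-01' AND logdate &lt; DATE '2008-03-01');

ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
    FOR VALUES FROM ('2008-02-01') TO ('2008-03-01');

ALTER TABLE measurement_y2008m02 DROP CONSTRAINT y2008m02_check;
</programlisting>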
</sect3>
@@ -3285,7 +3285,7 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
<listitem>
<para>
- An <command>UPDATE</> that causes a row to move from one partition to
+ An <command>UPDATE</command> that causes a row to move from one partition to
another fails, because the new value of the row fails to satisfy the
implicit partition constraint of the original partition.
</para>
@@ -3376,7 +3376,7 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
the master table. Normally, these tables will not add any columns
to the set inherited from the master. Just as with declarative
partitioning, these partitions are in every way normal
- <productname>PostgreSQL</> tables (or foreign tables).
+ <productname>PostgreSQL</productname> tables (or foreign tables).
</para>
<para>
@@ -3460,7 +3460,7 @@ CREATE INDEX measurement_y2008m01_logdate ON measurement_y2008m01 (logdate);
<listitem>
<para>
We want our application to be able to say <literal>INSERT INTO
- measurement ...</> and have the data be redirected into the
+ measurement ...</literal> and have the data be redirected into the
appropriate partition table. We can arrange that by attaching
a suitable trigger function to the master table.
If data will be added only to the latest partition, we can
@@ -3567,9 +3567,9 @@ DO INSTEAD
</para>
<para>
- Be aware that <command>COPY</> ignores rules. If you want to
- use <command>COPY</> to insert data, you'll need to copy into the
- correct partition table rather than into the master. <command>COPY</>
+ Be aware that <command>COPY</command> ignores rules. If you want to
+ use <command>COPY</command> to insert data, you'll need to copy into the
+ correct partition table rather than into the master. <command>COPY</command>
does fire triggers, so you can use it normally if you use the trigger
approach.
</para>
@@ -3585,7 +3585,7 @@ DO INSTEAD
<para>
Ensure that the <xref linkend="guc-constraint-exclusion">
configuration parameter is not disabled in
- <filename>postgresql.conf</>.
+ <filename>postgresql.conf</filename>.
If it is, queries will not be optimized as desired.
</para>
</listitem>
@@ -3666,8 +3666,8 @@ ALTER TABLE measurement_y2008m02 INHERIT measurement;
<para>
The schemes shown here assume that the partition key column(s)
of a row never change, or at least do not change enough to require
- it to move to another partition. An <command>UPDATE</> that attempts
- to do that will fail because of the <literal>CHECK</> constraints.
+ it to move to another partition. An <command>UPDATE</command> that attempts
+ to do that will fail because of the <literal>CHECK</literal> constraints.
If you need to handle such cases, you can put suitable update triggers
on the partition tables, but it makes management of the structure
much more complicated.
@@ -3688,8 +3688,8 @@ ANALYZE measurement;
<listitem>
<para>
- <command>INSERT</command> statements with <literal>ON CONFLICT</>
- clauses are unlikely to work as expected, as the <literal>ON CONFLICT</>
+ <command>INSERT</command> statements with <literal>ON CONFLICT</literal>
+ clauses are unlikely to work as expected, as the <literal>ON CONFLICT</literal>
action is only taken in case of unique violations on the specified
target relation, not its child relations.
</para>
@@ -3717,7 +3717,7 @@ ANALYZE measurement;
</indexterm>
<para>
- <firstterm>Constraint exclusion</> is a query optimization technique
+ <firstterm>Constraint exclusion</firstterm> is a query optimization technique
that improves performance for partitioned tables defined in the
fashion described above (both declaratively partitioned tables and those
implemented using inheritance). As an example:
@@ -3728,17 +3728,17 @@ SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
</programlisting>
Without constraint exclusion, the above query would scan each of
- the partitions of the <structname>measurement</> table. With constraint
+ the partitions of the <structname>measurement</structname> table. With constraint
exclusion enabled, the planner will examine the constraints of each
partition and try to prove that the partition need not
be scanned because it could not contain any rows meeting the query's
- <literal>WHERE</> clause. When the planner can prove this, it
+ <literal>WHERE</literal> clause. When the planner can prove this, it
excludes the partition from the query plan.
</para>
<para>
- You can use the <command>EXPLAIN</> command to show the difference
- between a plan with <varname>constraint_exclusion</> on and a plan
+ You can use the <command>EXPLAIN</command> command to show the difference
+ between a plan with <varname>constraint_exclusion</varname> on and a plan
with it off. A typical unoptimized plan for this type of table setup is:
<programlisting>
@@ -3783,7 +3783,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
</para>
<para>
- Note that constraint exclusion is driven only by <literal>CHECK</>
+ Note that constraint exclusion is driven only by <literal>CHECK</literal>
constraints, not by the presence of indexes. Therefore it isn't
necessary to define indexes on the key columns. Whether an index
needs to be created for a given partition depends on whether you
@@ -3795,11 +3795,11 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<para>
The default (and recommended) setting of
<xref linkend="guc-constraint-exclusion"> is actually neither
- <literal>on</> nor <literal>off</>, but an intermediate setting
- called <literal>partition</>, which causes the technique to be
+ <literal>on</literal> nor <literal>off</literal>, but an intermediate setting
+ called <literal>partition</literal>, which causes the technique to be
applied only to queries that are likely to be working on partitioned
- tables. The <literal>on</> setting causes the planner to examine
- <literal>CHECK</> constraints in all queries, even simple ones that
+ tables. The <literal>on</literal> setting causes the planner to examine
+ <literal>CHECK</literal> constraints in all queries, even simple ones that
are unlikely to benefit.
</para>
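    A sketch of examining and changing the setting for the current session:
<programlisting>
SHOW constraint_exclusion;               -- 'partition' by default
SET constraint_exclusion = on;           -- consider CHECK constraints for every query
SET constraint_exclusion = partition;    -- back to the recommended default
</programlisting>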
@@ -3810,7 +3810,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<itemizedlist>
<listitem>
<para>
- Constraint exclusion only works when the query's <literal>WHERE</>
+ Constraint exclusion only works when the query's <literal>WHERE</literal>
clause contains constants (or externally supplied parameters).
For example, a comparison against a non-immutable function such as
<function>CURRENT_TIMESTAMP</function> cannot be optimized, since the
@@ -3867,7 +3867,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<productname>PostgreSQL</productname> implements portions of the SQL/MED
specification, allowing you to access data that resides outside
PostgreSQL using regular SQL queries. Such data is referred to as
- <firstterm>foreign data</>. (Note that this usage is not to be confused
+ <firstterm>foreign data</firstterm>. (Note that this usage is not to be confused
with foreign keys, which are a type of constraint within the database.)
</para>
@@ -3876,7 +3876,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<firstterm>foreign data wrapper</firstterm>. A foreign data wrapper is a
library that can communicate with an external data source, hiding the
details of connecting to the data source and obtaining data from it.
- There are some foreign data wrappers available as <filename>contrib</>
+ There are some foreign data wrappers available as <filename>contrib</filename>
modules; see <xref linkend="contrib">. Other kinds of foreign data
wrappers might be found as third party products. If none of the existing
foreign data wrappers suit your needs, you can write your own; see <xref
@@ -3884,7 +3884,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
</para>
<para>
- To access foreign data, you need to create a <firstterm>foreign server</>
+ To access foreign data, you need to create a <firstterm>foreign server</firstterm>
object, which defines how to connect to a particular external data source
according to the set of options used by its supporting foreign data
wrapper. Then you need to create one or more <firstterm>foreign
@@ -3899,7 +3899,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<para>
Accessing remote data may require authenticating to the external
data source. This information can be provided by a
- <firstterm>user mapping</>, which can provide additional data
+ <firstterm>user mapping</firstterm>, which can provide additional data
such as user names and passwords based
on the current <productname>PostgreSQL</productname> role.
</para>
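    As an illustration, a hedged sketch using the postgres_fdw contrib module;
    the server, host, credentials, and table names are all made up:
<programlisting>
CREATE EXTENSION postgres_fdw;

CREATE SERVER remote_pg FOREIGN DATA WRAPPER postgres_fdw
    OPTIONS (host 'remote.example.com', dbname 'sales');

CREATE USER MAPPING FOR CURRENT_USER SERVER remote_pg
    OPTIONS (user 'joe', password 'secret');

CREATE FOREIGN TABLE remote_orders (
    order_id  integer,
    amount    numeric
) SERVER remote_pg OPTIONS (schema_name 'public', table_name 'orders');

SELECT count(*) FROM remote_orders;
</programlisting>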
@@ -4002,13 +4002,13 @@ DROP TABLE products CASCADE;
that depend on them, recursively. In this case, it doesn't remove
the orders table, it only removes the foreign key constraint.
It stops there because nothing depends on the foreign key constraint.
- (If you want to check what <command>DROP ... CASCADE</> will do,
- run <command>DROP</> without <literal>CASCADE</> and read the
- <literal>DETAIL</> output.)
+ (If you want to check what <command>DROP ... CASCADE</command> will do,
+ run <command>DROP</command> without <literal>CASCADE</literal> and read the
+ <literal>DETAIL</literal> output.)
</para>
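    For example, reusing the products table above:
<programlisting>
DROP TABLE products;           -- fails; the DETAIL line lists the dependent constraint
DROP TABLE products CASCADE;   -- drops the dependent foreign key constraint as well
</programlisting>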
<para>
- Almost all <command>DROP</> commands in <productname>PostgreSQL</> support
+ Almost all <command>DROP</command> commands in <productname>PostgreSQL</productname> support
specifying <literal>CASCADE</literal>. Of course, the nature of
the possible dependencies varies with the type of the object. You
can also write <literal>RESTRICT</literal> instead of
@@ -4020,7 +4020,7 @@ DROP TABLE products CASCADE;
<para>
According to the SQL standard, specifying either
<literal>RESTRICT</literal> or <literal>CASCADE</literal> is
- required in a <command>DROP</> command. No database system actually
+ required in a <command>DROP</command> command. No database system actually
enforces that rule, but whether the default behavior
is <literal>RESTRICT</literal> or <literal>CASCADE</literal> varies
across systems.
@@ -4028,18 +4028,18 @@ DROP TABLE products CASCADE;
</note>
<para>
- If a <command>DROP</> command lists multiple
+ If a <command>DROP</command> command lists multiple
objects, <literal>CASCADE</literal> is only required when there are
dependencies outside the specified group. For example, when saying
<literal>DROP TABLE tab1, tab2</literal> the existence of a foreign
- key referencing <literal>tab1</> from <literal>tab2</> would not mean
+ key referencing <literal>tab1</literal> from <literal>tab2</literal> would not mean
that <literal>CASCADE</literal> is needed to succeed.
</para>
<para>
For user-defined functions, <productname>PostgreSQL</productname> tracks
dependencies associated with a function's externally-visible properties,
- such as its argument and result types, but <emphasis>not</> dependencies
+ such as its argument and result types, but <emphasis>not</emphasis> dependencies
that could only be known by examining the function body. As an example,
consider this situation:
@@ -4056,11 +4056,11 @@ CREATE FUNCTION get_color_note (rainbow) RETURNS text AS
(See <xref linkend="xfunc-sql"> for an explanation of SQL-language
functions.) <productname>PostgreSQL</productname> will be aware that
- the <function>get_color_note</> function depends on the <type>rainbow</>
+ the <function>get_color_note</function> function depends on the <type>rainbow</type>
type: dropping the type would force dropping the function, because its
- argument type would no longer be defined. But <productname>PostgreSQL</>
- will not consider <function>get_color_note</> to depend on
- the <structname>my_colors</> table, and so will not drop the function if
+ argument type would no longer be defined. But <productname>PostgreSQL</productname>
+ will not consider <function>get_color_note</function> to depend on
+ the <structname>my_colors</structname> table, and so will not drop the function if
the table is dropped. While there are disadvantages to this approach,
there are also benefits. The function is still valid in some sense if the
table is missing, though executing it would cause an error; creating a new
diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml
index 23af270e32..7ef996b51f 100644
--- a/doc/src/sgml/dfunc.sgml
+++ b/doc/src/sgml/dfunc.sgml
@@ -9,7 +9,7 @@
C, they must be compiled and linked in a special way to produce a
file that can be dynamically loaded by the server. To be precise, a
<firstterm>shared library</firstterm> needs to be
- created.<indexterm><primary>shared library</></indexterm>
+ created.<indexterm><primary>shared library</primary></indexterm>
</para>
@@ -30,7 +30,7 @@
executables: first the source files are compiled into object files,
then the object files are linked together. The object files need to
be created as <firstterm>position-independent code</firstterm>
- (<acronym>PIC</acronym>),<indexterm><primary>PIC</></> which
+ (<acronym>PIC</acronym>),<indexterm><primary>PIC</primary></indexterm> which
conceptually means that they can be placed at an arbitrary location
in memory when they are loaded by the executable. (Object files
intended for executables are usually not compiled that way.) The
@@ -57,8 +57,8 @@
<variablelist>
<varlistentry>
<term>
- <systemitem class="osname">FreeBSD</>
- <indexterm><primary>FreeBSD</><secondary>shared library</></>
+ <systemitem class="osname">FreeBSD</systemitem>
+ <indexterm><primary>FreeBSD</primary><secondary>shared library</secondary></indexterm>
</term>
<listitem>
<para>
@@ -70,15 +70,15 @@ gcc -fPIC -c foo.c
gcc -shared -o foo.so foo.o
</programlisting>
This is applicable as of version 3.0 of
- <systemitem class="osname">FreeBSD</>.
+ <systemitem class="osname">FreeBSD</systemitem>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
- <systemitem class="osname">HP-UX</>
- <indexterm><primary>HP-UX</><secondary>shared library</></>
+ <systemitem class="osname">HP-UX</systemitem>
+ <indexterm><primary>HP-UX</primary><secondary>shared library</secondary></indexterm>
</term>
<listitem>
<para>
@@ -97,7 +97,7 @@ gcc -fPIC -c foo.c
<programlisting>
ld -b -o foo.sl foo.o
</programlisting>
- <systemitem class="osname">HP-UX</> uses the extension
+ <systemitem class="osname">HP-UX</systemitem> uses the extension
<filename>.sl</filename> for shared libraries, unlike most other
systems.
</para>
@@ -106,8 +106,8 @@ ld -b -o foo.sl foo.o
<varlistentry>
<term>
- <systemitem class="osname">Linux</>
- <indexterm><primary>Linux</><secondary>shared library</></>
+ <systemitem class="osname">Linux</systemitem>
+ <indexterm><primary>Linux</primary><secondary>shared library</secondary></indexterm>
</term>
<listitem>
<para>
@@ -125,8 +125,8 @@ cc -shared -o foo.so foo.o
<varlistentry>
<term>
- <systemitem class="osname">macOS</>
- <indexterm><primary>macOS</><secondary>shared library</></>
+ <systemitem class="osname">macOS</systemitem>
+ <indexterm><primary>macOS</primary><secondary>shared library</secondary></indexterm>
</term>
<listitem>
<para>
@@ -141,8 +141,8 @@ cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o
<varlistentry>
<term>
- <systemitem class="osname">NetBSD</>
- <indexterm><primary>NetBSD</><secondary>shared library</></>
+ <systemitem class="osname">NetBSD</systemitem>
+ <indexterm><primary>NetBSD</primary><secondary>shared library</secondary></indexterm>
</term>
<listitem>
<para>
@@ -161,8 +161,8 @@ gcc -shared -o foo.so foo.o
<varlistentry>
<term>
- <systemitem class="osname">OpenBSD</>
- <indexterm><primary>OpenBSD</><secondary>shared library</></>
+ <systemitem class="osname">OpenBSD</systemitem>
+ <indexterm><primary>OpenBSD</primary><secondary>shared library</secondary></indexterm>
</term>
<listitem>
<para>
@@ -179,17 +179,17 @@ ld -Bshareable -o foo.so foo.o
<varlistentry>
<term>
- <systemitem class="osname">Solaris</>
- <indexterm><primary>Solaris</><secondary>shared library</></>
+ <systemitem class="osname">Solaris</systemitem>
+ <indexterm><primary>Solaris</primary><secondary>shared library</secondary></indexterm>
</term>
<listitem>
<para>
The compiler flag to create <acronym>PIC</acronym> is
<option>-KPIC</option> with the Sun compiler and
- <option>-fPIC</option> with <application>GCC</>. To
+ <option>-fPIC</option> with <application>GCC</application>. To
link shared libraries, the compiler option is
<option>-G</option> with either compiler or alternatively
- <option>-shared</option> with <application>GCC</>.
+ <option>-shared</option> with <application>GCC</application>.
<programlisting>
cc -KPIC -c foo.c
cc -G -o foo.so foo.o
diff --git a/doc/src/sgml/dict-int.sgml b/doc/src/sgml/dict-int.sgml
index d49f3e2a3a..04cf14a73d 100644
--- a/doc/src/sgml/dict-int.sgml
+++ b/doc/src/sgml/dict-int.sgml
@@ -8,7 +8,7 @@
</indexterm>
<para>
- <filename>dict_int</> is an example of an add-on dictionary template
+ <filename>dict_int</filename> is an example of an add-on dictionary template
for full-text search. The motivation for this example dictionary is to
control the indexing of integers (signed and unsigned), allowing such
numbers to be indexed while preventing excessive growth in the number of
@@ -25,17 +25,17 @@
<itemizedlist>
<listitem>
<para>
- The <literal>maxlen</> parameter specifies the maximum number of
+ The <literal>maxlen</literal> parameter specifies the maximum number of
digits allowed in an integer word. The default value is 6.
</para>
</listitem>
<listitem>
<para>
- The <literal>rejectlong</> parameter specifies whether an overlength
- integer should be truncated or ignored. If <literal>rejectlong</> is
- <literal>false</> (the default), the dictionary returns the first
- <literal>maxlen</> digits of the integer. If <literal>rejectlong</> is
- <literal>true</>, the dictionary treats an overlength integer as a stop
+ The <literal>rejectlong</literal> parameter specifies whether an overlength
+ integer should be truncated or ignored. If <literal>rejectlong</literal> is
+ <literal>false</literal> (the default), the dictionary returns the first
+ <literal>maxlen</literal> digits of the integer. If <literal>rejectlong</literal> is
+ <literal>true</literal>, the dictionary treats an overlength integer as a stop
word, so that it will not be indexed. Note that this also means that
such an integer cannot be searched for.
</para>
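     For example, a sketch tightening those defaults on the intdict dictionary
     that the extension installs (see the Usage section below):
<programlisting>
ALTER TEXT SEARCH DICTIONARY intdict (maxlen = 4, rejectlong = true);
</programlisting>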
@@ -47,8 +47,8 @@
<title>Usage</title>
<para>
- Installing the <literal>dict_int</> extension creates a text search
- template <literal>intdict_template</> and a dictionary <literal>intdict</>
+ Installing the <literal>dict_int</literal> extension creates a text search
+ template <literal>intdict_template</literal> and a dictionary <literal>intdict</literal>
based on it, with the default parameters. You can alter the
parameters, for example
diff --git a/doc/src/sgml/dict-xsyn.sgml b/doc/src/sgml/dict-xsyn.sgml
index 42362ffbc8..bf4965c36f 100644
--- a/doc/src/sgml/dict-xsyn.sgml
+++ b/doc/src/sgml/dict-xsyn.sgml
@@ -8,7 +8,7 @@
</indexterm>
<para>
- <filename>dict_xsyn</> (Extended Synonym Dictionary) is an example of an
+ <filename>dict_xsyn</filename> (Extended Synonym Dictionary) is an example of an
add-on dictionary template for full-text search. This dictionary type
replaces words with groups of their synonyms, and so makes it possible to
search for a word using any of its synonyms.
@@ -18,41 +18,41 @@
<title>Configuration</title>
<para>
- A <literal>dict_xsyn</> dictionary accepts the following options:
+ A <literal>dict_xsyn</literal> dictionary accepts the following options:
</para>
<itemizedlist>
<listitem>
<para>
- <literal>matchorig</> controls whether the original word is accepted by
- the dictionary. Default is <literal>true</>.
+ <literal>matchorig</literal> controls whether the original word is accepted by
+ the dictionary. Default is <literal>true</literal>.
</para>
</listitem>
<listitem>
<para>
- <literal>matchsynonyms</> controls whether the synonyms are
- accepted by the dictionary. Default is <literal>false</>.
+ <literal>matchsynonyms</literal> controls whether the synonyms are
+ accepted by the dictionary. Default is <literal>false</literal>.
</para>
</listitem>
<listitem>
<para>
- <literal>keeporig</> controls whether the original word is included in
- the dictionary's output. Default is <literal>true</>.
+ <literal>keeporig</literal> controls whether the original word is included in
+ the dictionary's output. Default is <literal>true</literal>.
</para>
</listitem>
<listitem>
<para>
- <literal>keepsynonyms</> controls whether the synonyms are included in
- the dictionary's output. Default is <literal>true</>.
+ <literal>keepsynonyms</literal> controls whether the synonyms are included in
+ the dictionary's output. Default is <literal>true</literal>.
</para>
</listitem>
<listitem>
<para>
- <literal>rules</> is the base name of the file containing the list of
+ <literal>rules</literal> is the base name of the file containing the list of
synonyms. This file must be stored in
- <filename>$SHAREDIR/tsearch_data/</> (where <literal>$SHAREDIR</> means
- the <productname>PostgreSQL</> installation's shared-data directory).
- Its name must end in <literal>.rules</> (which is not to be included in
- the <literal>rules</> parameter).
+ <filename>$SHAREDIR/tsearch_data/</filename> (where <literal>$SHAREDIR</literal> means
+ the <productname>PostgreSQL</productname> installation's shared-data directory).
+ Its name must end in <literal>.rules</literal> (which is not to be included in
+ the <literal>rules</literal> parameter).
</para>
</listitem>
</itemizedlist>
@@ -71,15 +71,15 @@ word syn1 syn2 syn3
</listitem>
<listitem>
<para>
- The sharp (<literal>#</>) sign is a comment delimiter. It may appear at
+ The sharp (<literal>#</literal>) sign is a comment delimiter. It may appear at
any position in a line. The rest of the line will be skipped.
</para>
</listitem>
</itemizedlist>
<para>
- Look at <filename>xsyn_sample.rules</>, which is installed in
- <filename>$SHAREDIR/tsearch_data/</>, for an example.
+ Look at <filename>xsyn_sample.rules</filename>, which is installed in
+ <filename>$SHAREDIR/tsearch_data/</filename>, for an example.
</para>
</sect2>
@@ -87,8 +87,8 @@ word syn1 syn2 syn3
<title>Usage</title>
<para>
- Installing the <literal>dict_xsyn</> extension creates a text search
- template <literal>xsyn_template</> and a dictionary <literal>xsyn</>
+ Installing the <literal>dict_xsyn</literal> extension creates a text search
+ template <literal>xsyn_template</literal> and a dictionary <literal>xsyn</literal>
based on it, with default parameters. You can alter the
parameters, for example
diff --git a/doc/src/sgml/diskusage.sgml b/doc/src/sgml/diskusage.sgml
index 461deb9dba..ba23084354 100644
--- a/doc/src/sgml/diskusage.sgml
+++ b/doc/src/sgml/diskusage.sgml
@@ -5,7 +5,7 @@
<para>
This chapter discusses how to monitor the disk usage of a
- <productname>PostgreSQL</> database system.
+ <productname>PostgreSQL</productname> database system.
</para>
<sect1 id="disk-usage">
@@ -18,10 +18,10 @@
<para>
Each table has a primary heap disk file where most of the data is
stored. If the table has any columns with potentially-wide values,
- there also might be a <acronym>TOAST</> file associated with the table,
+ there also might be a <acronym>TOAST</acronym> file associated with the table,
which is used to store values too wide to fit comfortably in the main
table (see <xref linkend="storage-toast">). There will be one valid index
- on the <acronym>TOAST</> table, if present. There also might be indexes
+ on the <acronym>TOAST</acronym> table, if present. There also might be indexes
associated with the base table. Each table and index is stored in a
separate disk file &mdash; possibly more than one file, if the file would
exceed one gigabyte. Naming conventions for these files are described
@@ -39,7 +39,7 @@
</para>
<para>
- Using <application>psql</> on a recently vacuumed or analyzed database,
+ Using <application>psql</application> on a recently vacuumed or analyzed database,
you can issue queries to see the disk usage of any table:
<programlisting>
SELECT pg_relation_filepath(oid), relpages FROM pg_class WHERE relname = 'customer';
@@ -49,14 +49,14 @@ SELECT pg_relation_filepath(oid), relpages FROM pg_class WHERE relname = 'custom
base/16384/16806 | 60
(1 row)
</programlisting>
- Each page is typically 8 kilobytes. (Remember, <structfield>relpages</>
- is only updated by <command>VACUUM</>, <command>ANALYZE</>, and
- a few DDL commands such as <command>CREATE INDEX</>.) The file path name
+ Each page is typically 8 kilobytes. (Remember, <structfield>relpages</structfield>
+ is only updated by <command>VACUUM</command>, <command>ANALYZE</command>, and
+ a few DDL commands such as <command>CREATE INDEX</command>.) The file path name
is of interest if you want to examine the table's disk file directly.
</para>
<para>
- To show the space used by <acronym>TOAST</> tables, use a query
+ To show the space used by <acronym>TOAST</acronym> tables, use a query
like the following:
<programlisting>
SELECT relname, relpages
diff --git a/doc/src/sgml/dml.sgml b/doc/src/sgml/dml.sgml
index 071cdb610f..bc016d3cae 100644
--- a/doc/src/sgml/dml.sgml
+++ b/doc/src/sgml/dml.sgml
@@ -285,42 +285,42 @@ DELETE FROM products;
<para>
Sometimes it is useful to obtain data from modified rows while they are
- being manipulated. The <command>INSERT</>, <command>UPDATE</>,
- and <command>DELETE</> commands all have an
- optional <literal>RETURNING</> clause that supports this. Use
- of <literal>RETURNING</> avoids performing an extra database query to
+ being manipulated. The <command>INSERT</command>, <command>UPDATE</command>,
+ and <command>DELETE</command> commands all have an
+ optional <literal>RETURNING</literal> clause that supports this. Use
+ of <literal>RETURNING</literal> avoids performing an extra database query to
collect the data, and is especially valuable when it would otherwise be
difficult to identify the modified rows reliably.
</para>
<para>
- The allowed contents of a <literal>RETURNING</> clause are the same as
- a <command>SELECT</> command's output list
+ The allowed contents of a <literal>RETURNING</literal> clause are the same as
+ a <command>SELECT</command> command's output list
(see <xref linkend="queries-select-lists">). It can contain column
names of the command's target table, or value expressions using those
- columns. A common shorthand is <literal>RETURNING *</>, which selects
+ columns. A common shorthand is <literal>RETURNING *</literal>, which selects
all columns of the target table in order.
</para>
<para>
- In an <command>INSERT</>, the data available to <literal>RETURNING</> is
+ In an <command>INSERT</command>, the data available to <literal>RETURNING</literal> is
the row as it was inserted. This is not so useful in trivial inserts,
since it would just repeat the data provided by the client. But it can
be very handy when relying on computed default values. For example,
- when using a <link linkend="datatype-serial"><type>serial</></link>
- column to provide unique identifiers, <literal>RETURNING</> can return
+ when using a <link linkend="datatype-serial"><type>serial</type></link>
+ column to provide unique identifiers, <literal>RETURNING</literal> can return
the ID assigned to a new row:
<programlisting>
CREATE TABLE users (firstname text, lastname text, id serial primary key);
INSERT INTO users (firstname, lastname) VALUES ('Joe', 'Cool') RETURNING id;
</programlisting>
- The <literal>RETURNING</> clause is also very useful
- with <literal>INSERT ... SELECT</>.
+ The <literal>RETURNING</literal> clause is also very useful
+ with <literal>INSERT ... SELECT</literal>.
</para>
<para>
- In an <command>UPDATE</>, the data available to <literal>RETURNING</> is
+ In an <command>UPDATE</command>, the data available to <literal>RETURNING</literal> is
the new content of the modified row. For example:
<programlisting>
UPDATE products SET price = price * 1.10
@@ -330,7 +330,7 @@ UPDATE products SET price = price * 1.10
</para>
<para>
- In a <command>DELETE</>, the data available to <literal>RETURNING</> is
+ In a <command>DELETE</command>, the data available to <literal>RETURNING</literal> is
the content of the deleted row. For example:
<programlisting>
DELETE FROM products
@@ -341,9 +341,9 @@ DELETE FROM products
<para>
If there are triggers (<xref linkend="triggers">) on the target table,
- the data available to <literal>RETURNING</> is the row as modified by
+ the data available to <literal>RETURNING</literal> is the row as modified by
the triggers. Thus, inspecting columns computed by triggers is another
- common use-case for <literal>RETURNING</>.
+ common use-case for <literal>RETURNING</literal>.
</para>
</sect1>
diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml
index ff58a17335..3a5b88ca1c 100644
--- a/doc/src/sgml/docguide.sgml
+++ b/doc/src/sgml/docguide.sgml
@@ -449,7 +449,7 @@ checking for fop... fop
<para>
To produce HTML documentation with the stylesheet used on <ulink
- url="https://www.postgresql.org/docs/current">postgresql.org</> instead of the
+ url="https://www.postgresql.org/docs/current">postgresql.org</ulink> instead of the
default simple style use:
<screen>
<prompt>doc/src/sgml$ </prompt><userinput>make STYLE=website html</userinput>
diff --git a/doc/src/sgml/earthdistance.sgml b/doc/src/sgml/earthdistance.sgml
index 6dedc4a5f4..1bdcf64629 100644
--- a/doc/src/sgml/earthdistance.sgml
+++ b/doc/src/sgml/earthdistance.sgml
@@ -8,18 +8,18 @@
</indexterm>
<para>
- The <filename>earthdistance</> module provides two different approaches to
+ The <filename>earthdistance</filename> module provides two different approaches to
calculating great circle distances on the surface of the Earth. The one
- described first depends on the <filename>cube</> module (which
- <emphasis>must</> be installed before <filename>earthdistance</> can be
- installed). The second one is based on the built-in <type>point</> data type,
+ described first depends on the <filename>cube</filename> module (which
+ <emphasis>must</emphasis> be installed before <filename>earthdistance</filename> can be
+ installed). The second one is based on the built-in <type>point</type> data type,
using longitude and latitude for the coordinates.
</para>
<para>
In this module, the Earth is assumed to be perfectly spherical.
(If that's too inaccurate for you, you might want to look at the
- <application><ulink url="http://postgis.net/">PostGIS</ulink></>
+ <application><ulink url="http://postgis.net/">PostGIS</ulink></application>
project.)
</para>
@@ -29,13 +29,13 @@
<para>
Data is stored in cubes that are points (both corners are the same) using 3
coordinates representing the x, y, and z distance from the center of the
- Earth. A domain <type>earth</> over <type>cube</> is provided, which
+ Earth. A domain <type>earth</type> over <type>cube</type> is provided, which
includes constraint checks that the value meets these restrictions and
is reasonably close to the actual surface of the Earth.
</para>
<para>
- The radius of the Earth is obtained from the <function>earth()</>
+ The radius of the Earth is obtained from the <function>earth()</function>
function. It is given in meters. But by changing this one function you can
change the module to use some other units, or to use a different value of
the radius that you feel is more appropriate.
@@ -43,8 +43,8 @@
<para>
This package has applications to astronomical databases as well.
- Astronomers will probably want to change <function>earth()</> to return a
- radius of <literal>180/pi()</> so that distances are in degrees.
+ Astronomers will probably want to change <function>earth()</function> to return a
+ radius of <literal>180/pi()</literal> so that distances are in degrees.
</para>
<para>
@@ -123,11 +123,11 @@
<entry><function>earth_box(earth, float8)</function><indexterm><primary>earth_box</primary></indexterm></entry>
<entry><type>cube</type></entry>
<entry>Returns a box suitable for an indexed search using the cube
- <literal>@&gt;</>
+ <literal>@&gt;</literal>
operator for points within a given great circle distance of a location.
Some points in this box are further than the specified great circle
distance from the location, so a second check using
- <function>earth_distance</> should be included in the query.
+ <function>earth_distance</function> should be included in the query.
</entry>
</row>
</tbody>
@@ -141,7 +141,7 @@
<para>
The second part of the module relies on representing Earth locations as
- values of type <type>point</>, in which the first component is taken to
+ values of type <type>point</type>, in which the first component is taken to
represent longitude in degrees, and the second component is taken to
represent latitude in degrees. Points are taken as (longitude, latitude)
and not vice versa because longitude is closer to the intuitive idea of
@@ -165,7 +165,7 @@
</thead>
<tbody>
<row>
- <entry><type>point</> <literal>&lt;@&gt;</literal> <type>point</></entry>
+ <entry><type>point</type> <literal>&lt;@&gt;</literal> <type>point</type></entry>
<entry><type>float8</type></entry>
<entry>Gives the distance in statute miles between
two points on the Earth's surface.
@@ -176,15 +176,15 @@
</table>
<para>
- Note that unlike the <type>cube</>-based part of the module, units
- are hardwired here: changing the <function>earth()</> function will
+ Note that unlike the <type>cube</type>-based part of the module, units
+ are hardwired here: changing the <function>earth()</function> function will
not affect the results of this operator.
</para>
<para>
One disadvantage of the longitude/latitude representation is that
you need to be careful about the edge conditions near the poles
- and near +/- 180 degrees of longitude. The <type>cube</>-based
+ and near +/- 180 degrees of longitude. The <type>cube</type>-based
representation avoids these discontinuities.
</para>
diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml
index 716a101838..0f9ff3a8eb 100644
--- a/doc/src/sgml/ecpg.sgml
+++ b/doc/src/sgml/ecpg.sgml
@@ -46,7 +46,7 @@
correctness. Third, embedded <acronym>SQL</acronym> in C is
specified in the <acronym>SQL</acronym> standard and supported by
many other <acronym>SQL</acronym> database systems. The
- <productname>PostgreSQL</> implementation is designed to match this
+ <productname>PostgreSQL</productname> implementation is designed to match this
standard as much as possible, and it is usually possible to port
embedded <acronym>SQL</acronym> programs written for other SQL
databases to <productname>PostgreSQL</productname> with relative
@@ -97,19 +97,19 @@ EXEC SQL CONNECT TO <replaceable>target</replaceable> <optional>AS <replaceable>
<itemizedlist>
<listitem>
<simpara>
- <literal><replaceable>dbname</><optional>@<replaceable>hostname</></optional><optional>:<replaceable>port</></optional></literal>
+ <literal><replaceable>dbname</replaceable><optional>@<replaceable>hostname</replaceable></optional><optional>:<replaceable>port</replaceable></optional></literal>
</simpara>
</listitem>
<listitem>
<simpara>
- <literal>tcp:postgresql://<replaceable>hostname</><optional>:<replaceable>port</></optional><optional>/<replaceable>dbname</></optional><optional>?<replaceable>options</></optional></literal>
+ <literal>tcp:postgresql://<replaceable>hostname</replaceable><optional>:<replaceable>port</replaceable></optional><optional>/<replaceable>dbname</replaceable></optional><optional>?<replaceable>options</replaceable></optional></literal>
</simpara>
</listitem>
<listitem>
<simpara>
- <literal>unix:postgresql://<replaceable>hostname</><optional>:<replaceable>port</></optional><optional>/<replaceable>dbname</></optional><optional>?<replaceable>options</></optional></literal>
+ <literal>unix:postgresql://<replaceable>hostname</replaceable><optional>:<replaceable>port</replaceable></optional><optional>/<replaceable>dbname</replaceable></optional><optional>?<replaceable>options</replaceable></optional></literal>
</simpara>
</listitem>
@@ -475,7 +475,7 @@ EXEC SQL COMMIT;
In the default mode, statements are committed only when
<command>EXEC SQL COMMIT</command> is issued. The embedded SQL
interface also supports autocommit of transactions (similar to
- <application>psql</>'s default behavior) via the <option>-t</option>
+ <application>psql</application>'s default behavior) via the <option>-t</option>
command-line option to <command>ecpg</command> (see <xref
linkend="app-ecpg">) or via the <literal>EXEC SQL SET AUTOCOMMIT TO
ON</literal> statement. In autocommit mode, each command is
@@ -507,7 +507,7 @@ EXEC SQL COMMIT;
</varlistentry>
<varlistentry>
- <term><literal>EXEC SQL PREPARE TRANSACTION </literal><replaceable class="parameter">transaction_id</></term>
+ <term><literal>EXEC SQL PREPARE TRANSACTION </literal><replaceable class="parameter">transaction_id</replaceable></term>
<listitem>
<para>
Prepare the current transaction for two-phase commit.
@@ -516,7 +516,7 @@ EXEC SQL COMMIT;
</varlistentry>
<varlistentry>
- <term><literal>EXEC SQL COMMIT PREPARED </literal><replaceable class="parameter">transaction_id</></term>
+ <term><literal>EXEC SQL COMMIT PREPARED </literal><replaceable class="parameter">transaction_id</replaceable></term>
<listitem>
<para>
Commit a transaction that is in prepared state.
@@ -525,7 +525,7 @@ EXEC SQL COMMIT;
</varlistentry>
<varlistentry>
- <term><literal>EXEC SQL ROLLBACK PREPARED </literal><replaceable class="parameter">transaction_id</></term>
+ <term><literal>EXEC SQL ROLLBACK PREPARED </literal><replaceable class="parameter">transaction_id</replaceable></term>
<listitem>
<para>
Roll back a transaction that is in prepared state.
@@ -720,7 +720,7 @@ EXEC SQL int i = 4;
<para>
The definition of a structure or union also must be listed inside
- a <literal>DECLARE</> section. Otherwise the preprocessor cannot
+ a <literal>DECLARE</literal> section. Otherwise the preprocessor cannot
handle these types since it does not know the definition.
</para>
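   <para>
    As a minimal sketch for a <filename>.pgc</filename> file (the
    <literal>employees</literal> table and the column sizes are invented
    here), both the structure definition and the variable using it go
    inside the declare section:
<programlisting>
EXEC SQL BEGIN DECLARE SECTION;
    /* the struct definition must be visible to the ecpg preprocessor */
    struct employee
    {
        int     id;
        char    name[41];
    };
    struct employee emp;
EXEC SQL END DECLARE SECTION;

EXEC SQL SELECT id, name INTO :emp FROM employees WHERE id = 1;
</programlisting>
   </para>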
</sect2>
@@ -890,8 +890,8 @@ do
</row>
<row>
- <entry><type>character(<replaceable>n</>)</type>, <type>varchar(<replaceable>n</>)</type>, <type>text</type></entry>
- <entry><type>char[<replaceable>n</>+1]</type>, <type>VARCHAR[<replaceable>n</>+1]</type><footnote><para>declared in <filename>ecpglib.h</filename></para></footnote></entry>
+ <entry><type>character(<replaceable>n</replaceable>)</type>, <type>varchar(<replaceable>n</replaceable>)</type>, <type>text</type></entry>
+ <entry><type>char[<replaceable>n</replaceable>+1]</type>, <type>VARCHAR[<replaceable>n</replaceable>+1]</type><footnote><para>declared in <filename>ecpglib.h</filename></para></footnote></entry>
</row>
<row>
@@ -955,7 +955,7 @@ EXEC SQL END DECLARE SECTION;
The other way is using the <type>VARCHAR</type> type, which is a
special type provided by ECPG. The definition on an array of
type <type>VARCHAR</type> is converted into a
- named <type>struct</> for every variable. A declaration like:
+ named <type>struct</type> for every variable. A declaration like:
<programlisting>
VARCHAR var[180];
</programlisting>
@@ -994,10 +994,10 @@ struct varchar_var { int len; char arr[180]; } var;
ECPG contains some special types that help you to interact easily
with some special data types from the PostgreSQL server. In
particular, it has implemented support for the
- <type>numeric</>, <type>decimal</type>, <type>date</>, <type>timestamp</>,
- and <type>interval</> types. These data types cannot usefully be
+ <type>numeric</type>, <type>decimal</type>, <type>date</type>, <type>timestamp</type>,
+ and <type>interval</type> types. These data types cannot usefully be
mapped to primitive host variable types (such
- as <type>int</>, <type>long long int</type>,
+ as <type>int</type>, <type>long long int</type>,
or <type>char[]</type>), because they have a complex internal
structure. Applications deal with these types by declaring host
variables in special types and accessing them using functions in
@@ -1942,10 +1942,10 @@ free(out);
<para>
The numeric type offers to do calculations with arbitrary precision. See
<xref linkend="datatype-numeric"> for the equivalent type in the
- <productname>PostgreSQL</> server. Because of the arbitrary precision this
+ <productname>PostgreSQL</productname> server. Because of the arbitrary precision this
variable needs to be able to expand and shrink dynamically. That's why you
can only create numeric variables on the heap, by means of the
- <function>PGTYPESnumeric_new</> and <function>PGTYPESnumeric_free</>
+ <function>PGTYPESnumeric_new</function> and <function>PGTYPESnumeric_free</function>
functions. The decimal type, which is similar but limited in precision,
can be created on the stack as well as on the heap.
</para>
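  <para>
   A minimal sketch of heap-based numeric handling (assuming the pgtypes
   library is linked in with <option>-lpgtypes</option>; the values are
   arbitrary):
<programlisting>
#include <stdio.h>
#include <stdlib.h>
#include <pgtypes_numeric.h>

int
main(void)
{
    /* numeric variables can only live on the heap */
    numeric *a = PGTYPESnumeric_new();
    numeric *b = PGTYPESnumeric_new();
    numeric *sum = PGTYPESnumeric_new();
    char    *text;

    PGTYPESnumeric_from_int(1234, a);
    PGTYPESnumeric_from_int(4321, b);
    PGTYPESnumeric_add(a, b, sum);

    text = PGTYPESnumeric_to_asc(sum, 0);   /* 0 digits after the decimal point */
    printf("%s\n", text);                   /* prints 5555 */
    free(text);

    PGTYPESnumeric_free(a);
    PGTYPESnumeric_free(b);
    PGTYPESnumeric_free(sum);
    return 0;
}
</programlisting>
  </para>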
@@ -2092,17 +2092,17 @@ int PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
<itemizedlist>
<listitem>
<para>
- 1, if <literal>var1</> is bigger than <literal>var2</>
+ 1, if <literal>var1</literal> is bigger than <literal>var2</literal>
</para>
</listitem>
<listitem>
<para>
- -1, if <literal>var1</> is smaller than <literal>var2</>
+ -1, if <literal>var1</literal> is smaller than <literal>var2</literal>
</para>
</listitem>
<listitem>
<para>
- 0, if <literal>var1</> and <literal>var2</> are equal
+ 0, if <literal>var1</literal> and <literal>var2</literal> are equal
</para>
</listitem>
</itemizedlist>
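   A small sketch of how the result is typically interpreted (the helper
   name is invented):
<programlisting>
#include <stdio.h>
#include <pgtypes_numeric.h>

static void
report_comparison(numeric *var1, numeric *var2)
{
    int result = PGTYPESnumeric_cmp(var1, var2);

    if (result > 0)
        printf("var1 is bigger\n");
    else if (result < 0)
        printf("var1 is smaller\n");
    else
        printf("var1 and var2 are equal\n");
}
</programlisting>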
@@ -2119,7 +2119,7 @@ int PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
int PGTYPESnumeric_from_int(signed int int_val, numeric *var);
</synopsis>
This function accepts a variable of type signed int and stores it
- in the numeric variable <literal>var</>. Upon success, 0 is returned and
+ in the numeric variable <literal>var</literal>. Upon success, 0 is returned and
-1 in case of a failure.
</para>
</listitem>
@@ -2134,7 +2134,7 @@ int PGTYPESnumeric_from_int(signed int int_val, numeric *var);
int PGTYPESnumeric_from_long(signed long int long_val, numeric *var);
</synopsis>
This function accepts a variable of type signed long int and stores it
- in the numeric variable <literal>var</>. Upon success, 0 is returned and
+ in the numeric variable <literal>var</literal>. Upon success, 0 is returned and
-1 in case of a failure.
</para>
</listitem>
@@ -2149,7 +2149,7 @@ int PGTYPESnumeric_from_long(signed long int long_val, numeric *var);
int PGTYPESnumeric_copy(numeric *src, numeric *dst);
</synopsis>
This function copies over the value of the variable that
- <literal>src</literal> points to into the variable that <literal>dst</>
+ <literal>src</literal> points to into the variable that <literal>dst</literal>
points to. It returns 0 on success and -1 if an error occurs.
</para>
</listitem>
@@ -2164,7 +2164,7 @@ int PGTYPESnumeric_copy(numeric *src, numeric *dst);
int PGTYPESnumeric_from_double(double d, numeric *dst);
</synopsis>
This function accepts a variable of type double and stores the result
- in the variable that <literal>dst</> points to. It returns 0 on success
+ in the variable that <literal>dst</literal> points to. It returns 0 on success
and -1 if an error occurs.
</para>
</listitem>
@@ -2179,10 +2179,10 @@ int PGTYPESnumeric_from_double(double d, numeric *dst);
int PGTYPESnumeric_to_double(numeric *nv, double *dp)
</synopsis>
The function converts the numeric value from the variable that
- <literal>nv</> points to into the double variable that <literal>dp</> points
+ <literal>nv</literal> points to into the double variable that <literal>dp</literal> points
to. It returns 0 on success and -1 if an error occurs, including
- overflow. On overflow, the global variable <literal>errno</> will be set
- to <literal>PGTYPES_NUM_OVERFLOW</> additionally.
+ overflow. On overflow, the global variable <literal>errno</literal> will be set
+ to <literal>PGTYPES_NUM_OVERFLOW</literal> additionally.
</para>
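   <para>
    A minimal sketch of that error handling (assuming the
    <literal>PGTYPES_NUM_OVERFLOW</literal> constant is available from
    <filename>pgtypes_error.h</filename>; the wrapper name is invented):
<programlisting>
#include <stdio.h>
#include <errno.h>
#include <pgtypes_numeric.h>
#include <pgtypes_error.h>

/* convert a numeric to double, distinguishing overflow from other errors */
static int
numeric_as_double(numeric *nv, double *dp)
{
    errno = 0;
    if (PGTYPESnumeric_to_double(nv, dp) < 0)
    {
        if (errno == PGTYPES_NUM_OVERFLOW)
            fprintf(stderr, "numeric value does not fit into a double\n");
        else
            fprintf(stderr, "conversion failed\n");
        return -1;
    }
    return 0;
}
</programlisting>
   </para>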
</listitem>
</varlistentry>
@@ -2196,10 +2196,10 @@ int PGTYPESnumeric_to_double(numeric *nv, double *dp)
int PGTYPESnumeric_to_int(numeric *nv, int *ip);
</synopsis>
The function converts the numeric value from the variable that
- <literal>nv</> points to into the integer variable that <literal>ip</>
+ <literal>nv</literal> points to into the integer variable that <literal>ip</literal>
points to. It returns 0 on success and -1 if an error occurs, including
- overflow. On overflow, the global variable <literal>errno</> will be set
- to <literal>PGTYPES_NUM_OVERFLOW</> additionally.
+ overflow. On overflow, the global variable <literal>errno</literal> will be set
+ to <literal>PGTYPES_NUM_OVERFLOW</literal> additionally.
</para>
</listitem>
</varlistentry>
@@ -2213,10 +2213,10 @@ int PGTYPESnumeric_to_int(numeric *nv, int *ip);
int PGTYPESnumeric_to_long(numeric *nv, long *lp);
</synopsis>
The function converts the numeric value from the variable that
- <literal>nv</> points to into the long integer variable that
- <literal>lp</> points to. It returns 0 on success and -1 if an error
+ <literal>nv</literal> points to into the long integer variable that
+ <literal>lp</literal> points to. It returns 0 on success and -1 if an error
occurs, including overflow. On overflow, the global variable
- <literal>errno</> will be set to <literal>PGTYPES_NUM_OVERFLOW</>
+ <literal>errno</literal> will be set to <literal>PGTYPES_NUM_OVERFLOW</literal>
additionally.
</para>
</listitem>
@@ -2231,10 +2231,10 @@ int PGTYPESnumeric_to_long(numeric *nv, long *lp);
int PGTYPESnumeric_to_decimal(numeric *src, decimal *dst);
</synopsis>
The function converts the numeric value from the variable that
- <literal>src</> points to into the decimal variable that
- <literal>dst</> points to. It returns 0 on success and -1 if an error
+ <literal>src</literal> points to into the decimal variable that
+ <literal>dst</literal> points to. It returns 0 on success and -1 if an error
occurs, including overflow. On overflow, the global variable
- <literal>errno</> will be set to <literal>PGTYPES_NUM_OVERFLOW</>
+ <literal>errno</literal> will be set to <literal>PGTYPES_NUM_OVERFLOW</literal>
additionally.
</para>
</listitem>
@@ -2249,8 +2249,8 @@ int PGTYPESnumeric_to_decimal(numeric *src, decimal *dst);
int PGTYPESnumeric_from_decimal(decimal *src, numeric *dst);
</synopsis>
The function converts the decimal value from the variable that
- <literal>src</> points to into the numeric variable that
- <literal>dst</> points to. It returns 0 on success and -1 if an error
+ <literal>src</literal> points to into the numeric variable that
+ <literal>dst</literal> points to. It returns 0 on success and -1 if an error
occurs. Since the decimal type is implemented as a limited version of
the numeric type, overflow cannot occur with this conversion.
</para>
@@ -2265,7 +2265,7 @@ int PGTYPESnumeric_from_decimal(decimal *src, numeric *dst);
<para>
The date type in C enables your programs to deal with data of the SQL type
date. See <xref linkend="datatype-datetime"> for the equivalent type in the
- <productname>PostgreSQL</> server.
+ <productname>PostgreSQL</productname> server.
</para>
<para>
The following functions can be used to work with the date type:
@@ -2292,8 +2292,8 @@ date PGTYPESdate_from_timestamp(timestamp dt);
<synopsis>
date PGTYPESdate_from_asc(char *str, char **endptr);
</synopsis>
- The function receives a C char* string <literal>str</> and a pointer to
- a C char* string <literal>endptr</>. At the moment ECPG always parses
+ The function receives a C char* string <literal>str</literal> and a pointer to
+ a C char* string <literal>endptr</literal>. At the moment ECPG always parses
the complete string, so it currently does not support storing the
address of the first invalid character in <literal>*endptr</literal>.
You can safely set <literal>endptr</literal> to NULL.
@@ -2397,9 +2397,9 @@ date PGTYPESdate_from_asc(char *str, char **endptr);
<synopsis>
char *PGTYPESdate_to_asc(date dDate);
</synopsis>
- The function receives the date <literal>dDate</> as its only parameter.
- It will output the date in the form <literal>1999-01-18</>, i.e., in the
- <literal>YYYY-MM-DD</> format.
+ The function receives the date <literal>dDate</literal> as its only parameter.
+ It will output the date in the form <literal>1999-01-18</literal>, i.e., in the
+ <literal>YYYY-MM-DD</literal> format.
</para>
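   <para>
    For instance, a minimal parse-and-print sketch (linked with
    <option>-lpgtypes</option>; the input date is arbitrary):
<programlisting>
#include <stdio.h>
#include <stdlib.h>
#include <pgtypes_date.h>

int
main(void)
{
    /* endptr may safely be NULL, as described above */
    date  d = PGTYPESdate_from_asc("1999-01-18", NULL);
    char *text = PGTYPESdate_to_asc(d);

    printf("%s\n", text);       /* 1999-01-18 */
    free(text);
    return 0;
}
</programlisting>
   </para>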
</listitem>
</varlistentry>
@@ -2414,11 +2414,11 @@ char *PGTYPESdate_to_asc(date dDate);
void PGTYPESdate_julmdy(date d, int *mdy);
</synopsis>
<!-- almost same description as for rjulmdy() -->
- The function receives the date <literal>d</> and a pointer to an array
- of 3 integer values <literal>mdy</>. The variable name indicates
- the sequential order: <literal>mdy[0]</> will be set to contain the
- number of the month, <literal>mdy[1]</> will be set to the value of the
- day and <literal>mdy[2]</> will contain the year.
+ The function receives the date <literal>d</literal> and a pointer to an array
+ of 3 integer values <literal>mdy</literal>. The variable name indicates
+ the sequential order: <literal>mdy[0]</literal> will be set to contain the
+ number of the month, <literal>mdy[1]</literal> will be set to the value of the
+ day and <literal>mdy[2]</literal> will contain the year.
</para>
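   <para>
    A small sketch (the input date is arbitrary):
<programlisting>
#include <stdio.h>
#include <pgtypes_date.h>

int
main(void)
{
    date d = PGTYPESdate_from_asc("2017-10-09", NULL);
    int  mdy[3];

    PGTYPESdate_julmdy(d, mdy);
    /* mdy[0] = month, mdy[1] = day, mdy[2] = year */
    printf("month %d, day %d, year %d\n", mdy[0], mdy[1], mdy[2]);
    return 0;
}
</programlisting>
   </para>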
</listitem>
</varlistentry>
@@ -2432,7 +2432,7 @@ void PGTYPESdate_julmdy(date d, int *mdy);
<synopsis>
void PGTYPESdate_mdyjul(int *mdy, date *jdate);
</synopsis>
- The function receives the array of the 3 integers (<literal>mdy</>) as
+ The function receives the array of the 3 integers (<literal>mdy</literal>) as
its first argument and as its second argument a pointer to a variable
of type date that should hold the result of the operation.
</para>
@@ -2447,7 +2447,7 @@ void PGTYPESdate_mdyjul(int *mdy, date *jdate);
<synopsis>
int PGTYPESdate_dayofweek(date d);
</synopsis>
- The function receives the date variable <literal>d</> as its only
+ The function receives the date variable <literal>d</literal> as its only
argument and returns an integer that indicates the day of the week for
this date.
<itemizedlist>
@@ -2499,7 +2499,7 @@ int PGTYPESdate_dayofweek(date d);
<synopsis>
void PGTYPESdate_today(date *d);
</synopsis>
- The function receives a pointer to a date variable (<literal>d</>)
+ The function receives a pointer to a date variable (<literal>d</literal>)
that it sets to the current date.
</para>
</listitem>
@@ -2514,9 +2514,9 @@ void PGTYPESdate_today(date *d);
<synopsis>
int PGTYPESdate_fmt_asc(date dDate, char *fmtstring, char *outbuf);
</synopsis>
- The function receives the date to convert (<literal>dDate</>), the
- format mask (<literal>fmtstring</>) and the string that will hold the
- textual representation of the date (<literal>outbuf</>).
+ The function receives the date to convert (<literal>dDate</literal>), the
+ format mask (<literal>fmtstring</literal>) and the string that will hold the
+ textual representation of the date (<literal>outbuf</literal>).
</para>
<para>
On success, 0 is returned; a negative value is returned if an error occurred.
@@ -2637,9 +2637,9 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str);
</synopsis>
<!-- same description as rdefmtdate -->
The function receives a pointer to the date value that should hold the
- result of the operation (<literal>d</>), the format mask to use for
- parsing the date (<literal>fmt</>) and the C char* string containing
- the textual representation of the date (<literal>str</>). The textual
+ result of the operation (<literal>d</literal>), the format mask to use for
+ parsing the date (<literal>fmt</literal>) and the C char* string containing
+ the textual representation of the date (<literal>str</literal>). The textual
representation is expected to match the format mask. However you do not
need to have a 1:1 mapping of the string to the format mask. The
function only analyzes the sequential order and looks for the literals
@@ -2742,7 +2742,7 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str);
<para>
The timestamp type in C enables your programs to deal with data of the SQL
type timestamp. See <xref linkend="datatype-datetime"> for the equivalent
- type in the <productname>PostgreSQL</> server.
+ type in the <productname>PostgreSQL</productname> server.
</para>
<para>
The following functions can be used to work with the timestamp type:
@@ -2756,8 +2756,8 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str);
<synopsis>
timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr);
</synopsis>
- The function receives the string to parse (<literal>str</>) and a
- pointer to a C char* (<literal>endptr</>).
+ The function receives the string to parse (<literal>str</literal>) and a
+ pointer to a C char* (<literal>endptr</literal>).
At the moment ECPG always parses
the complete string, so it currently does not support storing the
address of the first invalid character in <literal>*endptr</literal>.
@@ -2765,15 +2765,15 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr);
</para>
<para>
The function returns the parsed timestamp on success. On error,
- <literal>PGTYPESInvalidTimestamp</literal> is returned and <varname>errno</> is
- set to <literal>PGTYPES_TS_BAD_TIMESTAMP</>. See <xref linkend="PGTYPESInvalidTimestamp"> for important notes on this value.
+ <literal>PGTYPESInvalidTimestamp</literal> is returned and <varname>errno</varname> is
+ set to <literal>PGTYPES_TS_BAD_TIMESTAMP</literal>. See <xref linkend="PGTYPESInvalidTimestamp"> for important notes on this value.
</para>
<para>
In general, the input string can contain any combination of an allowed
date specification, a whitespace character and an allowed time
specification. Note that time zones are not supported by ECPG. It can
parse them but does not apply any calculation as the
- <productname>PostgreSQL</> server does for example. Timezone
+ <productname>PostgreSQL</productname> server does for example. Timezone
specifiers are silently discarded.
</para>
<para>
@@ -2819,7 +2819,7 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr);
<synopsis>
char *PGTYPEStimestamp_to_asc(timestamp tstamp);
</synopsis>
- The function receives the timestamp <literal>tstamp</> as
+ The function receives the timestamp <literal>tstamp</literal> as
its only argument and returns an allocated string that contains the
textual representation of the timestamp.
</para>
@@ -2835,7 +2835,7 @@ char *PGTYPEStimestamp_to_asc(timestamp tstamp);
void PGTYPEStimestamp_current(timestamp *ts);
</synopsis>
The function retrieves the current timestamp and saves it into the
- timestamp variable that <literal>ts</> points to.
+ timestamp variable that <literal>ts</literal> points to.
</para>
</listitem>
</varlistentry>
@@ -2849,8 +2849,8 @@ void PGTYPEStimestamp_current(timestamp *ts);
int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmtstr);
</synopsis>
The function receives a pointer to the timestamp to convert as its
- first argument (<literal>ts</>), a pointer to the output buffer
- (<literal>output</>), the maximal length that has been allocated for
+ first argument (<literal>ts</literal>), a pointer to the output buffer
+ (<literal>output</literal>), the maximal length that has been allocated for
the output buffer (<literal>str_len</literal>) and the format mask to
use for the conversion (<literal>fmtstr</literal>).
</para>
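   <para>
    A minimal formatting sketch (the format mask and the buffer size are
    arbitrary):
<programlisting>
#include <stdio.h>
#include <pgtypes_timestamp.h>

int
main(void)
{
    timestamp ts = PGTYPEStimestamp_from_asc("2017-10-09 01:44:17", NULL);
    char      out[64];

    if (PGTYPEStimestamp_fmt_asc(&ts, out, sizeof(out), "%a, %d %b %Y %H:%M:%S") == 0)
        printf("%s\n", out);    /* Mon, 09 Oct 2017 01:44:17 */
    return 0;
}
</programlisting>
   </para>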
@@ -2861,7 +2861,7 @@ int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmt
<para>
You can use the following format specifiers for the format mask. The
format specifiers are the same ones that are used in the
- <function>strftime</> function in <productname>libc</productname>. Any
+ <function>strftime</function> function in <productname>libc</productname>. Any
non-format specifier will be copied into the output buffer.
<!-- This is from the FreeBSD man page:
http://www.freebsd.org/cgi/man.cgi?query=strftime&apropos=0&sektion=3&manpath=FreeBSD+7.0-current&format=html
@@ -3184,9 +3184,9 @@ int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmt
<synopsis>
int PGTYPEStimestamp_sub(timestamp *ts1, timestamp *ts2, interval *iv);
</synopsis>
- The function will subtract the timestamp variable that <literal>ts2</>
- points to from the timestamp variable that <literal>ts1</> points to
- and will store the result in the interval variable that <literal>iv</>
+ The function will subtract the timestamp variable that <literal>ts2</literal>
+ points to from the timestamp variable that <literal>ts1</literal> points to
+ and will store the result in the interval variable that <literal>iv</literal>
points to.
</para>
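   <para>
    For example, a minimal sketch computing the difference of two
    timestamps (the values are invented):
<programlisting>
#include <stdio.h>
#include <stdlib.h>
#include <pgtypes_timestamp.h>
#include <pgtypes_interval.h>

int
main(void)
{
    timestamp ts1 = PGTYPEStimestamp_from_asc("2017-10-09 12:00:00", NULL);
    timestamp ts2 = PGTYPEStimestamp_from_asc("2017-10-08 08:30:00", NULL);
    interval  iv;

    if (PGTYPEStimestamp_sub(&ts1, &ts2, &iv) == 0)
    {
        char *text = PGTYPESinterval_to_asc(&iv);

        printf("%s\n", text);   /* e.g. @ 1 day 3 hours 30 mins */
        free(text);
    }
    return 0;
}
</programlisting>
   </para>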
<para>
@@ -3206,12 +3206,12 @@ int PGTYPEStimestamp_sub(timestamp *ts1, timestamp *ts2, interval *iv);
int PGTYPEStimestamp_defmt_asc(char *str, char *fmt, timestamp *d);
</synopsis>
The function receives the textual representation of a timestamp in the
- variable <literal>str</> as well as the formatting mask to use in the
- variable <literal>fmt</>. The result will be stored in the variable
- that <literal>d</> points to.
+ variable <literal>str</literal> as well as the formatting mask to use in the
+ variable <literal>fmt</literal>. The result will be stored in the variable
+ that <literal>d</literal> points to.
</para>
<para>
- If the formatting mask <literal>fmt</> is NULL, the function will fall
+ If the formatting mask <literal>fmt</literal> is NULL, the function will fall
back to the default formatting mask which is <literal>%Y-%m-%d
%H:%M:%S</literal>.
</para>
@@ -3231,10 +3231,10 @@ int PGTYPEStimestamp_defmt_asc(char *str, char *fmt, timestamp *d);
<synopsis>
int PGTYPEStimestamp_add_interval(timestamp *tin, interval *span, timestamp *tout);
</synopsis>
- The function receives a pointer to a timestamp variable <literal>tin</>
- and a pointer to an interval variable <literal>span</>. It adds the
+ The function receives a pointer to a timestamp variable <literal>tin</literal>
+ and a pointer to an interval variable <literal>span</literal>. It adds the
interval to the timestamp and saves the resulting timestamp in the
- variable that <literal>tout</> points to.
+ variable that <literal>tout</literal> points to.
</para>
<para>
Upon success, the function returns 0 and a negative value if an
@@ -3251,9 +3251,9 @@ int PGTYPEStimestamp_add_interval(timestamp *tin, interval *span, timestamp *tou
<synopsis>
int PGTYPEStimestamp_sub_interval(timestamp *tin, interval *span, timestamp *tout);
</synopsis>
- The function subtracts the interval variable that <literal>span</>
- points to from the timestamp variable that <literal>tin</> points to
- and saves the result into the variable that <literal>tout</> points
+ The function subtracts the interval variable that <literal>span</literal>
+ points to from the timestamp variable that <literal>tin</literal> points to
+ and saves the result into the variable that <literal>tout</literal> points
to.
</para>
<para>
@@ -3271,7 +3271,7 @@ int PGTYPEStimestamp_sub_interval(timestamp *tin, interval *span, timestamp *tou
<para>
The interval type in C enables your programs to deal with data of the SQL
type interval. See <xref linkend="datatype-datetime"> for the equivalent
- type in the <productname>PostgreSQL</> server.
+ type in the <productname>PostgreSQL</productname> server.
</para>
<para>
The following functions can be used to work with the interval type:
@@ -3309,7 +3309,7 @@ void PGTYPESinterval_new(interval *intvl);
<synopsis>
interval *PGTYPESinterval_from_asc(char *str, char **endptr);
</synopsis>
- The function parses the input string <literal>str</> and returns a
+ The function parses the input string <literal>str</literal> and returns a
pointer to an allocated interval variable.
At the moment ECPG always parses
the complete string, so it currently does not support storing the
@@ -3327,7 +3327,7 @@ interval *PGTYPESinterval_from_asc(char *str, char **endptr);
<synopsis>
char *PGTYPESinterval_to_asc(interval *span);
</synopsis>
- The function converts the interval variable that <literal>span</>
+ The function converts the interval variable that <literal>span</literal>
points to into a C char*. The output looks like this example:
<literal>@ 1 day 12 hours 59 mins 10 secs</literal>.
</para>
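   <para>
    A minimal round-trip sketch (assuming the allocated interval is
    released with <function>PGTYPESinterval_free</function>):
<programlisting>
#include <stdio.h>
#include <stdlib.h>
#include <pgtypes_interval.h>

int
main(void)
{
    interval *span = PGTYPESinterval_from_asc("1 day 12 hours 59 mins 10 secs", NULL);

    if (span != NULL)
    {
        char *text = PGTYPESinterval_to_asc(span);

        printf("%s\n", text);   /* @ 1 day 12 hours 59 mins 10 secs */
        free(text);
        PGTYPESinterval_free(span);
    }
    return 0;
}
</programlisting>
   </para>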
@@ -3342,8 +3342,8 @@ char *PGTYPESinterval_to_asc(interval *span);
<synopsis>
int PGTYPESinterval_copy(interval *intvlsrc, interval *intvldest);
</synopsis>
- The function copies the interval variable that <literal>intvlsrc</>
- points to into the variable that <literal>intvldest</> points to. Note
+ The function copies the interval variable that <literal>intvlsrc</literal>
+ points to into the variable that <literal>intvldest</literal> points to. Note
that you need to allocate the memory for the destination variable
before.
</para>
@@ -3360,15 +3360,15 @@ int PGTYPESinterval_copy(interval *intvlsrc, interval *intvldest);
a maximum precision of 30 significant digits. In contrast to the numeric
type which can be created on the heap only, the decimal type can be
created either on the stack or on the heap (by means of the functions
- <function>PGTYPESdecimal_new</> and
- <function>PGTYPESdecimal_free</>).
+ <function>PGTYPESdecimal_new</function> and
+ <function>PGTYPESdecimal_free</function>).
There are a lot of other functions that deal with the decimal type in the
<productname>Informix</productname> compatibility mode described in <xref
linkend="ecpg-informix-compat">.
</para>
<para>
The following functions can be used to work with the decimal type and are
- not only contained in the <literal>libcompat</> library.
+ not only contained in the <literal>libcompat</literal> library.
<variablelist>
<varlistentry>
<term><function>PGTYPESdecimal_new</function></term>
@@ -3548,15 +3548,15 @@ void PGTYPESdecimal_free(decimal *var);
<listitem>
<para>
A value of type timestamp representing an invalid time stamp. This is
- returned by the function <function>PGTYPEStimestamp_from_asc</> on
+ returned by the function <function>PGTYPEStimestamp_from_asc</function> on
parse error.
Note that due to the internal representation of the <type>timestamp</type> data type,
<literal>PGTYPESInvalidTimestamp</literal> is also a valid timestamp at
- the same time. It is set to <literal>1899-12-31 23:59:59</>. In order
+ the same time. It is set to <literal>1899-12-31 23:59:59</literal>. In order
to detect errors, make sure that your application does not only test
for <literal>PGTYPESInvalidTimestamp</literal> but also for
- <literal>errno != 0</> after each call to
- <function>PGTYPEStimestamp_from_asc</>.
+ <literal>errno != 0</literal> after each call to
+ <function>PGTYPEStimestamp_from_asc</function>.
</para>
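   <para>
    A minimal sketch of that error check (the wrapper name is invented):
<programlisting>
#include <errno.h>
#include <pgtypes_timestamp.h>

/*
 * Parse a timestamp, detecting errors reliably: comparing against
 * PGTYPESInvalidTimestamp alone is not enough, because that value is
 * also a legal timestamp (1899-12-31 23:59:59).
 */
static int
parse_timestamp(char *str, timestamp *result)
{
    errno = 0;
    *result = PGTYPEStimestamp_from_asc(str, NULL);
    if (*result == PGTYPESInvalidTimestamp)
    {
        if (errno != 0)
            return -1;          /* real parse error */
    }
    return 0;
}
</programlisting>
   </para>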
</listitem>
</varlistentry>
@@ -3927,7 +3927,7 @@ typedef struct sqlda_struct sqlda_t;
<variablelist>
<varlistentry>
- <term><literal>sqldaid</></term>
+ <term><literal>sqldaid</literal></term>
<listitem>
<para>
It contains the literal string <literal>"SQLDA "</literal>.
@@ -3936,7 +3936,7 @@ typedef struct sqlda_struct sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>sqldabc</></term>
+ <term><literal>sqldabc</literal></term>
<listitem>
<para>
It contains the size of the allocated space in bytes.
@@ -3945,7 +3945,7 @@ typedef struct sqlda_struct sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>sqln</></term>
+ <term><literal>sqln</literal></term>
<listitem>
<para>
It contains the number of input parameters for a parameterized query in
@@ -3960,7 +3960,7 @@ typedef struct sqlda_struct sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>sqld</></term>
+ <term><literal>sqld</literal></term>
<listitem>
<para>
It contains the number of fields in a result set.
@@ -3969,17 +3969,17 @@ typedef struct sqlda_struct sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>desc_next</></term>
+ <term><literal>desc_next</literal></term>
<listitem>
<para>
If the query returns more than one record, multiple linked
- SQLDA structures are returned, and <literal>desc_next</> holds
+ SQLDA structures are returned, and <literal>desc_next</literal> holds
a pointer to the next entry in the list.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>sqlvar</></term>
+ <term><literal>sqlvar</literal></term>
<listitem>
<para>
This is the array of the columns in the result set.
@@ -4015,7 +4015,7 @@ typedef struct sqlvar_struct sqlvar_t;
<variablelist>
<varlistentry>
- <term><literal>sqltype</></term>
+ <term><literal>sqltype</literal></term>
<listitem>
<para>
Contains the type identifier of the field. For values,
@@ -4025,7 +4025,7 @@ typedef struct sqlvar_struct sqlvar_t;
</varlistentry>
<varlistentry>
- <term><literal>sqllen</></term>
+ <term><literal>sqllen</literal></term>
<listitem>
<para>
Contains the binary length of the field. e.g. 4 bytes for <type>ECPGt_int</type>.
@@ -4034,7 +4034,7 @@ typedef struct sqlvar_struct sqlvar_t;
</varlistentry>
<varlistentry>
- <term><literal>sqldata</></term>
+ <term><literal>sqldata</literal></term>
<listitem>
<para>
Points to the data. The format of the data is described
@@ -4044,7 +4044,7 @@ typedef struct sqlvar_struct sqlvar_t;
</varlistentry>
<varlistentry>
- <term><literal>sqlind</></term>
+ <term><literal>sqlind</literal></term>
<listitem>
<para>
Points to the null indicator. 0 means not null, -1 means
@@ -4054,7 +4054,7 @@ typedef struct sqlvar_struct sqlvar_t;
</varlistentry>
<varlistentry>
- <term><literal>sqlname</></term>
+ <term><literal>sqlname</literal></term>
<listitem>
<para>
The name of the field.
@@ -4084,7 +4084,7 @@ struct sqlname
The meaning of the fields is:
<variablelist>
<varlistentry>
- <term><literal>length</></term>
+ <term><literal>length</literal></term>
<listitem>
<para>
Contains the length of the field name.
@@ -4092,7 +4092,7 @@ struct sqlname
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>data</></term>
+ <term><literal>data</literal></term>
<listitem>
<para>
Contains the actual field name.
@@ -4113,10 +4113,10 @@ struct sqlname
SQLDA are:
</para>
<step><simpara>Declare an <type>sqlda_t</type> structure to receive the result set.</simpara></step>
- <step><simpara>Execute <command>FETCH</>/<command>EXECUTE</>/<command>DESCRIBE</> commands to process a query specifying the declared SQLDA.</simpara></step>
- <step><simpara>Check the number of records in the result set by looking at <structfield>sqln</>, a member of the <type>sqlda_t</type> structure.</simpara></step>
- <step><simpara>Get the values of each column from <literal>sqlvar[0]</>, <literal>sqlvar[1]</>, etc., members of the <type>sqlda_t</type> structure.</simpara></step>
- <step><simpara>Go to next row (<type>sqlda_t</type> structure) by following the <structfield>desc_next</> pointer, a member of the <type>sqlda_t</type> structure.</simpara></step>
+ <step><simpara>Execute <command>FETCH</command>/<command>EXECUTE</command>/<command>DESCRIBE</command> commands to process a query specifying the declared SQLDA.</simpara></step>
+ <step><simpara>Check the number of records in the result set by looking at <structfield>sqln</structfield>, a member of the <type>sqlda_t</type> structure.</simpara></step>
+ <step><simpara>Get the values of each column from <literal>sqlvar[0]</literal>, <literal>sqlvar[1]</literal>, etc., members of the <type>sqlda_t</type> structure.</simpara></step>
+ <step><simpara>Go to next row (<type>sqlda_t</type> structure) by following the <structfield>desc_next</structfield> pointer, a member of the <type>sqlda_t</type> structure.</simpara></step>
<step><simpara>Repeat the above steps as needed.</simpara></step>
</procedure>
@@ -4133,7 +4133,7 @@ sqlda_t *sqlda1;
<para>
Next, specify the SQLDA in a command. This is
- a <command>FETCH</> command example.
+ a <command>FETCH</command> command example.
<programlisting>
EXEC SQL FETCH NEXT FROM cur1 INTO DESCRIPTOR sqlda1;
</programlisting>
@@ -4168,10 +4168,10 @@ for (i = 0; i &lt; cur_sqlda->sqld; i++)
</para>
<para>
- To get a column value, check the <structfield>sqltype</> value,
+ To get a column value, check the <structfield>sqltype</structfield> value,
a member of the <type>sqlvar_t</type> structure. Then, switch
to an appropriate way, depending on the column type, to copy
- data from the <structfield>sqlvar</> field to a host variable.
+ data from the <structfield>sqlvar</structfield> field to a host variable.
<programlisting>
char var_buf[1024];
@@ -4225,7 +4225,7 @@ EXEC SQL PREPARE stmt1 FROM :query;
<para>
Next, allocate memory for an SQLDA, and set the number of input
- parameters in <structfield>sqln</>, a member variable of
+ parameters in <structfield>sqln</structfield>, a member variable of
the <type>sqlda_t</type> structure. When two or more input
parameters are required for the prepared query, the application
has to allocate additional memory space which is calculated by
@@ -4386,8 +4386,8 @@ main(void)
<para>
Read each column in the first record. The number of columns is
- stored in <structfield>sqld</>, the actual data of the first
- column is stored in <literal>sqlvar[0]</>, both members of
+ stored in <structfield>sqld</structfield>, the actual data of the first
+ column is stored in <literal>sqlvar[0]</literal>, both members of
the <type>sqlda_t</type> structure.
<programlisting>
@@ -4404,9 +4404,9 @@ main(void)
</para>
<para>
- Now, the column data is stored in the variable <varname>v</>.
+ Now, the column data is stored in the variable <varname>v</varname>.
Copy every datum into host variables, looking
- at <literal>v.sqltype</> for the type of the column.
+ at <literal>v.sqltype</literal> for the type of the column.
<programlisting>
switch (v.sqltype) {
int intval;
@@ -4947,7 +4947,7 @@ struct
</para>
<para>
- Here is one example that combines the use of <literal>WHENEVER</>
+ Here is one example that combines the use of <literal>WHENEVER</literal>
and <varname>sqlca</varname>, printing out the contents
of <varname>sqlca</varname> when an error occurs. This is perhaps
useful for debugging or prototyping applications, before
@@ -5227,8 +5227,8 @@ while (1)
<listitem>
<para>
This means the host variable is of type <type>bool</type> and
- the datum in the database is neither <literal>'t'</> nor
- <literal>'f'</>. (SQLSTATE 42804)
+ the datum in the database is neither <literal>'t'</literal> nor
+ <literal>'f'</literal>. (SQLSTATE 42804)
</para>
</listitem>
</varlistentry>
@@ -5575,8 +5575,8 @@ EXEC SQL INCLUDE "<replaceable>filename</replaceable>";
Similar to the directive <literal>#define</literal> that is known from C,
embedded SQL has a similar concept:
<programlisting>
-EXEC SQL DEFINE <replaceable>name</>;
-EXEC SQL DEFINE <replaceable>name</> <replaceable>value</>;
+EXEC SQL DEFINE <replaceable>name</replaceable>;
+EXEC SQL DEFINE <replaceable>name</replaceable> <replaceable>value</replaceable>;
</programlisting>
So you can define a name:
<programlisting>
@@ -5587,7 +5587,7 @@ EXEC SQL DEFINE HAVE_FEATURE;
EXEC SQL DEFINE MYNUMBER 12;
EXEC SQL DEFINE MYSTRING 'abc';
</programlisting>
- Use <literal>undef</> to remove a previous definition:
+ Use <literal>undef</literal> to remove a previous definition:
<programlisting>
EXEC SQL UNDEF MYNUMBER;
</programlisting>
@@ -5597,15 +5597,15 @@ EXEC SQL UNDEF MYNUMBER;
Of course you can continue to use the C versions <literal>#define</literal>
and <literal>#undef</literal> in your embedded SQL program. The difference
is where your defined values get evaluated. If you use <literal>EXEC SQL
- DEFINE</> then the <command>ecpg</> preprocessor evaluates the defines and substitutes
+ DEFINE</literal> then the <command>ecpg</command> preprocessor evaluates the defines and substitutes
the values. For example if you write:
<programlisting>
EXEC SQL DEFINE MYNUMBER 12;
...
EXEC SQL UPDATE Tbl SET col = MYNUMBER;
</programlisting>
- then <command>ecpg</> will already do the substitution and your C compiler will never
- see any name or identifier <literal>MYNUMBER</>. Note that you cannot use
+ then <command>ecpg</command> will already do the substitution and your C compiler will never
+ see any name or identifier <literal>MYNUMBER</literal>. Note that you cannot use
<literal>#define</literal> for a constant that you are going to use in an
embedded SQL query because in this case the embedded SQL precompiler is not
able to see this declaration.
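   To make the difference concrete, here is a small sketch (reusing the
   <literal>Tbl</literal> example from above; <literal>MYLIMIT</literal> and
   <varname>count</varname> are invented here):
<programlisting>
/* substituted by the ecpg preprocessor, so it may appear in SQL */
EXEC SQL DEFINE MYNUMBER 12;
EXEC SQL UPDATE Tbl SET col = MYNUMBER;

/* handled only by the C preprocessor, so it may appear in C code ... */
#define MYLIMIT 100
if (count > MYLIMIT)
    count = MYLIMIT;

/* ... but not in an embedded SQL statement, because ecpg never sees it:
   EXEC SQL UPDATE Tbl SET col = MYLIMIT;      -- would fail */
</programlisting>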
@@ -5619,23 +5619,23 @@ EXEC SQL UPDATE Tbl SET col = MYNUMBER;
<variablelist>
<varlistentry>
- <term><literal>EXEC SQL ifdef <replaceable>name</>;</literal></term>
+ <term><literal>EXEC SQL ifdef <replaceable>name</replaceable>;</literal></term>
<listitem>
<para>
- Checks a <replaceable>name</> and processes subsequent lines if
- <replaceable>name</> has been created with <literal>EXEC SQL define
- <replaceable>name</></literal>.
+ Checks a <replaceable>name</replaceable> and processes subsequent lines if
+ <replaceable>name</replaceable> has been created with <literal>EXEC SQL define
+ <replaceable>name</replaceable></literal>.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>EXEC SQL ifndef <replaceable>name</>;</literal></term>
+ <term><literal>EXEC SQL ifndef <replaceable>name</replaceable>;</literal></term>
<listitem>
<para>
- Checks a <replaceable>name</> and processes subsequent lines if
- <replaceable>name</> has <emphasis>not</emphasis> been created with
- <literal>EXEC SQL define <replaceable>name</></literal>.
+ Checks a <replaceable>name</replaceable> and processes subsequent lines if
+ <replaceable>name</replaceable> has <emphasis>not</emphasis> been created with
+ <literal>EXEC SQL define <replaceable>name</replaceable></literal>.
</para>
</listitem>
</varlistentry>
@@ -5645,19 +5645,19 @@ EXEC SQL UPDATE Tbl SET col = MYNUMBER;
<listitem>
<para>
Starts processing an alternative section to a section introduced by
- either <literal>EXEC SQL ifdef <replaceable>name</></literal> or
- <literal>EXEC SQL ifndef <replaceable>name</></literal>.
+ either <literal>EXEC SQL ifdef <replaceable>name</replaceable></literal> or
+ <literal>EXEC SQL ifndef <replaceable>name</replaceable></literal>.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>EXEC SQL elif <replaceable>name</>;</literal></term>
+ <term><literal>EXEC SQL elif <replaceable>name</replaceable>;</literal></term>
<listitem>
<para>
- Checks <replaceable>name</> and starts an alternative section if
- <replaceable>name</> has been created with <literal>EXEC SQL define
- <replaceable>name</></literal>.
+ Checks <replaceable>name</replaceable> and starts an alternative section if
+ <replaceable>name</replaceable> has been created with <literal>EXEC SQL define
+ <replaceable>name</replaceable></literal>.
</para>
</listitem>
</varlistentry>
@@ -5707,7 +5707,7 @@ EXEC SQL endif;
<para>
The preprocessor program is called <filename>ecpg</filename> and is
- included in a normal <productname>PostgreSQL</> installation.
+ included in a normal <productname>PostgreSQL</productname> installation.
Embedded SQL programs are typically named with an extension
<filename>.pgc</filename>. If you have a program file called
<filename>prog1.pgc</filename>, you can preprocess it by simply
@@ -5727,8 +5727,8 @@ ecpg prog1.pgc
cc -c prog1.c
</programlisting>
The generated C source files include header files from the
- <productname>PostgreSQL</> installation, so if you installed
- <productname>PostgreSQL</> in a location that is not searched by
+ <productname>PostgreSQL</productname> installation, so if you installed
+ <productname>PostgreSQL</productname> in a location that is not searched by
default, you have to add an option such as
<literal>-I/usr/local/pgsql/include</literal> to the compilation
command line.
@@ -5803,10 +5803,10 @@ ECPG = ecpg
</para>
<note>
<para>
- On Windows, if the <application>ecpg</> libraries and an application are
+ On Windows, if the <application>ecpg</application> libraries and an application are
compiled with different flags, this function call will crash the
application because the internal representation of the
- <literal>FILE</> pointers differ. Specifically,
+ <literal>FILE</literal> pointers differ. Specifically,
multithreaded/single-threaded, release/debug, and static/dynamic
flags should be the same for the library and all applications using
that library.
@@ -5844,7 +5844,7 @@ ECPG = ecpg
<function>ECPGstatus(int <replaceable>lineno</replaceable>,
const char* <replaceable>connection_name</replaceable>)</function>
returns true if you are connected to a database and false if not.
- <replaceable>connection_name</replaceable> can be <literal>NULL</>
+ <replaceable>connection_name</replaceable> can be <literal>NULL</literal>
if a single connection is being used.
</para>
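   <para>
    A small sketch of a connection check in a <filename>.pgc</filename>
    file (the message text is invented):
<programlisting>
#include <stdio.h>

void
check_connection(void)
{
    /* NULL: only a single connection is being used */
    if (!ECPGstatus(__LINE__, NULL))
        fprintf(stderr, "not connected to a database\n");
}
</programlisting>
   </para>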
</listitem>
@@ -6217,10 +6217,10 @@ main(void)
<para>
To build the application, proceed as follows. Convert
- <filename>test_mod.pgc</> into <filename>test_mod.c</> by
+ <filename>test_mod.pgc</filename> into <filename>test_mod.c</filename> by
running <command>ecpg</command>, and generate
- <filename>test_mod.o</> by compiling
- <filename>test_mod.c</> with the C compiler:
+ <filename>test_mod.o</filename> by compiling
+ <filename>test_mod.c</filename> with the C compiler:
<programlisting>
ecpg -o test_mod.c test_mod.pgc
cc -c test_mod.c -o test_mod.o
@@ -6228,16 +6228,16 @@ cc -c test_mod.c -o test_mod.o
</para>
<para>
- Next, generate <filename>test_cpp.o</> by compiling
- <filename>test_cpp.cpp</> with the C++ compiler:
+ Next, generate <filename>test_cpp.o</filename> by compiling
+ <filename>test_cpp.cpp</filename> with the C++ compiler:
<programlisting>
c++ -c test_cpp.cpp -o test_cpp.o
</programlisting>
</para>
<para>
- Finally, link these object files, <filename>test_cpp.o</>
- and <filename>test_mod.o</>, into one executable, using the C++
+ Finally, link these object files, <filename>test_cpp.o</filename>
+ and <filename>test_mod.o</filename>, into one executable, using the C++
compiler driver:
<programlisting>
c++ test_cpp.o test_mod.o -lecpg -o test_cpp
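One detail that is easy to miss when wiring these pieces together: the C++ translation unit has to see the ECPG module's entry points with C linkage. A hedged sketch of a shared header that both sides could include is shown here; the file and function names are placeholders, not the ones used in this section's listing:
<programlisting>
/* test_mod.h -- hypothetical interface between test_cpp.cpp and test_mod.pgc */
#ifdef __cplusplus
extern "C" {
#endif

void db_connect(void);
void db_disconnect(void);

#ifdef __cplusplus
}
#endif
</programlisting>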
@@ -7101,7 +7101,7 @@ EXEC SQL GET DESCRIPTOR d VALUE 2 :d_data = DATA;
<para>
     Here is an example of a whole procedure for
- executing <literal>SELECT current_database();</> and showing the number of
+ executing <literal>SELECT current_database();</literal> and showing the number of
columns, the column data length, and the column data:
<programlisting>
int
@@ -7866,10 +7866,10 @@ main(void)
<sect1 id="ecpg-informix-compat">
<title><productname>Informix</productname> Compatibility Mode</title>
<para>
- <command>ecpg</command> can be run in a so-called <firstterm>Informix compatibility mode</>. If
+ <command>ecpg</command> can be run in a so-called <firstterm>Informix compatibility mode</firstterm>. If
this mode is active, it tries to behave as if it were the <productname>Informix</productname>
   precompiler for <productname>Informix</productname> E/SQL. Generally speaking, this will allow you to use
- the dollar sign instead of the <literal>EXEC SQL</> primitive to introduce
+ the dollar sign instead of the <literal>EXEC SQL</literal> primitive to introduce
embedded SQL commands:
<programlisting>
$int j = 3;
@@ -7891,11 +7891,11 @@ $COMMIT;
</note>
<para>
- There are two compatibility modes: <literal>INFORMIX</>, <literal>INFORMIX_SE</>
+ There are two compatibility modes: <literal>INFORMIX</literal>, <literal>INFORMIX_SE</literal>
</para>
<para>
When linking programs that use this compatibility mode, remember to link
- against <literal>libcompat</> that is shipped with ECPG.
+ against <literal>libcompat</literal> that is shipped with ECPG.
</para>
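As a hedged sketch of the build steps (the source file name is invented, and the compatibility library is typically installed as libecpg_compat, but check your installation), an Informix-mode program could be preprocessed and linked like this:
<programlisting>
ecpg -C INFORMIX test_informix.pgc
cc -c test_informix.c
cc -o test_informix test_informix.o -lecpg -lecpg_compat
</programlisting>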
<para>
Besides the previously explained syntactic sugar, the <productname>Informix</productname> compatibility
@@ -7913,7 +7913,7 @@ $COMMIT;
no drop-in replacement if you are using <productname>Informix</productname> at the moment. Moreover,
some of the data types are different. For example,
<productname>PostgreSQL's</productname> datetime and interval types do not
- know about ranges like for example <literal>YEAR TO MINUTE</> so you won't
+ know about ranges like for example <literal>YEAR TO MINUTE</literal> so you won't
find support in ECPG for that either.
</para>
@@ -7938,11 +7938,11 @@ EXEC SQL FETCH MYCUR INTO :userid;
<para>
<variablelist>
<varlistentry>
- <term><literal>CLOSE DATABASE</></term>
+ <term><literal>CLOSE DATABASE</literal></term>
<listitem>
<para>
This statement closes the current connection. In fact, this is a
- synonym for ECPG's <literal>DISCONNECT CURRENT</>:
+ synonym for ECPG's <literal>DISCONNECT CURRENT</literal>:
<programlisting>
$CLOSE DATABASE; /* close the current connection */
EXEC SQL CLOSE DATABASE;
@@ -7951,12 +7951,12 @@ EXEC SQL CLOSE DATABASE;
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>FREE cursor_name</></term>
+ <term><literal>FREE cursor_name</literal></term>
<listitem>
<para>
        Due to the differences in how ECPG works compared to Informix's ESQL/C (i.e. which steps
are purely grammar transformations and which steps rely on the underlying run-time library)
- there is no <literal>FREE cursor_name</> statement in ECPG. This is because in ECPG,
+ there is no <literal>FREE cursor_name</literal> statement in ECPG. This is because in ECPG,
<literal>DECLARE CURSOR</literal> doesn't translate to a function call into
        the run-time library that uses the cursor name. This means that there's no run-time
bookkeeping of SQL cursors in the ECPG run-time library, only in the PostgreSQL server.
@@ -7964,10 +7964,10 @@ EXEC SQL CLOSE DATABASE;
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>FREE statement_name</></term>
+ <term><literal>FREE statement_name</literal></term>
<listitem>
<para>
- <literal>FREE statement_name</> is a synonym for <literal>DEALLOCATE PREPARE statement_name</>.
+ <literal>FREE statement_name</literal> is a synonym for <literal>DEALLOCATE PREPARE statement_name</literal>.
</para>
</listitem>
</varlistentry>
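A hedged sketch of that equivalence (statement text, statement name, and table name are invented for the example):
<programlisting>
EXEC SQL BEGIN DECLARE SECTION;
char *stmt_text = "INSERT INTO mytable VALUES (1)";
EXEC SQL END DECLARE SECTION;

EXEC SQL PREPARE mystmt FROM :stmt_text;
EXEC SQL EXECUTE mystmt;

/* In Informix compatibility mode, this ... */
$FREE mystmt;
/* ... does the same as: EXEC SQL DEALLOCATE PREPARE mystmt; */
</programlisting>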
@@ -8024,16 +8024,16 @@ typedef struct sqlda_compat sqlda_t;
<variablelist>
<varlistentry>
- <term><literal>sqld</></term>
+ <term><literal>sqld</literal></term>
<listitem>
<para>
- The number of fields in the <literal>SQLDA</> descriptor.
+ The number of fields in the <literal>SQLDA</literal> descriptor.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>sqlvar</></term>
+ <term><literal>sqlvar</literal></term>
<listitem>
<para>
Pointer to the per-field properties.
@@ -8042,7 +8042,7 @@ typedef struct sqlda_compat sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>desc_name</></term>
+ <term><literal>desc_name</literal></term>
<listitem>
<para>
Unused, filled with zero-bytes.
@@ -8051,7 +8051,7 @@ typedef struct sqlda_compat sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>desc_occ</></term>
+ <term><literal>desc_occ</literal></term>
<listitem>
<para>
Size of the allocated structure.
@@ -8060,7 +8060,7 @@ typedef struct sqlda_compat sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>desc_next</></term>
+ <term><literal>desc_next</literal></term>
<listitem>
<para>
Pointer to the next SQLDA structure if the result set contains more than one record.
@@ -8069,7 +8069,7 @@ typedef struct sqlda_compat sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>reserved</></term>
+ <term><literal>reserved</literal></term>
<listitem>
<para>
Unused pointer, contains NULL. Kept for Informix-compatibility.
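Taken together, the fields above correspond roughly to the following C declaration (a hedged sketch; member types and the array size are approximations, and the authoritative definition is the sqlda_compat struct in ECPG's headers):
<programlisting>
struct sqlda_compat
{
    short                  sqld;          /* number of fields in the descriptor */
    struct sqlvar_compat  *sqlvar;        /* per-field properties (described next) */
    char                   desc_name[19]; /* unused, filled with zero bytes */
    short                  desc_occ;      /* size of the allocated structure */
    struct sqlda_compat   *desc_next;     /* next SQLDA if the result set has more records */
    void                  *reserved;      /* unused, NULL; kept for Informix compatibility */
};
</programlisting>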
@@ -8084,7 +8084,7 @@ typedef struct sqlda_compat sqlda_t;
<variablelist>
<varlistentry>
- <term><literal>sqltype</></term>
+ <term><literal>sqltype</literal></term>
<listitem>
<para>
Type of the field. Constants are in <literal>sqltypes.h</literal>
@@ -8093,7 +8093,7 @@ typedef struct sqlda_compat sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>sqllen</></term>
+ <term><literal>sqllen</literal></term>
<listitem>
<para>
Length of the field data.
@@ -8102,7 +8102,7 @@ typedef struct sqlda_compat sqlda_t;
</varlistentry>
<varlistentry>
- <term><literal>sqldata</></term>
+ <term><literal>sqldata</literal></term>
<listitem>
<para>
Pointer to the field data. The pointer is of <literal>char *</literal> type,
@@ -8123,7 +8123,7 @@ switch (sqldata->sqlvar[i].sqltype)
</varlistentry>
<varlistentry>
- <term><literal>sqlind</></term>
+ <term><literal>sqlind</literal></term>
<listitem>
<para>
Pointer to the NULL indicator. If returned by DESCRIBE or FETCH then it's always a valid pointer.
@@ -8139,7 +8139,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0)
</varlistentry>
<varlistentry>
- <term><literal>sqlname</></term>
+ <term><literal>sqlname</literal></term>
<listitem>
<para>
Name of the field. 0-terminated string.
@@ -8148,16 +8148,16 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0)
</varlistentry>
<varlistentry>
- <term><literal>sqlformat</></term>
+ <term><literal>sqlformat</literal></term>
<listitem>
<para>
- Reserved in Informix, value of <function>PQfformat()</> for the field.
+ Reserved in Informix, value of <function>PQfformat()</function> for the field.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>sqlitype</></term>
+ <term><literal>sqlitype</literal></term>
<listitem>
<para>
Type of the NULL indicator data. It's always SQLSMINT when returning data from the server.
@@ -8168,7 +8168,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0)
</varlistentry>
<varlistentry>
- <term><literal>sqlilen</></term>
+ <term><literal>sqlilen</literal></term>
<listitem>
<para>
Length of the NULL indicator data.
@@ -8177,23 +8177,23 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0)
</varlistentry>
<varlistentry>
- <term><literal>sqlxid</></term>
+ <term><literal>sqlxid</literal></term>
<listitem>
<para>
- Extended type of the field, result of <function>PQftype()</>.
+ Extended type of the field, result of <function>PQftype()</function>.
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><literal>sqltypename</></term>