author     Tom Lane    2023-05-19 21:24:48 +0000
committer  Tom Lane    2023-05-19 21:24:48 +0000
commit     0245f8db36f375326c2bae0c3420d3c77714e72d (patch)
tree       7ce91f23658a05ea24be4703fb06cdc6b56248f7
parent     df6b19fbbc20d830de91d9bea68715a39635b568 (diff)
Pre-beta mechanical code beautification.
Run pgindent, pgperltidy, and reformat-dat-files.

This set of diffs is a bit larger than typical. We've updated to
pg_bsd_indent 2.1.2, which properly indents variable declarations that
have multi-line initialization expressions (the continuation lines are
now indented one tab stop). We've also updated to perltidy version
20230309 and changed some of its settings, which reduces its desire to
add whitespace to lines to make assignments etc. line up. Going
forward, that should make for fewer random-seeming changes to existing
code.

Discussion: https://postgr.es/m/[email protected]
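To make the pg_bsd_indent change concrete, here is a minimal C sketch built
around the declaration from the contrib/ltree/ltree_gist.c hunk in this
commit (the "before" alignment is approximate and shown only for contrast;
it is not copied from the old tree):

    /*
     * Before: pg_bsd_indent aligned the continuation line of a multi-line
     * initialization expression under the expression itself.
     */
    int32       size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
                (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);

    /*
     * After: pg_bsd_indent 2.1.2 indents the continuation line exactly one
     * tab stop past the declaration.
     */
    int32       size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
        (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);

The perltidy change runs in the opposite direction: instead of adding
indentation, the new settings stop padding assignments so that the = signs
of neighboring lines line up, which is why many hunks below (for example in
contrib/amcheck/t/001_verify_heapam.pl) delete nothing but alignment
whitespace.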
-rw-r--r--  contrib/amcheck/t/001_verify_heapam.pl | 4
-rw-r--r--  contrib/amcheck/t/003_cic_2pc.pl | 21
-rw-r--r--  contrib/amcheck/verify_heapam.c | 34
-rw-r--r--  contrib/auto_explain/t/001_auto_explain.pl | 6
-rw-r--r--  contrib/basebackup_to_shell/t/001_basic.pl | 4
-rw-r--r--  contrib/basic_archive/basic_archive.c | 4
-rw-r--r--  contrib/dblink/dblink.c | 2
-rwxr-xr-x  contrib/intarray/bench/bench.pl | 6
-rwxr-xr-x  contrib/intarray/bench/create_test.pl | 2
-rw-r--r--  contrib/ltree/ltree_gist.c | 2
-rw-r--r--  contrib/ltree/ltree_io.c | 6
-rw-r--r--  contrib/ltree/ltxtquery_io.c | 6
-rw-r--r--  contrib/pg_prewarm/t/001_basic.pl | 2
-rw-r--r--  contrib/pg_walinspect/pg_walinspect.c | 4
-rw-r--r--  contrib/postgres_fdw/connection.c | 2
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.c | 53
-rw-r--r--  contrib/postgres_fdw/shippable.c | 4
-rwxr-xr-x  contrib/seg/seg-validate.pl | 10
-rw-r--r--  contrib/test_decoding/t/001_repl_stats.pl | 2
-rw-r--r--  contrib/test_decoding/test_decoding.c | 4
-rw-r--r--  doc/src/sgml/mk_feature_tables.pl | 2
-rw-r--r--  src/backend/access/brin/brin.c | 26
-rw-r--r--  src/backend/access/common/reloptions.c | 6
-rw-r--r--  src/backend/access/gist/gist.c | 2
-rw-r--r--  src/backend/access/gist/gistbuildbuffers.c | 2
-rw-r--r--  src/backend/access/gist/gistget.c | 4
-rw-r--r--  src/backend/access/gist/gistxlog.c | 2
-rw-r--r--  src/backend/access/hash/hashfunc.c | 14
-rw-r--r--  src/backend/access/heap/heapam.c | 2
-rw-r--r--  src/backend/access/heap/heapam_handler.c | 4
-rw-r--r--  src/backend/access/heap/hio.c | 2
-rw-r--r--  src/backend/access/heap/pruneheap.c | 2
-rw-r--r--  src/backend/access/heap/vacuumlazy.c | 17
-rw-r--r--  src/backend/access/heap/visibilitymap.c | 2
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/dbasedesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/gindesc.c | 4
-rw-r--r--  src/backend/access/spgist/spgscan.c | 10
-rw-r--r--  src/backend/access/table/tableam.c | 4
-rw-r--r--  src/backend/access/transam/multixact.c | 2
-rw-r--r--  src/backend/access/transam/parallel.c | 8
-rw-r--r--  src/backend/access/transam/xact.c | 13
-rw-r--r--  src/backend/access/transam/xlog.c | 12
-rw-r--r--  src/backend/access/transam/xloginsert.c | 4
-rw-r--r--  src/backend/access/transam/xlogprefetcher.c | 6
-rw-r--r--  src/backend/access/transam/xlogreader.c | 2
-rw-r--r--  src/backend/access/transam/xlogrecovery.c | 2
-rw-r--r--  src/backend/backup/basebackup.c | 8
-rw-r--r--  src/backend/backup/basebackup_copy.c | 3
-rw-r--r--  src/backend/catalog/Catalog.pm | 88
-rw-r--r--  src/backend/catalog/aclchk.c | 4
-rw-r--r--  src/backend/catalog/genbki.pl | 100
-rw-r--r--  src/backend/catalog/indexing.c | 4
-rw-r--r--  src/backend/catalog/namespace.c | 6
-rw-r--r--  src/backend/catalog/pg_operator.c | 2
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 1
-rw-r--r--  src/backend/commands/alter.c | 4
-rw-r--r--  src/backend/commands/collationcmds.c | 46
-rw-r--r--  src/backend/commands/dbcommands.c | 18
-rw-r--r--  src/backend/commands/dropcmds.c | 1
-rw-r--r--  src/backend/commands/explain.c | 6
-rw-r--r--  src/backend/commands/functioncmds.c | 4
-rw-r--r--  src/backend/commands/indexcmds.c | 19
-rw-r--r--  src/backend/commands/schemacmds.c | 2
-rw-r--r--  src/backend/commands/subscriptioncmds.c | 26
-rw-r--r--  src/backend/commands/tablecmds.c | 16
-rw-r--r--  src/backend/commands/tablespace.c | 4
-rw-r--r--  src/backend/commands/typecmds.c | 6
-rw-r--r--  src/backend/commands/user.c | 52
-rw-r--r--  src/backend/commands/view.c | 2
-rw-r--r--  src/backend/executor/execExpr.c | 10
-rw-r--r--  src/backend/executor/execExprInterp.c | 14
-rw-r--r--  src/backend/executor/execIndexing.c | 4
-rw-r--r--  src/backend/executor/execSRF.c | 6
-rw-r--r--  src/backend/executor/nodeAgg.c | 8
-rw-r--r--  src/backend/executor/nodeHash.c | 6
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 28
-rw-r--r--  src/backend/executor/nodeIncrementalSort.c | 4
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 4
-rw-r--r--  src/backend/executor/nodeTableFuncscan.c | 2
-rw-r--r--  src/backend/executor/nodeWindowAgg.c | 10
-rw-r--r--  src/backend/executor/spi.c | 4
-rw-r--r--  src/backend/jit/llvm/llvmjit.c | 10
-rw-r--r--  src/backend/jit/llvm/llvmjit_deform.c | 2
-rw-r--r--  src/backend/jit/llvm/llvmjit_expr.c | 5
-rw-r--r--  src/backend/libpq/be-secure-gssapi.c | 4
-rw-r--r--  src/backend/libpq/be-secure-openssl.c | 8
-rw-r--r--  src/backend/libpq/hba.c | 10
-rw-r--r--  src/backend/nodes/gen_node_support.pl | 82
-rw-r--r--  src/backend/optimizer/path/costsize.c | 2
-rw-r--r--  src/backend/optimizer/util/appendinfo.c | 2
-rw-r--r--  src/backend/optimizer/util/relnode.c | 2
-rw-r--r--  src/backend/parser/check_keywords.pl | 10
-rw-r--r--  src/backend/parser/parse_expr.c | 2
-rw-r--r--  src/backend/parser/parse_merge.c | 4
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 4
-rw-r--r--  src/backend/partitioning/partbounds.c | 8
-rw-r--r--  src/backend/postmaster/fork_process.c | 4
-rw-r--r--  src/backend/regex/regc_lex.c | 1
-rw-r--r--  src/backend/replication/libpqwalreceiver/libpqwalreceiver.c | 2
-rw-r--r--  src/backend/replication/logical/decode.c | 10
-rw-r--r--  src/backend/replication/logical/logical.c | 4
-rw-r--r--  src/backend/replication/logical/origin.c | 2
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c | 28
-rw-r--r--  src/backend/replication/logical/snapbuild.c | 6
-rw-r--r--  src/backend/replication/logical/tablesync.c | 2
-rw-r--r--  src/backend/replication/logical/worker.c | 37
-rw-r--r--  src/backend/replication/pgoutput/pgoutput.c | 4
-rw-r--r--  src/backend/replication/syncrep.c | 4
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 2
-rw-r--r--  src/backend/rewrite/rowsecurity.c | 4
-rw-r--r--  src/backend/snowball/snowball_create.pl | 69
-rw-r--r--  src/backend/statistics/extended_stats.c | 4
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 2
-rw-r--r--  src/backend/storage/file/buffile.c | 3
-rw-r--r--  src/backend/storage/ipc/dsm_impl.c | 15
-rw-r--r--  src/backend/storage/lmgr/generate-lwlocknames.pl | 12
-rw-r--r--  src/backend/storage/lmgr/lock.c | 1
-rw-r--r--  src/backend/storage/lmgr/lwlock.c | 6
-rw-r--r--  src/backend/storage/lmgr/predicate.c | 54
-rw-r--r--  src/backend/storage/lmgr/proc.c | 8
-rw-r--r--  src/backend/storage/smgr/md.c | 8
-rw-r--r--  src/backend/tsearch/spell.c | 2
-rw-r--r--  src/backend/utils/Gen_dummy_probes.pl | 2
-rw-r--r--  src/backend/utils/Gen_fmgrtab.pl | 31
-rw-r--r--  src/backend/utils/activity/pgstat.c | 2
-rw-r--r--  src/backend/utils/activity/pgstat_shmem.c | 2
-rw-r--r--  src/backend/utils/activity/pgstat_xact.c | 8
-rw-r--r--  src/backend/utils/adt/datetime.c | 12
-rw-r--r--  src/backend/utils/adt/float.c | 3
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c | 6
-rw-r--r--  src/backend/utils/adt/jsonpath.c | 54
-rw-r--r--  src/backend/utils/adt/jsonpath_exec.c | 10
-rw-r--r--  src/backend/utils/adt/jsonpath_internal.h | 6
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 136
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 2
-rw-r--r--  src/backend/utils/adt/tsquery_op.c | 6
-rw-r--r--  src/backend/utils/adt/tsvector_op.c | 2
-rw-r--r--  src/backend/utils/adt/varchar.c | 14
-rw-r--r--  src/backend/utils/adt/varlena.c | 5
-rw-r--r--  src/backend/utils/adt/xid8funcs.c | 2
-rw-r--r--  src/backend/utils/adt/xml.c | 2
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 12
-rw-r--r--  src/backend/utils/cache/relcache.c | 22
-rw-r--r--  src/backend/utils/cache/relmapper.c | 10
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 2
-rw-r--r--  src/backend/utils/generate-errcodes.pl | 5
-rw-r--r--  src/backend/utils/init/postinit.c | 10
-rw-r--r--  src/backend/utils/init/usercontext.c | 8
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_BIG5.pl | 14
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl | 10
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl | 24
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl | 516
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl | 30
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl | 14
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_GB18030.pl | 10
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl | 30
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl | 24
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_SJIS.pl | 84
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_UHC.pl | 20
-rwxr-xr-x  src/backend/utils/mb/Unicode/UCS_to_most.pl | 44
-rw-r--r--  src/backend/utils/mb/Unicode/convutils.pm | 64
-rw-r--r--  src/backend/utils/misc/guc.c | 4
-rw-r--r--  src/backend/utils/misc/guc_tables.c | 4
-rw-r--r--  src/backend/utils/mmgr/dsa.c | 4
-rw-r--r--  src/backend/utils/mmgr/freepage.c | 2
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 6
-rw-r--r--  src/backend/utils/resowner/resowner.c | 2
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 4
-rw-r--r--  src/backend/utils/time/snapmgr.c | 6
-rw-r--r--  src/bin/initdb/initdb.c | 51
-rw-r--r--  src/bin/initdb/t/001_initdb.pl | 20
-rw-r--r--  src/bin/pg_amcheck/t/002_nonesuch.pl | 54
-rw-r--r--  src/bin/pg_amcheck/t/003_check.pl | 2
-rw-r--r--  src/bin/pg_amcheck/t/004_verify_heapam.pl | 126
-rw-r--r--  src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl | 12
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c | 16
-rw-r--r--  src/bin/pg_basebackup/pg_receivewal.c | 2
-rw-r--r--  src/bin/pg_basebackup/t/010_pg_basebackup.pl | 102
-rw-r--r--  src/bin/pg_basebackup/t/020_pg_receivewal.pl | 34
-rw-r--r--  src/bin/pg_basebackup/t/030_pg_recvlogical.pl | 26
-rw-r--r--  src/bin/pg_basebackup/walmethods.c | 14
-rw-r--r--  src/bin/pg_basebackup/walmethods.h | 12
-rw-r--r--  src/bin/pg_checksums/t/002_actions.pl | 38
-rw-r--r--  src/bin/pg_controldata/t/001_pg_controldata.pl | 2
-rw-r--r--  src/bin/pg_ctl/t/001_start_stop.pl | 2
-rw-r--r--  src/bin/pg_ctl/t/004_logrotate.pl | 20
-rw-r--r--  src/bin/pg_dump/compress_io.c | 4
-rw-r--r--  src/bin/pg_dump/compress_lz4.c | 12
-rw-r--r--  src/bin/pg_dump/compress_zstd.c | 4
-rw-r--r--  src/bin/pg_dump/compress_zstd.h | 6
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 19
-rw-r--r--  src/bin/pg_dump/pg_backup_tar.c | 8
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 35
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 14
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl | 1267
-rw-r--r--  src/bin/pg_dump/t/004_pg_dump_parallel.pl | 12
-rw-r--r--  src/bin/pg_dump/t/010_dump_connstr.pl | 36
-rw-r--r--  src/bin/pg_resetwal/t/002_corrupted.pl | 2
-rw-r--r--  src/bin/pg_rewind/t/001_basic.pl | 14
-rw-r--r--  src/bin/pg_rewind/t/006_options.pl | 6
-rw-r--r--  src/bin/pg_rewind/t/007_standby_source.pl | 4
-rw-r--r--  src/bin/pg_rewind/t/008_min_recovery_point.pl | 4
-rw-r--r--  src/bin/pg_rewind/t/009_growing_files.pl | 2
-rw-r--r--  src/bin/pg_rewind/t/RewindTest.pm | 26
-rw-r--r--  src/bin/pg_test_fsync/pg_test_fsync.c | 2
-rw-r--r--  src/bin/pg_upgrade/check.c | 4
-rw-r--r--  src/bin/pg_upgrade/info.c | 20
-rw-r--r--  src/bin/pg_upgrade/pg_upgrade.c | 8
-rw-r--r--  src/bin/pg_upgrade/t/002_pg_upgrade.pl | 76
-rw-r--r--  src/bin/pg_verifybackup/t/002_algorithm.pl | 2
-rw-r--r--  src/bin/pg_verifybackup/t/003_corruption.pl | 54
-rw-r--r--  src/bin/pg_verifybackup/t/004_options.pl | 2
-rw-r--r--  src/bin/pg_verifybackup/t/006_encoding.pl | 4
-rw-r--r--  src/bin/pg_verifybackup/t/007_wal.pl | 4
-rw-r--r--  src/bin/pg_verifybackup/t/008_untar.pl | 44
-rw-r--r--  src/bin/pg_verifybackup/t/009_extract.pl | 22
-rw-r--r--  src/bin/pg_verifybackup/t/010_client_untar.pl | 46
-rw-r--r--  src/bin/pg_waldump/t/002_save_fullpage.pl | 8
-rw-r--r--  src/bin/pgbench/pgbench.c | 4
-rw-r--r--  src/bin/pgbench/t/001_pgbench_with_server.pl | 76
-rw-r--r--  src/bin/pgbench/t/002_pgbench_no_server.pl | 8
-rw-r--r--  src/bin/psql/command.c | 11
-rw-r--r--  src/bin/psql/common.c | 2
-rw-r--r--  src/bin/psql/create_help.pl | 24
-rw-r--r--  src/bin/psql/crosstabview.c | 2
-rw-r--r--  src/bin/psql/describe.c | 4
-rw-r--r--  src/bin/psql/settings.h | 3
-rw-r--r--  src/bin/psql/t/001_basic.pl | 58
-rw-r--r--  src/bin/psql/t/010_tab_completion.pl | 9
-rw-r--r--  src/bin/psql/t/020_cancel.pl | 2
-rw-r--r--  src/bin/scripts/t/020_createdb.pl | 45
-rw-r--r--  src/bin/scripts/t/040_createuser.pl | 6
-rw-r--r--  src/bin/scripts/t/090_reindexdb.pl | 21
-rw-r--r--  src/bin/scripts/t/100_vacuumdb.pl | 2
-rw-r--r--  src/bin/scripts/t/200_connstr.pl | 2
-rw-r--r--  src/bin/scripts/vacuumdb.c | 14
-rw-r--r--  src/common/unicode/generate-norm_test_table.pl | 12
-rw-r--r--  src/common/unicode/generate-unicode_norm_table.pl | 35
-rw-r--r--  src/common/unicode/generate-unicode_normprops_table.pl | 6
-rw-r--r--  src/fe_utils/print.c | 7
-rw-r--r--  src/include/access/amapi.h | 2
-rw-r--r--  src/include/access/brin_tuple.h | 2
-rw-r--r--  src/include/access/gist_private.h | 1
-rw-r--r--  src/include/access/tableam.h | 2
-rw-r--r--  src/include/access/xlogreader.h | 1
-rw-r--r--  src/include/catalog/pg_aggregate.dat | 24
-rw-r--r--  src/include/catalog/pg_auth_members.h | 2
-rw-r--r--  src/include/catalog/pg_database.dat | 3
-rw-r--r--  src/include/catalog/pg_proc.dat | 34
-rw-r--r--  src/include/catalog/pg_subscription.h | 6
-rwxr-xr-x  src/include/catalog/reformat_dat_file.pl | 10
-rwxr-xr-x  src/include/catalog/renumber_oids.pl | 18
-rw-r--r--  src/include/executor/hashjoin.h | 2
-rw-r--r--  src/include/executor/tuptable.h | 2
-rw-r--r--  src/include/fe_utils/print.h | 17
-rw-r--r--  src/include/funcapi.h | 1
-rw-r--r--  src/include/nodes/primnodes.h | 1
-rw-r--r--  src/include/port/win32ntdll.h | 6
-rw-r--r--  src/include/replication/reorderbuffer.h | 2
-rw-r--r--  src/include/storage/bufmgr.h | 2
-rw-r--r--  src/include/storage/lock.h | 2
-rw-r--r--  src/include/storage/lwlock.h | 9
-rw-r--r--  src/include/storage/predicate_internals.h | 2
-rw-r--r--  src/include/storage/proc.h | 6
-rw-r--r--  src/include/utils/backend_status.h | 4
-rw-r--r--  src/include/utils/pg_locale.h | 10
-rw-r--r--  src/include/utils/rel.h | 2
-rw-r--r--  src/include/utils/varlena.h | 2
-rw-r--r--  src/interfaces/ecpg/ecpglib/data.c | 4
-rw-r--r--  src/interfaces/ecpg/ecpglib/descriptor.c | 4
-rw-r--r--  src/interfaces/ecpg/ecpglib/execute.c | 4
-rw-r--r--  src/interfaces/ecpg/include/pgtypes_interval.h | 6
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt.h | 2
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/interval.c | 12
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/timestamp.c | 4
-rw-r--r--  src/interfaces/ecpg/preproc/check_rules.pl | 29
-rw-r--r--  src/interfaces/ecpg/preproc/parse.pl | 124
-rw-r--r--  src/interfaces/ecpg/preproc/type.c | 2
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 6
-rw-r--r--  src/interfaces/libpq/fe-exec.c | 16
-rw-r--r--  src/interfaces/libpq/fe-lobj.c | 42
-rw-r--r--  src/interfaces/libpq/fe-misc.c | 10
-rw-r--r--  src/interfaces/libpq/fe-print.c | 2
-rw-r--r--  src/interfaces/libpq/fe-protocol3.c | 2
-rw-r--r--  src/interfaces/libpq/fe-secure-common.c | 6
-rw-r--r--  src/interfaces/libpq/fe-secure-gssapi.c | 12
-rw-r--r--  src/interfaces/libpq/fe-secure-openssl.c | 66
-rw-r--r--  src/interfaces/libpq/fe-secure.c | 8
-rw-r--r--  src/interfaces/libpq/libpq-int.h | 4
-rw-r--r--  src/interfaces/libpq/t/001_uri.pl | 27
-rw-r--r--  src/interfaces/libpq/t/003_load_balance_host_list.pl | 30
-rw-r--r--  src/interfaces/libpq/t/004_load_balance_dns.pl | 53
-rw-r--r--  src/pl/plperl/plc_perlboot.pl | 2
-rw-r--r--  src/pl/plperl/text2macro.pl | 8
-rw-r--r--  src/port/dirmod.c | 8
-rw-r--r--  src/test/authentication/t/001_password.pl | 24
-rw-r--r--  src/test/authentication/t/002_saslprep.pl | 20
-rw-r--r--  src/test/authentication/t/003_peer.pl | 8
-rw-r--r--  src/test/authentication/t/004_file_inclusion.pl | 16
-rw-r--r--  src/test/icu/t/010_database.pl | 3
-rw-r--r--  src/test/kerberos/t/001_auth.pl | 236
-rw-r--r--  src/test/ldap/LdapServer.pm | 73
-rw-r--r--  src/test/ldap/t/001_auth.pl | 4
-rw-r--r--  src/test/modules/commit_ts/t/002_standby.pl | 2
-rw-r--r--  src/test/modules/commit_ts/t/003_standby_2.pl | 2
-rw-r--r--  src/test/modules/commit_ts/t/004_restart.pl | 4
-rw-r--r--  src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl | 14
-rw-r--r--  src/test/modules/libpq_pipeline/libpq_pipeline.c | 2
-rw-r--r--  src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl | 2
-rw-r--r--  src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl | 2
-rw-r--r--  src/test/modules/test_custom_rmgrs/t/001_basic.pl | 20
-rw-r--r--  src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c | 2
-rw-r--r--  src/test/modules/test_ddl_deparse/test_ddl_deparse.c | 1
-rw-r--r--  src/test/modules/test_misc/t/001_constraint_validation.pl | 4
-rw-r--r--  src/test/modules/test_misc/t/002_tablespace.pl | 2
-rw-r--r--  src/test/modules/test_misc/t/003_check_guc.pl | 8
-rw-r--r--  src/test/modules/test_pg_dump/t/001_base.pl | 114
-rw-r--r--  src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm | 88
-rw-r--r--  src/test/perl/PostgreSQL/Test/BackgroundPsql.pm | 29
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm | 180
-rw-r--r--  src/test/perl/PostgreSQL/Test/RecursiveCopy.pm | 2
-rw-r--r--  src/test/perl/PostgreSQL/Test/SimpleTee.pm | 6
-rw-r--r--  src/test/perl/PostgreSQL/Test/Utils.pm | 22
-rw-r--r--  src/test/perl/PostgreSQL/Version.pm | 4
-rw-r--r--  src/test/recovery/t/001_stream_rep.pl | 42
-rw-r--r--  src/test/recovery/t/002_archiving.pl | 6
-rw-r--r--  src/test/recovery/t/003_recovery_targets.pl | 16
-rw-r--r--  src/test/recovery/t/005_replay_delay.pl | 2
-rw-r--r--  src/test/recovery/t/006_logical_decoding.pl | 4
-rw-r--r--  src/test/recovery/t/009_twophase.pl | 4
-rw-r--r--  src/test/recovery/t/010_logical_decoding_timelines.pl | 4
-rw-r--r--  src/test/recovery/t/012_subtransactions.pl | 2
-rw-r--r--  src/test/recovery/t/013_crash_restart.pl | 4
-rw-r--r--  src/test/recovery/t/014_unlogged_reinit.pl | 12
-rw-r--r--  src/test/recovery/t/016_min_consistency.pl | 2
-rw-r--r--  src/test/recovery/t/017_shm.pl | 4
-rw-r--r--  src/test/recovery/t/018_wal_optimize.pl | 4
-rw-r--r--  src/test/recovery/t/019_replslot_limit.pl | 4
-rw-r--r--  src/test/recovery/t/020_archive_status.pl | 10
-rw-r--r--  src/test/recovery/t/022_crash_temp_files.pl | 4
-rw-r--r--  src/test/recovery/t/023_pitr_prepared_xact.pl | 2
-rw-r--r--  src/test/recovery/t/024_archive_recovery.pl | 6
-rw-r--r--  src/test/recovery/t/025_stuck_on_old_timeline.pl | 4
-rw-r--r--  src/test/recovery/t/027_stream_regress.pl | 4
-rw-r--r--  src/test/recovery/t/028_pitr_timelines.pl | 6
-rw-r--r--  src/test/recovery/t/029_stats_restart.pl | 12
-rw-r--r--  src/test/recovery/t/031_recovery_conflict.pl | 27
-rw-r--r--  src/test/recovery/t/032_relfilenode_reuse.pl | 4
-rw-r--r--  src/test/recovery/t/033_replay_tsp_drops.pl | 6
-rw-r--r--  src/test/recovery/t/034_create_database.pl | 2
-rw-r--r--  src/test/recovery/t/035_standby_logical_decoding.pl | 306
-rw-r--r--  src/test/regress/pg_regress.c | 4
-rw-r--r--  src/test/ssl/t/001_ssltests.pl | 104
-rw-r--r--  src/test/ssl/t/002_scram.pl | 7
-rw-r--r--  src/test/ssl/t/003_sslinfo.pl | 7
-rw-r--r--  src/test/ssl/t/SSL/Backend/OpenSSL.pm | 16
-rw-r--r--  src/test/ssl/t/SSL/Server.pm | 18
-rw-r--r--  src/test/subscription/t/001_rep_changes.pl | 3
-rw-r--r--  src/test/subscription/t/005_encoding.pl | 6
-rw-r--r--  src/test/subscription/t/012_collation.pl | 4
-rw-r--r--  src/test/subscription/t/014_binary.pl | 2
-rw-r--r--  src/test/subscription/t/015_stream.pl | 23
-rw-r--r--  src/test/subscription/t/018_stream_subxact_abort.pl | 3
-rw-r--r--  src/test/subscription/t/023_twophase_stream.pl | 3
-rw-r--r--  src/test/subscription/t/025_rep_changes_for_schema.pl | 3
-rw-r--r--  src/test/subscription/t/026_stats.pl | 2
-rw-r--r--  src/test/subscription/t/027_nosuperuser.pl | 18
-rw-r--r--  src/test/subscription/t/028_row_filter.pl | 8
-rw-r--r--  src/test/subscription/t/030_origin.pl | 6
-rw-r--r--  src/test/subscription/t/031_column_list.pl | 2
-rw-r--r--  src/test/subscription/t/032_subscribe_use_index.pl | 150
-rw-r--r--  src/test/subscription/t/033_run_as_table_owner.pl | 25
-rw-r--r--  src/test/subscription/t/100_bugs.pl | 6
-rw-r--r--  src/timezone/zic.c | 10
-rw-r--r--  src/tools/PerfectHash.pm | 16
-rwxr-xr-x  src/tools/check_bison_recursion.pl | 6
-rw-r--r--  src/tools/ci/windows_build_config.pl | 6
-rwxr-xr-x  src/tools/copyright.pl | 4
-rw-r--r--  src/tools/gen_export.pl | 11
-rw-r--r--  src/tools/gen_keywordlist.pl | 30
-rwxr-xr-x  src/tools/git_changelog | 56
-rwxr-xr-x  src/tools/mark_pgdllimport.pl | 2
-rw-r--r--  src/tools/msvc/Install.pm | 69
-rw-r--r--  src/tools/msvc/MSBuildProject.pm | 45
-rw-r--r--  src/tools/msvc/Mkvcbuild.pm | 134
-rw-r--r--  src/tools/msvc/Project.pm | 44
-rw-r--r--  src/tools/msvc/Solution.pm | 627
-rw-r--r--  src/tools/msvc/VSObjectFactory.pm | 2
-rw-r--r--  src/tools/msvc/build.pl | 6
-rw-r--r--  src/tools/msvc/config_default.pl | 32
-rw-r--r--  src/tools/msvc/dummylib/Win32/Registry.pm | 2
-rw-r--r--  src/tools/msvc/dummylib/Win32API/File.pm | 4
-rw-r--r--  src/tools/msvc/gendef.pl | 2
-rw-r--r--  src/tools/msvc/pgbison.pl | 2
-rw-r--r--  src/tools/msvc/vcregress.pl | 76
-rw-r--r--  src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl | 2
-rwxr-xr-x  src/tools/pginclude/pgcheckdefines | 4
-rwxr-xr-x  src/tools/pgindent/pgindent | 52
-rw-r--r--  src/tools/pgindent/typedefs.list | 178
-rwxr-xr-x  src/tools/win32tzlist.pl | 10
-rw-r--r--  src/tutorial/funcs.c | 4
402 files changed, 4754 insertions(+), 4425 deletions(-)
diff --git a/contrib/amcheck/t/001_verify_heapam.pl b/contrib/amcheck/t/001_verify_heapam.pl
index 1aedebe430..46d5b53181 100644
--- a/contrib/amcheck/t/001_verify_heapam.pl
+++ b/contrib/amcheck/t/001_verify_heapam.pl
@@ -81,7 +81,7 @@ sub relation_filepath
my ($relname) = @_;
my $pgdata = $node->data_dir;
- my $rel = $node->safe_psql('postgres',
+ my $rel = $node->safe_psql('postgres',
qq(SELECT pg_relation_filepath('$relname')));
die "path not found for relation $relname" unless defined $rel;
return "$pgdata/$rel";
@@ -267,7 +267,7 @@ sub check_all_options_uncorrupted
for my $endblock (qw(NULL 0))
{
my $opts =
- "on_error_stop := $stop, "
+ "on_error_stop := $stop, "
. "check_toast := $check_toast, "
. "skip := $skip, "
. "startblock := $startblock, "
diff --git a/contrib/amcheck/t/003_cic_2pc.pl b/contrib/amcheck/t/003_cic_2pc.pl
index 5323ed11ae..3279a2505a 100644
--- a/contrib/amcheck/t/003_cic_2pc.pl
+++ b/contrib/amcheck/t/003_cic_2pc.pl
@@ -38,30 +38,35 @@ $node->safe_psql('postgres', q(CREATE TABLE tbl(i int)));
my $main_h = $node->background_psql('postgres');
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
BEGIN;
INSERT INTO tbl VALUES(0);
));
my $cic_h = $node->background_psql('postgres');
-$cic_h->query_until(qr/start/, q(
+$cic_h->query_until(
+ qr/start/, q(
\echo start
CREATE INDEX CONCURRENTLY idx ON tbl(i);
));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'a';
));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
BEGIN;
INSERT INTO tbl VALUES(0);
));
$node->safe_psql('postgres', q(COMMIT PREPARED 'a';));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'b';
BEGIN;
INSERT INTO tbl VALUES(0);
@@ -69,7 +74,8 @@ INSERT INTO tbl VALUES(0);
$node->safe_psql('postgres', q(COMMIT PREPARED 'b';));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'c';
COMMIT PREPARED 'c';
));
@@ -97,7 +103,8 @@ PREPARE TRANSACTION 'persists_forever';
$node->restart;
my $reindex_h = $node->background_psql('postgres');
-$reindex_h->query_until(qr/start/, q(
+$reindex_h->query_until(
+ qr/start/, q(
\echo start
DROP INDEX CONCURRENTLY idx;
CREATE INDEX CONCURRENTLY idx ON tbl(i);
diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c
index 34d73ad442..97f3253522 100644
--- a/contrib/amcheck/verify_heapam.c
+++ b/contrib/amcheck/verify_heapam.c
@@ -407,7 +407,7 @@ verify_heapam(PG_FUNCTION_ARGS)
OffsetNumber successor[MaxOffsetNumber];
bool lp_valid[MaxOffsetNumber];
bool xmin_commit_status_ok[MaxOffsetNumber];
- XidCommitStatus xmin_commit_status[MaxOffsetNumber];
+ XidCommitStatus xmin_commit_status[MaxOffsetNumber];
CHECK_FOR_INTERRUPTS();
@@ -444,7 +444,7 @@ verify_heapam(PG_FUNCTION_ARGS)
for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff;
ctx.offnum = OffsetNumberNext(ctx.offnum))
{
- BlockNumber nextblkno;
+ BlockNumber nextblkno;
OffsetNumber nextoffnum;
successor[ctx.offnum] = InvalidOffsetNumber;
@@ -484,9 +484,9 @@ verify_heapam(PG_FUNCTION_ARGS)
/*
* Since we've checked that this redirect points to a line
- * pointer between FirstOffsetNumber and maxoff, it should
- * now be safe to fetch the referenced line pointer. We expect
- * it to be LP_NORMAL; if not, that's corruption.
+ * pointer between FirstOffsetNumber and maxoff, it should now
+ * be safe to fetch the referenced line pointer. We expect it
+ * to be LP_NORMAL; if not, that's corruption.
*/
rditem = PageGetItemId(ctx.page, rdoffnum);
if (!ItemIdIsUsed(rditem))
@@ -610,8 +610,8 @@ verify_heapam(PG_FUNCTION_ARGS)
{
/*
* We should not have set successor[ctx.offnum] to a value
- * other than InvalidOffsetNumber unless that line pointer
- * is LP_NORMAL.
+ * other than InvalidOffsetNumber unless that line pointer is
+ * LP_NORMAL.
*/
Assert(ItemIdIsNormal(next_lp));
@@ -642,8 +642,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
- * If the next line pointer is a redirect, or if it's a tuple
- * but the XMAX of this tuple doesn't match the XMIN of the next
+ * If the next line pointer is a redirect, or if it's a tuple but
+ * the XMAX of this tuple doesn't match the XMIN of the next
* tuple, then the two aren't part of the same update chain and
* there is nothing more to do.
*/
@@ -667,8 +667,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
- * This tuple and the tuple to which it points seem to be part
- * of an update chain.
+ * This tuple and the tuple to which it points seem to be part of
+ * an update chain.
*/
predecessor[nextoffnum] = ctx.offnum;
@@ -721,8 +721,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
- * If the current tuple's xmin is aborted but the successor tuple's
- * xmin is in-progress or committed, that's corruption.
+ * If the current tuple's xmin is aborted but the successor
+ * tuple's xmin is in-progress or committed, that's corruption.
*/
if (xmin_commit_status_ok[ctx.offnum] &&
xmin_commit_status[ctx.offnum] == XID_ABORTED &&
@@ -1025,7 +1025,7 @@ check_tuple_visibility(HeapCheckContext *ctx, bool *xmin_commit_status_ok,
HeapTupleHeader tuphdr = ctx->tuphdr;
ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */
- *xmin_commit_status_ok = false; /* have not yet proven otherwise */
+ *xmin_commit_status_ok = false; /* have not yet proven otherwise */
/* If xmin is normal, it should be within valid range */
xmin = HeapTupleHeaderGetXmin(tuphdr);
@@ -1837,7 +1837,7 @@ check_tuple(HeapCheckContext *ctx, bool *xmin_commit_status_ok,
* therefore cannot check it.
*/
if (!check_tuple_visibility(ctx, xmin_commit_status_ok,
- xmin_commit_status))
+ xmin_commit_status))
return;
/*
@@ -1897,8 +1897,8 @@ FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx)
diff = (int32) (ctx->next_xid - xid);
/*
- * In cases of corruption we might see a 32bit xid that is before epoch
- * 0. We can't represent that as a 64bit xid, due to 64bit xids being
+ * In cases of corruption we might see a 32bit xid that is before epoch 0.
+ * We can't represent that as a 64bit xid, due to 64bit xids being
* unsigned integers, without the modulo arithmetic of 32bit xid. There's
* no really nice way to deal with that, but it works ok enough to use
* FirstNormalFullTransactionId in that case, as a freshly initdb'd
diff --git a/contrib/auto_explain/t/001_auto_explain.pl b/contrib/auto_explain/t/001_auto_explain.pl
index 7873feb044..abb422f8de 100644
--- a/contrib/auto_explain/t/001_auto_explain.pl
+++ b/contrib/auto_explain/t/001_auto_explain.pl
@@ -19,7 +19,7 @@ sub query_log
local $ENV{PGOPTIONS} = join " ",
map { "-c $_=$params->{$_}" } keys %$params;
- my $log = $node->logfile();
+ my $log = $node->logfile();
my $offset = -s $log;
$node->safe_psql("postgres", $sql);
@@ -113,7 +113,7 @@ $log_contents = query_log(
"SELECT * FROM pg_class;",
{
"auto_explain.log_verbose" => "on",
- "compute_query_id" => "on"
+ "compute_query_id" => "on"
});
like(
@@ -127,7 +127,7 @@ $log_contents = query_log(
"SELECT * FROM pg_class;",
{
"auto_explain.log_verbose" => "on",
- "compute_query_id" => "regress"
+ "compute_query_id" => "regress"
});
unlike(
diff --git a/contrib/basebackup_to_shell/t/001_basic.pl b/contrib/basebackup_to_shell/t/001_basic.pl
index 84ad93f614..e2cdd2ecb0 100644
--- a/contrib/basebackup_to_shell/t/001_basic.pl
+++ b/contrib/basebackup_to_shell/t/001_basic.pl
@@ -25,7 +25,7 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
# This is only needed on Windows machines that don't use UNIX sockets.
$node->init(
'allows_streaming' => 1,
- 'auth_extra' => [ '--create-role', 'backupuser' ]);
+ 'auth_extra' => [ '--create-role', 'backupuser' ]);
$node->append_conf('postgresql.conf',
"shared_preload_libraries = 'basebackup_to_shell'");
@@ -50,7 +50,7 @@ $node->command_fails_like(
'fails if basebackup_to_shell.command is not set');
# Configure basebackup_to_shell.command and reload the configuration file.
-my $backup_path = PostgreSQL::Test::Utils::tempdir;
+my $backup_path = PostgreSQL::Test::Utils::tempdir;
my $escaped_backup_path = $backup_path;
$escaped_backup_path =~ s{\\}{\\\\}g
if ($PostgreSQL::Test::Utils::windows_os);
diff --git a/contrib/basic_archive/basic_archive.c b/contrib/basic_archive/basic_archive.c
index cd852888ce..4d78c31859 100644
--- a/contrib/basic_archive/basic_archive.c
+++ b/contrib/basic_archive/basic_archive.c
@@ -407,8 +407,8 @@ basic_archive_shutdown(ArchiveModuleState *state)
MemoryContext basic_archive_context;
/*
- * If we didn't get to storing the pointer to our allocated state, we don't
- * have anything to clean up.
+ * If we didn't get to storing the pointer to our allocated state, we
+ * don't have anything to clean up.
*/
if (data == NULL)
return;
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 55f75eff36..3a3e916f9e 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -1287,7 +1287,7 @@ dblink_get_connections(PG_FUNCTION_ARGS)
if (astate)
PG_RETURN_DATUM(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
else
PG_RETURN_NULL();
}
diff --git a/contrib/intarray/bench/bench.pl b/contrib/intarray/bench/bench.pl
index bd6dd83c93..067654986e 100755
--- a/contrib/intarray/bench/bench.pl
+++ b/contrib/intarray/bench/bench.pl
@@ -83,7 +83,7 @@ else
$outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
}
my $sql =
- "select $outf from "
+ "select $outf from "
. join(', ', keys %table)
. " where "
. join(' AND ', @where) . ';';
@@ -100,9 +100,9 @@ if ($opt{e})
print @plan;
}
-my $t0 = [gettimeofday];
+my $t0 = [gettimeofday];
my $count = 0;
-my $b = $opt{b};
+my $b = $opt{b};
$b ||= 1;
my @a;
foreach (1 .. $b)
diff --git a/contrib/intarray/bench/create_test.pl b/contrib/intarray/bench/create_test.pl
index 5bdcebddbe..6efe9151ca 100755
--- a/contrib/intarray/bench/create_test.pl
+++ b/contrib/intarray/bench/create_test.pl
@@ -19,7 +19,7 @@ create table message_section_map (
EOT
-open(my $msg, '>', "message.tmp") || die;
+open(my $msg, '>', "message.tmp") || die;
open(my $map, '>', "message_section_map.tmp") || die;
srand(1);
diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c
index 21b7d02028..932f69bff2 100644
--- a/contrib/ltree/ltree_gist.c
+++ b/contrib/ltree/ltree_gist.c
@@ -43,7 +43,7 @@ ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
ltree *left, ltree *right)
{
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
- (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
+ (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size);
SET_VARSIZE(result, size);
diff --git a/contrib/ltree/ltree_io.c b/contrib/ltree/ltree_io.c
index 5dce70bd1a..0a12c77a62 100644
--- a/contrib/ltree/ltree_io.c
+++ b/contrib/ltree/ltree_io.c
@@ -175,7 +175,7 @@ Datum
ltree_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- ltree *res;
+ ltree *res;
if ((res = parse_ltree(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
@@ -584,7 +584,7 @@ parse_lquery(const char *buf, struct Node *escontext)
*/
static bool
finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos,
- struct Node *escontext)
+ struct Node *escontext)
{
if (is_lquery)
{
@@ -745,7 +745,7 @@ Datum
lquery_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- lquery *res;
+ lquery *res;
if ((res = parse_lquery(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c
index 0d29e15630..121fc55e46 100644
--- a/contrib/ltree/ltxtquery_io.c
+++ b/contrib/ltree/ltxtquery_io.c
@@ -186,8 +186,8 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long")));
- if (! pushquery(state, type, ltree_crc32_sz(strval, lenval),
- state->curop - state->op, lenval, flag))
+ if (!pushquery(state, type, ltree_crc32_sz(strval, lenval),
+ state->curop - state->op, lenval, flag))
return false;
while (state->curop - state->op + lenval + 1 >= state->lenop)
@@ -408,7 +408,7 @@ PG_FUNCTION_INFO_V1(ltxtq_in);
Datum
ltxtq_in(PG_FUNCTION_ARGS)
{
- ltxtquery *res;
+ ltxtquery *res;
if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL)
PG_RETURN_NULL();
diff --git a/contrib/pg_prewarm/t/001_basic.pl b/contrib/pg_prewarm/t/001_basic.pl
index 9811c51cee..6b7c869afc 100644
--- a/contrib/pg_prewarm/t/001_basic.pl
+++ b/contrib/pg_prewarm/t/001_basic.pl
@@ -21,7 +21,7 @@ $node->start;
# setup
$node->safe_psql("postgres",
- "CREATE EXTENSION pg_prewarm;\n"
+ "CREATE EXTENSION pg_prewarm;\n"
. "CREATE TABLE test(c1 int);\n"
. "INSERT INTO test SELECT generate_series(1, 100);");
diff --git a/contrib/pg_walinspect/pg_walinspect.c b/contrib/pg_walinspect/pg_walinspect.c
index 1cd3744d5d..796a74f322 100644
--- a/contrib/pg_walinspect/pg_walinspect.c
+++ b/contrib/pg_walinspect/pg_walinspect.c
@@ -252,8 +252,8 @@ GetWALBlockInfo(FunctionCallInfo fcinfo, XLogReaderState *record,
int block_id;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
RmgrData desc;
- const char *record_type;
- StringInfoData rec_desc;
+ const char *record_type;
+ StringInfoData rec_desc;
Assert(XLogRecHasAnyBlockRefs(record));
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index da32d503bc..d918ba89e1 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -61,7 +61,7 @@ typedef struct ConnCacheEntry
bool have_error; /* have any subxacts aborted in this xact? */
bool changing_xact_state; /* xact state change in process */
bool parallel_commit; /* do we commit (sub)xacts in parallel? */
- bool parallel_abort; /* do we abort (sub)xacts in parallel? */
+ bool parallel_abort; /* do we abort (sub)xacts in parallel? */
bool invalidated; /* true if reconnect is pending */
bool keep_connections; /* setting value of keep_connections
* server option */
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 95dbe8b06c..428ea3810f 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -2024,9 +2024,8 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
/*
* Should never get called when the insert is being performed on a table
- * that is also among the target relations of an UPDATE operation,
- * because postgresBeginForeignInsert() currently rejects such insert
- * attempts.
+ * that is also among the target relations of an UPDATE operation, because
+ * postgresBeginForeignInsert() currently rejects such insert attempts.
*/
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
@@ -5167,15 +5166,15 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
*/
if (method != ANALYZE_SAMPLE_OFF)
{
- bool can_tablesample;
+ bool can_tablesample;
reltuples = postgresGetAnalyzeInfoForForeignTable(relation,
&can_tablesample);
/*
- * Make sure we're not choosing TABLESAMPLE when the remote relation does
- * not support that. But only do this for "auto" - if the user explicitly
- * requested BERNOULLI/SYSTEM, it's better to fail.
+ * Make sure we're not choosing TABLESAMPLE when the remote relation
+ * does not support that. But only do this for "auto" - if the user
+ * explicitly requested BERNOULLI/SYSTEM, it's better to fail.
*/
if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
method = ANALYZE_SAMPLE_RANDOM;
@@ -5189,35 +5188,35 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
else
{
/*
- * All supported sampling methods require sampling rate,
- * not target rows directly, so we calculate that using
- * the remote reltuples value. That's imperfect, because
- * it might be off a good deal, but that's not something
- * we can (or should) address here.
+ * All supported sampling methods require sampling rate, not
+ * target rows directly, so we calculate that using the remote
+ * reltuples value. That's imperfect, because it might be off a
+ * good deal, but that's not something we can (or should) address
+ * here.
*
- * If reltuples is too low (i.e. when table grew), we'll
- * end up sampling more rows - but then we'll apply the
- * local sampling, so we get the expected sample size.
- * This is the same outcome as without remote sampling.
+ * If reltuples is too low (i.e. when table grew), we'll end up
+ * sampling more rows - but then we'll apply the local sampling,
+ * so we get the expected sample size. This is the same outcome as
+ * without remote sampling.
*
- * If reltuples is too high (e.g. after bulk DELETE), we
- * will end up sampling too few rows.
+ * If reltuples is too high (e.g. after bulk DELETE), we will end
+ * up sampling too few rows.
*
- * We can't really do much better here - we could try
- * sampling a bit more rows, but we don't know how off
- * the reltuples value is so how much is "a bit more"?
+ * We can't really do much better here - we could try sampling a
+ * bit more rows, but we don't know how off the reltuples value is
+ * so how much is "a bit more"?
*
- * Furthermore, the targrows value for partitions is
- * determined based on table size (relpages), which can
- * be off in different ways too. Adjusting the sampling
- * rate here might make the issue worse.
+ * Furthermore, the targrows value for partitions is determined
+ * based on table size (relpages), which can be off in different
+ * ways too. Adjusting the sampling rate here might make the issue
+ * worse.
*/
sample_frac = targrows / reltuples;
/*
* We should never get sampling rate outside the valid range
- * (between 0.0 and 1.0), because those cases should be covered
- * by the previous branch that sets ANALYZE_SAMPLE_OFF.
+ * (between 0.0 and 1.0), because those cases should be covered by
+ * the previous branch that sets ANALYZE_SAMPLE_OFF.
*/
Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
}
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index eb33d2a993..07c11b75e9 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -183,7 +183,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
/* See if we already cached the result. */
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
if (!entry)
{
@@ -196,7 +196,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
* cache invalidation.
*/
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
entry->shippable = shippable;
}
diff --git a/contrib/seg/seg-validate.pl b/contrib/seg/seg-validate.pl
index 00bc23aa95..67c0015e6b 100755
--- a/contrib/seg/seg-validate.pl
+++ b/contrib/seg/seg-validate.pl
@@ -6,14 +6,14 @@ use strict;
use warnings;
my $integer = '[+-]?[0-9]+';
-my $real = '[+-]?[0-9]+\.[0-9]+';
+my $real = '[+-]?[0-9]+\.[0-9]+';
-my $RANGE = '(\.\.)(\.)?';
-my $PLUMIN = q(\'\+\-\');
-my $FLOAT = "(($integer)|($real))([eE]($integer))?";
+my $RANGE = '(\.\.)(\.)?';
+my $PLUMIN = q(\'\+\-\');
+my $FLOAT = "(($integer)|($real))([eE]($integer))?";
my $EXTENSION = '<|>|~';
-my $boundary = "($EXTENSION)?$FLOAT";
+my $boundary = "($EXTENSION)?$FLOAT";
my $deviation = $FLOAT;
my $rule_1 = $boundary . $PLUMIN . $deviation;
diff --git a/contrib/test_decoding/t/001_repl_stats.pl b/contrib/test_decoding/t/001_repl_stats.pl
index dede36ff16..7c2d87561c 100644
--- a/contrib/test_decoding/t/001_repl_stats.pl
+++ b/contrib/test_decoding/t/001_repl_stats.pl
@@ -92,7 +92,7 @@ regression_slot3|t|t),
# replication statistics data is fine after restart.
$node->stop;
-my $datadir = $node->data_dir;
+my $datadir = $node->data_dir;
my $slot3_replslotdir = "$datadir/pg_replslot/regression_slot3";
rmtree($slot3_replslotdir);
diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c
index 628c6a2595..12d1d0505d 100644
--- a/contrib/test_decoding/test_decoding.c
+++ b/contrib/test_decoding/test_decoding.c
@@ -288,7 +288,7 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;
@@ -348,7 +348,7 @@ pg_decode_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;
diff --git a/doc/src/sgml/mk_feature_tables.pl b/doc/src/sgml/mk_feature_tables.pl
index 5a16da0d06..824be729a0 100644
--- a/doc/src/sgml/mk_feature_tables.pl
+++ b/doc/src/sgml/mk_feature_tables.pl
@@ -34,7 +34,7 @@ print "<tbody>\n";
while (<$feat>)
{
chomp;
- my ($feature_id, $feature_name, $subfeature_id,
+ my ($feature_id, $feature_name, $subfeature_id,
$subfeature_name, $is_supported, $comments) = split /\t/;
$is_supported eq $yesno || next;
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index e91fd7e2bd..3c6a956eaa 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -700,8 +700,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
}
/*
- * If we found a scan key eliminating the range, no need to
- * check additional ones.
+ * If we found a scan key eliminating the range, no need
+ * to check additional ones.
*/
if (!addrange)
break;
@@ -1223,7 +1223,7 @@ brin_build_desc(Relation rel)
* Obtain BrinOpcInfo for each indexed column. While at it, accumulate
* the number of columns stored, since the number is opclass-defined.
*/
- opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts);
+ opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts);
for (keyno = 0; keyno < tupdesc->natts; keyno++)
{
FmgrInfo *opcInfoFn;
@@ -1801,8 +1801,8 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
bval = &dtup->bt_columns[keyno];
/*
- * Does the range have actual NULL values? Either of the flags can
- * be set, but we ignore the state before adding first row.
+ * Does the range have actual NULL values? Either of the flags can be
+ * set, but we ignore the state before adding first row.
*
* We have to remember this, because we'll modify the flags and we
* need to know if the range started as empty.
@@ -1842,12 +1842,12 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
/*
* If the range was had actual NULL values (i.e. did not start empty),
- * make sure we don't forget about the NULL values. Either the allnulls
- * flag is still set to true, or (if the opclass cleared it) we need to
- * set hasnulls=true.
+ * make sure we don't forget about the NULL values. Either the
+ * allnulls flag is still set to true, or (if the opclass cleared it)
+ * we need to set hasnulls=true.
*
- * XXX This can only happen when the opclass modified the tuple, so the
- * modified flag should be set.
+ * XXX This can only happen when the opclass modified the tuple, so
+ * the modified flag should be set.
*/
if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls))
{
@@ -1859,9 +1859,9 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
/*
* After updating summaries for all the keys, mark it as not empty.
*
- * If we're actually changing the flag value (i.e. tuple started as empty),
- * we should have modified the tuple. So we should not see empty range that
- * was not modified.
+ * If we're actually changing the flag value (i.e. tuple started as
+ * empty), we should have modified the tuple. So we should not see empty
+ * range that was not modified.
*/
Assert(!dtup->bt_empty_range || modified);
dtup->bt_empty_range = false;
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 90cb3951fc..11cc431677 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -1717,7 +1717,7 @@ allocateReloptStruct(Size base, relopt_value *options, int numoptions)
if (optstr->fill_cb)
{
const char *val = optval->isset ? optval->values.string_val :
- optstr->default_isnull ? NULL : optstr->default_val;
+ optstr->default_isnull ? NULL : optstr->default_val;
size += optstr->fill_cb(val, NULL);
}
@@ -1796,8 +1796,8 @@ fillRelOptions(void *rdopts, Size basesize,
if (optstring->fill_cb)
{
Size size =
- optstring->fill_cb(string_val,
- (char *) rdopts + offset);
+ optstring->fill_cb(string_val,
+ (char *) rdopts + offset);
if (size)
{
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index b5c1754e78..516465f8b7 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1117,7 +1117,7 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
{
IndexTuple ituple = (IndexTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (downlink == NULL)
downlink = CopyIndexTuple(ituple);
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 95cbed4337..1423b4b047 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -598,7 +598,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
{
GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc);
GISTNodeBuffer *newNodeBuffer;
- int i = foreach_current_index(lc);
+ int i = foreach_current_index(lc);
/* Decompress parent index tuple of node buffer page. */
gistDeCompressAtt(giststate, r,
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 7382b0921d..e2c9b5f069 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -657,7 +657,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
@@ -694,7 +694,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index a2ddfd5e69..15249aa921 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -125,7 +125,7 @@ gistRedoPageUpdateRecord(XLogReaderState *record)
if (data - begin < datalen)
{
OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
while (data - begin < datalen)
{
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index d850edd1d5..37646cc9a1 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -289,7 +289,8 @@ hashtext(PG_FUNCTION_ARGS)
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key);
@@ -304,8 +305,8 @@ hashtext(PG_FUNCTION_ARGS)
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any((uint8_t *) buf, bsize + 1);
@@ -343,7 +344,8 @@ hashtextextended(PG_FUNCTION_ARGS)
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key);
@@ -357,8 +359,8 @@ hashtextextended(PG_FUNCTION_ARGS)
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1));
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 0124f37911..7ed72abe59 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2491,7 +2491,7 @@ static inline bool
xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
{
const uint16 interesting =
- HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
+ HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
if ((new_infomask & interesting) != (old_infomask & interesting))
return true;
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index cbb35aa73d..646135cc21 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -334,8 +334,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
* Note: heap_update returns the tid (location) of the new tuple in the
* t_self field.
*
- * If the update is not HOT, we must update all indexes. If the update
- * is HOT, it could be that we updated summarized columns, so we either
+ * If the update is not HOT, we must update all indexes. If the update is
+ * HOT, it could be that we updated summarized columns, so we either
* update only summarized indexes, or none at all.
*/
if (result != TM_Ok)
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index fb95c19e90..c275b08494 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -376,7 +376,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate,
if (use_fsm && i >= not_in_fsm_pages)
{
Size freespace = BufferGetPageSize(victim_buffers[i]) -
- SizeOfPageHeaderData;
+ SizeOfPageHeaderData;
RecordPageWithFreeSpace(relation, curBlock, freespace);
}
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 20df39c149..47b9e20915 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -532,7 +532,7 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
if (!TransactionIdIsValid(prstate->old_snap_xmin))
{
TransactionId horizon =
- GlobalVisTestNonRemovableHorizon(prstate->vistest);
+ GlobalVisTestNonRemovableHorizon(prstate->vistest);
TransactionIdLimitedForOldSnapshots(horizon, prstate->rel,
&prstate->old_snap_xmin,
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index cda8889f5e..4eb953f904 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -389,6 +389,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
params->truncate != VACOPTVALUE_AUTO);
+
/*
* While VacuumFailSafeActive is reset to false before calling this, we
* still need to reset it here due to recursive calls.
@@ -1813,12 +1814,12 @@ retry:
{
/*
* We have no freeze plans to execute, so there's no added cost
- * from following the freeze path. That's why it was chosen.
- * This is important in the case where the page only contains
- * totally frozen tuples at this point (perhaps only following
- * pruning). Such pages can be marked all-frozen in the VM by our
- * caller, even though none of its tuples were newly frozen here
- * (note that the "no freeze" path never sets pages all-frozen).
+ * from following the freeze path. That's why it was chosen. This
+ * is important in the case where the page only contains totally
+ * frozen tuples at this point (perhaps only following pruning).
+ * Such pages can be marked all-frozen in the VM by our caller,
+ * even though none of its tuples were newly frozen here (note
+ * that the "no freeze" path never sets pages all-frozen).
*
* We never increment the frozen_pages instrumentation counter
* here, since it only counts pages with newly frozen tuples
@@ -3117,8 +3118,8 @@ dead_items_max_items(LVRelState *vacrel)
{
int64 max_items;
int vac_work_mem = IsAutoVacuumWorkerProcess() &&
- autovacuum_work_mem != -1 ?
- autovacuum_work_mem : maintenance_work_mem;
+ autovacuum_work_mem != -1 ?
+ autovacuum_work_mem : maintenance_work_mem;
if (vacrel->nindexes > 0)
{
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index ac91d1a14d..7d54ec9c0f 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -626,7 +626,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
- Buffer buf;
+ Buffer buf;
buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
EB_CREATE_FORK_IF_NEEDED |
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 41aa1c4ccd..6be8915229 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -2947,7 +2947,7 @@ void
_bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
{
IndexBulkDeleteResult *stats = vstate->stats;
- Relation heaprel = vstate->info->heaprel;
+ Relation heaprel = vstate->info->heaprel;
Assert(stats->pages_newly_deleted >= vstate->npendingpages);
@@ -3027,7 +3027,7 @@ _bt_pendingfsm_add(BTVacState *vstate,
if (vstate->npendingpages > 0)
{
FullTransactionId lastsafexid =
- vstate->pendingpages[vstate->npendingpages - 1].safexid;
+ vstate->pendingpages[vstate->npendingpages - 1].safexid;
Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
}
diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c
index 7d12e0ef91..3922120d64 100644
--- a/src/backend/access/rmgrdesc/dbasedesc.c
+++ b/src/backend/access/rmgrdesc/dbasedesc.c
@@ -27,7 +27,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record)
if (info == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) rec;
+ (xl_dbase_create_file_copy_rec *) rec;
appendStringInfo(buf, "copy dir %u/%u to %u/%u",
xlrec->src_tablespace_id, xlrec->src_db_id,
@@ -36,7 +36,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record)
else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{
xl_dbase_create_wal_log_rec *xlrec =
- (xl_dbase_create_wal_log_rec *) rec;
+ (xl_dbase_create_wal_log_rec *) rec;
appendStringInfo(buf, "create dir %u/%u",
xlrec->tablespace_id, xlrec->db_id);
diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c
index 9ef4981ad1..246a6a6b85 100644
--- a/src/backend/access/rmgrdesc/gindesc.c
+++ b/src/backend/access/rmgrdesc/gindesc.c
@@ -120,7 +120,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
else
{
ginxlogInsertDataInternal *insertData =
- (ginxlogInsertDataInternal *) payload;
+ (ginxlogInsertDataInternal *) payload;
appendStringInfo(buf, " pitem: %u-%u/%u",
PostingItemGetBlockNumber(&insertData->newitem),
@@ -156,7 +156,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
else
{
ginxlogVacuumDataLeafPage *xlrec =
- (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
+ (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
desc_recompress_leaf(buf, &xlrec->data);
}
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index f323699165..cbfaf0c00a 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -115,7 +115,7 @@ spgAllocSearchItem(SpGistScanOpaque so, bool isnull, double *distances)
{
/* allocate distance array only for non-NULL items */
SpGistSearchItem *item =
- palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
+ palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
item->isNull = isnull;
@@ -130,7 +130,7 @@ static void
spgAddStartItem(SpGistScanOpaque so, bool isnull)
{
SpGistSearchItem *startEntry =
- spgAllocSearchItem(so, isnull, so->zeroDistances);
+ spgAllocSearchItem(so, isnull, so->zeroDistances);
ItemPointerSet(&startEntry->heapPtr,
isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO,
@@ -768,7 +768,7 @@ spgTestLeafTuple(SpGistScanOpaque so,
storeRes_func storeRes)
{
SpGistLeafTuple leafTuple = (SpGistLeafTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (leafTuple->tupstate != SPGIST_LIVE)
{
@@ -896,7 +896,7 @@ redirect:
else /* page is inner */
{
SpGistInnerTuple innerTuple = (SpGistInnerTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (innerTuple->tupstate != SPGIST_LIVE)
{
@@ -974,7 +974,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
else
{
IndexOrderByDistance *distances =
- palloc(sizeof(distances[0]) * so->numberOfOrderBys);
+ palloc(sizeof(distances[0]) * so->numberOfOrderBys);
int i;
for (i = 0; i < so->numberOfOrderBys; i++)
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index a5e6c92f35..771438c8ce 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -112,7 +112,7 @@ TableScanDesc
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
@@ -176,7 +176,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
Assert(RelationGetRelid(relation) == pscan->phs_relid);
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index fe6698d5ff..abb022e067 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -3270,7 +3270,7 @@ multixact_redo(XLogReaderState *record)
else if (info == XLOG_MULTIXACT_CREATE_ID)
{
xl_multixact_create *xlrec =
- (xl_multixact_create *) XLogRecGetData(record);
+ (xl_multixact_create *) XLogRecGetData(record);
TransactionId max_xid;
int i;
diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index 7133ec0b22..2b8bc2f58d 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -375,8 +375,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
/*
- * Serialize the transaction snapshot if the transaction
- * isolation level uses a transaction snapshot.
+ * Serialize the transaction snapshot if the transaction isolation
+ * level uses a transaction snapshot.
*/
if (IsolationUsesXactSnapshot())
{
@@ -1497,8 +1497,8 @@ ParallelWorkerMain(Datum main_arg)
RestoreClientConnectionInfo(clientconninfospace);
/*
- * Initialize SystemUser now that MyClientConnectionInfo is restored.
- * Also ensure that auth_method is actually valid, aka authn_id is not NULL.
+ * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
+ * ensure that auth_method is actually valid, aka authn_id is not NULL.
*/
if (MyClientConnectionInfo.authn_id)
InitializeSystemUser(MyClientConnectionInfo.authn_id,
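The parallel.c hunks, like several below in xact.c, xlog.c, xloginsert.c, and basebackup.c, are pure comment reflows: the words are unchanged and only the line breaks move so each line of the block comment fills its width. A compilable sketch with invented comment text:

/* Before the reflow, an early break leaves the first line short:
 *
 *     Serialize the snapshot if the
 *     isolation level needs one.
 *
 * After the reflow, the text fills out to the target width:
 *
 *     Serialize the snapshot if the isolation level needs one.
 */
typedef int comment_reflow_example;	/* keeps this a valid C file */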
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 6a837e1539..8daaa535ed 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -3152,10 +3152,9 @@ CommitTransactionCommand(void)
break;
/*
- * The user issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already did
- * PushTransaction, so as to have someplace to put the SUBBEGIN
- * state.)
+ * The user issued a SAVEPOINT inside a transaction block. Start a
+ * subtransaction. (DefineSavepoint already did PushTransaction,
+ * so as to have someplace to put the SUBBEGIN state.)
*/
case TBLOCK_SUBBEGIN:
StartSubTransaction();
@@ -4696,9 +4695,9 @@ RollbackAndReleaseCurrentSubTransaction(void)
s = CurrentTransactionState; /* changed by pop */
Assert(s->blockState == TBLOCK_SUBINPROGRESS ||
- s->blockState == TBLOCK_INPROGRESS ||
- s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
- s->blockState == TBLOCK_STARTED);
+ s->blockState == TBLOCK_INPROGRESS ||
+ s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
+ s->blockState == TBLOCK_STARTED);
}
/*
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index bc5a8e0569..b2430f617c 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -5460,8 +5460,8 @@ StartupXLOG(void)
missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
/*
- * Reset ps status display, so as no information related to recovery
- * shows up.
+ * Reset ps status display, so as no information related to recovery shows
+ * up.
*/
set_ps_display("");
@@ -5596,9 +5596,9 @@ StartupXLOG(void)
if (!XLogRecPtrIsInvalid(missingContrecPtr))
{
/*
- * We should only have a missingContrecPtr if we're not switching to
- * a new timeline. When a timeline switch occurs, WAL is copied from
- * the old timeline to the new only up to the end of the last complete
+ * We should only have a missingContrecPtr if we're not switching to a
+ * new timeline. When a timeline switch occurs, WAL is copied from the
+ * old timeline to the new only up to the end of the last complete
* record, so there can't be an incomplete WAL record that we need to
* disregard.
*/
@@ -8494,7 +8494,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
*/
if (rllen > datadirpathlen &&
strncmp(linkpath, DataDir, datadirpathlen) == 0 &&
- IS_DIR_SEP(linkpath[datadirpathlen]))
+ IS_DIR_SEP(linkpath[datadirpathlen]))
relpath = pstrdup(linkpath + datadirpathlen + 1);
/*
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index ea7e2f67af..54247e1d81 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -897,8 +897,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
*
* XLogReader machinery is only able to handle records up to a certain
* size (ignoring machine resource limitations), so make sure that we will
- * not emit records larger than the sizes advertised to be supported.
- * This cap is based on DecodeXLogRecordRequiredSpace().
+ * not emit records larger than the sizes advertised to be supported. This
+ * cap is based on DecodeXLogRecordRequiredSpace().
*/
if (total_len >= XLogRecordMaxSize)
ereport(ERROR,
diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c
index 906e3d9469..539928cb85 100644
--- a/src/backend/access/transam/xlogprefetcher.c
+++ b/src/backend/access/transam/xlogprefetcher.c
@@ -569,7 +569,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
if (record_type == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) record->main_data;
+ (xl_dbase_create_file_copy_rec *) record->main_data;
RelFileLocator rlocator =
{InvalidOid, xlrec->db_id, InvalidRelFileNumber};
@@ -596,7 +596,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
if (record_type == XLOG_SMGR_CREATE)
{
xl_smgr_create *xlrec = (xl_smgr_create *)
- record->main_data;
+ record->main_data;
if (xlrec->forkNum == MAIN_FORKNUM)
{
@@ -624,7 +624,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
else if (record_type == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *)
- record->main_data;
+ record->main_data;
/*
* Don't consider prefetching anything in the truncated
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 631f260f79..2e7b1ba8e1 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -282,7 +282,7 @@ XLogRecPtr
XLogReleasePreviousRecord(XLogReaderState *state)
{
DecodedXLogRecord *record;
- XLogRecPtr next_lsn;
+ XLogRecPtr next_lsn;
if (!state->record)
return InvalidXLogRecPtr;
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 188f6d6f85..4883fcb512 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -3215,7 +3215,7 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen,
XLogRecPtr targetRecPtr, char *readBuf)
{
XLogPageReadPrivate *private =
- (XLogPageReadPrivate *) xlogreader->private_data;
+ (XLogPageReadPrivate *) xlogreader->private_data;
int emode = private->emode;
uint32 targetPageOff;
XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c
index 5baea7535b..45be21131c 100644
--- a/src/backend/backup/basebackup.c
+++ b/src/backend/backup/basebackup.c
@@ -1609,10 +1609,10 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename,
*
* There's no guarantee that this will actually
* happen, though: the torn write could take an
- * arbitrarily long time to complete. Retrying multiple
- * times wouldn't fix this problem, either, though
- * it would reduce the chances of it happening in
- * practice. The only real fix here seems to be to
+ * arbitrarily long time to complete. Retrying
+ * multiple times wouldn't fix this problem, either,
+ * though it would reduce the chances of it happening
+ * in practice. The only real fix here seems to be to
* have some kind of interlock that allows us to wait
* until we can be certain that no write to the block
* is in progress. Since we don't have any such thing
diff --git a/src/backend/backup/basebackup_copy.c b/src/backend/backup/basebackup_copy.c
index 73a3f4a970..1db80cde1b 100644
--- a/src/backend/backup/basebackup_copy.c
+++ b/src/backend/backup/basebackup_copy.c
@@ -350,6 +350,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0);
+
/*
* int8 may seem like a surprising data type for this, but in theory int4
* would not be wide enough for this, as TimeLineID is unsigned.
@@ -360,7 +361,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
/* Data row */
- values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
+ values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
values[1] = Int64GetDatum(tli);
do_tup_output(tstate, values, nulls);
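The basebackup_copy.c hunks demonstrate two of the smaller mechanical rules: a block comment that directly follows a statement gains a blank line in front of it, and a binary operator gains a space on each side (values[0]= becomes values[0] =). A runnable sketch with invented values:

#include <stdio.h>

int
main(void)
{
	int values[2];

	values[0] = 42;		/* spaces around '=', as in the hunk above */

	/*
	 * A block comment that follows a statement now has a blank line
	 * before it, like the one inserted above this comment.
	 */
	values[1] = values[0] + 1;
	printf("%d %d\n", values[0], values[1]);
	return 0;
}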
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index 656b57934e..84aaeb002a 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -28,25 +28,25 @@ sub ParseHeader
# There are a few types which are given one name in the C source, but a
# different name at the SQL level. These are enumerated here.
my %RENAME_ATTTYPE = (
- 'int16' => 'int2',
- 'int32' => 'int4',
- 'int64' => 'int8',
- 'Oid' => 'oid',
- 'NameData' => 'name',
+ 'int16' => 'int2',
+ 'int32' => 'int4',
+ 'int64' => 'int8',
+ 'Oid' => 'oid',
+ 'NameData' => 'name',
'TransactionId' => 'xid',
- 'XLogRecPtr' => 'pg_lsn');
+ 'XLogRecPtr' => 'pg_lsn');
my %catalog;
my $declaring_attributes = 0;
- my $is_varlen = 0;
- my $is_client_code = 0;
+ my $is_varlen = 0;
+ my $is_client_code = 0;
- $catalog{columns} = [];
- $catalog{toasting} = [];
- $catalog{indexing} = [];
- $catalog{other_oids} = [];
+ $catalog{columns} = [];
+ $catalog{toasting} = [];
+ $catalog{indexing} = [];
+ $catalog{other_oids} = [];
$catalog{foreign_keys} = [];
- $catalog{client_code} = [];
+ $catalog{client_code} = [];
open(my $ifh, '<', $input_file) || die "$input_file: $!";
@@ -102,10 +102,10 @@ sub ParseHeader
{
push @{ $catalog{toasting} },
{
- parent_table => $1,
- toast_oid => $2,
- toast_index_oid => $3,
- toast_oid_macro => $4,
+ parent_table => $1,
+ toast_oid => $2,
+ toast_index_oid => $3,
+ toast_oid_macro => $4,
toast_index_oid_macro => $5
};
}
@@ -116,11 +116,11 @@ sub ParseHeader
push @{ $catalog{indexing} },
{
is_unique => $1 ? 1 : 0,
- is_pkey => $2 ? 1 : 0,
- index_name => $3,
- index_oid => $4,
+ is_pkey => $2 ? 1 : 0,
+ index_name => $3,
+ index_oid => $4,
index_oid_macro => $5,
- index_decl => $6
+ index_decl => $6
};
}
elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/)
@@ -128,7 +128,7 @@ sub ParseHeader
push @{ $catalog{other_oids} },
{
other_name => $1,
- other_oid => $2
+ other_oid => $2
};
}
elsif (
@@ -138,16 +138,16 @@ sub ParseHeader
push @{ $catalog{foreign_keys} },
{
is_array => $1 ? 1 : 0,
- is_opt => $2 ? 1 : 0,
- fk_cols => $3,
+ is_opt => $2 ? 1 : 0,
+ fk_cols => $3,
pk_table => $4,
- pk_cols => $5
+ pk_cols => $5
};
}
elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/)
{
- $catalog{catname} = $1;
- $catalog{relation_oid} = $2;
+ $catalog{catname} = $1;
+ $catalog{relation_oid} = $2;
$catalog{relation_oid_macro} = $3;
$catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : '';
@@ -155,15 +155,15 @@ sub ParseHeader
/BKI_SHARED_RELATION/ ? ' shared_relation' : '';
if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/)
{
- $catalog{rowtype_oid} = $1;
+ $catalog{rowtype_oid} = $1;
$catalog{rowtype_oid_clause} = " rowtype_oid $1";
- $catalog{rowtype_oid_macro} = $2;
+ $catalog{rowtype_oid_macro} = $2;
}
else
{
- $catalog{rowtype_oid} = '';
+ $catalog{rowtype_oid} = '';
$catalog{rowtype_oid_clause} = '';
- $catalog{rowtype_oid_macro} = '';
+ $catalog{rowtype_oid_macro} = '';
}
$catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0;
$declaring_attributes = 1;
@@ -209,8 +209,8 @@ sub ParseHeader
$atttype = '_' . $atttype;
}
- $column{type} = $atttype;
- $column{name} = $attname;
+ $column{type} = $atttype;
+ $column{name} = $attname;
$column{is_varlen} = 1 if $is_varlen;
foreach my $attopt (@attopts)
@@ -243,14 +243,14 @@ sub ParseHeader
# BKI_LOOKUP implicitly makes an FK reference
push @{ $catalog{foreign_keys} },
{
- is_array =>
- ($atttype eq 'oidvector' || $atttype eq '_oid')
+ is_array => (
+ $atttype eq 'oidvector' || $atttype eq '_oid')
? 1
: 0,
- is_opt => $column{lookup_opt},
- fk_cols => $attname,
+ is_opt => $column{lookup_opt},
+ fk_cols => $attname,
pk_table => $column{lookup},
- pk_cols => 'oid'
+ pk_cols => 'oid'
};
}
else
@@ -285,7 +285,7 @@ sub ParseData
$input_file =~ /(\w+)\.dat$/
or die "Input file $input_file needs to be a .dat file.\n";
my $catname = $1;
- my $data = [];
+ my $data = [];
if ($preserve_formatting)
{
@@ -433,7 +433,7 @@ sub AddDefaultValues
sub GenerateArrayTypes
{
my $pgtype_schema = shift;
- my $types = shift;
+ my $types = shift;
my @array_types;
foreach my $elem_type (@$types)
@@ -444,9 +444,9 @@ sub GenerateArrayTypes
my %array_type;
# Set up metadata fields for array type.
- $array_type{oid} = $elem_type->{array_type_oid};
+ $array_type{oid} = $elem_type->{array_type_oid};
$array_type{autogenerated} = 1;
- $array_type{line_number} = $elem_type->{line_number};
+ $array_type{line_number} = $elem_type->{line_number};
# Set up column values derived from the element type.
$array_type{typname} = '_' . $elem_type->{typname};
@@ -499,8 +499,8 @@ sub GenerateArrayTypes
sub RenameTempFile
{
my $final_name = shift;
- my $extension = shift;
- my $temp_name = $final_name . $extension;
+ my $extension = shift;
+ my $temp_name = $final_name . $extension;
if (-f $final_name
&& compare($temp_name, $final_name) == 0)
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 45cdcd3dc6..bc2ad773c9 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -3389,8 +3389,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask,
result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
/*
- * Check if ACL_MAINTAIN is being checked and, if so, and not already set as
- * part of the result, then check if the user is a member of the
+ * Check if ACL_MAINTAIN is being checked and, if so, and not already set
+ * as part of the result, then check if the user is a member of the
* pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH
* MATERIALIZED VIEW, and REINDEX on all relations.
*/
diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl
index 2c5bfe23a1..4a7205472c 100644
--- a/src/backend/catalog/genbki.pl
+++ b/src/backend/catalog/genbki.pl
@@ -29,12 +29,12 @@ my $include_path;
my $num_errors = 0;
GetOptions(
- 'output:s' => \$output_path,
- 'set-version:s' => \$major_version,
+ 'output:s' => \$output_path,
+ 'set-version:s' => \$major_version,
'include-path:s' => \$include_path) || usage();
# Sanity check arguments.
-die "No input files.\n" unless @ARGV;
+die "No input files.\n" unless @ARGV;
die "--set-version must be specified.\n" unless $major_version;
die "Invalid version string: $major_version\n"
unless $major_version =~ /^\d+$/;
@@ -67,7 +67,7 @@ foreach my $header (@ARGV)
my $catalog = Catalog::ParseHeader($header);
my $catname = $catalog->{catname};
- my $schema = $catalog->{columns};
+ my $schema = $catalog->{columns};
if (defined $catname)
{
@@ -100,9 +100,9 @@ foreach my $header (@ARGV)
if (defined $row->{descr})
{
my %descr = (
- objoid => $row->{oid},
- classoid => $catalog->{relation_oid},
- objsubid => 0,
+ objoid => $row->{oid},
+ classoid => $catalog->{relation_oid},
+ objsubid => 0,
description => $row->{descr});
if ($catalog->{shared_relation})
@@ -364,7 +364,7 @@ open(my $ef, '<', $encfile) || die "$encfile: $!";
# We're parsing an enum, so start with 0 and increment
# every time we find an enum member.
-my $encid = 0;
+my $encid = 0;
my $collect_encodings = 0;
while (<$ef>)
{
@@ -387,27 +387,27 @@ close $ef;
# Map lookup name to the corresponding hash table.
my %lookup_kind = (
- pg_am => \%amoids,
- pg_authid => \%authidoids,
- pg_class => \%classoids,
- pg_collation => \%collationoids,
- pg_language => \%langoids,
- pg_namespace => \%namespaceoids,
- pg_opclass => \%opcoids,
- pg_operator => \%operoids,
- pg_opfamily => \%opfoids,
- pg_proc => \%procoids,
- pg_tablespace => \%tablespaceoids,
- pg_ts_config => \%tsconfigoids,
- pg_ts_dict => \%tsdictoids,
- pg_ts_parser => \%tsparseroids,
+ pg_am => \%amoids,
+ pg_authid => \%authidoids,
+ pg_class => \%classoids,
+ pg_collation => \%collationoids,
+ pg_language => \%langoids,
+ pg_namespace => \%namespaceoids,
+ pg_opclass => \%opcoids,
+ pg_operator => \%operoids,
+ pg_opfamily => \%opfoids,
+ pg_proc => \%procoids,
+ pg_tablespace => \%tablespaceoids,
+ pg_ts_config => \%tsconfigoids,
+ pg_ts_dict => \%tsdictoids,
+ pg_ts_parser => \%tsparseroids,
pg_ts_template => \%tstemplateoids,
- pg_type => \%typeoids,
- encoding => \%encids);
+ pg_type => \%typeoids,
+ encoding => \%encids);
# Open temp files
-my $tmpext = ".tmp$$";
+my $tmpext = ".tmp$$";
my $bkifile = $output_path . 'postgres.bki';
open my $bki, '>', $bkifile . $tmpext
or die "can't open $bkifile$tmpext: $!";
@@ -600,7 +600,7 @@ EOM
# each element of the array as per the lookup rule.
if ($column->{lookup})
{
- my $lookup = $lookup_kind{ $column->{lookup} };
+ my $lookup = $lookup_kind{ $column->{lookup} };
my $lookup_opt = $column->{lookup_opt};
my @lookupnames;
my @lookupoids;
@@ -790,7 +790,7 @@ foreach my $catname (@catnames)
printf $fk_info
"\t{ /* %s */ %s, /* %s */ %s, \"{%s}\", \"{%s}\", %s, %s},\n",
- $catname, $catalog->{relation_oid},
+ $catname, $catalog->{relation_oid},
$pktabname, $catalogs{$pktabname}->{relation_oid},
$fkinfo->{fk_cols},
$fkinfo->{pk_cols},
@@ -809,9 +809,9 @@ close $fk_info;
close $constraints;
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($bkifile, $tmpext);
-Catalog::RenameTempFile($schemafile, $tmpext);
-Catalog::RenameTempFile($fk_info_file, $tmpext);
+Catalog::RenameTempFile($bkifile, $tmpext);
+Catalog::RenameTempFile($schemafile, $tmpext);
+Catalog::RenameTempFile($fk_info_file, $tmpext);
Catalog::RenameTempFile($constraints_file, $tmpext);
exit($num_errors != 0 ? 1 : 0);
@@ -845,13 +845,13 @@ sub gen_pg_attribute
push @tables_needing_macros, $table_name;
# Generate entries for user attributes.
- my $attnum = 0;
+ my $attnum = 0;
my $priorfixedwidth = 1;
foreach my $attr (@{ $table->{columns} })
{
$attnum++;
my %row;
- $row{attnum} = $attnum;
+ $row{attnum} = $attnum;
$row{attrelid} = $table->{relation_oid};
morph_row_for_pgattr(\%row, $schema, $attr, $priorfixedwidth);
@@ -877,18 +877,18 @@ sub gen_pg_attribute
{
$attnum = 0;
my @SYS_ATTRS = (
- { name => 'ctid', type => 'tid' },
- { name => 'xmin', type => 'xid' },
- { name => 'cmin', type => 'cid' },
- { name => 'xmax', type => 'xid' },
- { name => 'cmax', type => 'cid' },
+ { name => 'ctid', type => 'tid' },
+ { name => 'xmin', type => 'xid' },
+ { name => 'cmin', type => 'cid' },
+ { name => 'xmax', type => 'xid' },
+ { name => 'cmax', type => 'cid' },
{ name => 'tableoid', type => 'oid' });
foreach my $attr (@SYS_ATTRS)
{
$attnum--;
my %row;
- $row{attnum} = $attnum;
- $row{attrelid} = $table->{relation_oid};
+ $row{attnum} = $attnum;
+ $row{attrelid} = $table->{relation_oid};
$row{attstattarget} = '0';
morph_row_for_pgattr(\%row, $schema, $attr, 1);
@@ -916,10 +916,10 @@ sub morph_row_for_pgattr
# Copy the type data from pg_type, and add some type-dependent items
my $type = $types{$atttype};
- $row->{atttypid} = $type->{oid};
- $row->{attlen} = $type->{typlen};
- $row->{attbyval} = $type->{typbyval};
- $row->{attalign} = $type->{typalign};
+ $row->{atttypid} = $type->{oid};
+ $row->{attlen} = $type->{typlen};
+ $row->{attbyval} = $type->{typbyval};
+ $row->{attalign} = $type->{typalign};
$row->{attstorage} = $type->{typstorage};
# set attndims if it's an array type
@@ -946,7 +946,7 @@ sub morph_row_for_pgattr
# At this point the width of type name is still symbolic,
# so we need a special test.
$row->{attnotnull} =
- $row->{attlen} eq 'NAMEDATALEN' ? 't'
+ $row->{attlen} eq 'NAMEDATALEN' ? 't'
: $row->{attlen} > 0 ? 't'
: 'f';
}
@@ -962,15 +962,15 @@ sub morph_row_for_pgattr
# Write an entry to postgres.bki.
sub print_bki_insert
{
- my $row = shift;
+ my $row = shift;
my $schema = shift;
my @bki_values;
foreach my $column (@$schema)
{
- my $attname = $column->{name};
- my $atttype = $column->{type};
+ my $attname = $column->{name};
+ my $atttype = $column->{type};
my $bki_value = $row->{$attname};
# Fold backslash-zero to empty string if it's the entire string,
@@ -1002,7 +1002,7 @@ sub print_bki_insert
# quite identical, to the corresponding values in postgres.bki.
sub morph_row_for_schemapg
{
- my $row = shift;
+ my $row = shift;
my $pgattr_schema = shift;
foreach my $column (@$pgattr_schema)
@@ -1027,7 +1027,7 @@ sub morph_row_for_schemapg
# don't change.
elsif ($atttype eq 'bool')
{
- $row->{$attname} = 'true' if $row->{$attname} eq 't';
+ $row->{$attname} = 'true' if $row->{$attname} eq 't';
$row->{$attname} = 'false' if $row->{$attname} eq 'f';
}
@@ -1089,7 +1089,7 @@ sub form_pg_type_symbol
# Skip for rowtypes of bootstrap catalogs, since they have their
# own naming convention defined elsewhere.
return
- if $typename eq 'pg_type'
+ if $typename eq 'pg_type'
or $typename eq 'pg_proc'
or $typename eq 'pg_attribute'
or $typename eq 'pg_class';
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index feddff654e..522da0ac85 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -148,8 +148,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple,
#endif /* USE_ASSERT_CHECKING */
/*
- * Skip insertions into non-summarizing indexes if we only need
- * to update summarizing indexes.
+ * Skip insertions into non-summarizing indexes if we only need to
+ * update summarizing indexes.
*/
if (onlySummarized && !indexInfo->ii_Summarizing)
continue;
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 73ddb67882..69ab1b8e4b 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -3842,7 +3842,7 @@ recomputeNamespacePath(void)
if (OidIsValid(namespaceId) &&
!list_member_oid(oidlist, namespaceId) &&
object_aclcheck(NamespaceRelationId, namespaceId, roleid,
- ACL_USAGE) == ACLCHECK_OK &&
+ ACL_USAGE) == ACLCHECK_OK &&
InvokeNamespaceSearchHook(namespaceId, false))
oidlist = lappend_oid(oidlist, namespaceId);
}
@@ -3870,7 +3870,7 @@ recomputeNamespacePath(void)
if (OidIsValid(namespaceId) &&
!list_member_oid(oidlist, namespaceId) &&
object_aclcheck(NamespaceRelationId, namespaceId, roleid,
- ACL_USAGE) == ACLCHECK_OK &&
+ ACL_USAGE) == ACLCHECK_OK &&
InvokeNamespaceSearchHook(namespaceId, false))
oidlist = lappend_oid(oidlist, namespaceId);
}
@@ -4006,7 +4006,7 @@ InitTempTableNamespace(void)
* temp table creation request is made by someone with appropriate rights.
*/
if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CREATE_TEMP) != ACLCHECK_OK)
+ ACL_CREATE_TEMP) != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to create temporary tables in database \"%s\"",
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 792b0ef414..95918a77a1 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -625,7 +625,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId,
/* not in catalogs, different from operator, so make shell */
aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(otherNamespace));
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 64d326f073..91c7f3426f 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -1414,6 +1414,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
/* FALLTHROUGH */
case SHARED_DEPENDENCY_OWNER:
+
/*
* Save it for deletion below, if it's a local object or a
* role grant. Other shared objects, such as databases,
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index 10f28f94bc..e95dc31bde 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -231,7 +231,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
if (OidIsValid(namespaceId))
{
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
@@ -1035,7 +1035,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c
index c91fe66d9b..2969a2bb21 100644
--- a/src/backend/commands/collationcmds.c
+++ b/src/backend/commands/collationcmds.c
@@ -270,8 +270,8 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e
*/
if (!IsBinaryUpgrade)
{
- char *langtag = icu_language_tag(colliculocale,
- icu_validation_level);
+ char *langtag = icu_language_tag(colliculocale,
+ icu_validation_level);
if (langtag && strcmp(colliculocale, langtag) != 0)
{
@@ -476,17 +476,18 @@ AlterCollation(AlterCollationStmt *stmt)
Datum
pg_collation_actual_version(PG_FUNCTION_ARGS)
{
- Oid collid = PG_GETARG_OID(0);
- char provider;
- char *locale;
- char *version;
- Datum datum;
+ Oid collid = PG_GETARG_OID(0);
+ char provider;
+ char *locale;
+ char *version;
+ Datum datum;
if (collid == DEFAULT_COLLATION_OID)
{
/* retrieve from pg_database */
HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
+
if (!HeapTupleIsValid(dbtup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -506,7 +507,8 @@ pg_collation_actual_version(PG_FUNCTION_ARGS)
{
/* retrieve from pg_collation */
- HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+ HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+
if (!HeapTupleIsValid(colltp))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -657,11 +659,10 @@ create_collation_from_locale(const char *locale, int nspid,
Oid collid;
/*
- * Some systems have locale names that don't consist entirely of
- * ASCII letters (such as "bokmål" or "français").
- * This is pretty silly, since we need the locale itself to
- * interpret the non-ASCII characters. We can't do much with
- * those, so we filter them out.
+ * Some systems have locale names that don't consist entirely of ASCII
+ * letters (such as "bokmål" or "français"). This is pretty
+ * silly, since we need the locale itself to interpret the non-ASCII
+ * characters. We can't do much with those, so we filter them out.
*/
if (!pg_is_ascii(locale))
{
@@ -681,19 +682,18 @@ create_collation_from_locale(const char *locale, int nspid,
return -1;
}
if (enc == PG_SQL_ASCII)
- return -1; /* C/POSIX are already in the catalog */
+ return -1; /* C/POSIX are already in the catalog */
/* count valid locales found in operating system */
(*nvalidp)++;
/*
- * Create a collation named the same as the locale, but quietly
- * doing nothing if it already exists. This is the behavior we
- * need even at initdb time, because some versions of "locale -a"
- * can report the same locale name more than once. And it's
- * convenient for later import runs, too, since you just about
- * always want to add on new locales without a lot of chatter
- * about existing ones.
+ * Create a collation named the same as the locale, but quietly doing
+ * nothing if it already exists. This is the behavior we need even at
+ * initdb time, because some versions of "locale -a" can report the same
+ * locale name more than once. And it's convenient for later import runs,
+ * too, since you just about always want to add on new locales without a
+ * lot of chatter about existing ones.
*/
collid = CollationCreate(locale, nspid, GetUserId(),
COLLPROVIDER_LIBC, true, enc,
@@ -995,8 +995,8 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
param.nvalidp = &nvalid;
/*
- * Enumerate the locales that are either installed on or supported
- * by the OS.
+ * Enumerate the locales that are either installed on or supported by
+ * the OS.
*/
if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL,
(LPARAM) &param, NULL))
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 2e242eeff2..99d4080ea9 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -259,7 +259,7 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
List *rlocatorlist = NIL;
LockRelId relid;
Snapshot snapshot;
- SMgrRelation smgr;
+ SMgrRelation smgr;
BufferAccessStrategy bstrategy;
/* Get pg_class relfilenumber. */
@@ -1065,8 +1065,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
*/
if (!IsBinaryUpgrade && dbiculocale != src_iculocale)
{
- char *langtag = icu_language_tag(dbiculocale,
- icu_validation_level);
+ char *langtag = icu_language_tag(dbiculocale,
+ icu_validation_level);
if (langtag && strcmp(dbiculocale, langtag) != 0)
{
@@ -1219,7 +1219,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
dst_deftablespace = get_tablespace_oid(tablespacename, false);
/* check permissions */
aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
tablespacename);
@@ -1406,8 +1406,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
* If we're going to be reading data for the to-be-created database into
* shared_buffers, take a lock on it. Nobody should know that this
* database exists yet, but it's good to maintain the invariant that an
- * AccessExclusiveLock on the database is sufficient to drop all
- * of its buffers without worrying about more being read later.
+ * AccessExclusiveLock on the database is sufficient to drop all of its
+ * buffers without worrying about more being read later.
*
* Note that we need to do this before entering the
* PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback
@@ -1933,7 +1933,7 @@ movedb(const char *dbname, const char *tblspcname)
* Permission checks
*/
aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
tblspcname);
@@ -3110,7 +3110,7 @@ dbase_redo(XLogReaderState *record)
if (info == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
+ (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
char *src_path;
char *dst_path;
char *parent_path;
@@ -3182,7 +3182,7 @@ dbase_redo(XLogReaderState *record)
else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{
xl_dbase_create_wal_log_rec *xlrec =
- (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
+ (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
char *dbpath;
char *parent_path;
diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c
index 82bda15889..469a6c2ee9 100644
--- a/src/backend/commands/dropcmds.c
+++ b/src/backend/commands/dropcmds.c
@@ -493,6 +493,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
case OBJECT_TABLE:
case OBJECT_TABLESPACE:
case OBJECT_VIEW:
+
/*
* These are handled elsewhere, so if someone gets here the code
* is probably wrong or should be revisited.
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 5334c503e1..15f9bddcdf 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -1523,7 +1523,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
{
BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan;
const char *indexname =
- explain_get_index_name(bitmapindexscan->indexid);
+ explain_get_index_name(bitmapindexscan->indexid);
if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfo(es->str, " on %s",
@@ -3008,7 +3008,7 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
for (n = 0; n < incrsortstate->shared_info->num_workers; n++)
{
IncrementalSortInfo *incsort_info =
- &incrsortstate->shared_info->sinfo[n];
+ &incrsortstate->shared_info->sinfo[n];
/*
* If a worker hasn't processed any sort groups at all, then
@@ -4212,7 +4212,7 @@ ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es)
{
ListCell *cell;
const char *label =
- (list_length(css->custom_ps) != 1 ? "children" : "child");
+ (list_length(css->custom_ps) != 1 ? "children" : "child");
foreach(cell, css->custom_ps)
ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es);
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 71caa3b9f3..49c7864c7c 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -151,7 +151,7 @@ compute_return_type(TypeName *returnType, Oid languageOid,
namespaceId = QualifiedNameGetCreationNamespace(returnType->names,
&typname);
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
@@ -2117,7 +2117,7 @@ ExecuteDoStmt(ParseState *pstate, DoStmt *stmt, bool atomic)
AclResult aclresult;
aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_LANGUAGE,
NameStr(languageStruct->lanname));
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index e6ee99e51f..a5168c9f09 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -748,7 +748,7 @@ DefineIndex(Oid relationId,
AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
@@ -780,7 +780,7 @@ DefineIndex(Oid relationId,
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId));
@@ -2708,7 +2708,7 @@ ExecReindex(ParseState *pstate, ReindexStmt *stmt, bool isTopLevel)
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params.tablespaceOid));
@@ -3066,11 +3066,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
/*
* The table can be reindexed if the user has been granted MAINTAIN on
* the table or one of its partition ancestors or the user is a
- * superuser, the table owner, or the database/schema owner (but in the
- * latter case, only if it's not a shared relation). pg_class_aclcheck
- * includes the superuser case, and depending on objectKind we already
- * know that the user has permission to run REINDEX on this database or
- * schema per the permission checks at the beginning of this routine.
+ * superuser, the table owner, or the database/schema owner (but in
+ * the latter case, only if it's not a shared relation).
+ * pg_class_aclcheck includes the superuser case, and depending on
+ * objectKind we already know that the user has permission to run
+ * REINDEX on this database or schema per the permission checks at the
+ * beginning of this routine.
*/
if (classtuple->relisshared &&
pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&
@@ -3312,7 +3313,7 @@ ReindexMultipleInternal(List *relids, ReindexParams *params)
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params->tablespaceOid));
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index b6a71154a8..6eb3dc6bab 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -400,7 +400,7 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId)
* no special case for them.
*/
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_DATABASE,
get_database_name(MyDatabaseId));
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index e8b288d01c..1c88c2bccb 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -604,9 +604,9 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)");
/*
- * We don't want to allow unprivileged users to be able to trigger attempts
- * to access arbitrary network destinations, so require the user to have
- * been specifically authorized to create subscriptions.
+ * We don't want to allow unprivileged users to be able to trigger
+ * attempts to access arbitrary network destinations, so require the user
+ * to have been specifically authorized to create subscriptions.
*/
if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION))
ereport(ERROR,
@@ -631,10 +631,10 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
* exempt a subscription from this requirement.
*/
if (!opts.passwordrequired && !superuser_arg(owner))
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/*
* If built with appropriate switch, whine when regression-testing
@@ -1113,8 +1113,8 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
if (!sub->passwordrequired && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* Lock the subscription so nobody else can do anything with it. */
LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
@@ -1827,8 +1827,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
if (!form->subpasswordrequired && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* Must be able to become new owner */
check_can_set_role(GetUserId(), newOwnerId);
@@ -1837,8 +1837,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
* current owner must have CREATE on database
*
* This is consistent with how ALTER SCHEMA ... OWNER TO works, but some
- * other object types behave differently (e.g. you can't give a table to
- * a user who lacks CREATE privileges on a schema).
+ * other object types behave differently (e.g. you can't give a table to a
+ * user who lacks CREATE privileges on a schema).
*/
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId,
GetUserId(), ACL_CREATE);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index c7a8a689b7..4d49d70c33 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -806,7 +806,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId));
@@ -1931,7 +1931,7 @@ ExecuteTruncateGuts(List *explicit_rels,
resultRelInfo = resultRelInfos;
foreach(cell, rels)
{
- UserContext ucxt;
+ UserContext ucxt;
if (run_as_table_owner)
SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
@@ -2143,7 +2143,7 @@ ExecuteTruncateGuts(List *explicit_rels,
resultRelInfo = resultRelInfos;
foreach(cell, rels)
{
- UserContext ucxt;
+ UserContext ucxt;
if (run_as_table_owner)
SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
@@ -2635,7 +2635,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
if (CompressionMethodIsValid(attribute->attcompression))
{
const char *compression =
- GetCompressionMethodName(attribute->attcompression);
+ GetCompressionMethodName(attribute->attcompression);
if (def->compression == NULL)
def->compression = pstrdup(compression);
@@ -13947,7 +13947,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock
/* New owner must have CREATE privilege on namespace */
aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceOid));
@@ -14377,7 +14377,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
if (check_option)
{
const char *view_updatable_error =
- view_query_is_auto_updatable(view_query, true);
+ view_query_is_auto_updatable(view_query, true);
if (view_updatable_error)
ereport(ERROR,
@@ -14656,7 +14656,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt)
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(new_tablespaceoid));
@@ -17134,7 +17134,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
if (IsA(stmt, RenameStmt))
{
aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(classform->relnamespace));
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 3dfbf6a917..13b0dee146 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -1278,7 +1278,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
/* Check permissions, similarly complaining only if interactive */
aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
{
if (source >= PGC_S_INTERACTIVE)
@@ -1408,7 +1408,7 @@ PrepareTempTablespaces(void)
/* Check permissions similarly */
aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
continue;
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 3440dbc440..216482095d 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -734,7 +734,7 @@ DefineDomain(CreateDomainStmt *stmt)
/* Check we have creation rights in target namespace */
aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(domainNamespace));
@@ -3743,8 +3743,8 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
/* New owner must have CREATE privilege on namespace */
aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace,
- newOwnerId,
- ACL_CREATE);
+ newOwnerId,
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(typTup->typnamespace));
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 707114bdd0..d63d3c58ca 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -86,7 +86,7 @@ typedef struct
int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256;
char *createrole_self_grant = "";
bool createrole_self_grant_enabled = false;
-GrantRoleOptions createrole_self_grant_options;
+GrantRoleOptions createrole_self_grant_options;
/* Hook to check passwords in CreateRole() and AlterRole() */
check_password_hook_type check_password_hook = NULL;
@@ -169,7 +169,7 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
DefElem *dadminmembers = NULL;
DefElem *dvalidUntil = NULL;
DefElem *dbypassRLS = NULL;
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
/* The defaults can vary depending on the original statement type */
switch (stmt->stmt_type)
@@ -535,8 +535,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
*
* The grantor of record for this implicit grant is the bootstrap
* superuser, which means that the CREATEROLE user cannot revoke the
- * grant. They can however grant the created role back to themselves
- * with different options, since they enjoy ADMIN OPTION on it.
+ * grant. They can however grant the created role back to themselves with
+ * different options, since they enjoy ADMIN OPTION on it.
*/
if (!superuser())
{
@@ -561,8 +561,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
BOOTSTRAP_SUPERUSERID, &poptself);
/*
- * We must make the implicit grant visible to the code below, else
- * the additional grants will fail.
+ * We must make the implicit grant visible to the code below, else the
+ * additional grants will fail.
*/
CommandCounterIncrement();
@@ -585,8 +585,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
* Add the specified members to this new role. adminmembers get the admin
* option, rolemembers don't.
*
- * NB: No permissions check is required here. If you have enough rights
- * to create a role, you can add any members you like.
+ * NB: No permissions check is required here. If you have enough rights to
+ * create a role, you can add any members you like.
*/
AddRoleMems(currentUserId, stmt->role, roleid,
rolemembers, roleSpecsToIds(rolemembers),
@@ -647,7 +647,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
DefElem *dbypassRLS = NULL;
Oid roleid;
Oid currentUserId = GetUserId();
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
check_rolespec_name(stmt->role,
_("Cannot alter reserved roles."));
@@ -862,7 +862,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
*/
if (dissuper)
{
- bool should_be_super = boolVal(dissuper->arg);
+ bool should_be_super = boolVal(dissuper->arg);
if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID)
ereport(ERROR,
@@ -1021,9 +1021,9 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
shdepLockAndCheckObject(AuthIdRelationId, roleid);
/*
- * To mess with a superuser you gotta be superuser; otherwise you
- * need CREATEROLE plus admin option on the target role; unless you're
- * just trying to change your own settings
+ * To mess with a superuser you gotta be superuser; otherwise you need
+ * CREATEROLE plus admin option on the target role; unless you're just
+ * trying to change your own settings
*/
if (roleform->rolsuper)
{
@@ -1037,7 +1037,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
else
{
if ((!have_createrole_privilege() ||
- !is_admin_of_role(GetUserId(), roleid))
+ !is_admin_of_role(GetUserId(), roleid))
&& roleid != GetUserId())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -1490,14 +1490,14 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
Oid grantor;
List *grantee_ids;
ListCell *item;
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
Oid currentUserId = GetUserId();
/* Parse options list. */
InitGrantRoleOptions(&popt);
foreach(item, stmt->opt)
{
- DefElem *opt = (DefElem *) lfirst(item);
+ DefElem *opt = (DefElem *) lfirst(item);
char *optval = defGetString(opt);
if (strcmp(opt->defname, "admin") == 0)
@@ -1546,8 +1546,8 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
/*
* Step through all of the granted roles and add, update, or remove
* entries in pg_auth_members as appropriate. If stmt->is_grant is true,
- * we are adding new grants or, if they already exist, updating options
- * on those grants. If stmt->is_grant is false, we are revoking grants or
+ * we are adding new grants or, if they already exist, updating options on
+ * those grants. If stmt->is_grant is false, we are revoking grants or
* removing options from them.
*/
foreach(item, stmt->granted_roles)
@@ -1848,8 +1848,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
ObjectIdGetDatum(grantorId));
/*
- * If we found a tuple, update it with new option values, unless
- * there are no changes, in which case issue a WARNING.
+ * If we found a tuple, update it with new option values, unless there
+ * are no changes, in which case issue a WARNING.
*
* If we didn't find a tuple, just insert one.
*/
@@ -1932,8 +1932,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
popt->inherit;
else
{
- HeapTuple mrtup;
- Form_pg_authid mrform;
+ HeapTuple mrtup;
+ Form_pg_authid mrform;
mrtup = SearchSysCache1(AUTHOID, memberid);
if (!HeapTupleIsValid(mrtup))
@@ -2332,8 +2332,8 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions,
/*
* If popt.specified == 0, we're revoking the grant entirely; otherwise,
* we expect just one bit to be set, and we're revoking the corresponding
- * option. As of this writing, there's no syntax that would allow for
- * an attempt to revoke multiple options at once, and the logic below
+ * option. As of this writing, there's no syntax that would allow for an
+ * attempt to revoke multiple options at once, and the logic below
* wouldn't work properly if such syntax were added, so assert that our
* caller isn't trying to do that.
*/
@@ -2365,7 +2365,7 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions,
}
else
{
- bool revoke_admin_option_only;
+ bool revoke_admin_option_only;
/*
* Revoking the grant entirely, or ADMIN option on a grant,
@@ -2572,7 +2572,7 @@ check_createrole_self_grant(char **newval, void **extra, GucSource source)
void
assign_createrole_self_grant(const char *newval, void *extra)
{
- unsigned options = * (unsigned *) extra;
+ unsigned options = *(unsigned *) extra;
createrole_self_grant_enabled = (options != 0);
createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN
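The last user.c hunk is one of the few token-level fixes in this section: the space between a unary dereference and its cast operand is dropped, turning * (unsigned *) extra into *(unsigned *) extra. A runnable sketch of the spacing rule, with invented variables:

#include <stdio.h>

int
main(void)
{
	unsigned value = 7;
	void *extra = &value;
	unsigned options = *(unsigned *) extra;	/* no space after unary '*' */

	printf("%u\n", options);
	return 0;
}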
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index ff98c773f5..9bd77546b9 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -437,7 +437,7 @@ DefineView(ViewStmt *stmt, const char *queryString,
if (check_option)
{
const char *view_updatable_error =
- view_query_is_auto_updatable(viewParse, true);
+ view_query_is_auto_updatable(viewParse, true);
if (view_updatable_error)
ereport(ERROR,
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index bf257a41c8..e6e616865c 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -1214,8 +1214,8 @@ ExecInitExprRec(Expr *node, ExprState *state,
/* Check permission to call function */
aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid,
- GetUserId(),
- ACL_EXECUTE);
+ GetUserId(),
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(cmpfuncid));
@@ -1224,8 +1224,8 @@ ExecInitExprRec(Expr *node, ExprState *state,
if (OidIsValid(opexpr->hashfuncid))
{
aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid,
- GetUserId(),
- ACL_EXECUTE);
+ GetUserId(),
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(opexpr->hashfuncid));
@@ -3613,7 +3613,7 @@ ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase,
* column sorted on.
*/
TargetEntry *source_tle =
- (TargetEntry *) linitial(pertrans->aggref->args);
+ (TargetEntry *) linitial(pertrans->aggref->args);
Assert(list_length(pertrans->aggref->args) == 1);
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 7cc443ec52..7a4d7a4eee 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -1659,7 +1659,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerGroup pergroup_allaggs =
- aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
+ aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
@@ -1684,7 +1684,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1712,7 +1712,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1730,7 +1730,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1747,7 +1747,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@@ -1768,7 +1768,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@@ -1785,7 +1785,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index da28e5e40c..1d82b64b89 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -354,8 +354,8 @@ ExecInsertIndexTuples(ResultRelInfo *resultRelInfo,
continue;
/*
- * Skip processing of non-summarizing indexes if we only
- * update summarizing indexes
+ * Skip processing of non-summarizing indexes if we only update
+ * summarizing indexes
*/
if (onlySummarizing && !indexInfo->ii_Summarizing)
continue;
diff --git a/src/backend/executor/execSRF.c b/src/backend/executor/execSRF.c
index d09a7758dc..73bf9152a4 100644
--- a/src/backend/executor/execSRF.c
+++ b/src/backend/executor/execSRF.c
@@ -260,7 +260,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
if (first_time)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
@@ -290,7 +290,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
if (tupdesc == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
/*
* This is the first non-NULL result from the
@@ -395,7 +395,7 @@ no_function_result:
if (rsinfo.setResult == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index ad81a675aa..468db94fe5 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -3690,7 +3690,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/* Check permission to call aggregate function */
aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(),
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_AGGREGATE,
get_func_name(aggref->aggfnoid));
@@ -3757,7 +3757,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
if (OidIsValid(finalfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(finalfn_oid));
@@ -3766,7 +3766,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
if (OidIsValid(serialfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(serialfn_oid));
@@ -3775,7 +3775,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
if (OidIsValid(deserialfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(deserialfn_oid));
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 301e4acba3..8b5c35b82b 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1339,7 +1339,7 @@ ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
else
{
size_t tuple_size =
- MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
+ MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
/* It belongs in a later batch. */
hashtable->batches[batchno].estimated_size += tuple_size;
@@ -1381,7 +1381,7 @@ ExecParallelHashRepartitionRest(HashJoinTable hashtable)
for (i = 1; i < old_nbatch; ++i)
{
ParallelHashJoinBatch *shared =
- NthParallelHashJoinBatch(old_batches, i);
+ NthParallelHashJoinBatch(old_batches, i);
old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
ParallelWorkerNumber + 1,
@@ -3337,7 +3337,7 @@ ExecHashTableDetachBatch(HashJoinTable hashtable)
while (DsaPointerIsValid(batch->chunks))
{
HashMemoryChunk chunk =
- dsa_get_address(hashtable->area, batch->chunks);
+ dsa_get_address(hashtable->area, batch->chunks);
dsa_pointer next = chunk->next.shared;
dsa_free(hashtable->area, batch->chunks);
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index e40436db38..980746128b 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -1216,7 +1216,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
{
SharedTuplestoreAccessor *inner_tuples;
Barrier *batch_barrier =
- &hashtable->batches[batchno].shared->batch_barrier;
+ &hashtable->batches[batchno].shared->batch_barrier;
switch (BarrierAttach(batch_barrier))
{
@@ -1330,22 +1330,22 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
BufFile *file = *fileptr;
/*
- * The batch file is lazily created. If this is the first tuple
- * written to this batch, the batch file is created and its buffer is
- * allocated in the spillCxt context, NOT in the batchCxt.
+ * The batch file is lazily created. If this is the first tuple written to
+ * this batch, the batch file is created and its buffer is allocated in
+ * the spillCxt context, NOT in the batchCxt.
*
- * During the build phase, buffered files are created for inner
- * batches. Each batch's buffered file is closed (and its buffer freed)
- * after the batch is loaded into memory during the outer side scan.
- * Therefore, it is necessary to allocate the batch file buffer in a
- * memory context which outlives the batch itself.
+ * During the build phase, buffered files are created for inner batches.
+ * Each batch's buffered file is closed (and its buffer freed) after the
+ * batch is loaded into memory during the outer side scan. Therefore, it
+ * is necessary to allocate the batch file buffer in a memory context
+ * which outlives the batch itself.
*
- * Also, we use spillCxt instead of hashCxt for a better accounting of
- * the spilling memory consumption.
+ * Also, we use spillCxt instead of hashCxt for a better accounting of the
+ * spilling memory consumption.
*/
if (file == NULL)
{
- MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
+ MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
file = BufFileCreateTemp(false);
*fileptr = file;
@@ -1622,7 +1622,7 @@ ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt)
{
int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate =
- shm_toc_lookup(pcxt->toc, plan_node_id, false);
+ shm_toc_lookup(pcxt->toc, plan_node_id, false);
/*
* It would be possible to reuse the shared hash table in single-batch
@@ -1657,7 +1657,7 @@ ExecHashJoinInitializeWorker(HashJoinState *state,
HashState *hashNode;
int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate =
- shm_toc_lookup(pwcxt->toc, plan_node_id, false);
+ shm_toc_lookup(pwcxt->toc, plan_node_id, false);
/* Attach to the space for shared temporary files. */
SharedFileSetAttach(&pstate->fileset, pwcxt->seg);
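
The ExecHashJoinSaveTuple() comment reflowed above describes a batch spill file that is created lazily, on the first tuple written, with its buffer allocated in a context that outlives the batch. As a standalone plain-C illustration of the lazy-creation half of that pattern (tmpfile() and the struct below are stand-ins, not PostgreSQL's BufFile API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for a hash-join batch: the spill file is NULL until needed. */
    struct batch
    {
        FILE       *file;
    };

    /* Create the spill file only when the first tuple is actually written. */
    static void
    save_tuple(struct batch *b, const void *data, size_t len)
    {
        if (b->file == NULL)
        {
            b->file = tmpfile();    /* lazily created on first write */
            if (b->file == NULL)
            {
                perror("tmpfile");
                exit(1);
            }
        }
        if (fwrite(data, 1, len, b->file) != len)
        {
            perror("fwrite");
            exit(1);
        }
    }

    int
    main(void)
    {
        struct batch b = {NULL};
        const char tup[] = "tuple";

        save_tuple(&b, tup, sizeof(tup));   /* file springs into existence here */
        printf("spill file %s\n", b.file ? "created" : "still absent");
        if (b.file)
            fclose(b.file);
        return 0;
    }
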
diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c
index 26ceafec5f..34257ce34b 100644
--- a/src/backend/executor/nodeIncrementalSort.c
+++ b/src/backend/executor/nodeIncrementalSort.c
@@ -1007,9 +1007,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
if (incrsortstate->ss.ps.instrument != NULL)
{
IncrementalSortGroupInfo *fullsortGroupInfo =
- &incrsortstate->incsort_info.fullsortGroupInfo;
+ &incrsortstate->incsort_info.fullsortGroupInfo;
IncrementalSortGroupInfo *prefixsortGroupInfo =
- &incrsortstate->incsort_info.prefixsortGroupInfo;
+ &incrsortstate->incsort_info.prefixsortGroupInfo;
fullsortGroupInfo->groupCount = 0;
fullsortGroupInfo->maxDiskSpaceUsed = 0;
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 7f5002527f..2a5fec8d01 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -111,7 +111,7 @@ typedef struct UpdateContext
{
bool updated; /* did UPDATE actually occur? */
bool crossPartUpdate; /* was it a cross-partition update? */
- TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
+ TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
/*
* Lock mode to acquire on the latest tuple version before performing
@@ -881,7 +881,7 @@ ExecInsert(ModifyTableContext *context,
{
TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
TupleDesc plan_tdesc =
- CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+ CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c
index 0c6c912778..791cbd2372 100644
--- a/src/backend/executor/nodeTableFuncscan.c
+++ b/src/backend/executor/nodeTableFuncscan.c
@@ -352,7 +352,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc)
int colno;
Datum value;
int ordinalitycol =
- ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
+ ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
/*
* Install the document as a possibly-toasted Datum into the tablefunc
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index 4f0618f27a..310ac23e3a 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -2582,7 +2582,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
/* Check permission to call window function */
aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(),
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(wfunc->winfnoid));
@@ -2821,7 +2821,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
if (!OidIsValid(aggform->aggminvtransfn))
use_ma_code = false; /* sine qua non */
else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY &&
- aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
+ aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
use_ma_code = true; /* decision forced by safety */
else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
use_ma_code = false; /* non-moving frame head */
@@ -2871,7 +2871,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
ReleaseSysCache(procTuple);
aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(transfn_oid));
@@ -2880,7 +2880,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
if (OidIsValid(invtransfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(invtransfn_oid));
@@ -2890,7 +2890,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
if (OidIsValid(finalfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(finalfn_oid));
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 256632c985..33975687b3 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -3345,7 +3345,7 @@ SPI_register_trigger_data(TriggerData *tdata)
if (tdata->tg_newtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
+ palloc(sizeof(EphemeralNamedRelationData));
int rc;
enr->md.name = tdata->tg_trigger->tgnewtable;
@@ -3362,7 +3362,7 @@ SPI_register_trigger_data(TriggerData *tdata)
if (tdata->tg_oldtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
+ palloc(sizeof(EphemeralNamedRelationData));
int rc;
enr->md.name = tdata->tg_trigger->tgoldtable;
diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c
index a8b73a9cf1..04ae3052a8 100644
--- a/src/backend/jit/llvm/llvmjit.c
+++ b/src/backend/jit/llvm/llvmjit.c
@@ -799,9 +799,9 @@ llvm_session_initialize(void)
LLVMInitializeNativeAsmParser();
/*
- * When targeting an LLVM version with opaque pointers enabled by
- * default, turn them off for the context we build our code in. We don't
- * need to do so for other contexts (e.g. llvm_ts_context). Once the IR is
+ * When targeting an LLVM version with opaque pointers enabled by default,
+ * turn them off for the context we build our code in. We don't need to
+ * do so for other contexts (e.g. llvm_ts_context). Once the IR is
* generated, it carries the necessary information.
*/
#if LLVM_VERSION_MAJOR > 14
@@ -1118,7 +1118,7 @@ llvm_resolve_symbol(const char *symname, void *ctx)
static LLVMErrorRef
llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx,
- LLVMOrcLookupStateRef * LookupState, LLVMOrcLookupKind Kind,
+ LLVMOrcLookupStateRef *LookupState, LLVMOrcLookupKind Kind,
LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags,
LLVMOrcCLookupSet LookupSet, size_t LookupSetSize)
{
@@ -1175,7 +1175,7 @@ static LLVMOrcObjectLayerRef
llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple)
{
LLVMOrcObjectLayerRef objlayer =
- LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
+ LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
#if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER
if (jit_debugging_support)
diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c
index 6b15588da6..15d4a7b431 100644
--- a/src/backend/jit/llvm/llvmjit_deform.c
+++ b/src/backend/jit/llvm/llvmjit_deform.c
@@ -650,7 +650,7 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc,
{
LLVMValueRef v_tmp_loaddata;
LLVMTypeRef vartypep =
- LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
+ LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
v_tmp_loaddata =
LLVMBuildPointerCast(b, v_attdatap, vartypep, "");
diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c
index 774db57ae2..00d7b8110b 100644
--- a/src/backend/jit/llvm/llvmjit_expr.c
+++ b/src/backend/jit/llvm/llvmjit_expr.c
@@ -1047,7 +1047,7 @@ llvm_compile_expr(ExprState *state)
else
{
LLVMValueRef v_value =
- LLVMBuildLoad(b, v_resvaluep, "");
+ LLVMBuildLoad(b, v_resvaluep, "");
v_value = LLVMBuildZExt(b,
LLVMBuildICmp(b, LLVMIntEQ,
@@ -2127,8 +2127,7 @@ llvm_compile_expr(ExprState *state)
/*
* pergroup = &aggstate->all_pergroups
- * [op->d.agg_trans.setoff]
- * [op->d.agg_trans.transno];
+ * [op->d.agg_trans.setoff] [op->d.agg_trans.transno];
*/
v_allpergroupsp =
l_load_struct_gep(b, v_aggstatep,
diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c
index 7f52e1ee23..43d45810cd 100644
--- a/src/backend/libpq/be-secure-gssapi.c
+++ b/src/backend/libpq/be-secure-gssapi.c
@@ -527,8 +527,8 @@ secure_open_gssapi(Port *port)
/*
* Use the configured keytab, if there is one. As we now require MIT
- * Kerberos, we might consider using the credential store extensions in the
- * future instead of the environment variable.
+ * Kerberos, we might consider using the credential store extensions in
+ * the future instead of the environment variable.
*/
if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0')
{
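
The comment rewrapped above mentions pointing MIT Kerberos at the configured keytab through an environment variable. A minimal sketch of that mechanism, assuming a hypothetical keytab path; KRB5_KTNAME is the variable MIT Kerberos consults for the server keytab:

    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        const char *keyfile = "/etc/postgresql/krb5.keytab";    /* illustrative path */

        /*
         * MIT Kerberos reads KRB5_KTNAME to locate the server keytab, so
         * exporting it before accepting a GSSAPI context points the library
         * at the configured file.
         */
        if (setenv("KRB5_KTNAME", keyfile, 1) != 0)
        {
            perror("setenv");
            return 1;
        }
        printf("keytab: %s\n", getenv("KRB5_KTNAME"));
        return 0;
    }
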
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index dc4153a2f2..05276ab95c 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -1104,8 +1104,8 @@ prepare_cert_name(char *name)
if (namelen > MAXLEN)
{
/*
- * Keep the end of the name, not the beginning, since the most specific
- * field is likely to give users the most information.
+ * Keep the end of the name, not the beginning, since the most
+ * specific field is likely to give users the most information.
*/
truncated = name + namelen - MAXLEN;
truncated[0] = truncated[1] = truncated[2] = '.';
@@ -1165,8 +1165,8 @@ verify_cb(int ok, X509_STORE_CTX *ctx)
/*
* Get the Subject and Issuer for logging, but don't let maliciously
- * huge certs flood the logs, and don't reflect non-ASCII bytes into it
- * either.
+ * huge certs flood the logs, and don't reflect non-ASCII bytes into
+ * it either.
*/
subject = X509_NAME_to_cstring(X509_get_subject_name(cert));
sub_prepared = prepare_cert_name(subject);
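
The prepare_cert_name() hunk above keeps the end of an over-long name and marks the cut with three leading dots, on the theory that the most specific field sits at the end. A self-contained sketch of that truncation step, using an arbitrary stand-in MAXLEN rather than the backend's real limit:

    #include <stdio.h>
    #include <string.h>

    #define MAXLEN 16               /* stand-in limit for illustration */

    /*
     * Keep the end of the name, not the beginning, since the most specific
     * field is likely to give users the most information.
     */
    static const char *
    truncate_name(char *name)
    {
        size_t      namelen = strlen(name);
        char       *truncated = name;

        if (namelen > MAXLEN)
        {
            truncated = name + namelen - MAXLEN;
            truncated[0] = truncated[1] = truncated[2] = '.';
        }
        return truncated;
    }

    int
    main(void)
    {
        char        subject[] = "CN=very-long-certificate-subject,O=Example";

        printf("%s\n", truncate_name(subject));    /* "...t,O=Example" style tail */
        return 0;
    }
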
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index d786a01835..1ef113649f 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -2693,8 +2693,9 @@ load_hba(void)
if (!ok)
{
/*
- * File contained one or more errors, so bail out. MemoryContextDelete
- * is enough to clean up everything, including regexes.
+ * File contained one or more errors, so bail out.
+ * MemoryContextDelete is enough to clean up everything, including
+ * regexes.
*/
MemoryContextDelete(hbacxt);
return false;
@@ -3056,8 +3057,9 @@ load_ident(void)
if (!ok)
{
/*
- * File contained one or more errors, so bail out. MemoryContextDelete
- * is enough to clean up everything, including regexes.
+ * File contained one or more errors, so bail out.
+ * MemoryContextDelete is enough to clean up everything, including
+ * regexes.
*/
MemoryContextDelete(ident_context);
return false;
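
Both load_hba() and load_ident() above lean on a single MemoryContextDelete() to release every allocation made during parsing, compiled regexes included. A toy arena below sketches that all-or-nothing cleanup pattern in portable C; the chained-malloc bookkeeping is a stand-in for the real MemoryContext machinery:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* A toy arena: every allocation is chained so one call frees them all. */
    struct arena_chunk
    {
        struct arena_chunk *next;
    };

    struct arena
    {
        struct arena_chunk *head;
    };

    static void *
    arena_alloc(struct arena *a, size_t size)
    {
        struct arena_chunk *c = malloc(sizeof(*c) + size);

        if (c == NULL)
            return NULL;
        c->next = a->head;
        a->head = c;
        return c + 1;
    }

    /* Analogous to MemoryContextDelete: releases everything at once. */
    static void
    arena_delete(struct arena *a)
    {
        while (a->head)
        {
            struct arena_chunk *c = a->head;

            a->head = c->next;
            free(c);
        }
    }

    int
    main(void)
    {
        struct arena parse_cxt = {NULL};
        char       *line = arena_alloc(&parse_cxt, 64);
        int         ok = 0;         /* pretend the file contained errors */

        if (line)
            strcpy(line, "host all all 127.0.0.1/32 trust");

        if (!ok)
        {
            /* File contained errors: one delete cleans up everything. */
            arena_delete(&parse_cxt);
            return 1;
        }
        arena_delete(&parse_cxt);
        return 0;
    }
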
diff --git a/src/backend/nodes/gen_node_support.pl b/src/backend/nodes/gen_node_support.pl
index ecbcadb8bf..b89b491d35 100644
--- a/src/backend/nodes/gen_node_support.pl
+++ b/src/backend/nodes/gen_node_support.pl
@@ -106,7 +106,7 @@ my @nodetag_only_files = qw(
# In HEAD, these variables should be left undef, since we don't promise
# ABI stability during development.
-my $last_nodetag = undef;
+my $last_nodetag = undef;
my $last_nodetag_no = undef;
# output file names
@@ -161,9 +161,9 @@ push @node_types, qw(List);
# (Ideally we'd mark List as "special copy/equal" not "no copy/equal".
# But until there's other use-cases for that, just hot-wire the tests
# that would need to distinguish.)
-push @no_copy, qw(List);
-push @no_equal, qw(List);
-push @no_query_jumble, qw(List);
+push @no_copy, qw(List);
+push @no_equal, qw(List);
+push @no_query_jumble, qw(List);
push @special_read_write, qw(List);
# Nodes with custom copy/equal implementations are skipped from
@@ -230,7 +230,7 @@ foreach my $infile (@ARGV)
}
$file_content .= $raw_file_content;
- my $lineno = 0;
+ my $lineno = 0;
my $prevline = '';
foreach my $line (split /\n/, $file_content)
{
@@ -247,7 +247,7 @@ foreach my $infile (@ARGV)
if ($line =~ /;$/)
{
# found the end, re-attach any previous line(s)
- $line = $prevline . $line;
+ $line = $prevline . $line;
$prevline = '';
}
elsif ($prevline eq ''
@@ -272,7 +272,7 @@ foreach my $infile (@ARGV)
if ($subline == 1)
{
$is_node_struct = 0;
- $supertype = undef;
+ $supertype = undef;
next if $line eq '{';
die "$infile:$lineno: expected opening brace\n";
}
@@ -280,7 +280,7 @@ foreach my $infile (@ARGV)
elsif ($subline == 2
&& $line =~ /^\s*pg_node_attr\(([\w(), ]*)\)$/)
{
- $node_attrs = $1;
+ $node_attrs = $1;
$node_attrs_lineno = $lineno;
# hack: don't count the line
$subline--;
@@ -296,8 +296,8 @@ foreach my $infile (@ARGV)
}
elsif ($line =~ /\s*(\w+)\s+(\w+);/ and elem $1, @node_types)
{
- $is_node_struct = 1;
- $supertype = $1;
+ $is_node_struct = 1;
+ $supertype = $1;
$supertype_field = $2;
next;
}
@@ -339,7 +339,7 @@ foreach my $infile (@ARGV)
}
elsif ($attr eq 'no_copy_equal')
{
- push @no_copy, $in_struct;
+ push @no_copy, $in_struct;
push @no_equal, $in_struct;
}
elsif ($attr eq 'no_query_jumble')
@@ -373,7 +373,7 @@ foreach my $infile (@ARGV)
push @node_types, $in_struct;
# field names, types, attributes
- my @f = @my_fields;
+ my @f = @my_fields;
my %ft = %my_field_types;
my %fa = %my_field_attrs;
@@ -405,7 +405,7 @@ foreach my $infile (@ARGV)
unshift @f, @superfields;
}
# save in global info structure
- $node_type_info{$in_struct}->{fields} = \@f;
+ $node_type_info{$in_struct}->{fields} = \@f;
$node_type_info{$in_struct}->{field_types} = \%ft;
$node_type_info{$in_struct}->{field_attrs} = \%fa;
@@ -428,9 +428,9 @@ foreach my $infile (@ARGV)
}
# start new cycle
- $in_struct = undef;
- $node_attrs = '';
- @my_fields = ();
+ $in_struct = undef;
+ $node_attrs = '';
+ @my_fields = ();
%my_field_types = ();
%my_field_attrs = ();
}
@@ -441,10 +441,10 @@ foreach my $infile (@ARGV)
{
if ($is_node_struct)
{
- my $type = $1;
- my $name = $2;
+ my $type = $1;
+ my $name = $2;
my $array_size = $3;
- my $attrs = $4;
+ my $attrs = $4;
# strip "const"
$type =~ s/^const\s*//;
@@ -499,9 +499,9 @@ foreach my $infile (@ARGV)
{
if ($is_node_struct)
{
- my $type = $1;
- my $name = $2;
- my $args = $3;
+ my $type = $1;
+ my $name = $2;
+ my $args = $3;
my $attrs = $4;
my @attrs;
@@ -540,20 +540,20 @@ foreach my $infile (@ARGV)
if ($line =~ /^(?:typedef )?struct (\w+)$/ && $1 ne 'Node')
{
$in_struct = $1;
- $subline = 0;
+ $subline = 0;
}
# one node type typedef'ed directly from another
elsif ($line =~ /^typedef (\w+) (\w+);$/ and elem $1, @node_types)
{
my $alias_of = $1;
- my $n = $2;
+ my $n = $2;
# copy everything over
push @node_types, $n;
- my @f = @{ $node_type_info{$alias_of}->{fields} };
+ my @f = @{ $node_type_info{$alias_of}->{fields} };
my %ft = %{ $node_type_info{$alias_of}->{field_types} };
my %fa = %{ $node_type_info{$alias_of}->{field_attrs} };
- $node_type_info{$n}->{fields} = \@f;
+ $node_type_info{$n}->{fields} = \@f;
$node_type_info{$n}->{field_types} = \%ft;
$node_type_info{$n}->{field_attrs} = \%fa;
}
@@ -608,7 +608,7 @@ open my $nt, '>', "$output_path/nodetags.h$tmpext"
printf $nt $header_comment, 'nodetags.h';
-my $tagno = 0;
+my $tagno = 0;
my $last_tag = undef;
foreach my $n (@node_types, @extra_tags)
{
@@ -669,7 +669,7 @@ foreach my $n (@node_types)
{
next if elem $n, @abstract_types;
next if elem $n, @nodetag_only;
- my $struct_no_copy = (elem $n, @no_copy);
+ my $struct_no_copy = (elem $n, @no_copy);
my $struct_no_equal = (elem $n, @no_equal);
next if $struct_no_copy && $struct_no_equal;
@@ -705,15 +705,15 @@ _equal${n}(const $n *a, const $n *b)
# print instructions for each field
foreach my $f (@{ $node_type_info{$n}->{fields} })
{
- my $t = $node_type_info{$n}->{field_types}{$f};
- my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
- my $copy_ignore = $struct_no_copy;
+ my $t = $node_type_info{$n}->{field_types}{$f};
+ my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
+ my $copy_ignore = $struct_no_copy;
my $equal_ignore = $struct_no_equal;
# extract per-field attributes
my $array_size_field;
my $copy_as_field;
- my $copy_as_scalar = 0;
+ my $copy_as_scalar = 0;
my $equal_as_scalar = 0;
foreach my $a (@a)
{
@@ -768,7 +768,7 @@ _equal${n}(const $n *a, const $n *b)
# select instructions by field type
if ($t eq 'char*')
{
- print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_STRING_FIELD($f);\n" unless $equal_ignore;
}
elsif ($t eq 'Bitmapset*' || $t eq 'Relids')
@@ -779,7 +779,7 @@ _equal${n}(const $n *a, const $n *b)
}
elsif ($t eq 'int' && $f =~ 'location$')
{
- print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_LOCATION_FIELD($f);\n" unless $equal_ignore;
}
elsif (elem $t, @scalar_types or elem $t, @enum_types)
@@ -828,7 +828,7 @@ _equal${n}(const $n *a, const $n *b)
elsif ($t eq 'function pointer')
{
# we can copy and compare as a scalar
- print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore;
}
# node type
@@ -846,13 +846,13 @@ _equal${n}(const $n *a, const $n *b)
and $1 ne 'List'
and !$equal_ignore;
- print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_NODE_FIELD($f);\n" unless $equal_ignore;
}
# array (inline)
elsif ($t =~ /^\w+\[\w+\]$/)
{
- print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_ARRAY_FIELD($f);\n" unless $equal_ignore;
}
elsif ($t eq 'struct CustomPathMethods*'
@@ -861,7 +861,7 @@ _equal${n}(const $n *a, const $n *b)
# Fields of these types are required to be a pointer to a
# static table of callback functions. So we don't copy
# the table itself, just reference the original one.
- print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore;
}
else
@@ -1073,7 +1073,7 @@ _read${n}(void)
{
print $off "\tWRITE_FLOAT_FIELD($f.startup);\n";
print $off "\tWRITE_FLOAT_FIELD($f.per_tuple);\n";
- print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read;
+ print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read;
print $rff "\tREAD_FLOAT_FIELD($f.per_tuple);\n" unless $no_read;
}
elsif ($t eq 'Selectivity')
@@ -1278,8 +1278,8 @@ _jumble${n}(JumbleState *jstate, Node *node)
# print instructions for each field
foreach my $f (@{ $node_type_info{$n}->{fields} })
{
- my $t = $node_type_info{$n}->{field_types}{$f};
- my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
+ my $t = $node_type_info{$n}->{field_types}{$f};
+ my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
my $query_jumble_ignore = $struct_no_query_jumble;
my $query_jumble_location = 0;
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 0b271dae84..ef475d95a1 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2011,7 +2011,7 @@ cost_incremental_sort(Path *path,
{
PathKey *key = (PathKey *) lfirst(l);
EquivalenceMember *member = (EquivalenceMember *)
- linitial(key->pk_eclass->ec_members);
+ linitial(key->pk_eclass->ec_members);
/*
* Check if the expression contains Var with "varno 0" so that we
diff --git a/src/backend/optimizer/util/appendinfo.c b/src/backend/optimizer/util/appendinfo.c
index c1b1557570..f456b3b0a4 100644
--- a/src/backend/optimizer/util/appendinfo.c
+++ b/src/backend/optimizer/util/appendinfo.c
@@ -370,7 +370,7 @@ adjust_appendrel_attrs_mutator(Node *node,
if (leaf_relid)
{
RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
- list_nth(context->root->row_identity_vars, var->varattno - 1);
+ list_nth(context->root->row_identity_vars, var->varattno - 1);
if (bms_is_member(leaf_relid, ridinfo->rowidrels))
{
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 04ea04b5b6..32a407f54b 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -1158,7 +1158,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
{
/* UPDATE/DELETE/MERGE row identity vars are always needed */
RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
- list_nth(root->row_identity_vars, var->varattno - 1);
+ list_nth(root->row_identity_vars, var->varattno - 1);
/* Update reltarget width estimate from RowIdentityVarInfo */
joinrel->reltarget->width += ridinfo->rowidwidth;
diff --git a/src/backend/parser/check_keywords.pl b/src/backend/parser/check_keywords.pl
index ddfdf20d33..e9b6f40eaa 100644
--- a/src/backend/parser/check_keywords.pl
+++ b/src/backend/parser/check_keywords.pl
@@ -9,7 +9,7 @@
use strict;
use warnings;
-my $gram_filename = $ARGV[0];
+my $gram_filename = $ARGV[0];
my $kwlist_filename = $ARGV[1];
my $errors = 0;
@@ -47,10 +47,10 @@ $, = ' '; # set output field separator
$\ = "\n"; # set output record separator
my %keyword_categories;
-$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD';
-$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD';
+$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD';
+$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD';
$keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD';
-$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD';
+$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD';
open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename");
@@ -183,7 +183,7 @@ kwlist_line: while (<$kwlist>)
if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/)
{
my ($kwstring) = $1;
- my ($kwname) = $2;
+ my ($kwname) = $2;
my ($kwcat_id) = $3;
my ($collabel) = $4;
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 0b3632735b..346fd272b6 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -3357,7 +3357,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format,
if (format->format_type == JS_FORMAT_JSON)
{
JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ?
- format->encoding : JS_ENC_UTF8;
+ format->encoding : JS_ENC_UTF8;
if (targettype != BYTEAOID &&
format->encoding != JS_ENC_DEFAULT)
diff --git a/src/backend/parser/parse_merge.c b/src/backend/parser/parse_merge.c
index d8866373b8..91b1156d99 100644
--- a/src/backend/parser/parse_merge.c
+++ b/src/backend/parser/parse_merge.c
@@ -165,8 +165,8 @@ transformMergeStmt(ParseState *pstate, MergeStmt *stmt)
/*
* Set up the MERGE target table. The target table is added to the
- * namespace below and to joinlist in transform_MERGE_to_join, so don't
- * do it here.
+ * namespace below and to joinlist in transform_MERGE_to_join, so don't do
+ * it here.
*/
qry->resultRelation = setTargetTable(pstate, stmt->relation,
stmt->relation->inh,
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index b1255e3b70..d67580fc77 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -993,7 +993,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
{
aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TYPE,
RelationGetRelationName(relation));
@@ -2355,7 +2355,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
* mentioned above.
*/
Datum attoptions =
- get_attoptions(RelationGetRelid(index_rel), i + 1);
+ get_attoptions(RelationGetRelid(index_rel), i + 1);
defopclass = GetDefaultOpClass(attform->atttypid,
index_rel->rd_rel->relam);
diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c
index c685621416..7c5d9110fb 100644
--- a/src/backend/partitioning/partbounds.c
+++ b/src/backend/partitioning/partbounds.c
@@ -2340,9 +2340,9 @@ merge_default_partitions(PartitionMap *outer_map,
/*
* The default partitions have to be joined with each other, so merge
* them. Note that each of the default partitions isn't merged yet
- * (see, process_outer_partition()/process_inner_partition()), so
- * they should be merged successfully. The merged partition will act
- * as the default partition of the join relation.
+ * (see, process_outer_partition()/process_inner_partition()), so they
+ * should be merged successfully. The merged partition will act as
+ * the default partition of the join relation.
*/
Assert(outer_merged_index == -1);
Assert(inner_merged_index == -1);
@@ -3193,7 +3193,7 @@ check_new_partition_bound(char *relname, Relation parent,
* datums list.
*/
PartitionRangeDatum *datum =
- list_nth(spec->upperdatums, abs(cmpval) - 1);
+ list_nth(spec->upperdatums, abs(cmpval) - 1);
/*
* The new partition overlaps with the
diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c
index 509587636e..6f9c2765d6 100644
--- a/src/backend/postmaster/fork_process.c
+++ b/src/backend/postmaster/fork_process.c
@@ -58,8 +58,8 @@ fork_process(void)
/*
* We start postmaster children with signals blocked. This allows them to
* install their own handlers before unblocking, to avoid races where they
- * might run the postmaster's handler and miss an important control signal.
- * With more analysis this could potentially be relaxed.
+ * might run the postmaster's handler and miss an important control
+ * signal. With more analysis this could potentially be relaxed.
*/
sigprocmask(SIG_SETMASK, &BlockSig, &save_mask);
result = fork();
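
The fork_process() comment explains starting children with signals blocked so each child can install its own handlers before unblocking, avoiding a window where it runs the parent's handler. A minimal POSIX sketch of that sequence; the handler and the sigfillset() mask are illustrative choices, not the postmaster's actual BlockSig set:

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static void
    child_handler(int signo)
    {
        (void) signo;               /* child-specific handler, for illustration */
    }

    int
    main(void)
    {
        sigset_t    block_all,
                    save_mask;
        pid_t       pid;

        /* Block everything blockable so the child can't run a stale handler. */
        sigfillset(&block_all);
        sigprocmask(SIG_SETMASK, &block_all, &save_mask);

        pid = fork();
        if (pid < 0)
        {
            perror("fork");
            exit(1);
        }
        if (pid == 0)
        {
            /* Child: install our own handlers first, then unblock. */
            signal(SIGUSR1, child_handler);
            sigprocmask(SIG_SETMASK, &save_mask, NULL);
            _exit(0);
        }

        /* Parent: restore its original mask and reap the child. */
        sigprocmask(SIG_SETMASK, &save_mask, NULL);
        waitpid(pid, NULL, 0);
        return 0;
    }
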
diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c
index 38c09b1123..9087ef95af 100644
--- a/src/backend/regex/regc_lex.c
+++ b/src/backend/regex/regc_lex.c
@@ -759,6 +759,7 @@ lexescape(struct vars *v)
RETV(PLAIN, c);
break;
default:
+
/*
* Throw an error for unrecognized ASCII alpha escape sequences,
* which reserves them for future use if needed.
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 052505e46f..dc9c5c82d9 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -259,7 +259,7 @@ libpqrcv_check_conninfo(const char *conninfo, bool must_use_password)
if (must_use_password)
{
- bool uses_password = false;
+ bool uses_password = false;
for (opt = opts; opt->keyword != NULL; ++opt)
{
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index beef399b42..d91055a440 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -155,7 +155,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
case XLOG_PARAMETER_CHANGE:
{
xl_parameter_change *xlrec =
- (xl_parameter_change *) XLogRecGetData(buf->record);
+ (xl_parameter_change *) XLogRecGetData(buf->record);
/*
* If wal_level on the primary is reduced to less than
@@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
* invalidated when this WAL record is replayed; and further,
* slot creation fails when wal_level is not sufficient; but
* all these operations are not synchronized, so a logical
- * slot may creep in while the wal_level is being
- * reduced. Hence this extra check.
+ * slot may creep in while the wal_level is being reduced.
+ * Hence this extra check.
*/
if (xlrec->wal_level < WAL_LEVEL_LOGICAL)
{
@@ -752,7 +752,7 @@ DecodePrepare(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
SnapBuild *builder = ctx->snapshot_builder;
XLogRecPtr origin_lsn = parsed->origin_lsn;
TimestampTz prepare_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
int i;
TransactionId xid = parsed->twophase_xid;
@@ -828,7 +828,7 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
int i;
XLogRecPtr origin_lsn = InvalidXLogRecPtr;
TimestampTz abort_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
bool skip_xact;
if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN)
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index 7e1f677f7a..41243d0187 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin,
MemoryContext old_context;
/*
- * On a standby, this check is also required while creating the
- * slot. Check the comments in the function.
+ * On a standby, this check is also required while creating the slot.
+ * Check the comments in the function.
*/
CheckLogicalDecodingRequirements();
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index 2c04c8707d..b0255ffd25 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -833,7 +833,7 @@ replorigin_redo(XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec =
- (xl_replorigin_set *) XLogRecGetData(record);
+ (xl_replorigin_set *) XLogRecGetData(record);
replorigin_advance(xlrec->node_id,
xlrec->remote_lsn, record->EndRecPtr,
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index b85b890010..26d252bd87 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -1408,7 +1408,7 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
{
dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
ReorderBufferChange *next_change =
- dlist_container(ReorderBufferChange, node, next);
+ dlist_container(ReorderBufferChange, node, next);
/* txn stays the same */
state->entries[off].lsn = next_change->lsn;
@@ -1439,8 +1439,8 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
{
/* successfully restored changes from disk */
ReorderBufferChange *next_change =
- dlist_head_element(ReorderBufferChange, node,
- &entry->txn->changes);
+ dlist_head_element(ReorderBufferChange, node,
+ &entry->txn->changes);
elog(DEBUG2, "restored %u/%u changes from disk",
(uint32) entry->txn->nentries_mem,
@@ -1582,7 +1582,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
dclist_delete_from(&rb->catchange_txns, &txn->catchange_node);
/* now remove reference from buffer */
- hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
+ hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
Assert(found);
/* remove entries spilled to disk */
@@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
ReorderBufferTXN *txn;
/*
- * Bail out if logical_replication_mode is buffered and we haven't exceeded
- * the memory limit.
+ * Bail out if logical_replication_mode is buffered and we haven't
+ * exceeded the memory limit.
*/
if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED &&
rb->size < logical_decoding_work_mem * 1024L)
@@ -3841,7 +3841,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
{
char *data;
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
sz += inval_size;
@@ -4010,10 +4010,10 @@ ReorderBufferStreamTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
* After that we need to reuse the snapshot from the previous run.
*
* Unlike DecodeCommit which adds xids of all the subtransactions in
- * snapshot's xip array via SnapBuildCommitTxn, we can't do that here
- * but we do add them to subxip array instead via ReorderBufferCopySnap.
- * This allows the catalog changes made in subtransactions decoded till
- * now to be visible.
+ * snapshot's xip array via SnapBuildCommitTxn, we can't do that here but
+ * we do add them to subxip array instead via ReorderBufferCopySnap. This
+ * allows the catalog changes made in subtransactions decoded till now to
+ * be visible.
*/
if (txn->snapshot_now == NULL)
{
@@ -4206,7 +4206,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
dlist_foreach_modify(cleanup_iter, &txn->changes)
{
ReorderBufferChange *cleanup =
- dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
+ dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
dlist_delete(&cleanup->node);
ReorderBufferReturnChange(rb, cleanup, true);
@@ -4431,7 +4431,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
case REORDER_BUFFER_CHANGE_INVALIDATION:
{
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
change->data.inval.invalidations =
MemoryContextAlloc(rb->context, inval_size);
@@ -4936,7 +4936,7 @@ ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
dlist_foreach_modify(it, &ent->chunks)
{
ReorderBufferChange *change =
- dlist_container(ReorderBufferChange, node, it.cur);
+ dlist_container(ReorderBufferChange, node, it.cur);
dlist_delete(&change->node);
ReorderBufferReturnChange(rb, change, true);
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 62542827e4..0786bb0ab7 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -574,7 +574,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder)
Assert(builder->building_full_snapshot);
/* don't allow older snapshots */
- InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
+ InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
if (HaveRegisteredOrActiveSnapshot())
elog(ERROR, "cannot build an initial slot snapshot when snapshots exist");
Assert(!HistoricSnapshotActive());
@@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
*/
/*
- * xl_running_xacts record is older than what we can use, we might not have
- * all necessary catalog rows anymore.
+ * xl_running_xacts record is older than what we can use, we might not
+ * have all necessary catalog rows anymore.
*/
if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
NormalTransactionIdPrecedes(running->oldestRunningXid,
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 0c71ae9ba7..c56d42dcd2 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -563,7 +563,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
* the lock.
*/
int nsyncworkers =
- logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
+ logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
/* Now safe to release the LWLock */
LWLockRelease(LogicalRepWorkerLock);
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 4b67098814..78926f8647 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -2399,7 +2399,7 @@ apply_handle_insert(StringInfo s)
LogicalRepRelMapEntry *rel;
LogicalRepTupleData newtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
@@ -2547,7 +2547,7 @@ apply_handle_update(StringInfo s)
{
LogicalRepRelMapEntry *rel;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
LogicalRepTupleData oldtup;
@@ -2732,7 +2732,7 @@ apply_handle_delete(StringInfo s)
LogicalRepRelMapEntry *rel;
LogicalRepTupleData oldtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
@@ -3079,8 +3079,8 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
if (map)
{
TupleConversionMap *PartitionToRootMap =
- convert_tuples_by_name(RelationGetDescr(partrel),
- RelationGetDescr(parentrel));
+ convert_tuples_by_name(RelationGetDescr(partrel),
+ RelationGetDescr(parentrel));
remoteslot =
execute_attr_map_slot(PartitionToRootMap->attrMap,
@@ -3414,7 +3414,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush,
dlist_foreach_modify(iter, &lsn_mapping)
{
FlushPosition *pos =
- dlist_container(FlushPosition, node, iter.cur);
+ dlist_container(FlushPosition, node, iter.cur);
*write = pos->remote_end;
@@ -4702,11 +4702,11 @@ ApplyWorkerMain(Datum main_arg)
ereport(DEBUG1,
(errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s",
- MySubscription->name,
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
- "?")));
+ MySubscription->name,
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
+ "?")));
}
else
{
@@ -5080,10 +5080,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
}
/*
- * If we are processing this transaction using a parallel apply worker then
- * either we send the changes to the parallel worker or if the worker is busy
- * then serialize the changes to the file which will later be processed by
- * the parallel worker.
+ * If we are processing this transaction using a parallel apply worker
+ * then either we send the changes to the parallel worker or if the worker
+ * is busy then serialize the changes to the file which will later be
+ * processed by the parallel worker.
*/
*winfo = pa_find_worker(xid);
@@ -5097,9 +5097,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
}
/*
- * If there is no parallel worker involved to process this transaction then
- * we either directly apply the change or serialize it to a file which will
- * later be applied when the transaction finish message is processed.
+ * If there is no parallel worker involved to process this transaction
+ * then we either directly apply the change or serialize it to a file
+ * which will later be applied when the transaction finish message is
+ * processed.
*/
else if (in_streamed_transaction)
{
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index f88389de84..b08ca55041 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications,
* are multiple lists (one for each operation) to which row filters will
* be appended.
*
- * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row
- * filter expression" so it takes precedence.
+ * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter
+ * expression" so it takes precedence.
*/
foreach(lc, publications)
{
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index c263a59690..0ea71b5c43 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -330,7 +330,7 @@ static void
SyncRepQueueInsert(int mode)
{
dlist_head *queue;
- dlist_iter iter;
+ dlist_iter iter;
Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
queue = &WalSndCtl->SyncRepQueue[mode];
@@ -879,7 +879,7 @@ SyncRepWakeQueue(bool all, int mode)
dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode])
{
- PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
+ PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
/*
* Assume the queue is ordered by LSN
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 980dc1816f..0e4f76efa8 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -3548,7 +3548,7 @@ rewriteTargetView(Query *parsetree, Relation view)
if (parsetree->withCheckOptions != NIL)
{
WithCheckOption *parent_wco =
- (WithCheckOption *) linitial(parsetree->withCheckOptions);
+ (WithCheckOption *) linitial(parsetree->withCheckOptions);
if (parent_wco->cascaded)
{
diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c
index 569c1c9467..5c3fe4eda2 100644
--- a/src/backend/rewrite/rowsecurity.c
+++ b/src/backend/rewrite/rowsecurity.c
@@ -581,7 +581,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id,
if (row_security_policy_hook_restrictive)
{
List *hook_policies =
- (*row_security_policy_hook_restrictive) (cmd, relation);
+ (*row_security_policy_hook_restrictive) (cmd, relation);
/*
* As with built-in restrictive policies, we sort any hook-provided
@@ -603,7 +603,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id,
if (row_security_policy_hook_permissive)
{
List *hook_policies =
- (*row_security_policy_hook_permissive) (cmd, relation);
+ (*row_security_policy_hook_permissive) (cmd, relation);
foreach(item, hook_policies)
{
diff --git a/src/backend/snowball/snowball_create.pl b/src/backend/snowball/snowball_create.pl
index f4b58ada1c..35d1cd9621 100644
--- a/src/backend/snowball/snowball_create.pl
+++ b/src/backend/snowball/snowball_create.pl
@@ -10,34 +10,34 @@ my $input_path = '';
my $depfile;
our @languages = qw(
- arabic
- armenian
- basque
- catalan
- danish
- dutch
- english
- finnish
- french
- german
- greek
- hindi
- hungarian
- indonesian
- irish
- italian
- lithuanian
- nepali
- norwegian
- portuguese
- romanian
- russian
- serbian
- spanish
- swedish
- tamil
- turkish
- yiddish
+ arabic
+ armenian
+ basque
+ catalan
+ danish
+ dutch
+ english
+ finnish
+ french
+ german
+ greek
+ hindi
+ hungarian
+ indonesian
+ irish
+ italian
+ lithuanian
+ nepali
+ norwegian
+ portuguese
+ romanian
+ russian
+ serbian
+ spanish
+ swedish
+ tamil
+ turkish
+ yiddish
);
# Names of alternative dictionaries for all-ASCII words. If not
@@ -48,13 +48,12 @@ our @languages = qw(
our %ascii_languages = (
'hindi' => 'english',
- 'russian' => 'english',
-);
+ 'russian' => 'english',);
GetOptions(
- 'depfile' => \$depfile,
- 'outdir:s' => \$outdir_path,
- 'input:s' => \$input_path) || usage();
+ 'depfile' => \$depfile,
+ 'outdir:s' => \$outdir_path,
+ 'input:s' => \$input_path) || usage();
# Make sure input_path ends in a slash if needed.
if ($input_path ne '' && substr($input_path, -1) ne '/')
@@ -110,8 +109,8 @@ sub GenerateTsearchFiles
foreach my $lang (@languages)
{
my $asclang = $ascii_languages{$lang} || $lang;
- my $txt = $tmpl;
- my $stop = '';
+ my $txt = $tmpl;
+ my $stop = '';
my $stopword_path = "$input_path/stopwords/$lang.stop";
if (-s "$stopword_path")
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index 54e3bb4aa2..28b52d8aa1 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -2237,8 +2237,8 @@ compute_expr_stats(Relation onerel, double totalrows,
if (tcnt > 0)
{
AttributeOpts *aopt =
- get_attribute_options(stats->attr->attrelid,
- stats->attr->attnum);
+ get_attribute_options(stats->attr->attrelid,
+ stats->attr->attnum);
stats->exprvals = exprvals;
stats->exprnulls = exprnulls;
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 0bbf09564a..aafec4a09d 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2667,7 +2667,7 @@ BufferSync(int flags)
{
BufferDesc *bufHdr = NULL;
CkptTsStatus *ts_stat = (CkptTsStatus *)
- DatumGetPointer(binaryheap_first(ts_heap));
+ DatumGetPointer(binaryheap_first(ts_heap));
buf_id = CkptBufferIds[ts_stat->index].buf_id;
Assert(buf_id != -1);
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 84ead85942..41ab64100e 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -98,8 +98,7 @@ struct BufFile
/*
* XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid
- * wasting per-file alignment padding when some users create many
- * files.
+ * wasting per-file alignment padding when some users create many files.
*/
PGAlignedBlock buffer;
};
diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c
index f0965c3481..6399fa2ad5 100644
--- a/src/backend/storage/ipc/dsm_impl.c
+++ b/src/backend/storage/ipc/dsm_impl.c
@@ -357,14 +357,15 @@ dsm_impl_posix_resize(int fd, off_t size)
/*
* Block all blockable signals, except SIGQUIT. posix_fallocate() can run
* for quite a long time, and is an all-or-nothing operation. If we
- * allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery
- * conflicts), the retry loop might never succeed.
+ * allowed SIGUSR1 to interrupt us repeatedly (for example, due to
+ * recovery conflicts), the retry loop might never succeed.
*/
if (IsUnderPostmaster)
sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask);
pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE);
#if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__)
+
/*
* On Linux, a shm_open fd is backed by a tmpfs file. If we were to use
* ftruncate, the file would contain a hole. Accessing memory backed by a
@@ -374,8 +375,8 @@ dsm_impl_posix_resize(int fd, off_t size)
* SIGBUS later.
*
* We still use a traditional EINTR retry loop to handle SIGCONT.
- * posix_fallocate() doesn't restart automatically, and we don't want
- * this to fail if you attach a debugger.
+ * posix_fallocate() doesn't restart automatically, and we don't want this
+ * to fail if you attach a debugger.
*/
do
{
@@ -383,9 +384,9 @@ dsm_impl_posix_resize(int fd, off_t size)
} while (rc == EINTR);
/*
- * The caller expects errno to be set, but posix_fallocate() doesn't
- * set it. Instead it returns error numbers directly. So set errno,
- * even though we'll also return rc to indicate success or failure.
+ * The caller expects errno to be set, but posix_fallocate() doesn't set
+ * it. Instead it returns error numbers directly. So set errno, even
+ * though we'll also return rc to indicate success or failure.
*/
errno = rc;
#else
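
The dsm_impl.c comments above capture two quirks of posix_fallocate(): it does not restart automatically after a signal (so an EINTR retry loop is needed), and it reports failure through its return value rather than by setting errno. A self-contained POSIX sketch of the retry-and-set-errno idiom, using a throwaway temp file instead of a shared memory fd:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int
    main(void)
    {
        char        path[] = "/tmp/fallocXXXXXX";
        int         fd = mkstemp(path);
        int         rc;

        if (fd < 0)
        {
            perror("mkstemp");
            exit(1);
        }
        unlink(path);               /* anonymous once unlinked */

        /* Retry on EINTR: posix_fallocate() does not restart automatically. */
        do
        {
            rc = posix_fallocate(fd, 0, 1024 * 1024);
        } while (rc == EINTR);

        /*
         * posix_fallocate() returns the error number directly and does not
         * set errno, so set errno ourselves before reporting.
         */
        errno = rc;
        if (rc != 0)
            perror("posix_fallocate");
        else
            printf("allocated 1 MB without holes\n");

        close(fd);
        return 0;
    }
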
diff --git a/src/backend/storage/lmgr/generate-lwlocknames.pl b/src/backend/storage/lmgr/generate-lwlocknames.pl
index c124f49d80..863c88252b 100644
--- a/src/backend/storage/lmgr/generate-lwlocknames.pl
+++ b/src/backend/storage/lmgr/generate-lwlocknames.pl
@@ -10,10 +10,9 @@ use Getopt::Long;
my $output_path = '.';
my $lastlockidx = -1;
-my $continue = "\n";
+my $continue = "\n";
-GetOptions(
- 'outdir:s' => \$output_path);
+GetOptions('outdir:s' => \$output_path);
open my $lwlocknames, '<', $ARGV[0] or die;
@@ -48,7 +47,7 @@ while (<$lwlocknames>)
$trimmedlockname =~ s/Lock$//;
die "lock names must end with 'Lock'" if $trimmedlockname eq $lockname;
- die "lwlocknames.txt not in order" if $lockidx < $lastlockidx;
+ die "lwlocknames.txt not in order" if $lockidx < $lastlockidx;
die "lwlocknames.txt has duplicates" if $lockidx == $lastlockidx;
while ($lastlockidx < $lockidx - 1)
@@ -59,7 +58,7 @@ while (<$lwlocknames>)
}
printf $c "%s \"%s\"", $continue, $trimmedlockname;
$lastlockidx = $lockidx;
- $continue = ",\n";
+ $continue = ",\n";
print $h "#define $lockname (&MainLWLockArray[$lockidx].lock)\n";
}
@@ -71,7 +70,8 @@ printf $h "#define NUM_INDIVIDUAL_LWLOCKS %s\n", $lastlockidx + 1;
close $h;
close $c;
-rename($htmp, "$output_path/lwlocknames.h") || die "rename: $htmp to $output_path/lwlocknames.h: $!";
+rename($htmp, "$output_path/lwlocknames.h")
+ || die "rename: $htmp to $output_path/lwlocknames.h: $!";
rename($ctmp, "$output_path/lwlocknames.c") || die "rename: $ctmp: $!";
close $lwlocknames;
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 42595b38b2..193f50fc0f 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -3936,6 +3936,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
dclist_foreach(proc_iter, waitQueue)
{
PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
+
if (queued_proc == blocked_proc)
break;
data->waiter_pids[data->npids++] = queued_proc->pid;
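
Many of the re-indented initializers in this commit are built around dlist_container(), which recovers the enclosing struct from an embedded list node. A generic offsetof-based sketch of that container-of idiom; the list_container macro and struct names below are illustrative stand-ins, not PostgreSQL's ilist.h API:

    #include <stddef.h>
    #include <stdio.h>

    struct node
    {
        struct node *next;
    };

    struct proc
    {
        int         pid;
        struct node links;          /* embedded list node */
    };

    /* Recover the struct that embeds 'ptr' as its 'member' field. */
    #define list_container(type, member, ptr) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    int
    main(void)
    {
        struct proc a = {42, {NULL}};
        struct proc b = {43, {NULL}};
        struct node *cur;

        a.links.next = &b.links;    /* a -> b */

        for (cur = &a.links; cur != NULL; cur = cur->next)
        {
            struct proc *p = list_container(struct proc, links, cur);

            printf("pid %d\n", p->pid);
        }
        return 0;
    }
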
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 59347ab951..01d738f306 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1118,9 +1118,9 @@ LWLockDequeueSelf(LWLock *lock)
LWLockWaitListLock(lock);
/*
- * Remove ourselves from the waitlist, unless we've already been
- * removed. The removal happens with the wait list lock held, so there's
- * no race in this check.
+ * Remove ourselves from the waitlist, unless we've already been removed.
+ * The removal happens with the wait list lock held, so there's no race in
+ * this check.
*/
on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
if (on_waitlist)
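
The comment above hinges on removal happening only while the wait-list lock is held, so testing the flag under the same lock cannot race. A generic pthreads sketch of that check (hypothetical Waiter type, not the LWLock implementation):

#include <pthread.h>
#include <stdbool.h>

typedef struct Waiter
{
    bool        on_waitlist;    /* protected by list_lock */
} Waiter;

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dequeue ourselves unless someone already removed us under the lock. */
static bool
dequeue_self(Waiter *w)
{
    bool        was_queued;

    pthread_mutex_lock(&list_lock);
    was_queued = w->on_waitlist;
    if (was_queued)
        w->on_waitlist = false;
    pthread_mutex_unlock(&list_lock);
    return was_queued;
}

int
main(void)
{
    Waiter      w = {.on_waitlist = true};

    return dequeue_self(&w) ? 0 : 1;
}
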
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 203b189559..533f616541 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -625,7 +625,7 @@ RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer)
dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (conflict->sxactIn == writer)
return true;
@@ -708,7 +708,7 @@ FlagSxactUnsafe(SERIALIZABLEXACT *sxact)
dlist_foreach_modify(iter, &sxact->possibleUnsafeConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
Assert(!SxactIsReadOnly(conflict->sxactOut));
Assert(sxact == conflict->sxactIn);
@@ -1587,7 +1587,7 @@ GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
dlist_foreach(iter, &blocking_sxact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
@@ -1825,8 +1825,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
/*
* If we didn't find any possibly unsafe conflicts because every
* uncommitted writable transaction turned out to be doomed, then we
- * can "opt out" immediately. See comments above the earlier check for
- * PredXact->WritableSxactCount == 0.
+ * can "opt out" immediately. See comments above the earlier check
+ * for PredXact->WritableSxactCount == 0.
*/
if (dlist_is_empty(&sxact->possibleUnsafeConflicts))
{
@@ -2613,7 +2613,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
bool found;
dlist_delete(&(predlock->xactLink));
@@ -2754,7 +2754,7 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
dlist_foreach_modify(iter, &oldtarget->predicateLocks)
{
PREDICATELOCK *oldpredlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
PREDICATELOCK *newpredlock;
SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
@@ -2976,7 +2976,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
dlist_foreach_modify(iter, &oldtarget->predicateLocks)
{
PREDICATELOCK *oldpredlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
PREDICATELOCK *newpredlock;
SerCommitSeqNo oldCommitSeqNo;
SERIALIZABLEXACT *oldXact;
@@ -3194,7 +3194,7 @@ SetNewSxactGlobalXmin(void)
dlist_foreach(iter, &PredXact->activeList)
{
SERIALIZABLEXACT *sxact =
- dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
+ dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
if (!SxactIsRolledBack(sxact)
&& !SxactIsCommitted(sxact)
@@ -3440,7 +3440,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
@@ -3471,7 +3471,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
dlist_foreach_modify(iter, &MySerializableXact->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (isCommit
&& !SxactIsReadOnly(MySerializableXact)
@@ -3496,7 +3496,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
dlist_foreach_modify(iter, &MySerializableXact->inConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
if (!isCommit
|| SxactIsCommitted(conflict->sxactOut)
@@ -3515,7 +3515,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
roXact = possibleUnsafeConflict->sxactIn;
Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
@@ -3564,8 +3564,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
* xmin and purge any transactions which finished before this transaction
* was launched.
*
- * For parallel queries in read-only transactions, it might run twice.
- * We only release the reference on the first call.
+ * For parallel queries in read-only transactions, it might run twice. We
+ * only release the reference on the first call.
*/
needToClear = false;
if ((partiallyReleasing ||
@@ -3641,7 +3641,7 @@ ClearOldPredicateLocks(void)
dlist_foreach_modify(iter, FinishedSerializableTransactions)
{
SERIALIZABLEXACT *finishedSxact =
- dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
+ dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
|| TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
@@ -3700,7 +3700,7 @@ ClearOldPredicateLocks(void)
dlist_foreach_modify(iter, &OldCommittedSxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
bool canDoPartialCleanup;
LWLockAcquire(SerializableXactHashLock, LW_SHARED);
@@ -3787,7 +3787,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
dlist_foreach_modify(iter, &sxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
PREDICATELOCKTAG tag;
PREDICATELOCKTARGET *target;
PREDICATELOCKTARGETTAG targettag;
@@ -3864,7 +3864,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
dlist_foreach_modify(iter, &sxact->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (summarize)
conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
@@ -3876,7 +3876,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
dlist_foreach_modify(iter, &sxact->inConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
if (summarize)
conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
@@ -4134,7 +4134,7 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
SERIALIZABLEXACT *sxact = predlock->tag.myXact;
if (sxact == MySerializableXact)
@@ -4407,7 +4407,7 @@ CheckTableForSerializableConflictIn(Relation relation)
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
if (predlock->tag.myXact != MySerializableXact
&& !RWConflictExists(predlock->tag.myXact, MySerializableXact))
@@ -4519,7 +4519,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
dlist_foreach(iter, &writer->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
SERIALIZABLEXACT *t2 = conflict->sxactIn;
if (SxactIsPrepared(t2)
@@ -4566,7 +4566,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->inConflicts)
{
const RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
const SERIALIZABLEXACT *t0 = conflict->sxactOut;
if (!SxactIsDoomed(t0)
@@ -4664,7 +4664,7 @@ PreCommit_CheckForSerializationFailure(void)
dlist_foreach(near_iter, &MySerializableXact->inConflicts)
{
RWConflict nearConflict =
- dlist_container(RWConflictData, inLink, near_iter.cur);
+ dlist_container(RWConflictData, inLink, near_iter.cur);
if (!SxactIsCommitted(nearConflict->sxactOut)
&& !SxactIsDoomed(nearConflict->sxactOut))
@@ -4674,7 +4674,7 @@ PreCommit_CheckForSerializationFailure(void)
dlist_foreach(far_iter, &nearConflict->sxactOut->inConflicts)
{
RWConflict farConflict =
- dlist_container(RWConflictData, inLink, far_iter.cur);
+ dlist_container(RWConflictData, inLink, far_iter.cur);
if (farConflict->sxactOut == MySerializableXact
|| (!SxactIsCommitted(farConflict->sxactOut)
@@ -4770,7 +4770,7 @@ AtPrepare_PredicateLocks(void)
dlist_foreach(iter, &sxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
record.type = TWOPHASEPREDICATERECORD_LOCK;
lockRecord->target = predlock->tag.myTarget->tag;
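
Nearly every hunk in this file re-indents an initializer built with dlist_container(), which recovers the enclosing struct from a pointer to an embedded list node. A self-contained version of the idiom (plain offsetof arithmetic; PostgreSQL's real macro layers type assertions on top of this):

#include <stddef.h>
#include <stdio.h>

typedef struct dlist_node
{
    struct dlist_node *prev;
    struct dlist_node *next;
} dlist_node;

/* Map a pointer to an embedded node back to its containing struct. */
#define dlist_container(type, membername, ptr) \
    ((type *) ((char *) (ptr) - offsetof(type, membername)))

typedef struct RWConflictData
{
    int         pid;            /* payload */
    dlist_node  outLink;        /* embedded list node */
} RWConflictData;

int
main(void)
{
    RWConflictData c = {.pid = 42};
    dlist_node *iter = &c.outLink;
    RWConflictData *back = dlist_container(RWConflictData, outLink, iter);

    printf("%d\n", back->pid);  /* prints 42 */
    return 0;
}
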
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 22b4278610..dac921219f 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -101,7 +101,7 @@ ProcGlobalShmemSize(void)
{
Size size = 0;
Size TotalProcs =
- add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+ add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));
@@ -331,7 +331,7 @@ InitProcess(void)
if (!dlist_is_empty(procgloballist))
{
- MyProc = (PGPROC*) dlist_pop_head_node(procgloballist);
+ MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
SpinLockRelease(ProcStructLock);
}
else
@@ -1009,7 +1009,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
uint32 hashcode = locallock->hashcode;
LWLock *partitionLock = LockHashPartitionLock(hashcode);
dclist_head *waitQueue = &lock->waitProcs;
- PGPROC *insert_before = NULL;
+ PGPROC *insert_before = NULL;
LOCKMASK myHeldLocks = MyProc->heldLocks;
TimestampTz standbyWaitStart = 0;
bool early_deadlock = false;
@@ -1244,7 +1244,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
if (InHotStandby)
{
bool maybe_log_conflict =
- (standbyWaitStart != 0 && !logged_recovery_conflict);
+ (standbyWaitStart != 0 && !logged_recovery_conflict);
/* Set a timer and wait for that or for the lock to be granted */
ResolveRecoveryConflictWithLock(locallock->tag.lock,
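
TotalProcs above is built from nested add_size() calls, which add sizes while refusing to wrap around silently. A sketch of such an overflow-checked adder (the backend's version ereports rather than exiting):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef size_t Size;

/* Add two sizes, bailing out instead of silently wrapping around. */
static Size
add_size(Size a, Size b)
{
    Size        result = a + b;

    if (result < a)             /* unsigned wraparound check */
    {
        fprintf(stderr, "requested size overflows size_t\n");
        exit(1);
    }
    return result;
}

int
main(void)
{
    Size        total = add_size(100, add_size(20, 2));

    printf("%zu\n", total);     /* prints 122 */
    return 0;
}
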
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 42e3501255..65bb22541c 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -549,7 +549,7 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum,
while (remblocks > 0)
{
- BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
+ BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
off_t seekpos = (off_t) BLCKSZ * segstartblock;
int numblocks;
@@ -597,9 +597,9 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum,
/*
* Even if we don't want to use fallocate, we can still extend a
* bit more efficiently than writing each 8kB block individually.
- * pg_pwrite_zeros() (via FileZero()) uses
- * pg_pwritev_with_retry() to avoid multiple writes or needing a
- * zeroed buffer for the whole length of the extension.
+ * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
+ * to avoid multiple writes or needing a zeroed buffer for the
+ * whole length of the extension.
*/
ret = FileZero(v->mdfd_vfd,
seekpos, (off_t) BLCKSZ * numblocks,
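
The reflowed comment concerns zero-extending a segment without allocating a zeroed buffer for the whole extension. A simplified stand-in using plain pwrite() and one reusable block (real code would also retry short writes, which is what pg_pwritev_with_retry() handles):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLCKSZ 8192

/* Zero-extend a file by nblocks starting at byte offset seekpos. */
static int
zero_extend(int fd, off_t seekpos, int nblocks)
{
    static const char zbuf[BLCKSZ];     /* zero-initialized once */

    for (int i = 0; i < nblocks; i++)
    {
        if (pwrite(fd, zbuf, BLCKSZ, seekpos + (off_t) i * BLCKSZ) != BLCKSZ)
            return -1;
    }
    return 0;
}

int
main(void)
{
    int         fd = open("segment.bin", O_RDWR | O_CREAT, 0600);

    if (fd < 0 || zero_extend(fd, 0, 4) != 0)
        perror("zero_extend");
    return 0;
}
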
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index fe4fd3a929..8a2cb55876 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -2256,7 +2256,7 @@ NormalizeSubWord(IspellDict *Conf, char *word, int flag)
{
/* prefix success */
char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ?
- VoidString : prefix->aff[j]->flag;
+ VoidString : prefix->aff[j]->flag;
if (FindWord(Conf, pnewword, ff, flag))
cur += addToResult(forms, cur, pnewword);
diff --git a/src/backend/utils/Gen_dummy_probes.pl b/src/backend/utils/Gen_dummy_probes.pl
index 668b915a4d..f289b19344 100644
--- a/src/backend/utils/Gen_dummy_probes.pl
+++ b/src/backend/utils/Gen_dummy_probes.pl
@@ -44,7 +44,7 @@ sub Run()
# Initialize.
openARGV();
- $Hold    = '';
+ $Hold = '';
$CondReg = 0;
$doPrint = $doAutoPrint;
CYCLE:
diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl
index 2792373fed..764216c56d 100644
--- a/src/backend/utils/Gen_fmgrtab.pl
+++ b/src/backend/utils/Gen_fmgrtab.pl
@@ -24,7 +24,7 @@ my $output_path = '';
my $include_path;
GetOptions(
- 'output:s' => \$output_path,
+ 'output:s' => \$output_path,
'include-path:s' => \$include_path) || usage();
# Make sure output_path ends in a slash.
@@ -34,7 +34,7 @@ if ($output_path ne '' && substr($output_path, -1) ne '/')
}
# Sanity check arguments.
-die "No input files.\n" unless @ARGV;
+die "No input files.\n" unless @ARGV;
die "--include-path must be specified.\n" unless $include_path;
# Read all the input files into internal data structures.
@@ -56,7 +56,7 @@ foreach my $datfile (@ARGV)
my $catalog = Catalog::ParseHeader($header);
my $catname = $catalog->{catname};
- my $schema  = $catalog->{columns};
+ my $schema = $catalog->{columns};
$catalogs{$catname} = $catalog;
$catalog_data{$catname} = Catalog::ParseData($datfile, $schema, 0);
@@ -72,14 +72,14 @@ foreach my $row (@{ $catalog_data{pg_proc} })
push @fmgr,
{
- oid    => $bki_values{oid},
- name   => $bki_values{proname},
- lang   => $bki_values{prolang},
- kind   => $bki_values{prokind},
+ oid => $bki_values{oid},
+ name => $bki_values{proname},
+ lang => $bki_values{prolang},
+ kind => $bki_values{prokind},
strict => $bki_values{proisstrict},
retset => $bki_values{proretset},
- nargs  => $bki_values{pronargs},
- args   => $bki_values{proargtypes},
+ nargs => $bki_values{pronargs},
+ args => $bki_values{proargtypes},
prosrc => $bki_values{prosrc},
};
@@ -88,10 +88,10 @@ foreach my $row (@{ $catalog_data{pg_proc} })
}
# Emit headers for both files
-my $tmpext     = ".tmp$$";
-my $oidsfile   = $output_path . 'fmgroids.h';
+my $tmpext = ".tmp$$";
+my $oidsfile = $output_path . 'fmgroids.h';
my $protosfile = $output_path . 'fmgrprotos.h';
-my $tabfile    = $output_path . 'fmgrtab.c';
+my $tabfile = $output_path . 'fmgrtab.c';
open my $ofh, '>', $oidsfile . $tmpext
or die "Could not open $oidsfile$tmpext: $!";
@@ -213,7 +213,8 @@ $bmap{'t'} = 'true';
$bmap{'f'} = 'false';
my @fmgr_builtin_oid_index;
my $last_builtin_oid = 0;
-my $fmgr_count = 0;
+my $fmgr_count = 0;
+
foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
{
next if $s->{lang} ne 'internal';
@@ -273,9 +274,9 @@ close($pfh);
close($tfh);
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($oidsfile,   $tmpext);
+Catalog::RenameTempFile($oidsfile, $tmpext);
Catalog::RenameTempFile($protosfile, $tmpext);
-Catalog::RenameTempFile($tabfile,    $tmpext);
+Catalog::RenameTempFile($tabfile, $tmpext);
sub usage
{
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index f6edfc76ac..0cdb552631 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -1186,7 +1186,7 @@ pgstat_flush_pending_entries(bool nowait)
while (cur)
{
PgStat_EntryRef *entry_ref =
- dlist_container(PgStat_EntryRef, pending_node, cur);
+ dlist_container(PgStat_EntryRef, pending_node, cur);
PgStat_HashKey key = entry_ref->shared_entry->key;
PgStat_Kind kind = key.kind;
const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c
index 09fffd0e82..d1149adf70 100644
--- a/src/backend/utils/activity/pgstat_shmem.c
+++ b/src/backend/utils/activity/pgstat_shmem.c
@@ -865,7 +865,7 @@ pgstat_drop_entry(PgStat_Kind kind, Oid dboid, Oid objoid)
if (pgStatEntryRefHash)
{
PgStat_EntryRefHashEntry *lohashent =
- pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
+ pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
if (lohashent)
pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref,
diff --git a/src/backend/utils/activity/pgstat_xact.c b/src/backend/utils/activity/pgstat_xact.c
index 91cdd9222e..369239d501 100644
--- a/src/backend/utils/activity/pgstat_xact.c
+++ b/src/backend/utils/activity/pgstat_xact.c
@@ -76,7 +76,7 @@ AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit)
dclist_foreach_modify(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
xl_xact_stats_item *it = &pending->item;
if (isCommit && !pending->is_create)
@@ -148,7 +148,7 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state,
dclist_foreach_modify(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
xl_xact_stats_item *it = &pending->item;
dclist_delete_from(&xact_state->pending_drops, &pending->node);
@@ -290,7 +290,7 @@ pgstat_get_transactional_drops(bool isCommit, xl_xact_stats_item **items)
dclist_foreach(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
if (isCommit && pending->is_create)
continue;
@@ -335,7 +335,7 @@ create_drop_transactional_internal(PgStat_Kind kind, Oid dboid, Oid objoid, bool
int nest_level = GetCurrentTransactionNestLevel();
PgStat_SubXactStatus *xact_state;
PgStat_PendingDroppedStatsItem *drop = (PgStat_PendingDroppedStatsItem *)
- MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem));
+ MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem));
xact_state = pgstat_get_xact_stack_level(nest_level);
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index be2e55bb29..5d8d583ddc 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -4482,17 +4482,17 @@ EncodeInterval(struct pg_itm *itm, int style, char *str)
case INTSTYLE_SQL_STANDARD:
{
bool has_negative = year < 0 || mon < 0 ||
- mday < 0 || hour < 0 ||
- min < 0 || sec < 0 || fsec < 0;
+ mday < 0 || hour < 0 ||
+ min < 0 || sec < 0 || fsec < 0;
bool has_positive = year > 0 || mon > 0 ||
- mday > 0 || hour > 0 ||
- min > 0 || sec > 0 || fsec > 0;
+ mday > 0 || hour > 0 ||
+ min > 0 || sec > 0 || fsec > 0;
bool has_year_month = year != 0 || mon != 0;
bool has_day_time = mday != 0 || hour != 0 ||
- min != 0 || sec != 0 || fsec != 0;
+ min != 0 || sec != 0 || fsec != 0;
bool has_day = mday != 0;
bool sql_standard_value = !(has_negative && has_positive) &&
- !(has_year_month && has_day_time);
+ !(has_year_month && has_day_time);
/*
* SQL Standard wants only 1 "<sign>" preceding the whole
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 9b51da2382..dfa90a04fb 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -189,8 +189,7 @@ float4in_internal(char *num, char **endptr_p,
/*
* endptr points to the first character _after_ the sequence we recognized
* as a valid floating point number. orig_string points to the original
- * input
- * string.
+ * input string.
*/
/* skip leading whitespace */
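
The repaired comment documents strtod()-style endptr semantics; as a standalone reminder of what endptr points at after a successful parse:

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    const char *orig_string = "  3.14xyz";
    char       *endptr;
    double      val = strtod(orig_string, &endptr);

    /*
     * endptr points at the first character after the recognized number,
     * so the unparsed tail is simply endptr itself.
     */
    printf("value=%g rest=\"%s\"\n", val, endptr);  /* value=3.14 rest="xyz" */
    return 0;
}
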
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 4c5abaff25..70cb922e6b 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -3219,9 +3219,9 @@ static RecordIOData *
allocate_record_info(MemoryContext mcxt, int ncolumns)
{
RecordIOData *data = (RecordIOData *)
- MemoryContextAlloc(mcxt,
- offsetof(RecordIOData, columns) +
- ncolumns * sizeof(ColumnIOData));
+ MemoryContextAlloc(mcxt,
+ offsetof(RecordIOData, columns) +
+ ncolumns * sizeof(ColumnIOData));
data->record_type = InvalidOid;
data->record_typmod = 0;
diff --git a/src/backend/utils/adt/jsonpath.c b/src/backend/utils/adt/jsonpath.c
index 0021b01830..7891fde310 100644
--- a/src/backend/utils/adt/jsonpath.c
+++ b/src/backend/utils/adt/jsonpath.c
@@ -76,7 +76,7 @@
static Datum jsonPathFromCstring(char *in, int len, struct Node *escontext);
static char *jsonPathToCstring(StringInfo out, JsonPath *in,
int estimated_len);
-static bool flattenJsonPathParseItem(StringInfo buf, int *result,
+static bool flattenJsonPathParseItem(StringInfo buf, int *result,
struct Node *escontext,
JsonPathParseItem *item,
int nestingLevel, bool insideArraySubscript);
@@ -234,7 +234,7 @@ jsonPathToCstring(StringInfo out, JsonPath *in, int estimated_len)
* children into a binary representation.
*/
static bool
-flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
+flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
JsonPathParseItem *item, int nestingLevel,
bool insideArraySubscript)
{
@@ -306,19 +306,19 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
if (!item->value.args.left)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.args.left,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.args.left,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + left) = chld - pos;
if (!item->value.args.right)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.args.right,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.args.right,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + right) = chld - pos;
}
@@ -338,10 +338,10 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
item->value.like_regex.patternlen);
appendStringInfoChar(buf, '\0');
- if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.like_regex.expr,
- nestingLevel,
- insideArraySubscript))
+ if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.like_regex.expr,
+ nestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + offs) = chld - pos;
}
@@ -360,10 +360,10 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
if (!item->value.arg)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.arg,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.arg,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + arg) = chld - pos;
}
@@ -405,17 +405,17 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
int32 topos;
int32 frompos;
- if (! flattenJsonPathParseItem(buf, &frompos, escontext,
- item->value.array.elems[i].from,
- nestingLevel, true))
+ if (!flattenJsonPathParseItem(buf, &frompos, escontext,
+ item->value.array.elems[i].from,
+ nestingLevel, true))
return false;
frompos -= pos;
if (item->value.array.elems[i].to)
{
- if (! flattenJsonPathParseItem(buf, &topos, escontext,
- item->value.array.elems[i].to,
- nestingLevel, true))
+ if (!flattenJsonPathParseItem(buf, &topos, escontext,
+ item->value.array.elems[i].to,
+ nestingLevel, true))
return false;
topos -= pos;
}
@@ -451,9 +451,9 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
if (item->next)
{
- if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->next, nestingLevel,
- insideArraySubscript))
+ if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->next, nestingLevel,
+ insideArraySubscript))
return false;
chld -= pos;
*(int32 *) (buf->data + next) = chld;
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index b561f0e7e8..41430bab7e 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -1326,8 +1326,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp,
*/
JsonValueList vals = {0};
JsonPathExecResult res =
- executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
- false, &vals);
+ executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
+ false, &vals);
if (jperIsError(res))
return jpbUnknown;
@@ -1337,8 +1337,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp,
else
{
JsonPathExecResult res =
- executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
- false, NULL);
+ executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
+ false, NULL);
if (jperIsError(res))
return jpbUnknown;
@@ -1869,7 +1869,7 @@ executeDateTimeMethod(JsonPathExecContext *cxt, JsonPathItem *jsp,
if (!fmt_txt[i])
{
MemoryContext oldcxt =
- MemoryContextSwitchTo(TopMemoryContext);
+ MemoryContextSwitchTo(TopMemoryContext);
fmt_txt[i] = cstring_to_text(fmt_str[i]);
MemoryContextSwitchTo(oldcxt);
diff --git a/src/backend/utils/adt/jsonpath_internal.h b/src/backend/utils/adt/jsonpath_internal.h
index 2e12de038c..90eea6e961 100644
--- a/src/backend/utils/adt/jsonpath_internal.h
+++ b/src/backend/utils/adt/jsonpath_internal.h
@@ -20,7 +20,7 @@ typedef struct JsonPathString
char *val;
int len;
int total;
-} JsonPathString;
+} JsonPathString;
#include "utils/jsonpath.h"
#include "jsonpath_gram.h"
@@ -29,8 +29,8 @@ typedef struct JsonPathString
JsonPathParseResult **result, \
struct Node *escontext)
YY_DECL;
-extern int jsonpath_yyparse(JsonPathParseResult **result,
- struct Node *escontext);
+extern int jsonpath_yyparse(JsonPathParseResult **result,
+ struct Node *escontext);
extern void jsonpath_yyerror(JsonPathParseResult **result,
struct Node *escontext,
const char *message);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index eea1d1ae0f..31e3b16ae0 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -1794,8 +1794,7 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2,
else
#endif
result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
- if (result == 2147483647) /* _NLSCMPERROR; missing from mingw
- * headers */
+ if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */
ereport(ERROR,
(errmsg("could not compare Unicode strings: %m")));
@@ -1818,14 +1817,15 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2,
static int
pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale)
{
- int result;
+ int result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
#ifdef WIN32
if (GetDatabaseEncoding() == PG_UTF8)
{
- size_t len1 = strlen(arg1);
- size_t len2 = strlen(arg2);
+ size_t len1 = strlen(arg1);
+ size_t len2 = strlen(arg2);
+
result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale);
}
else
@@ -1854,13 +1854,13 @@ static int
pg_strncoll_libc(const char *arg1, size_t len1, const char *arg2, size_t len2,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- size_t bufsize1 = len1 + 1;
- size_t bufsize2 = len2 + 1;
- char *arg1n;
- char *arg2n;
- int result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ size_t bufsize1 = len1 + 1;
+ size_t bufsize2 = len2 + 1;
+ char *arg1n;
+ char *arg2n;
+ int result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
@@ -1906,15 +1906,15 @@ static int
pg_strncoll_icu_no_utf8(const char *arg1, int32_t len1,
const char *arg2, int32_t len2, pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- int32_t ulen1;
- int32_t ulen2;
- size_t bufsize1;
- size_t bufsize2;
- UChar *uchar1,
- *uchar2;
- int result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ int32_t ulen1;
+ int32_t ulen2;
+ size_t bufsize1;
+ size_t bufsize2;
+ UChar *uchar1,
+ *uchar2;
+ int result;
Assert(locale->provider == COLLPROVIDER_ICU);
#ifdef HAVE_UCOL_STRCOLLUTF8
@@ -1961,7 +1961,7 @@ static int
pg_strncoll_icu(const char *arg1, int32_t len1, const char *arg2, int32_t len2,
pg_locale_t locale)
{
- int result;
+ int result;
Assert(locale->provider == COLLPROVIDER_ICU);
@@ -2042,7 +2042,7 @@ int
pg_strncoll(const char *arg1, size_t len1, const char *arg2, size_t len2,
pg_locale_t locale)
{
- int result;
+ int result;
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strncoll_libc(arg1, len1, arg2, len2, locale);
@@ -2074,7 +2074,7 @@ pg_strxfrm_libc(char *dest, const char *src, size_t destsize,
#else
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
#endif
}
@@ -2082,10 +2082,10 @@ static size_t
pg_strnxfrm_libc(char *dest, const char *src, size_t srclen, size_t destsize,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- size_t bufsize = srclen + 1;
- size_t result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ size_t bufsize = srclen + 1;
+ size_t result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
@@ -2114,12 +2114,12 @@ static size_t
pg_strnxfrm_icu(char *dest, const char *src, int32_t srclen, int32_t destsize,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- UChar *uchar;
- int32_t ulen;
- size_t uchar_bsize;
- Size result_bsize;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ UChar *uchar;
+ int32_t ulen;
+ size_t uchar_bsize;
+ Size result_bsize;
Assert(locale->provider == COLLPROVIDER_ICU);
@@ -2161,15 +2161,15 @@ static size_t
pg_strnxfrm_prefix_icu_no_utf8(char *dest, const char *src, int32_t srclen,
int32_t destsize, pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- UCharIterator iter;
- uint32_t state[2];
- UErrorCode status;
- int32_t ulen = -1;
- UChar *uchar = NULL;
- size_t uchar_bsize;
- Size result_bsize;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ UCharIterator iter;
+ uint32_t state[2];
+ UErrorCode status;
+ int32_t ulen = -1;
+ UChar *uchar = NULL;
+ size_t uchar_bsize;
+ Size result_bsize;
Assert(locale->provider == COLLPROVIDER_ICU);
Assert(GetDatabaseEncoding() != PG_UTF8);
@@ -2209,7 +2209,7 @@ static size_t
pg_strnxfrm_prefix_icu(char *dest, const char *src, int32_t srclen,
int32_t destsize, pg_locale_t locale)
{
- size_t result;
+ size_t result;
Assert(locale->provider == COLLPROVIDER_ICU);
@@ -2271,7 +2271,7 @@ pg_strxfrm_enabled(pg_locale_t locale)
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
/*
@@ -2291,7 +2291,7 @@ pg_strxfrm_enabled(pg_locale_t locale)
size_t
pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strxfrm_libc(dest, src, destsize, locale);
@@ -2328,7 +2328,7 @@ size_t
pg_strnxfrm(char *dest, size_t destsize, const char *src, size_t srclen,
pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strnxfrm_libc(dest, src, srclen, destsize, locale);
@@ -2358,7 +2358,7 @@ pg_strxfrm_prefix_enabled(pg_locale_t locale)
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
/*
@@ -2378,7 +2378,7 @@ size_t
pg_strxfrm_prefix(char *dest, const char *src, size_t destsize,
pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
elog(ERROR, "collprovider '%c' does not support pg_strxfrm_prefix()",
@@ -2415,7 +2415,7 @@ size_t
pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src,
size_t srclen, pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
elog(ERROR, "collprovider '%c' does not support pg_strnxfrm_prefix()",
@@ -2491,7 +2491,7 @@ pg_ucol_open(const char *loc_str)
collator = ucol_open(loc_str, &status);
if (U_FAILURE(status))
ereport(ERROR,
- /* use original string for error report */
+ /* use original string for error report */
(errmsg("could not open collator for locale \"%s\": %s",
orig_str, u_errorName(status))));
@@ -2554,6 +2554,7 @@ uchar_length(UConverter *converter, const char *str, int32_t len)
{
UErrorCode status = U_ZERO_ERROR;
int32_t ulen;
+
ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status);
if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR)
ereport(ERROR,
@@ -2571,6 +2572,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen,
{
UErrorCode status = U_ZERO_ERROR;
int32_t ulen;
+
status = U_ZERO_ERROR;
ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status);
if (U_FAILURE(status))
@@ -2594,7 +2596,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen,
int32_t
icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes)
{
- int32_t len_uchar;
+ int32_t len_uchar;
init_icu_converter();
@@ -2781,11 +2783,11 @@ char *
icu_language_tag(const char *loc_str, int elevel)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- char *langtag;
- size_t buflen = 32; /* arbitrary starting buffer size */
- const bool strict = true;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ char *langtag;
+ size_t buflen = 32; /* arbitrary starting buffer size */
+ const bool strict = true;
status = U_ZERO_ERROR;
uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
@@ -2803,8 +2805,8 @@ icu_language_tag(const char *loc_str, int elevel)
return pstrdup("en-US-u-va-posix");
/*
- * A BCP47 language tag doesn't have a clearly-defined upper limit
- * (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
+ * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
+ * RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop.
*/
@@ -2843,7 +2845,7 @@ icu_language_tag(const char *loc_str, int elevel)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("ICU is not supported in this build")));
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
#endif /* not USE_ICU */
}
@@ -2854,11 +2856,11 @@ void
icu_validate_locale(const char *loc_str)
{
#ifdef USE_ICU
- UCollator *collator;
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- bool found = false;
- int elevel = icu_validation_level;
+ UCollator *collator;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ bool found = false;
+ int elevel = icu_validation_level;
/* no validation */
if (elevel < 0)
@@ -2889,8 +2891,8 @@ icu_validate_locale(const char *loc_str)
/* search for matching language within ICU */
for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
{
- const char *otherloc = uloc_getAvailable(i);
- char otherlang[ULOC_LANG_CAPACITY];
+ const char *otherloc = uloc_getAvailable(i);
+ char otherlang[ULOC_LANG_CAPACITY];
status = U_ZERO_ERROR;
uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
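
uchar_length() and uchar_convert() above follow ICU's standard two-call pattern: preflight with a NULL destination to learn the required length (ICU reports U_BUFFER_OVERFLOW_ERROR for that call), then convert into a right-sized buffer. A sketch against the public ICU C API, error handling abbreviated:

#include <stdlib.h>
#include <unicode/ucnv.h>

/* Convert src (len bytes in the converter's charset) to a UChar array. */
static UChar *
to_uchars(UConverter *conv, const char *src, int32_t len, int32_t *ulen_out)
{
    UErrorCode  status = U_ZERO_ERROR;
    int32_t     ulen;
    UChar      *buf;

    /* First call preflights: NULL dest just reports the needed length. */
    ulen = ucnv_toUChars(conv, NULL, 0, src, len, &status);
    if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR)
        return NULL;

    buf = malloc((ulen + 1) * sizeof(UChar));
    status = U_ZERO_ERROR;
    ucnv_toUChars(conv, buf, ulen + 1, src, len, &status);
    if (U_FAILURE(status))
    {
        free(buf);
        return NULL;
    }
    *ulen_out = ulen;
    return buf;
}

int
main(void)
{
    UErrorCode  status = U_ZERO_ERROR;
    UConverter *conv = ucnv_open("UTF-8", &status);
    int32_t     ulen;
    UChar      *u = to_uchars(conv, "hello", 5, &ulen);

    free(u);
    ucnv_close(conv);
    return 0;
}
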
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 6d673493cb..d3a973d86b 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -12587,7 +12587,7 @@ get_range_partbound_string(List *bound_datums)
foreach(cell, bound_datums)
{
PartitionRangeDatum *datum =
- lfirst_node(PartitionRangeDatum, cell);
+ lfirst_node(PartitionRangeDatum, cell);
appendStringInfoString(buf, sep);
if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE)
diff --git a/src/backend/utils/adt/tsquery_op.c b/src/backend/utils/adt/tsquery_op.c
index 7e3bd51c1f..2bc4ec904f 100644
--- a/src/backend/utils/adt/tsquery_op.c
+++ b/src/backend/utils/adt/tsquery_op.c
@@ -150,9 +150,9 @@ Datum
tsquery_phrase(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall3(tsquery_phrase_distance,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1),
- Int32GetDatum(1)));
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1),
+ Int32GetDatum(1)));
}
Datum
diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c
index a38db4697d..4457c5d4f9 100644
--- a/src/backend/utils/adt/tsvector_op.c
+++ b/src/backend/utils/adt/tsvector_op.c
@@ -525,7 +525,7 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
if (arrin[i].haspos)
{
int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos)
- + sizeof(uint16);
+ + sizeof(uint16);
curoff = SHORTALIGN(curoff);
memcpy(dataout + curoff,
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 592afc18ec..b92ff4d266 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -1021,7 +1021,8 @@ hashbpchar(PG_FUNCTION_ARGS)
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
@@ -1033,8 +1034,8 @@ hashbpchar(PG_FUNCTION_ARGS)
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any((uint8_t *) buf, bsize + 1);
@@ -1076,7 +1077,8 @@ hashbpcharextended(PG_FUNCTION_ARGS)
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
@@ -1088,8 +1090,8 @@ hashbpcharextended(PG_FUNCTION_ARGS)
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1));
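
hashbpchar() sizes the transformed key with a first pg_strnxfrm() call, then transforms and hashes bsize + 1 bytes so the terminating NUL stays in the hash, as the reflowed comment says. The shape of that two-pass idiom, with plain strxfrm() and a toy hash standing in for the backend's functions:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hash a string by its collation sort key rather than its raw bytes. */
static unsigned long
hash_by_sortkey(const char *key)
{
    size_t      bsize = strxfrm(NULL, key, 0);  /* pass 1: size only */
    char       *buf = malloc(bsize + 1);
    unsigned long h = 5381;

    strxfrm(buf, key, bsize + 1);               /* pass 2: transform */

    /* hash bsize + 1 bytes so the terminating NUL stays included */
    for (size_t i = 0; i <= bsize; i++)
        h = h * 33 + (unsigned char) buf[i];

    free(buf);
    return h;
}

int
main(void)
{
    printf("%lu\n", hash_by_sortkey("abc"));
    return 0;
}
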
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index b571876468..884bfbc8ce 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -2312,8 +2312,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
memcpy(sss->buf1, authoritative_data, len);
/*
- * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated
- * strings.
+ * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings.
*/
sss->buf1[len] = '\0';
sss->last_len1 = len;
@@ -4523,7 +4522,7 @@ text_to_array(PG_FUNCTION_ARGS)
PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID));
PG_RETURN_DATUM(makeArrayResult(tstate.astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
}
/*
diff --git a/src/backend/utils/adt/xid8funcs.c b/src/backend/utils/adt/xid8funcs.c
index 24271dfff7..06ae940df6 100644
--- a/src/backend/utils/adt/xid8funcs.c
+++ b/src/backend/utils/adt/xid8funcs.c
@@ -519,7 +519,7 @@ pg_snapshot_recv(PG_FUNCTION_ARGS)
for (i = 0; i < nxip; i++)
{
FullTransactionId cur =
- FullTransactionIdFromU64((uint64) pq_getmsgint64(buf));
+ FullTransactionIdFromU64((uint64) pq_getmsgint64(buf));
if (FullTransactionIdPrecedes(cur, last) ||
FullTransactionIdPrecedes(cur, xmin) ||
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 15adbd6a01..866d0d649a 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -630,7 +630,7 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
XmlOptionType parsed_xmloptiontype;
xmlNodePtr content_nodes;
volatile xmlBufferPtr buf = NULL;
- volatile xmlSaveCtxtPtr ctxt = NULL;
+ volatile xmlSaveCtxtPtr ctxt = NULL;
ErrorSaveContext escontext = {T_ErrorSaveContext};
PgXmlErrorContext *xmlerrcxt;
#endif
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index c7607895cd..60978f9415 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -3603,7 +3603,7 @@ char *
get_publication_name(Oid pubid, bool missing_ok)
{
HeapTuple tup;
- char *pubname;
+ char *pubname;
Form_pg_publication pubform;
tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
@@ -3630,16 +3630,16 @@ get_publication_name(Oid pubid, bool missing_ok)
* return InvalidOid.
*/
Oid
-get_subscription_oid(const char* subname, bool missing_ok)
+get_subscription_oid(const char *subname, bool missing_ok)
{
Oid oid;
oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
- MyDatabaseId, CStringGetDatum(subname));
+ MyDatabaseId, CStringGetDatum(subname));
if (!OidIsValid(oid) && !missing_ok)
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("subscription \"%s\" does not exist", subname)));
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("subscription \"%s\" does not exist", subname)));
return oid;
}
@@ -3653,7 +3653,7 @@ char *
get_subscription_name(Oid subid, bool missing_ok)
{
HeapTuple tup;
- char* subname;
+ char *subname;
Form_pg_subscription subform;
tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 40140de958..8a08463c2b 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -3084,10 +3084,10 @@ static void
AssertPendingSyncConsistency(Relation relation)
{
bool relcache_verdict =
- RelationIsPermanent(relation) &&
- ((relation->rd_createSubid != InvalidSubTransactionId &&
- RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
- relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
+ RelationIsPermanent(relation) &&
+ ((relation->rd_createSubid != InvalidSubTransactionId &&
+ RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
+ relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator));
@@ -3765,12 +3765,12 @@ RelationSetNewRelfilenumber(Relation relation, char persistence)
*/
if (IsBinaryUpgrade)
{
- SMgrRelation srel;
+ SMgrRelation srel;
/*
* During a binary upgrade, we use this code path to ensure that
- * pg_largeobject and its index have the same relfilenumbers as in
- * the old cluster. This is necessary because pg_upgrade treats
+ * pg_largeobject and its index have the same relfilenumbers as in the
+ * old cluster. This is necessary because pg_upgrade treats
* pg_largeobject like a user table, not a system table. It is however
* possible that a table or index may need to end up with the same
* relfilenumber in the new cluster as what it had in the old cluster.
@@ -5171,8 +5171,8 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Bitmapset *uindexattrs; /* columns in unique indexes */
Bitmapset *pkindexattrs; /* columns in the primary index */
Bitmapset *idindexattrs; /* columns in the replica identity */
- Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
- Bitmapset *summarizedattrs; /* columns with summarizing indexes */
+ Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
+ Bitmapset *summarizedattrs; /* columns with summarizing indexes */
List *indexoidlist;
List *newindexoidlist;
Oid relpkindex;
@@ -5314,8 +5314,8 @@ restart:
* when the column value changes, thus require a separate
* attribute bitmapset.
*
- * Obviously, non-key columns couldn't be referenced by
- * foreign key or identity key. Hence we do not include them into
+ * Obviously, non-key columns couldn't be referenced by foreign
+ * key or identity key. Hence we do not include them into
* uindexattrs, pkindexattrs and idindexattrs bitmaps.
*/
if (attrnum != 0)
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index 4c21129707..26575cae6c 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -801,11 +801,11 @@ read_relmap_file(RelMapFile *map, char *dbpath, bool lock_held, int elevel)
/*
* Open the target file.
*
- * Because Windows isn't happy about the idea of renaming over a file
- * that someone has open, we only open this file after acquiring the lock,
- * and for the same reason, we close it before releasing the lock. That
- * way, by the time write_relmap_file() acquires an exclusive lock, no
- * one else will have it open.
+ * Because Windows isn't happy about the idea of renaming over a file that
+ * someone has open, we only open this file after acquiring the lock, and
+ * for the same reason, we close it before releasing the lock. That way,
+ * by the time write_relmap_file() acquires an exclusive lock, no one else
+ * will have it open.
*/
snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath,
RELMAPPER_FILENAME);
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 7458ef5c90..9208c31fe0 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -2150,7 +2150,7 @@ CheckFunctionValidatorAccess(Oid validatorOid, Oid functionOid)
/* first validate that we have permissions to use the language */
aclresult = object_aclcheck(LanguageRelationId, procStruct->prolang, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_LANGUAGE,
NameStr(langStruct->lanname));
diff --git a/src/backend/utils/generate-errcodes.pl b/src/backend/utils/generate-errcodes.pl
index dd8ac6d56d..34d0f25c23 100644
--- a/src/backend/utils/generate-errcodes.pl
+++ b/src/backend/utils/generate-errcodes.pl
@@ -7,10 +7,9 @@ use strict;
use warnings;
use Getopt::Long;
-my $outfile = '';
+my $outfile = '';
-GetOptions(
- 'outfile=s' => \$outfile) or die "$0: wrong arguments";
+GetOptions('outfile=s' => \$outfile) or die "$0: wrong arguments";
open my $errcodes, '<', $ARGV[0]
or die "$0: could not open input file '$ARGV[0]': $!\n";
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 53420f4974..88434c3e5d 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -362,7 +362,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect
*/
if (!am_superuser &&
object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CONNECT) != ACLCHECK_OK)
+ ACL_CONNECT) != ACLCHECK_OK)
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for database \"%s\"", name),
@@ -933,10 +933,10 @@ InitPostgres(const char *in_dbname, Oid dboid,
}
/*
- * The last few connection slots are reserved for superusers and roles with
- * privileges of pg_use_reserved_connections. Replication connections are
- * drawn from slots reserved with max_wal_senders and are not limited by
- * max_connections, superuser_reserved_connections, or
+ * The last few connection slots are reserved for superusers and roles
+ * with privileges of pg_use_reserved_connections. Replication
+ * connections are drawn from slots reserved with max_wal_senders and are
+ * not limited by max_connections, superuser_reserved_connections, or
* reserved_connections.
*
* Note: At this point, the new backend has already claimed a proc struct,
diff --git a/src/backend/utils/init/usercontext.c b/src/backend/utils/init/usercontext.c
index 38bcfa60df..dd9a0dd6a8 100644
--- a/src/backend/utils/init/usercontext.c
+++ b/src/backend/utils/init/usercontext.c
@@ -61,15 +61,15 @@ SwitchToUntrustedUser(Oid userid, UserContext *context)
}
else
{
- int sec_context = context->save_sec_context;
+ int sec_context = context->save_sec_context;
/*
* This user can SET ROLE to the target user, but not the other way
* around, so protect ourselves against the target user by setting
* SECURITY_RESTRICTED_OPERATION to prevent certain changes to the
- * session state. Also set up a new GUC nest level, so that we can roll
- * back any GUC changes that may be made by code running as the target
- * user, inasmuch as they could be malicious.
+ * session state. Also set up a new GUC nest level, so that we can
+ * roll back any GUC changes that may be made by code running as the
+ * target user, inasmuch as they could be malicious.
*/
sec_context |= SECURITY_RESTRICTED_OPERATION;
SetUserIdAndSecContext(userid, sec_context);
diff --git a/src/backend/utils/mb/Unicode/UCS_to_BIG5.pl b/src/backend/utils/mb/Unicode/UCS_to_BIG5.pl
index 40b3fb6db6..4c5724b8b7 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_BIG5.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_BIG5.pl
@@ -40,7 +40,7 @@ my $cp950txt = &read_source("CP950.TXT");
foreach my $i (@$cp950txt)
{
my $code = $i->{code};
- my $ucs  = $i->{ucs};
+ my $ucs = $i->{ucs};
# Pick only the ETEN extended characters in the range 0xf9d6 - 0xf9dc
# from CP950.TXT
@@ -51,12 +51,12 @@ foreach my $i (@$cp950txt)
{
push @$all,
{
- code      => $code,
- ucs       => $ucs,
- comment   => $i->{comment},
+ code => $code,
+ ucs => $ucs,
+ comment => $i->{comment},
direction => BOTH,
- f         => $i->{f},
- l         => $i->{l}
+ f => $i->{f},
+ l => $i->{l}
};
}
}
@@ -64,7 +64,7 @@ foreach my $i (@$cp950txt)
foreach my $i (@$all)
{
my $code = $i->{code};
- my $ucs  = $i->{ucs};
+ my $ucs = $i->{ucs};
# BIG5.TXT maps several BIG5 characters to U+FFFD. The UTF-8 to BIG5 mapping can
# contain only one of them. XXX: Doesn't really make sense to include any of them,
diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl
index adfdca24f7..f9ff2bd3d2 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl
@@ -33,7 +33,7 @@ while (<$in>)
next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
my ($u, $c) = ($1, $2);
$c =~ s/ //g;
- my $ucs  = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
# The GB-18030 character set, which we use as the source, contains
@@ -73,11 +73,11 @@ while (<$in>)
push @mapping,
{
- ucs       => $ucs,
- code      => $code,
+ ucs => $ucs,
+ code => $code,
direction => BOTH,
- f         => $in_file,
- l         => $.
+ f => $in_file,
+ l => $.
};
}
close($in);
diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl
index b7715ed419..2d0e05fb79 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl
@@ -37,13 +37,13 @@ while (my $line = <$in>)
push @all,
{
- direction  => BOTH,
- ucs        => $ucs1,
+ direction => BOTH,
+ ucs => $ucs1,
ucs_second => $ucs2,
- code       => $code,
- comment    => $rest,
- f          => $in_file,
- l          => $.
+ code => $code,
+ comment => $rest,
+ f => $in_file,
+ l => $.
};
}
elsif ($line =~ /^0x(\w+)\s*U\+(\w+)\s*#\s*(\S.*)?\s*$/)
@@ -51,7 +51,7 @@ while (my $line = <$in>)
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
- my $ucs  = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
next if ($code < 0x80 && $ucs < 0x80);
@@ -59,11 +59,11 @@ while (my $line = <$in>)
push @all,
{
direction => BOTH,
- ucs       => $ucs,
- code      => $code,
- comment   => $rest,
- f         => $in_file,
- l         => $.
+ ucs => $ucs,
+ code => $code,
+ comment => $rest,
+ f => $in_file,
+ l => $.
};
}
}
diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl
index 9c949f95b1..4073578027 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl
@@ -120,521 +120,521 @@ foreach my $i (grep defined $_->{sjis}, @mapping)
push @mapping, (
{
direction => BOTH,
- ucs => 0x4efc,
- code => 0x8ff4af,
- comment => '# CJK(4EFC)'
+ ucs => 0x4efc,
+ code => 0x8ff4af,
+ comment => '# CJK(4EFC)'
},
{
direction => BOTH,
- ucs => 0x50f4,
- code => 0x8ff4b0,
- comment => '# CJK(50F4)'
+ ucs => 0x50f4,
+ code => 0x8ff4b0,
+ comment => '# CJK(50F4)'
},
{
direction => BOTH,
- ucs => 0x51EC,
- code => 0x8ff4b1,
- comment => '# CJK(51EC)'
+ ucs => 0x51EC,
+ code => 0x8ff4b1,
+ comment => '# CJK(51EC)'
},
{
direction => BOTH,
- ucs => 0x5307,
- code => 0x8ff4b2,
- comment => '# CJK(5307)'
+ ucs => 0x5307,
+ code => 0x8ff4b2,
+ comment => '# CJK(5307)'
},
{
direction => BOTH,
- ucs => 0x5324,
- code => 0x8ff4b3,
- comment => '# CJK(5324)'
+ ucs => 0x5324,
+ code => 0x8ff4b3,
+ comment => '# CJK(5324)'
},
{
direction => BOTH,
- ucs => 0x548A,
- code => 0x8ff4b5,
- comment => '# CJK(548A)'
+ ucs => 0x548A,
+ code => 0x8ff4b5,
+ comment => '# CJK(548A)'
},
{
direction => BOTH,
- ucs => 0x5759,
- code => 0x8ff4b6,
- comment => '# CJK(5759)'
+ ucs => 0x5759,
+ code => 0x8ff4b6,
+ comment => '# CJK(5759)'
},
{
direction => BOTH,
- ucs => 0x589E,
- code => 0x8ff4b9,
- comment => '# CJK(589E)'
+ ucs => 0x589E,
+ code => 0x8ff4b9,
+ comment => '# CJK(589E)'
},
{
direction => BOTH,
- ucs => 0x5BEC,
- code => 0x8ff4ba,
- comment => '# CJK(5BEC)'
+ ucs => 0x5BEC,
+ code => 0x8ff4ba,
+ comment => '# CJK(5BEC)'
},
{
direction => BOTH,
- ucs => 0x5CF5,
- code => 0x8ff4bb,
- comment => '# CJK(5CF5)'
+ ucs => 0x5CF5,
+ code => 0x8ff4bb,
+ comment => '# CJK(5CF5)'
},
{
direction => BOTH,
- ucs => 0x5D53,
- code => 0x8ff4bc,
- comment => '# CJK(5D53)'
+ ucs => 0x5D53,
+ code => 0x8ff4bc,
+ comment => '# CJK(5D53)'
},
{
direction => BOTH,
- ucs => 0x5FB7,
- code => 0x8ff4be,
- comment => '# CJK(5FB7)'
+ ucs => 0x5FB7,
+ code => 0x8ff4be,
+ comment => '# CJK(5FB7)'
},
{
direction => BOTH,
- ucs => 0x6085,
- code => 0x8ff4bf,
- comment => '# CJK(6085)'
+ ucs => 0x6085,
+ code => 0x8ff4bf,
+ comment => '# CJK(6085)'
},
{
direction => BOTH,
- ucs => 0x6120,
- code => 0x8ff4c0,
- comment => '# CJK(6120)'
+ ucs => 0x6120,
+ code => 0x8ff4c0,
+ comment => '# CJK(6120)'
},
{
direction => BOTH,
- ucs => 0x654E,
- code => 0x8ff4c1,
- comment => '# CJK(654E)'
+ ucs => 0x654E,
+ code => 0x8ff4c1,
+ comment => '# CJK(654E)'
},
{
direction => BOTH,
- ucs => 0x663B,
- code => 0x8ff4c2,
- comment => '# CJK(663B)'
+ ucs => 0x663B,
+ code => 0x8ff4c2,
+ comment => '# CJK(663B)'
},
{
direction => BOTH,
- ucs => 0x6665,
- code => 0x8ff4c3,
- comment => '# CJK(6665)'
+ ucs => 0x6665,
+ code => 0x8ff4c3,
+ comment => '# CJK(6665)'
},
{
direction => BOTH,
- ucs => 0x6801,
- code => 0x8ff4c6,
- comment => '# CJK(6801)'
+ ucs => 0x6801,
+ code => 0x8ff4c6,
+ comment => '# CJK(6801)'
},
{
direction => BOTH,
- ucs => 0x6A6B,
- code => 0x8ff4c9,
- comment => '# CJK(6A6B)'
+ ucs => 0x6A6B,
+ code => 0x8ff4c9,
+ comment => '# CJK(6A6B)'
},
{
direction => BOTH,
- ucs => 0x6AE2,
- code => 0x8ff4ca,
- comment => '# CJK(6AE2)'
+ ucs => 0x6AE2,
+ code => 0x8ff4ca,
+ comment => '# CJK(6AE2)'
},
{
direction => BOTH,
- ucs => 0x6DF2,
- code => 0x8ff4cc,
- comment => '# CJK(6DF2)'
+ ucs => 0x6DF2,
+ code => 0x8ff4cc,
+ comment => '# CJK(6DF2)'
},
{
direction => BOTH,
- ucs => 0x6DF8,
- code => 0x8ff4cb,
- comment => '# CJK(6DF8)'
+ ucs => 0x6DF8,
+ code => 0x8ff4cb,
+ comment => '# CJK(6DF8)'
},
{
direction => BOTH,
- ucs => 0x7028,
- code => 0x8ff4cd,
- comment => '# CJK(7028)'
+ ucs => 0x7028,
+ code => 0x8ff4cd,
+ comment => '# CJK(7028)'
},
{
direction => BOTH,
- ucs => 0x70BB,
- code => 0x8ff4ae,
- comment => '# CJK(70BB)'
+ ucs => 0x70BB,
+ code => 0x8ff4ae,
+ comment => '# CJK(70BB)'
},
{
direction => BOTH,
- ucs => 0x7501,
- code => 0x8ff4d0,
- comment => '# CJK(7501)'
+ ucs => 0x7501,
+ code => 0x8ff4d0,
+ comment => '# CJK(7501)'
},
{
direction => BOTH,
- ucs => 0x7682,
- code => 0x8ff4d1,
- comment => '# CJK(7682)'
+ ucs => 0x7682,
+ code => 0x8ff4d1,
+ comment => '# CJK(7682)'
},
{
direction => BOTH,
- ucs => 0x769E,
- code => 0x8ff4d2,
- comment => '# CJK(769E)'
+ ucs => 0x769E,
+ code => 0x8ff4d2,
+ comment => '# CJK(769E)'
},
{
direction => BOTH,
- ucs => 0x7930,
- code => 0x8ff4d4,
- comment => '# CJK(7930)'
+ ucs => 0x7930,
+ code => 0x8ff4d4,
+ comment => '# CJK(7930)'
},
{
direction => BOTH,
- ucs => 0x7AE7,
- code => 0x8ff4d9,
- comment => '# CJK(7AE7)'
+ ucs => 0x7AE7,
+ code => 0x8ff4d9,
+ comment => '# CJK(7AE7)'
},
{
direction => BOTH,
- ucs => 0x7DA0,
- code => 0x8ff4dc,
- comment => '# CJK(7DA0)'
+ ucs => 0x7DA0,
+ code => 0x8ff4dc,
+ comment => '# CJK(7DA0)'
},
{
direction => BOTH,
- ucs => 0x7DD6,
- code => 0x8ff4dd,
- comment => '# CJK(7DD6)'
+ ucs => 0x7DD6,
+ code => 0x8ff4dd,
+ comment => '# CJK(7DD6)'
},
{
direction => BOTH,
- ucs => 0x8362,
- code => 0x8ff4df,
- comment => '# CJK(8362)'
+ ucs => 0x8362,
+ code => 0x8ff4df,
+ comment => '# CJK(8362)'
},
{
direction => BOTH,
- ucs => 0x85B0,
- code => 0x8ff4e1,
- comment => '# CJK(85B0)'
+ ucs => 0x85B0,
+ code => 0x8ff4e1,
+ comment => '# CJK(85B0)'
},
{
direction => BOTH,
- ucs => 0x8807,
- code => 0x8ff4e4,
- comment => '# CJK(8807)'
+ ucs => 0x8807,
+ code => 0x8ff4e4,
+ comment => '# CJK(8807)'
},
{
direction => BOTH,
- ucs => 0x8B7F,
- code => 0x8ff4e6,
- comment => '# CJK(8B7F)'
+ ucs => 0x8B7F,
+ code => 0x8ff4e6,
+ comment => '# CJK(8B7F)'
},
{
direction => BOTH,
- ucs => 0x8CF4,
- code => 0x8ff4e7,
- comment => '# CJK(8CF4)'
+ ucs => 0x8CF4,
+ code => 0x8ff4e7,
+ comment => '# CJK(8CF4)'
},
{
direction => BOTH,
- ucs => 0x8D76,
- code => 0x8ff4e8,
- comment => '# CJK(8D76)'
+ ucs => 0x8D76,
+ code => 0x8ff4e8,
+ comment => '# CJK(8D76)'
},
{
direction => BOTH,
- ucs => 0x90DE,
- code => 0x8ff4ec,
- comment => '# CJK(90DE)'
+ ucs => 0x90DE,
+ code => 0x8ff4ec,
+ comment => '# CJK(90DE)'
},
{
direction => BOTH,
- ucs => 0x9115,
- code => 0x8ff4ee,
- comment => '# CJK(9115)'
+ ucs => 0x9115,
+ code => 0x8ff4ee,
+ comment => '# CJK(9115)'
},
{
direction => BOTH,
- ucs => 0x9592,
- code => 0x8ff4f1,
- comment => '# CJK(9592)'
+ ucs => 0x9592,
+ code => 0x8ff4f1,
+ comment => '# CJK(9592)'
},
{
direction => BOTH,
- ucs => 0x973B,
- code => 0x8ff4f4,
- comment => '# CJK(973B)'
+ ucs => 0x973B,
+ code => 0x8ff4f4,
+ comment => '# CJK(973B)'
},
{
direction => BOTH,
- ucs => 0x974D,
- code => 0x8ff4f5,
- comment => '# CJK(974D)'
+ ucs => 0x974D,
+ code => 0x8ff4f5,
+ comment => '# CJK(974D)'
},
{
direction => BOTH,
- ucs => 0x9751,
- code => 0x8ff4f6,
- comment => '# CJK(9751)'
+ ucs => 0x9751,
+ code => 0x8ff4f6,
+ comment => '# CJK(9751)'
},
{
direction => BOTH,
- ucs => 0x999E,
- code => 0x8ff4fa,
- comment => '# CJK(999E)'
+ ucs => 0x999E,
+ code => 0x8ff4fa,
+ comment => '# CJK(999E)'
},
{
direction => BOTH,
- ucs => 0x9AD9,
- code => 0x8ff4fb,
- comment => '# CJK(9AD9)'
+ ucs => 0x9AD9,
+ code => 0x8ff4fb,
+ comment => '# CJK(9AD9)'
},
{
direction => BOTH,
- ucs => 0x9B72,
- code => 0x8ff4fc,
- comment => '# CJK(9B72)'
+ ucs => 0x9B72,
+ code => 0x8ff4fc,
+ comment => '# CJK(9B72)'
},
{
direction => BOTH,
- ucs => 0x9ED1,
- code => 0x8ff4fe,
- comment => '# CJK(9ED1)'
+ ucs => 0x9ED1,
+ code => 0x8ff4fe,
+ comment => '# CJK(9ED1)'
},
{
direction => BOTH,
- ucs => 0xF929,
- code => 0x8ff4c5,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-F929'
+ ucs => 0xF929,
+ code => 0x8ff4c5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-F929'
},
{
direction => BOTH,
- ucs => 0xF9DC,
- code => 0x8ff4f2,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC'
+ ucs => 0xF9DC,
+ code => 0x8ff4f2,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC'
},
{
direction => BOTH,
- ucs => 0xFA0E,
- code => 0x8ff4b4,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E'
+ ucs => 0xFA0E,
+ code => 0x8ff4b4,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E'
},
{
direction => BOTH,
- ucs => 0xFA0F,
- code => 0x8ff4b7,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F'
+ ucs => 0xFA0F,
+ code => 0x8ff4b7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F'
},
{
direction => BOTH,
- ucs => 0xFA10,
- code => 0x8ff4b8,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10'
+ ucs => 0xFA10,
+ code => 0x8ff4b8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10'
},
{
direction => BOTH,
- ucs => 0xFA11,
- code => 0x8ff4bd,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11'
+ ucs => 0xFA11,
+ code => 0x8ff4bd,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11'
},
{
direction => BOTH,
- ucs => 0xFA12,
- code => 0x8ff4c4,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12'
+ ucs => 0xFA12,
+ code => 0x8ff4c4,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12'
},
{
direction => BOTH,
- ucs => 0xFA13,
- code => 0x8ff4c7,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13'
+ ucs => 0xFA13,
+ code => 0x8ff4c7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13'
},
{
direction => BOTH,
- ucs => 0xFA14,
- code => 0x8ff4c8,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14'
+ ucs => 0xFA14,
+ code => 0x8ff4c8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14'
},
{
direction => BOTH,
- ucs => 0xFA15,
- code => 0x8ff4ce,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15'
+ ucs => 0xFA15,
+ code => 0x8ff4ce,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15'
},
{
direction => BOTH,
- ucs => 0xFA16,
- code => 0x8ff4cf,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16'
+ ucs => 0xFA16,
+ code => 0x8ff4cf,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16'
},
{
direction => BOTH,
- ucs => 0xFA17,
- code => 0x8ff4d3,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17'
+ ucs => 0xFA17,
+ code => 0x8ff4d3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17'
},
{
direction => BOTH,
- ucs => 0xFA18,
- code => 0x8ff4d5,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18'
+ ucs => 0xFA18,
+ code => 0x8ff4d5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18'
},
{
direction => BOTH,
- ucs => 0xFA19,
- code => 0x8ff4d6,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19'
+ ucs => 0xFA19,
+ code => 0x8ff4d6,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19'
},
{
direction => BOTH,
- ucs => 0xFA1A,
- code => 0x8ff4d7,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A'
+ ucs => 0xFA1A,
+ code => 0x8ff4d7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A'
},
{
direction => BOTH,
- ucs => 0xFA1B,
- code => 0x8ff4d8,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B'
+ ucs => 0xFA1B,
+ code => 0x8ff4d8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B'
},
{
direction => BOTH,
- ucs => 0xFA1C,
- code => 0x8ff4da,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C'
+ ucs => 0xFA1C,
+ code => 0x8ff4da,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C'
},
{
direction => BOTH,
- ucs => 0xFA1D,
- code => 0x8ff4db,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D'
+ ucs => 0xFA1D,
+ code => 0x8ff4db,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D'
},
{
direction => BOTH,
- ucs => 0xFA1E,
- code => 0x8ff4de,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E'
+ ucs => 0xFA1E,
+ code => 0x8ff4de,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E'
},
{
direction => BOTH,
- ucs => 0xFA1F,
- code => 0x8ff4e0,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F'
+ ucs => 0xFA1F,
+ code => 0x8ff4e0,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F'
},
{
direction => BOTH,
- ucs => 0xFA20,
- code => 0x8ff4e2,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20'
+ ucs => 0xFA20,
+ code => 0x8ff4e2,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20'
},
{
direction => BOTH,
- ucs => 0xFA21,
- code => 0x8ff4e3,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21'
+ ucs => 0xFA21,
+ code => 0x8ff4e3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21'
},
{
direction => BOTH,
- ucs => 0xFA22,
- code => 0x8ff4e5,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22'
+ ucs => 0xFA22,
+ code => 0x8ff4e5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22'
},
{
direction => BOTH,
- ucs => 0xFA23,
- code => 0x8ff4e9,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23'
+ ucs => 0xFA23,
+ code => 0x8ff4e9,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23'
},
{
direction => BOTH,
- ucs => 0xFA24,
- code => 0x8ff4ea,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24'
+ ucs => 0xFA24,
+ code => 0x8ff4ea,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24'
},
{
direction => BOTH,
- ucs => 0xFA25,
- code => 0x8ff4eb,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25'
+ ucs => 0xFA25,
+ code => 0x8ff4eb,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25'
},
{
direction => BOTH,
- ucs => 0xFA26,
- code => 0x8ff4ed,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26'
+ ucs => 0xFA26,
+ code => 0x8ff4ed,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26'
},
{
direction => BOTH,
- ucs => 0xFA27,
- code => 0x8ff4ef,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27'
+ ucs => 0xFA27,
+ code => 0x8ff4ef,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27'
},
{
direction => BOTH,
- ucs => 0xFA28,
- code => 0x8ff4f0,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28'
+ ucs => 0xFA28,
+ code => 0x8ff4f0,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28'
},
{
direction => BOTH,
- ucs => 0xFA29,
- code => 0x8ff4f3,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29'
+ ucs => 0xFA29,
+ code => 0x8ff4f3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29'
},
{
direction => BOTH,
- ucs => 0xFA2A,
- code => 0x8ff4f7,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A'
+ ucs => 0xFA2A,
+ code => 0x8ff4f7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A'
},
{
direction => BOTH,
- ucs => 0xFA2B,
- code => 0x8ff4f8,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B'
+ ucs => 0xFA2B,
+ code => 0x8ff4f8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B'
},
{
direction => BOTH,
- ucs => 0xFA2C,
- code => 0x8ff4f9,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C'
+ ucs => 0xFA2C,
+ code => 0x8ff4f9,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C'
},
{
direction => BOTH,
- ucs => 0xFA2D,
- code => 0x8ff4fd,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D'
+ ucs => 0xFA2D,
+ code => 0x8ff4fd,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D'
},
{
direction => BOTH,
- ucs => 0xFF07,
- code => 0x8ff4a9,
- comment => '# FULLWIDTH APOSTROPHE'
+ ucs => 0xFF07,
+ code => 0x8ff4a9,
+ comment => '# FULLWIDTH APOSTROPHE'
},
{
direction => BOTH,
- ucs => 0xFFE4,
- code => 0x8fa2c3,
- comment => '# FULLWIDTH BROKEN BAR'
+ ucs => 0xFFE4,
+ code => 0x8fa2c3,
+ comment => '# FULLWIDTH BROKEN BAR'
},
# additional conversions for EUC_JP -> UTF-8 conversion
{
direction => TO_UNICODE,
- ucs => 0x2116,
- code => 0x8ff4ac,
- comment => '# NUMERO SIGN'
+ ucs => 0x2116,
+ code => 0x8ff4ac,
+ comment => '# NUMERO SIGN'
},
{
direction => TO_UNICODE,
- ucs => 0x2121,
- code => 0x8ff4ad,
- comment => '# TELEPHONE SIGN'
+ ucs => 0x2121,
+ code => 0x8ff4ad,
+ comment => '# TELEPHONE SIGN'
},
{
direction => TO_UNICODE,
- ucs => 0x3231,
- code => 0x8ff4ab,
- comment => '# PARENTHESIZED IDEOGRAPH STOCK'
+ ucs => 0x3231,
+ code => 0x8ff4ab,
+ comment => '# PARENTHESIZED IDEOGRAPH STOCK'
});
print_conversion_tables($this_script, "EUC_JP", \@mapping);
diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl
index 4c3989d2c5..9112e1cfe9 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl
@@ -36,27 +36,27 @@ foreach my $i (@$mapping)
push @$mapping,
( {
direction => BOTH,
- ucs => 0x20AC,
- code => 0xa2e6,
- comment => '# EURO SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x20AC,
+ code => 0xa2e6,
+ comment => '# EURO SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => BOTH,
- ucs => 0x00AE,
- code => 0xa2e7,
- comment => '# REGISTERED SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00AE,
+ code => 0xa2e7,
+ comment => '# REGISTERED SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => BOTH,
- ucs => 0x327E,
- code => 0xa2e8,
- comment => '# CIRCLED HANGUL IEUNG U',
- f => $this_script,
- l => __LINE__
+ ucs => 0x327E,
+ code => 0xa2e8,
+ comment => '# CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__
});
print_conversion_tables($this_script, "EUC_KR", $mapping);
diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl
index ecc175528e..4ad17064ab 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl
@@ -30,8 +30,8 @@ my @extras;
foreach my $i (@$mapping)
{
- my $ucs = $i->{ucs};
- my $code = $i->{code};
+ my $ucs = $i->{ucs};
+ my $code = $i->{code};
my $origcode = $i->{code};
my $plane = ($code & 0x1f0000) >> 16;
@@ -56,12 +56,12 @@ foreach my $i (@$mapping)
{
push @extras,
{
- ucs => $i->{ucs},
- code => ($i->{code} + 0x8ea10000),
- rest => $i->{rest},
+ ucs => $i->{ucs},
+ code => ($i->{code} + 0x8ea10000),
+ rest => $i->{rest},
direction => TO_UNICODE,
- f => $i->{f},
- l => $i->{l}
+ f => $i->{f},
+ l => $i->{l}
};
}
}
diff --git a/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl b/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
index fb401e6099..9c8a983bf7 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
@@ -33,17 +33,17 @@ while (<$in>)
next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
my ($u, $c) = ($1, $2);
$c =~ s/ //g;
- my $ucs = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
if ($code >= 0x80 && $ucs >= 0x0080)
{
push @mapping,
{
- ucs => $ucs,
- code => $code,
+ ucs => $ucs,
+ code => $code,
direction => BOTH,
- f => $in_file,
- l => $.
+ f => $in_file,
+ l => $.
};
}
}
diff --git a/src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl b/src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl
index 370c5b801c..f50baa8f1f 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl
@@ -30,27 +30,27 @@ my $mapping = &read_source("JOHAB.TXT");
push @$mapping,
( {
direction => BOTH,
- ucs => 0x20AC,
- code => 0xd9e6,
- comment => '# EURO SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x20AC,
+ code => 0xd9e6,
+ comment => '# EURO SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => BOTH,
- ucs => 0x00AE,
- code => 0xd9e7,
- comment => '# REGISTERED SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00AE,
+ code => 0xd9e7,
+ comment => '# REGISTERED SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => BOTH,
- ucs => 0x327E,
- code => 0xd9e8,
- comment => '# CIRCLED HANGUL IEUNG U',
- f => $this_script,
- l => __LINE__
+ ucs => 0x327E,
+ code => 0xd9e8,
+ comment => '# CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__
});
print_conversion_tables($this_script, "JOHAB", $mapping);
diff --git a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
index 6431aba555..ed010a58fa 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
@@ -37,13 +37,13 @@ while (my $line = <$in>)
push @mapping,
{
- code => $code,
- ucs => $ucs1,
+ code => $code,
+ ucs => $ucs1,
ucs_second => $ucs2,
- comment => $rest,
- direction => BOTH,
- f => $in_file,
- l => $.
+ comment => $rest,
+ direction => BOTH,
+ f => $in_file,
+ l => $.
};
}
elsif ($line =~ /^0x(\w+)\s*U\+(\w+)\s*#\s*(\S.*)?\s*$/)
@@ -51,7 +51,7 @@ while (my $line = <$in>)
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
- my $ucs = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
my $direction;
@@ -74,12 +74,12 @@ while (my $line = <$in>)
push @mapping,
{
- code => $code,
- ucs => $ucs,
- comment => $rest,
+ code => $code,
+ ucs => $ucs,
+ comment => $rest,
direction => $direction,
- f => $in_file,
- l => $.
+ f => $in_file,
+ l => $.
};
}
}
diff --git a/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl b/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl
index 6426cf4794..0808c6836b 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl
@@ -22,13 +22,13 @@ my $mapping = read_source("CP932.TXT");
# Drop these SJIS codes from the source for UTF8=>SJIS conversion
my @reject_sjis = (
0xed40 .. 0xeefc, 0x8754 .. 0x875d, 0x878a, 0x8782,
- 0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792,
+ 0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792,
0x8795 .. 0x8797, 0x879a .. 0x879c);
foreach my $i (@$mapping)
{
my $code = $i->{code};
- my $ucs = $i->{ucs};
+ my $ucs = $i->{ucs};
if (grep { $code == $_ } @reject_sjis)
{
@@ -40,67 +40,67 @@ foreach my $i (@$mapping)
push @$mapping,
( {
direction => FROM_UNICODE,
- ucs => 0x00a2,
- code => 0x8191,
- comment => '# CENT SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00a2,
+ code => 0x8191,
+ comment => '# CENT SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x00a3,
- code => 0x8192,
- comment => '# POUND SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00a3,
+ code => 0x8192,
+ comment => '# POUND SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x00a5,
- code => 0x5c,
- comment => '# YEN SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00a5,
+ code => 0x5c,
+ comment => '# YEN SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x00ac,
- code => 0x81ca,
- comment => '# NOT SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00ac,
+ code => 0x81ca,
+ comment => '# NOT SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x2016,
- code => 0x8161,
- comment => '# DOUBLE VERTICAL LINE',
- f => $this_script,
- l => __LINE__
+ ucs => 0x2016,
+ code => 0x8161,
+ comment => '# DOUBLE VERTICAL LINE',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x203e,
- code => 0x7e,
- comment => '# OVERLINE',
- f => $this_script,
- l => __LINE__
+ ucs => 0x203e,
+ code => 0x7e,
+ comment => '# OVERLINE',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x2212,
- code => 0x817c,
- comment => '# MINUS SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x2212,
+ code => 0x817c,
+ comment => '# MINUS SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x301c,
- code => 0x8160,
- comment => '# WAVE DASH',
- f => $this_script,
- l => __LINE__
+ ucs => 0x301c,
+ code => 0x8160,
+ comment => '# WAVE DASH',
+ f => $this_script,
+ l => __LINE__
});
print_conversion_tables($this_script, "SJIS", $mapping);
diff --git a/src/backend/utils/mb/Unicode/UCS_to_UHC.pl b/src/backend/utils/mb/Unicode/UCS_to_UHC.pl
index 5ec9c069b7..207677d76d 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_UHC.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_UHC.pl
@@ -33,7 +33,7 @@ while (<$in>)
next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
my ($u, $c) = ($1, $2);
$c =~ s/ //g;
- my $ucs = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
next if ($code == 0x0080 || $code == 0x00FF);
@@ -42,11 +42,11 @@ while (<$in>)
{
push @mapping,
{
- ucs => $ucs,
- code => $code,
+ ucs => $ucs,
+ code => $code,
direction => BOTH,
- f => $in_file,
- l => $.
+ f => $in_file,
+ l => $.
};
}
}
@@ -56,11 +56,11 @@ close($in);
push @mapping,
{
direction => BOTH,
- code => 0xa2e8,
- ucs => 0x327e,
- comment => 'CIRCLED HANGUL IEUNG U',
- f => $this_script,
- l => __LINE__
+ code => 0xa2e8,
+ ucs => 0x327e,
+ comment => 'CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__
};
print_conversion_tables($this_script, "UHC", \@mapping);
diff --git a/src/backend/utils/mb/Unicode/UCS_to_most.pl b/src/backend/utils/mb/Unicode/UCS_to_most.pl
index 1917f86f0a..a1947308ff 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_most.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_most.pl
@@ -23,33 +23,33 @@ use convutils;
my $this_script = 'src/backend/utils/mb/Unicode/UCS_to_most.pl';
my %filename = (
- 'WIN866' => 'CP866.TXT',
- 'WIN874' => 'CP874.TXT',
- 'WIN1250' => 'CP1250.TXT',
- 'WIN1251' => 'CP1251.TXT',
- 'WIN1252' => 'CP1252.TXT',
- 'WIN1253' => 'CP1253.TXT',
- 'WIN1254' => 'CP1254.TXT',
- 'WIN1255' => 'CP1255.TXT',
- 'WIN1256' => 'CP1256.TXT',
- 'WIN1257' => 'CP1257.TXT',
- 'WIN1258' => 'CP1258.TXT',
- 'ISO8859_2' => '8859-2.TXT',
- 'ISO8859_3' => '8859-3.TXT',
- 'ISO8859_4' => '8859-4.TXT',
- 'ISO8859_5' => '8859-5.TXT',
- 'ISO8859_6' => '8859-6.TXT',
- 'ISO8859_7' => '8859-7.TXT',
- 'ISO8859_8' => '8859-8.TXT',
- 'ISO8859_9' => '8859-9.TXT',
+ 'WIN866' => 'CP866.TXT',
+ 'WIN874' => 'CP874.TXT',
+ 'WIN1250' => 'CP1250.TXT',
+ 'WIN1251' => 'CP1251.TXT',
+ 'WIN1252' => 'CP1252.TXT',
+ 'WIN1253' => 'CP1253.TXT',
+ 'WIN1254' => 'CP1254.TXT',
+ 'WIN1255' => 'CP1255.TXT',
+ 'WIN1256' => 'CP1256.TXT',
+ 'WIN1257' => 'CP1257.TXT',
+ 'WIN1258' => 'CP1258.TXT',
+ 'ISO8859_2' => '8859-2.TXT',
+ 'ISO8859_3' => '8859-3.TXT',
+ 'ISO8859_4' => '8859-4.TXT',
+ 'ISO8859_5' => '8859-5.TXT',
+ 'ISO8859_6' => '8859-6.TXT',
+ 'ISO8859_7' => '8859-7.TXT',
+ 'ISO8859_8' => '8859-8.TXT',
+ 'ISO8859_9' => '8859-9.TXT',
'ISO8859_10' => '8859-10.TXT',
'ISO8859_13' => '8859-13.TXT',
'ISO8859_14' => '8859-14.TXT',
'ISO8859_15' => '8859-15.TXT',
'ISO8859_16' => '8859-16.TXT',
- 'KOI8R' => 'KOI8-R.TXT',
- 'KOI8U' => 'KOI8-U.TXT',
- 'GBK' => 'CP936.TXT');
+ 'KOI8R' => 'KOI8-R.TXT',
+ 'KOI8U' => 'KOI8-U.TXT',
+ 'GBK' => 'CP936.TXT');
# make maps for all encodings if not specified
my @charsets = (scalar(@ARGV) > 0) ? @ARGV : sort keys(%filename);
diff --git a/src/backend/utils/mb/Unicode/convutils.pm b/src/backend/utils/mb/Unicode/convutils.pm
index fd019424fd..77de7b1a4d 100644
--- a/src/backend/utils/mb/Unicode/convutils.pm
+++ b/src/backend/utils/mb/Unicode/convutils.pm
@@ -16,10 +16,10 @@ our @EXPORT =
# Constants used in the 'direction' field of the character maps
use constant {
- NONE => 0,
- TO_UNICODE => 1,
+ NONE => 0,
+ TO_UNICODE => 1,
FROM_UNICODE => 2,
- BOTH => 3
+ BOTH => 3
};
#######################################################################
@@ -53,12 +53,12 @@ sub read_source
exit;
}
my $out = {
- code => hex($1),
- ucs => hex($2),
- comment => $4,
+ code => hex($1),
+ ucs => hex($2),
+ comment => $4,
direction => BOTH,
- f => $fname,
- l => $.
+ f => $fname,
+ l => $.
};
# Ignore pure ASCII mappings. PostgreSQL character conversion code
@@ -124,14 +124,14 @@ sub print_conversion_tables_direction
my $tblname;
if ($direction == TO_UNICODE)
{
- $fname = lc("${csname}_to_utf8.map");
+ $fname = lc("${csname}_to_utf8.map");
$tblname = lc("${csname}_to_unicode_tree");
print "- Writing ${csname}=>UTF8 conversion table: $fname\n";
}
else
{
- $fname = lc("utf8_to_${csname}.map");
+ $fname = lc("utf8_to_${csname}.map");
$tblname = lc("${csname}_from_unicode_tree");
print "- Writing UTF8=>${csname} conversion table: $fname\n";
@@ -378,10 +378,10 @@ sub print_radix_table
unshift @segments,
{
- header => "Dummy map, for invalid values",
+ header => "Dummy map, for invalid values",
min_idx => 0,
max_idx => $widest_range,
- label => "dummy map"
+ label => "dummy map"
};
###
@@ -397,7 +397,7 @@ sub print_radix_table
###
for (my $j = 0; $j < $#segments - 1; $j++)
{
- my $seg = $segments[$j];
+ my $seg = $segments[$j];
my $nextseg = $segments[ $j + 1 ];
# Count the number of zero values at the end of this segment.
@@ -527,17 +527,17 @@ sub print_radix_table
if ($max_val <= 0xffff)
{
$vals_per_line = 8;
- $colwidth = 4;
+ $colwidth = 4;
}
elsif ($max_val <= 0xffffff)
{
$vals_per_line = 4;
- $colwidth = 6;
+ $colwidth = 6;
}
else
{
$vals_per_line = 4;
- $colwidth = 8;
+ $colwidth = 8;
}
###
@@ -607,8 +607,10 @@ sub print_radix_table
# Print the next line's worth of values.
# XXX pad to begin at a nice boundary
printf $out " /* %02x */ ", $i;
- for (my $j = 0;
- $j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
+ for (
+ my $j = 0;
+ $j < $vals_per_line && $i <= $seg->{max_idx};
+ $j++)
{
# missing values represent zero.
my $val = $seg->{values}->{$i} || 0;
@@ -671,10 +673,10 @@ sub build_segments_recurse
push @segments,
{
header => $header . ", leaf: ${path}xx",
- label => $label,
- level => $level,
- depth => $depth,
- path => $path,
+ label => $label,
+ level => $level,
+ depth => $depth,
+ path => $path,
values => $map
};
}
@@ -696,10 +698,10 @@ sub build_segments_recurse
push @segments,
{
header => $header . ", byte #$level: ${path}xx",
- label => $label,
- level => $level,
- depth => $depth,
- path => $path,
+ label => $label,
+ level => $level,
+ depth => $depth,
+ path => $path,
values => \%children
};
}
@@ -789,12 +791,12 @@ sub make_charmap_combined
if (defined $c->{ucs_second})
{
my $entry = {
- utf8 => ucs2utf($c->{ucs}),
+ utf8 => ucs2utf($c->{ucs}),
utf8_second => ucs2utf($c->{ucs_second}),
- code => $c->{code},
- comment => $c->{comment},
- f => $c->{f},
- l => $c->{l}
+ code => $c->{code},
+ comment => $c->{comment},
+ f => $c->{f},
+ l => $c->{l}
};
push @combined, $entry;
}
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 67c37c49cb..a9033b7a54 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -1470,8 +1470,8 @@ check_GUC_init(struct config_generic *gconf)
/* Flag combinations */
/*
- * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part
- * of SHOW ALL should not be hidden in postgresql.conf.sample.
+ * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of
+ * SHOW ALL should not be hidden in postgresql.conf.sample.
*/
if ((gconf->flags & GUC_NO_SHOW_ALL) &&
!(gconf->flags & GUC_NOT_IN_SAMPLE))
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index 844781a7f5..c27eb36758 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -4685,8 +4685,8 @@ struct config_enum ConfigureNamesEnum[] =
{
{"icu_validation_level", PGC_USERSET, CLIENT_CONN_LOCALE,
- gettext_noop("Log level for reporting invalid ICU locale strings."),
- NULL
+ gettext_noop("Log level for reporting invalid ICU locale strings."),
+ NULL
},
&icu_validation_level,
WARNING, icu_validation_level_options,
diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c
index f5a62061a3..7a3781466e 100644
--- a/src/backend/utils/mmgr/dsa.c
+++ b/src/backend/utils/mmgr/dsa.c
@@ -1369,7 +1369,7 @@ init_span(dsa_area *area,
if (DsaPointerIsValid(pool->spans[1]))
{
dsa_area_span *head = (dsa_area_span *)
- dsa_get_address(area, pool->spans[1]);
+ dsa_get_address(area, pool->spans[1]);
head->prevspan = span_pointer;
}
@@ -2215,7 +2215,7 @@ make_new_segment(dsa_area *area, size_t requested_pages)
if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
{
dsa_segment_map *next =
- get_segment_by_index(area, segment_map->header->next);
+ get_segment_by_index(area, segment_map->header->next);
Assert(next->header->bin == segment_map->header->bin);
next->header->prev = new_index;
diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 722a2e34db..8f9ea090fa 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -285,7 +285,7 @@ sum_free_pages(FreePageManager *fpm)
if (!relptr_is_null(fpm->freelist[list]))
{
FreePageSpanLeader *candidate =
- relptr_access(base, fpm->freelist[list]);
+ relptr_access(base, fpm->freelist[list]);
do
{
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 42b90e4d4f..9fc83f11f6 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -734,9 +734,9 @@ MemoryContextStatsDetail(MemoryContext context, int max_children,
*
* We don't buffer the information about all memory contexts in a
* backend into StringInfo and log it as one message. That would
- * require the buffer to be enlarged, risking an OOM as there could
- * be a large number of memory contexts in a backend. Instead, we
- * log one message per memory context.
+ * require the buffer to be enlarged, risking an OOM as there could be
+ * a large number of memory contexts in a backend. Instead, we log
+ * one message per memory context.
*/
ereport(LOG_SERVER_ONLY,
(errhidestmt(true),
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 7dec652106..f926f1faad 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -587,7 +587,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
while (ResourceArrayGetAny(&(owner->cryptohasharr), &foundres))
{
pg_cryptohash_ctx *context =
- (pg_cryptohash_ctx *) DatumGetPointer(foundres);
+ (pg_cryptohash_ctx *) DatumGetPointer(foundres);
if (isCommit)
PrintCryptoHashLeakWarning(foundres);
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 95c3970437..e5a4e5b371 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -1438,8 +1438,8 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
* in memory, using a heap to eliminate excess tuples. Now we
- * have to transform the heap to a properly-sorted array.
- * Note that sort_bounded_heap sets the correct state->status.
+ * have to transform the heap to a properly-sorted array. Note
+ * that sort_bounded_heap sets the correct state->status.
*/
sort_bounded_heap(state);
state->current = 0;
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index c9ca44d8b7..3a419e348f 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -1990,7 +1990,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin)
int bucket = (oldSnapshotControl->head_offset
+ ((ts - oldSnapshotControl->head_timestamp)
/ USECS_PER_MINUTE))
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin))
oldSnapshotControl->xid_by_minute[bucket] = xmin;
@@ -2057,7 +2057,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin)
/* Extend map to unused entry. */
int new_tail = (oldSnapshotControl->head_offset
+ oldSnapshotControl->count_used)
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
oldSnapshotControl->count_used++;
oldSnapshotControl->xid_by_minute[new_tail] = xmin;
@@ -2188,7 +2188,7 @@ SerializeSnapshot(Snapshot snapshot, char *start_address)
if (serialized_snapshot.subxcnt > 0)
{
Size subxipoff = sizeof(SerializedSnapshotData) +
- snapshot->xcnt * sizeof(TransactionId);
+ snapshot->xcnt * sizeof(TransactionId);
memcpy((TransactionId *) (start_address + subxipoff),
snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId));
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 30b576932f..31156e863b 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -1565,8 +1565,8 @@ static void
setup_auth(FILE *cmdfd)
{
/*
- * The authid table shouldn't be readable except through views, to
- * ensure passwords are not publicly visible.
+ * The authid table shouldn't be readable except through views, to ensure
+ * passwords are not publicly visible.
*/
PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n");
@@ -1957,9 +1957,9 @@ make_template0(FILE *cmdfd)
" STRATEGY = file_copy;\n\n");
/*
- * template0 shouldn't have any collation-dependent objects, so unset
- * the collation version. This disables collation version checks when
- * making a new database from it.
+ * template0 shouldn't have any collation-dependent objects, so unset the
+ * collation version. This disables collation version checks when making
+ * a new database from it.
*/
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n");
@@ -1969,9 +1969,8 @@ make_template0(FILE *cmdfd)
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n");
/*
- * Explicitly revoke public create-schema and create-temp-table
- * privileges in template1 and template0; else the latter would be on
- * by default
+ * Explicitly revoke public create-schema and create-temp-table privileges
+ * in template1 and template0; else the latter would be on by default
*/
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n");
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n");
@@ -2244,11 +2243,11 @@ static char *
icu_language_tag(const char *loc_str)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- char *langtag;
- size_t buflen = 32; /* arbitrary starting buffer size */
- const bool strict = true;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ char *langtag;
+ size_t buflen = 32; /* arbitrary starting buffer size */
+ const bool strict = true;
status = U_ZERO_ERROR;
uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
@@ -2264,8 +2263,8 @@ icu_language_tag(const char *loc_str)
return pstrdup("en-US-u-va-posix");
/*
- * A BCP47 language tag doesn't have a clearly-defined upper limit
- * (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
+ * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
+ * RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop.
*/
@@ -2298,7 +2297,7 @@ icu_language_tag(const char *loc_str)
return langtag;
#else
pg_fatal("ICU is not supported in this build");
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
#endif
}
@@ -2311,9 +2310,9 @@ static void
icu_validate_locale(const char *loc_str)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- bool found = false;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ bool found = false;
/* validate that we can extract the language */
status = U_ZERO_ERROR;
@@ -2334,8 +2333,8 @@ icu_validate_locale(const char *loc_str)
/* search for matching language within ICU */
for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
{
- const char *otherloc = uloc_getAvailable(i);
- char otherlang[ULOC_LANG_CAPACITY];
+ const char *otherloc = uloc_getAvailable(i);
+ char otherlang[ULOC_LANG_CAPACITY];
status = U_ZERO_ERROR;
uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
@@ -2366,10 +2365,10 @@ static char *
default_icu_locale(void)
{
#ifdef USE_ICU
- UCollator *collator;
- UErrorCode status;
- const char *valid_locale;
- char *default_locale;
+ UCollator *collator;
+ UErrorCode status;
+ const char *valid_locale;
+ char *default_locale;
status = U_ZERO_ERROR;
collator = ucol_open(NULL, &status);
@@ -2449,7 +2448,7 @@ setlocales(void)
if (locale_provider == COLLPROVIDER_ICU)
{
- char *langtag;
+ char *langtag;
/* acquire default locale from the environment, if not specified */
if (icu_locale == NULL)
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index 17a444d80c..fa00bb3dab 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -105,7 +105,7 @@ if ($ENV{with_icu} eq 'yes')
{
command_ok(
[
- 'initdb', '--no-sync',
+ 'initdb', '--no-sync',
'--locale-provider=icu', '--icu-locale=en',
"$tempdir/data3"
],
@@ -113,7 +113,7 @@ if ($ENV{with_icu} eq 'yes')
command_fails_like(
[
- 'initdb', '--no-sync',
+ 'initdb', '--no-sync',
'--locale-provider=icu', '--icu-locale=@colNumeric=lower',
"$tempdir/dataX"
],
@@ -122,7 +122,7 @@ if ($ENV{with_icu} eq 'yes')
command_fails_like(
[
- 'initdb', '--no-sync',
+ 'initdb', '--no-sync',
'--locale-provider=icu', '--encoding=SQL_ASCII',
'--icu-locale=en', "$tempdir/dataX"
],
@@ -131,18 +131,18 @@ if ($ENV{with_icu} eq 'yes')
command_fails_like(
[
- 'initdb', '--no-sync',
- '--locale-provider=icu',
- '--icu-locale=nonsense-nowhere', "$tempdir/dataX"
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=nonsense-nowhere',
+ "$tempdir/dataX"
],
qr/error: locale "nonsense-nowhere" has unknown language "nonsense"/,
'fails for nonsense language');
command_fails_like(
[
- 'initdb', '--no-sync',
- '--locale-provider=icu',
- '--icu-locale=@colNumeric=lower', "$tempdir/dataX"
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
+ "$tempdir/dataX"
],
qr/could not open collator for locale "und-u-kn-lower": U_ILLEGAL_ARGUMENT_ERROR/,
'fails for invalid collation argument');
@@ -160,7 +160,7 @@ command_fails(
command_fails(
[
- 'initdb', '--no-sync',
+ 'initdb', '--no-sync',
'--locale-provider=libc', '--icu-locale=en',
"$tempdir/dataX"
],
diff --git a/src/bin/pg_amcheck/t/002_nonesuch.pl b/src/bin/pg_amcheck/t/002_nonesuch.pl
index e3cfae9cd4..cf2438717e 100644
--- a/src/bin/pg_amcheck/t/002_nonesuch.pl
+++ b/src/bin/pg_amcheck/t/002_nonesuch.pl
@@ -183,7 +183,7 @@ $node->command_checks_all(
$node->command_checks_all(
[
'pg_amcheck', '--no-strict-names',
- '-t', 'this.is.a.really.long.dotted.string'
+ '-t', 'this.is.a.really.long.dotted.string'
],
2,
[qr/^$/],
@@ -252,20 +252,20 @@ $node->command_checks_all(
$node->command_checks_all(
[
'pg_amcheck', '--no-strict-names',
- '-t', 'no_such_table',
- '-t', 'no*such*table',
- '-i', 'no_such_index',
- '-i', 'no*such*index',
- '-r', 'no_such_relation',
- '-r', 'no*such*relation',
- '-d', 'no_such_database',
- '-d', 'no*such*database',
- '-r', 'none.none',
- '-r', 'none.none.none',
- '-r', 'postgres.none.none',
- '-r', 'postgres.pg_catalog.none',
- '-r', 'postgres.none.pg_class',
- '-t', 'postgres.pg_catalog.pg_class', # This exists
+ '-t', 'no_such_table',
+ '-t', 'no*such*table',
+ '-i', 'no_such_index',
+ '-i', 'no*such*index',
+ '-r', 'no_such_relation',
+ '-r', 'no*such*relation',
+ '-d', 'no_such_database',
+ '-d', 'no*such*database',
+ '-r', 'none.none',
+ '-r', 'none.none.none',
+ '-r', 'postgres.none.none',
+ '-r', 'postgres.pg_catalog.none',
+ '-r', 'postgres.none.pg_class',
+ '-t', 'postgres.pg_catalog.pg_class', # This exists
],
0,
[qr/^$/],
@@ -304,13 +304,13 @@ $node->safe_psql('postgres', q(CREATE DATABASE another_db));
$node->command_checks_all(
[
'pg_amcheck', '-d',
- 'postgres', '--no-strict-names',
- '-t', 'template1.public.foo',
- '-t', 'another_db.public.foo',
- '-t', 'no_such_database.public.foo',
- '-i', 'template1.public.foo_idx',
- '-i', 'another_db.public.foo_idx',
- '-i', 'no_such_database.public.foo_idx',
+ 'postgres', '--no-strict-names',
+ '-t', 'template1.public.foo',
+ '-t', 'another_db.public.foo',
+ '-t', 'no_such_database.public.foo',
+ '-i', 'template1.public.foo_idx',
+ '-i', 'another_db.public.foo_idx',
+ '-i', 'no_such_database.public.foo_idx',
],
1,
[qr/^$/],
@@ -334,8 +334,8 @@ $node->command_checks_all(
$node->command_checks_all(
[
'pg_amcheck', '--all', '--no-strict-names', '-S',
- 'public', '-S', 'pg_catalog', '-S',
- 'pg_toast', '-S', 'information_schema',
+ 'public', '-S', 'pg_catalog', '-S',
+ 'pg_toast', '-S', 'information_schema',
],
1,
[qr/^$/],
@@ -348,9 +348,9 @@ $node->command_checks_all(
# Check with schema exclusion patterns overriding relation and schema inclusion patterns
$node->command_checks_all(
[
- 'pg_amcheck', '--all', '--no-strict-names', '-s',
- 'public', '-s', 'pg_catalog', '-s',
- 'pg_toast', '-s', 'information_schema', '-t',
+ 'pg_amcheck', '--all', '--no-strict-names', '-s',
+ 'public', '-s', 'pg_catalog', '-s',
+ 'pg_toast', '-s', 'information_schema', '-t',
'pg_catalog.pg_class', '-S*'
],
1,
diff --git a/src/bin/pg_amcheck/t/003_check.pl b/src/bin/pg_amcheck/t/003_check.pl
index 359abe25a1..d577cffa30 100644
--- a/src/bin/pg_amcheck/t/003_check.pl
+++ b/src/bin/pg_amcheck/t/003_check.pl
@@ -319,7 +319,7 @@ plan_to_remove_relation_file('db2', 's1.t1_btree');
my @cmd = ('pg_amcheck', '-p', $port);
# Regular expressions to match various expected output
-my $no_output_re = qr/^$/;
+my $no_output_re = qr/^$/;
my $line_pointer_corruption_re = qr/line pointer/;
my $missing_file_re = qr/could not open file ".*": No such file or directory/;
my $index_missing_relation_fork_re =
diff --git a/src/bin/pg_amcheck/t/004_verify_heapam.pl b/src/bin/pg_amcheck/t/004_verify_heapam.pl
index aa62422316..1b5027c420 100644
--- a/src/bin/pg_amcheck/t/004_verify_heapam.pl
+++ b/src/bin/pg_amcheck/t/004_verify_heapam.pl
@@ -105,31 +105,31 @@ sub read_tuple
@_ = unpack(HEAPTUPLE_PACK_CODE, $buffer);
%tup = (
- t_xmin => shift,
- t_xmax => shift,
- t_field3 => shift,
- bi_hi => shift,
- bi_lo => shift,
- ip_posid => shift,
- t_infomask2 => shift,
- t_infomask => shift,
- t_hoff => shift,
- t_bits => shift,
- a_1 => shift,
- a_2 => shift,
- b_header => shift,
- b_body1 => shift,
- b_body2 => shift,
- b_body3 => shift,
- b_body4 => shift,
- b_body5 => shift,
- b_body6 => shift,
- b_body7 => shift,
- c_va_header => shift,
- c_va_vartag => shift,
- c_va_rawsize => shift,
- c_va_extinfo => shift,
- c_va_valueid => shift,
+ t_xmin => shift,
+ t_xmax => shift,
+ t_field3 => shift,
+ bi_hi => shift,
+ bi_lo => shift,
+ ip_posid => shift,
+ t_infomask2 => shift,
+ t_infomask => shift,
+ t_hoff => shift,
+ t_bits => shift,
+ a_1 => shift,
+ a_2 => shift,
+ b_header => shift,
+ b_body1 => shift,
+ b_body2 => shift,
+ b_body3 => shift,
+ b_body4 => shift,
+ b_body5 => shift,
+ b_body6 => shift,
+ b_body7 => shift,
+ c_va_header => shift,
+ c_va_vartag => shift,
+ c_va_rawsize => shift,
+ c_va_extinfo => shift,
+ c_va_valueid => shift,
c_va_toastrelid => shift);
# Stitch together the text for column 'b'
$tup{b} = join('', map { chr($tup{"b_body$_"}) } (1 .. 7));
@@ -151,17 +151,17 @@ sub write_tuple
my ($fh, $offset, $tup) = @_;
my $buffer = pack(
HEAPTUPLE_PACK_CODE,
- $tup->{t_xmin}, $tup->{t_xmax},
- $tup->{t_field3}, $tup->{bi_hi},
- $tup->{bi_lo}, $tup->{ip_posid},
- $tup->{t_infomask2}, $tup->{t_infomask},
- $tup->{t_hoff}, $tup->{t_bits},
- $tup->{a_1}, $tup->{a_2},
- $tup->{b_header}, $tup->{b_body1},
- $tup->{b_body2}, $tup->{b_body3},
- $tup->{b_body4}, $tup->{b_body5},
- $tup->{b_body6}, $tup->{b_body7},
- $tup->{c_va_header}, $tup->{c_va_vartag},
+ $tup->{t_xmin}, $tup->{t_xmax},
+ $tup->{t_field3}, $tup->{bi_hi},
+ $tup->{bi_lo}, $tup->{ip_posid},
+ $tup->{t_infomask2}, $tup->{t_infomask},
+ $tup->{t_hoff}, $tup->{t_bits},
+ $tup->{a_1}, $tup->{a_2},
+ $tup->{b_header}, $tup->{b_body1},
+ $tup->{b_body2}, $tup->{b_body3},
+ $tup->{b_body4}, $tup->{b_body5},
+ $tup->{b_body6}, $tup->{b_body7},
+ $tup->{c_va_header}, $tup->{c_va_vartag},
$tup->{c_va_rawsize}, $tup->{c_va_extinfo},
$tup->{c_va_valueid}, $tup->{c_va_toastrelid});
sysseek($fh, $offset, 0)
@@ -188,7 +188,7 @@ $node->append_conf('postgresql.conf', 'max_prepared_transactions=10');
# Start the node and load the extensions. We depend on both
# amcheck and pageinspect for this test.
$node->start;
-my $port = $node->port;
+my $port = $node->port;
my $pgdata = $node->data_dir;
$node->safe_psql('postgres', "CREATE EXTENSION amcheck");
$node->safe_psql('postgres', "CREATE EXTENSION pageinspect");
@@ -354,23 +354,23 @@ binmode $file;
my $ENDIANNESS;
for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
{
- my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
+ my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
my $offset = $lp_off[$tupidx];
- next if $offset == -1; # ignore redirect line pointers
+ next if $offset == -1; # ignore redirect line pointers
my $tup = read_tuple($file, $offset);
# Sanity-check that the data appears on the page where we expect.
my $a_1 = $tup->{a_1};
my $a_2 = $tup->{a_2};
- my $b = $tup->{b};
+ my $b = $tup->{b};
if ($a_1 != 0xDEADF9F9 || $a_2 != 0xDEADF9F9 || $b ne 'abcdefg')
{
close($file); # ignore errors on close; we're exiting anyway
$node->clean_node;
plan skip_all =>
sprintf(
- "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")", $tupidx,
- 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
+ "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")",
+ $tupidx, 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
exit;
}
@@ -395,18 +395,18 @@ $node->command_ok([ 'pg_amcheck', '-p', $port, 'postgres' ],
$node->stop;
# Some #define constants from access/htup_details.h for use while corrupting.
-use constant HEAP_HASNULL => 0x0001;
+use constant HEAP_HASNULL => 0x0001;
use constant HEAP_XMAX_LOCK_ONLY => 0x0080;
use constant HEAP_XMIN_COMMITTED => 0x0100;
-use constant HEAP_XMIN_INVALID => 0x0200;
+use constant HEAP_XMIN_INVALID => 0x0200;
use constant HEAP_XMAX_COMMITTED => 0x0400;
-use constant HEAP_XMAX_INVALID => 0x0800;
-use constant HEAP_NATTS_MASK => 0x07FF;
-use constant HEAP_XMAX_IS_MULTI => 0x1000;
-use constant HEAP_KEYS_UPDATED => 0x2000;
-use constant HEAP_HOT_UPDATED => 0x4000;
-use constant HEAP_ONLY_TUPLE => 0x8000;
-use constant HEAP_UPDATED => 0x2000;
+use constant HEAP_XMAX_INVALID => 0x0800;
+use constant HEAP_NATTS_MASK => 0x07FF;
+use constant HEAP_XMAX_IS_MULTI => 0x1000;
+use constant HEAP_KEYS_UPDATED => 0x2000;
+use constant HEAP_HOT_UPDATED => 0x4000;
+use constant HEAP_ONLY_TUPLE => 0x8000;
+use constant HEAP_UPDATED => 0x2000;
# Helper function to generate a regular expression matching the header we
# expect verify_heapam() to return given which fields we expect to be non-null.
@@ -436,7 +436,7 @@ binmode $file;
for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
{
- my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
+ my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
my $offset = $lp_off[$tupidx];
my $header = header(0, $offnum, undef);
@@ -534,7 +534,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
# Corrupt the tuple to look like it has lots of attributes, some of
# them null. This falsely creates the impression that the t_bits
# array is longer than just one byte, but t_hoff still says otherwise.
- $tup->{t_infomask} |= HEAP_HASNULL;
+ $tup->{t_infomask} |= HEAP_HASNULL;
$tup->{t_infomask2} |= HEAP_NATTS_MASK;
$tup->{t_bits} = 0xAA;
@@ -544,7 +544,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
elsif ($offnum == 11)
{
# Same as above, but this time t_hoff plays along
- $tup->{t_infomask} |= HEAP_HASNULL;
+ $tup->{t_infomask} |= HEAP_HASNULL;
$tup->{t_infomask2} |= (HEAP_NATTS_MASK & 0x40);
$tup->{t_bits} = 0xAA;
$tup->{t_hoff} = 32;
@@ -568,9 +568,9 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
# bytes with 0xFF using 0x3FFFFFFF.
#
$tup->{b_header} = $ENDIANNESS eq 'little' ? 0xFC : 0x3F;
- $tup->{b_body1} = 0xFF;
- $tup->{b_body2} = 0xFF;
- $tup->{b_body3} = 0xFF;
+ $tup->{b_body1} = 0xFF;
+ $tup->{b_body2} = 0xFF;
+ $tup->{b_body3} = 0xFF;
$header = header(0, $offnum, 1);
push @expected,
@@ -620,7 +620,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
# at offnum 19 we will unset HEAP_ONLY_TUPLE flag
die "offnum $offnum should be a redirect" if defined $tup;
push @expected,
- qr/${header}redirected line pointer points to a non-heap-only tuple at offset \d+/;
+ qr/${header}redirected line pointer points to a non-heap-only tuple at offset \d+/;
}
elsif ($offnum == 18)
{
@@ -628,8 +628,8 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
die "offnum $offnum should be a redirect" if defined $tup;
sysseek($file, 92, 0) or BAIL_OUT("sysseek failed: $!");
syswrite($file,
- pack("L", $ENDIANNESS eq 'little' ? 0x00010011 : 0x00230000))
- or BAIL_OUT("syswrite failed: $!");
+ pack("L", $ENDIANNESS eq 'little' ? 0x00010011 : 0x00230000))
+ or BAIL_OUT("syswrite failed: $!");
push @expected,
qr/${header}redirected line pointer points to another redirected line pointer at offset \d+/;
}
@@ -644,8 +644,8 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
# rewrite line pointer with lp.off = 25, lp_flags = 2, lp_len = 0
sysseek($file, 108, 0) or BAIL_OUT("sysseek failed: $!");
syswrite($file,
- pack("L", $ENDIANNESS eq 'little' ? 0x00010019 : 0x00330000))
- or BAIL_OUT("syswrite failed: $!");
+ pack("L", $ENDIANNESS eq 'little' ? 0x00010019 : 0x00330000))
+ or BAIL_OUT("syswrite failed: $!");
push @expected,
qr/${header}redirect line pointer points to offset \d+, but offset \d+ also points there/;
}
@@ -756,7 +756,7 @@ $node->command_checks_all(
[ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ],
2, [@expected], [], 'Expected corruption message output');
$node->safe_psql(
- 'postgres', qq(
+ 'postgres', qq(
COMMIT PREPARED 'in_progress_tx';
));
diff --git a/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl b/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl
index 76321d1284..cc3386d146 100644
--- a/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl
+++ b/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl
@@ -14,7 +14,7 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir;
my @walfiles = (
'00000001000000370000000C.gz', '00000001000000370000000D',
- '00000001000000370000000E', '00000001000000370000000F.partial',);
+ '00000001000000370000000E', '00000001000000370000000F.partial',);
sub create_files
{
@@ -57,8 +57,10 @@ command_fails_like(
{
# like command_like but checking stderr
my $stderr;
- my $result = IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
- $walfiles[2] ], '2>', \$stderr;
+ my $result =
+ IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
+ $walfiles[2] ],
+ '2>', \$stderr;
ok($result, "pg_archivecleanup dry run: exit code 0");
like(
$stderr,
@@ -98,8 +100,8 @@ sub run_check
return;
}
-run_check('', 'pg_archivecleanup');
-run_check('.partial', 'pg_archivecleanup with .partial file');
+run_check('', 'pg_archivecleanup');
+run_check('.partial', 'pg_archivecleanup with .partial file');
run_check('.00000020.backup', 'pg_archivecleanup with .backup file');
done_testing();
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index ba471f898c..1dc8efe0cb 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -341,18 +341,18 @@ tablespace_list_append(const char *arg)
/*
* All tablespaces are created with absolute directories, so specifying a
- * non-absolute path here would just never match, possibly confusing users.
- * Since we don't know whether the remote side is Windows or not, and it
- * might be different than the local side, permit any path that could be
- * absolute under either set of rules.
+ * non-absolute path here would just never match, possibly confusing
+ * users. Since we don't know whether the remote side is Windows or not,
+ * and it might be different than the local side, permit any path that
+ * could be absolute under either set of rules.
*
* (There is little practical risk of confusion here, because someone
* running entirely on Linux isn't likely to have a relative path that
* begins with a backslash or something that looks like a drive
- * specification. If they do, and they also incorrectly believe that
- * a relative path is acceptable here, we'll silently fail to warn them
- * of their mistake, and the -T option will just not get applied, same
- * as if they'd specified -T for a nonexistent tablespace.)
+ * specification. If they do, and they also incorrectly believe that a
+ * relative path is acceptable here, we'll silently fail to warn them of
+ * their mistake, and the -T option will just not get applied, same as if
+ * they'd specified -T for a nonexistent tablespace.)
*/
if (!is_nonwindows_absolute_path(cell->old_dir) &&
!is_windows_absolute_path(cell->old_dir))
diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c
index fb9e29682b..d0a4079d50 100644
--- a/src/bin/pg_basebackup/pg_receivewal.c
+++ b/src/bin/pg_basebackup/pg_receivewal.c
@@ -43,7 +43,7 @@
static char *basedir = NULL;
static int verbose = 0;
static int compresslevel = 0;
-static bool noloop = false;
+static bool noloop = false;
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static volatile sig_atomic_t time_to_stop = false;
static bool do_create_slot = false;
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 4d130a7f94..793d64863c 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -4,7 +4,7 @@
use strict;
use warnings;
use File::Basename qw(basename dirname);
-use File::Path qw(rmtree);
+use File::Path qw(rmtree);
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
@@ -29,7 +29,7 @@ umask(0077);
# Initialize node without replication settings
$node->init(
- extra => ['--data-checksums'],
+ extra => ['--data-checksums'],
auth_extra => [ '--create-role', 'backupuser' ]);
$node->start;
my $pgdata = $node->data_dir;
@@ -144,8 +144,7 @@ SKIP:
'gzip:long',
'invalid compression specification: compression algorithm "gzip" does not support long-distance mode',
'failure on long mode for gzip'
- ],
- );
+ ],);
for my $cft (@compression_failure_tests)
{
@@ -153,7 +152,7 @@ SKIP:
my $sfail = quotemeta($server_fails . $cft->[1]);
$node->command_fails_like(
[
- 'pg_basebackup', '-D',
+ 'pg_basebackup', '-D',
"$tempdir/backup", '--compress',
$cft->[0]
],
@@ -161,7 +160,7 @@ SKIP:
'client ' . $cft->[2]);
$node->command_fails_like(
[
- 'pg_basebackup', '-D',
+ 'pg_basebackup', '-D',
"$tempdir/backup", '--compress',
'server-' . $cft->[0]
],
@@ -193,7 +192,7 @@ my $baseUnloggedPath = $node->safe_psql('postgres',
# Make sure main and init forks exist
ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base');
-ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
+ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
# Create files that look like temporary relations to ensure they are ignored.
my $postgresOid = $node->safe_psql('postgres',
@@ -211,7 +210,7 @@ foreach my $filename (@tempRelationFiles)
$node->command_ok(
[ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
'pg_basebackup runs');
-ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
+ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
# Permissions on backup should be default
@@ -274,13 +273,13 @@ unlink("$pgdata/backup_label")
$node->command_ok(
[
@pg_basebackup_defs, '-D',
- "$tempdir/backup2", '--no-manifest',
- '--waldir', "$tempdir/xlog2"
+ "$tempdir/backup2", '--no-manifest',
+ '--waldir', "$tempdir/xlog2"
],
'separate xlog directory');
-ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
+ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
ok(!-f "$tempdir/backup2/backup_manifest", 'manifest was suppressed');
-ok(-d "$tempdir/xlog2/", 'xlog directory was created');
+ok(-d "$tempdir/xlog2/", 'xlog directory was created');
rmtree("$tempdir/backup2");
rmtree("$tempdir/xlog2");
@@ -346,7 +345,7 @@ $node->start;
# to our physical temp location. That way we can use shorter names
# for the tablespace directories, which hopefully won't run afoul of
# the 99 character length limit.
-my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
+my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
my $real_sys_tempdir = "$sys_tempdir/tempdir";
dir_symlink "$tempdir", $real_sys_tempdir;
@@ -355,7 +354,7 @@ my $realTsDir = "$real_sys_tempdir/tblspc1";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
$node->safe_psql('postgres',
- "CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
+ "CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
. "INSERT INTO test1 VALUES (1234);");
$node->backup('tarbackup2', backup_options => ['-Ft']);
# empty test1, just so that it's different from the to-be-restored data
@@ -363,7 +362,7 @@ $node->safe_psql('postgres', "TRUNCATE TABLE test1;");
# basic checks on the output
my $backupdir = $node->backup_dir . '/tarbackup2';
-ok(-f "$backupdir/base.tar", 'backup tar was created');
+ok(-f "$backupdir/base.tar", 'backup tar was created');
ok(-f "$backupdir/pg_wal.tar", 'WAL tar was created');
my @tblspc_tars = glob "$backupdir/[0-9]*.tar";
is(scalar(@tblspc_tars), 1, 'one tablespace tar was created');
@@ -385,7 +384,7 @@ SKIP:
$node2->init_from_backup($node, 'tarbackup2', tar_program => $tar);
# Recover tablespace into a new directory (not where it was!)
- my $repTsDir = "$tempdir/tblspc1replica";
+ my $repTsDir = "$tempdir/tblspc1replica";
my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
mkdir $repTsDir;
PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0],
@@ -394,7 +393,7 @@ SKIP:
# Update tablespace map to point to new directory.
# XXX Ideally pg_basebackup would handle this.
$tblspc_tars[0] =~ m|/([0-9]*)\.tar$|;
- my $tblspcoid = $1;
+ my $tblspcoid = $1;
my $escapedRepTsDir = $realRepTsDir;
$escapedRepTsDir =~ s/\\/\\\\/g;
open my $mapfile, '>', $node2->data_dir . '/tablespace_map';
@@ -442,7 +441,7 @@ $node->command_fails(
$node->command_ok(
[
@pg_basebackup_defs, '-D',
- "$tempdir/backup1", '-Fp',
+ "$tempdir/backup1", '-Fp',
"-T$realTsDir=$tempdir/tbackup/tblspc1",
],
'plain format with tablespaces succeeds with tablespace mapping');
@@ -512,7 +511,7 @@ $realTsDir =~ s/=/\\=/;
$node->command_ok(
[
@pg_basebackup_defs, '-D',
- "$tempdir/backup3", '-Fp',
+ "$tempdir/backup3", '-Fp',
"-T$realTsDir=$tempdir/tbackup/tbl\\=spc2",
],
'mapping tablespace with = sign in path');
@@ -533,7 +532,7 @@ rmtree("$tempdir/tarbackup_l3");
$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ],
'pg_basebackup -R runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
-ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
+ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
rmtree("$tempdir/backupR");
@@ -572,9 +571,9 @@ ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupnoslot", '-X',
- 'stream', '--no-slot'
+ 'stream', '--no-slot'
],
'pg_basebackup -X stream runs with --no-slot');
rmtree("$tempdir/backupnoslot");
@@ -597,7 +596,7 @@ $node->command_fails_like(
$node->command_fails_like(
[
@pg_basebackup_defs, '--target', 'blackhole', '-X',
- 'none', '-D', "$tempdir/blackhole"
+ 'none', '-D', "$tempdir/blackhole"
],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
@@ -610,7 +609,7 @@ $node->command_ok(
'backup target blackhole');
$node->command_ok(
[
-		@pg_basebackup_defs,              '--target',
+ @pg_basebackup_defs, '--target',
"server:$tempdir/backuponserver", '-X',
'none'
],
@@ -634,9 +633,9 @@ rmtree("$tempdir/backuponserver");
$node->command_fails(
[
-		@pg_basebackup_defs,         '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_sl_fail", '-X',
-		'stream',                    '-S',
+ 'stream', '-S',
'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
@@ -647,9 +646,9 @@ $node->command_fails(
$node->command_fails(
[
-		@pg_basebackup_defs,      '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot", '-C',
-		'-S',                     'slot0',
+ '-S', 'slot0',
'--no-slot'
],
'pg_basebackup fails with -C -S --no-slot');
@@ -667,9 +666,9 @@ $node->command_ok(
$node->command_fails(
[
-		@pg_basebackup_defs,         '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_sl_fail", '-X',
-		'stream',                    '-S',
+ 'stream', '-S',
'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
@@ -680,18 +679,18 @@ $node->command_fails(
$node->command_fails(
[
-		@pg_basebackup_defs,      '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot", '-C',
-		'-S',                     'slot0',
+ '-S', 'slot0',
'--no-slot'
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_ok(
[
-		@pg_basebackup_defs,      '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot", '-C',
-		'-S',                     'slot0'
+ '-S', 'slot0'
],
'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");
@@ -712,9 +711,9 @@ isnt(
$node->command_fails(
[
-		@pg_basebackup_defs,       '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot1", '-C',
-		'-S',                      'slot0'
+ '-S', 'slot0'
],
'pg_basebackup fails with -C -S and a previously existing slot');
@@ -727,13 +726,13 @@ is($lsn, '', 'restart LSN of new slot is null');
$node->command_fails(
[
@pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
-		'slot1',             '-X', 'none'
+ 'slot1', '-X', 'none'
],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
@pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X',
-		'stream',            '-S', 'slot1'
+ 'stream', '-S', 'slot1'
],
'pg_basebackup -X stream with replication slot runs');
$lsn = $node->safe_psql('postgres',
@@ -745,7 +744,7 @@ rmtree("$tempdir/backupxs_sl");
$node->command_ok(
[
@pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
-		'stream',            '-S', 'slot1',                 '-R',
+ 'stream', '-S', 'slot1', '-R',
],
'pg_basebackup with replication slot and -R runs');
like(
@@ -813,7 +812,7 @@ rmtree("$tempdir/backup_corrupt3");
# do not verify checksums, should return ok
$node->command_ok(
[
-		@pg_basebackup_defs,        '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backup_corrupt4", '--no-verify-checksums',
],
'pg_basebackup with -k does not report checksum mismatch');
@@ -832,24 +831,24 @@ SKIP:
$node->command_ok(
[
-		@pg_basebackup_defs,    '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backup_gzip", '--compress',
-		'1',                    '--format',
+ '1', '--format',
't'
],
'pg_basebackup with --compress');
$node->command_ok(
[
-		@pg_basebackup_defs,     '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backup_gzip2", '--gzip',
-		'--format',              't'
+ '--format', 't'
],
'pg_basebackup with --gzip');
$node->command_ok(
[
-		@pg_basebackup_defs,     '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backup_gzip3", '--compress',
-		'gzip:1',                '--format',
+ 'gzip:1', '--format',
't'
],
'pg_basebackup with --compress=gzip:1');
@@ -895,8 +894,8 @@ my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
my $sigchld_bb = IPC::Run::start(
[
@pg_basebackup_defs, '--wal-method=stream',
-		'-D',                "$tempdir/sigchld",
-		'--max-rate=32',     '-d',
+ '-D', "$tempdir/sigchld",
+ '--max-rate=32', '-d',
$node->connstr('postgres')
],
'<',
@@ -916,16 +915,17 @@ is( $node->poll_query_until(
"Walsender killed");
ok( pump_until(
-		$sigchld_bb,         $sigchld_bb_timeout,
+ $sigchld_bb, $sigchld_bb_timeout,
\$sigchld_bb_stderr, qr/background process terminated unexpectedly/),
'background process exit message');
$sigchld_bb->finish();
# Test that we can back up an in-place tablespace
$node->safe_psql('postgres',
- "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';");
+ "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';"
+);
$node->safe_psql('postgres',
- "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
+ "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
. "INSERT INTO test2 VALUES (1234);");
my $tblspc_oid = $node->safe_psql('postgres',
"SELECT oid FROM pg_tablespace WHERE spcname = 'tblspc2';");
diff --git a/src/bin/pg_basebackup/t/020_pg_receivewal.pl b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
index 50ac4f94ec..374f090a8b 100644
--- a/src/bin/pg_basebackup/t/020_pg_receivewal.pl
+++ b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
@@ -66,8 +66,8 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (1);');
# compression involved.
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--synchronous', '--no-loop'
+ 'pg_receivewal', '-D', $stream_dir, '--verbose',
+ '--endpos', $nextlsn, '--synchronous', '--no-loop'
],
'streaming some WAL with --synchronous');
@@ -92,8 +92,8 @@ SKIP:
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--compress', 'gzip:1',
+ 'pg_receivewal', '-D', $stream_dir, '--verbose',
+ '--endpos', $nextlsn, '--compress', 'gzip:1',
'--no-loop'
],
"streaming some WAL using ZLIB compression");
@@ -145,8 +145,8 @@ SKIP:
# Stream up to the given position.
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--no-loop', '--compress',
+ 'pg_receivewal', '-D', $stream_dir, '--verbose',
+ '--endpos', $nextlsn, '--no-loop', '--compress',
'lz4'
],
'streaming some WAL using --compress=lz4');
@@ -191,8 +191,8 @@ chomp($nextlsn);
$primary->psql('postgres', 'INSERT INTO test_table VALUES (4);');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--no-loop'
+ 'pg_receivewal', '-D', $stream_dir, '--verbose',
+ '--endpos', $nextlsn, '--no-loop'
],
"streaming some WAL");
@@ -247,17 +247,17 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (6);');
# Check case where the slot does not exist.
$primary->command_fails_like(
[
- 'pg_receivewal', '-D', $slot_dir, '--slot',
+ 'pg_receivewal', '-D', $slot_dir, '--slot',
'nonexistentslot', '-n', '--no-sync', '--verbose',
- '--endpos', $nextlsn
+ '--endpos', $nextlsn
],
qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/,
'pg_receivewal fails with non-existing slot');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $slot_dir, '--slot',
- $slot_name, '-n', '--no-sync', '--verbose',
- '--endpos', $nextlsn
+ 'pg_receivewal', '-D', $slot_dir, '--slot',
+ $slot_name, '-n', '--no-sync', '--verbose',
+ '--endpos', $nextlsn
],
"WAL streamed from the slot's restart_lsn");
ok(-e "$slot_dir/$walfile_streamed",
@@ -281,7 +281,7 @@ $standby->psql(
$primary->wait_for_catchup($standby);
# Get a walfilename from before the promotion to make sure it is archived
# after promotion
-my $standby_slot         = $standby->slot($archive_slot);
+my $standby_slot = $standby->slot($archive_slot);
my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
# pg_walfile_name() is not supported while in recovery, so use the primary
@@ -311,9 +311,9 @@ mkdir($timeline_dir);
$standby->command_ok(
[
- 'pg_receivewal', '-D', $timeline_dir, '--verbose',
- '--endpos', $nextlsn, '--slot', $archive_slot,
- '--no-sync', '-n'
+ 'pg_receivewal', '-D', $timeline_dir, '--verbose',
+ '--endpos', $nextlsn, '--slot', $archive_slot,
+ '--no-sync', '-n'
],
"Stream some wal after promoting, resuming from the slot's position");
ok(-e "$timeline_dir/$walfile_before_promotion",
diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
index 6947d12ca8..62dca5b67a 100644
--- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
+++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
@@ -34,16 +34,16 @@ $node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ],
'pg_recvlogical needs an action');
$node->command_fails(
[
-		'pg_recvlogical',           '-S',
-		'test',                     '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--start'
],
'no destination file');
$node->command_ok(
[
-		'pg_recvlogical',           '-S',
-		'test',                     '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--create-slot'
],
'slot created');
@@ -67,8 +67,8 @@ $node->command_ok(
$node->command_ok(
[
-		'pg_recvlogical',           '-S',
-		'test',                     '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--drop-slot'
],
'slot dropped');
@@ -76,8 +76,8 @@ $node->command_ok(
#test with two-phase option enabled
$node->command_ok(
[
-		'pg_recvlogical',           '-S',
-		'test',                     '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--create-slot',
'--two-phase'
],
@@ -94,12 +94,12 @@ chomp($nextlsn);
$node->command_fails(
[
-		'pg_recvlogical',           '-S',
-		'test',                     '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--start',
-		'--endpos',                 "$nextlsn",
-		'--two-phase',              '--no-loop',
-		'-f',                       '-'
+ '--endpos', "$nextlsn",
+ '--two-phase', '--no-loop',
+ '-f', '-'
],
'incorrect usage');
diff --git a/src/bin/pg_basebackup/walmethods.c b/src/bin/pg_basebackup/walmethods.c
index 1934b7dd46..376ddf72b7 100644
--- a/src/bin/pg_basebackup/walmethods.c
+++ b/src/bin/pg_basebackup/walmethods.c
@@ -44,14 +44,14 @@ static Walfile *dir_open_for_write(WalWriteMethod *wwmethod,
const char *pathname,
const char *temp_suffix,
size_t pad_to_size);
-static int dir_close(Walfile *f, WalCloseMethod method);
+static int dir_close(Walfile *f, WalCloseMethod method);
static bool dir_existsfile(WalWriteMethod *wwmethod, const char *pathname);
static ssize_t dir_get_file_size(WalWriteMethod *wwmethod,
const char *pathname);
static char *dir_get_file_name(WalWriteMethod *wwmethod,
const char *pathname, const char *temp_suffix);
static ssize_t dir_write(Walfile *f, const void *buf, size_t count);
-static int dir_sync(Walfile *f);
+static int dir_sync(Walfile *f);
static bool dir_finish(WalWriteMethod *wwmethod);
static void dir_free(WalWriteMethod *wwmethod);
@@ -72,7 +72,7 @@ const WalWriteMethodOps WalDirectoryMethodOps = {
*/
typedef struct DirectoryMethodData
{
- WalWriteMethod base;
+ WalWriteMethod base;
char *basedir;
} DirectoryMethodData;
@@ -660,14 +660,14 @@ static Walfile *tar_open_for_write(WalWriteMethod *wwmethod,
const char *pathname,
const char *temp_suffix,
size_t pad_to_size);
-static int tar_close(Walfile *f, WalCloseMethod method);
+static int tar_close(Walfile *f, WalCloseMethod method);
static bool tar_existsfile(WalWriteMethod *wwmethod, const char *pathname);
static ssize_t tar_get_file_size(WalWriteMethod *wwmethod,
const char *pathname);
static char *tar_get_file_name(WalWriteMethod *wwmethod,
const char *pathname, const char *temp_suffix);
static ssize_t tar_write(Walfile *f, const void *buf, size_t count);
-static int tar_sync(Walfile *f);
+static int tar_sync(Walfile *f);
static bool tar_finish(WalWriteMethod *wwmethod);
static void tar_free(WalWriteMethod *wwmethod);
@@ -693,7 +693,7 @@ typedef struct TarMethodFile
typedef struct TarMethodData
{
- WalWriteMethod base;
+ WalWriteMethod base;
char *tarfilename;
int fd;
TarMethodFile *currentfile;
@@ -1353,7 +1353,7 @@ CreateWalTarMethod(const char *tarbase,
{
TarMethodData *wwmethod;
const char *suffix = (compression_algorithm == PG_COMPRESSION_GZIP) ?
- ".tar.gz" : ".tar";
+ ".tar.gz" : ".tar";
wwmethod = pg_malloc0(sizeof(TarMethodData));
*((const WalWriteMethodOps **) &wwmethod->base.ops) =
diff --git a/src/bin/pg_basebackup/walmethods.h b/src/bin/pg_basebackup/walmethods.h
index d7284c08ce..54a22fe607 100644
--- a/src/bin/pg_basebackup/walmethods.h
+++ b/src/bin/pg_basebackup/walmethods.h
@@ -19,11 +19,12 @@ typedef struct
WalWriteMethod *wwmethod;
off_t currpos;
char *pathname;
+
/*
* MORE DATA FOLLOWS AT END OF STRUCT
*
- * Each WalWriteMethod is expected to embed this as the first member of
- * a larger struct with method-specific fields following.
+ * Each WalWriteMethod is expected to embed this as the first member of a
+ * larger struct with method-specific fields following.
*/
} Walfile;
@@ -45,7 +46,7 @@ typedef struct WalWriteMethodOps
* automatically renamed in close(). If pad_to_size is specified, the file
* will be padded with NUL up to that size, if supported by the Walmethod.
*/
- Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size);
+ Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size);
/*
* Close an open Walfile, using one or more methods for handling automatic
@@ -107,11 +108,12 @@ struct WalWriteMethod
bool sync;
const char *lasterrstring; /* if set, takes precedence over lasterrno */
int lasterrno;
+
/*
* MORE DATA FOLLOWS AT END OF STRUCT
*
- * Each WalWriteMethod is expected to embed this as the first member of
- * a larger struct with method-specific fields following.
+ * Each WalWriteMethod is expected to embed this as the first member of a
+ * larger struct with method-specific fields following.
*/
};
diff --git a/src/bin/pg_checksums/t/002_actions.pl b/src/bin/pg_checksums/t/002_actions.pl
index 2316f611b2..2d63182d59 100644
--- a/src/bin/pg_checksums/t/002_actions.pl
+++ b/src/bin/pg_checksums/t/002_actions.pl
@@ -18,10 +18,10 @@ use Test::More;
# at the end.
sub check_relation_corruption
{
-	my $node       = shift;
-	my $table      = shift;
+ my $node = shift;
+ my $table = shift;
my $tablespace = shift;
-	my $pgdata     = $node->data_dir;
+ my $pgdata = $node->data_dir;
# Create table and discover its filesystem location.
$node->safe_psql(
@@ -44,8 +44,8 @@ sub check_relation_corruption
command_ok(
[
'pg_checksums', '--check',
-			'-D',           $pgdata,
-			'--filenode',   $relfilenode_corrupted
+ '-D', $pgdata,
+ '--filenode', $relfilenode_corrupted
],
"succeeds for single relfilenode on tablespace $tablespace with offline cluster"
);
@@ -57,8 +57,8 @@ sub check_relation_corruption
$node->command_checks_all(
[
'pg_checksums', '--check',
-			'-D',           $pgdata,
-			'--filenode',   $relfilenode_corrupted
+ '-D', $pgdata,
+ '--filenode', $relfilenode_corrupted
],
1,
[qr/Bad checksums:.*1/],
@@ -97,21 +97,21 @@ command_like(
'checksums disabled in control file');
# These are correct but empty files, so they should pass through.
-append_to_file "$pgdata/global/99999",          "";
-append_to_file "$pgdata/global/99999.123",      "";
-append_to_file "$pgdata/global/99999_fsm",      "";
-append_to_file "$pgdata/global/99999_init",     "";
-append_to_file "$pgdata/global/99999_vm",       "";
+append_to_file "$pgdata/global/99999", "";
+append_to_file "$pgdata/global/99999.123", "";
+append_to_file "$pgdata/global/99999_fsm", "";
+append_to_file "$pgdata/global/99999_init", "";
+append_to_file "$pgdata/global/99999_vm", "";
append_to_file "$pgdata/global/99999_init.123", "";
-append_to_file "$pgdata/global/99999_fsm.123",  "";
-append_to_file "$pgdata/global/99999_vm.123",   "";
+append_to_file "$pgdata/global/99999_fsm.123", "";
+append_to_file "$pgdata/global/99999_vm.123", "";
# These are temporary files and folders with dummy contents, which
# should be ignored by the scan.
append_to_file "$pgdata/global/pgsql_tmp_123", "foo";
mkdir "$pgdata/global/pgsql_tmp";
-append_to_file "$pgdata/global/pgsql_tmp/1.1",         "foo";
-append_to_file "$pgdata/global/pg_internal.init",      "foo";
+append_to_file "$pgdata/global/pgsql_tmp/1.1", "foo";
+append_to_file "$pgdata/global/pg_internal.init", "foo";
append_to_file "$pgdata/global/pg_internal.init.123", "foo";
# Enable checksums.
@@ -197,7 +197,7 @@ command_fails([ 'pg_checksums', '--check', '-D', $pgdata ],
check_relation_corruption($node, 'corrupt1', 'pg_default');
# Create tablespace to check corruptions in a non-default tablespace.
-my $basedir        = $node->basedir;
+my $basedir = $node->basedir;
my $tablespace_dir = "$basedir/ts_corrupt_dir";
mkdir($tablespace_dir);
$node->safe_psql('postgres',
@@ -208,8 +208,8 @@ check_relation_corruption($node, 'corrupt2', 'ts_corrupt');
# correctly-named relation files filled with some corrupted data.
sub fail_corrupt
{
-	my $node   = shift;
-	my $file   = shift;
+ my $node = shift;
+ my $file = shift;
my $pgdata = $node->data_dir;
# Create the file with some dummy data in it.
diff --git a/src/bin/pg_controldata/t/001_pg_controldata.pl b/src/bin/pg_controldata/t/001_pg_controldata.pl
index a502bce3c9..0c641036e9 100644
--- a/src/bin/pg_controldata/t/001_pg_controldata.pl
+++ b/src/bin/pg_controldata/t/001_pg_controldata.pl
@@ -24,7 +24,7 @@ command_like([ 'pg_controldata', $node->data_dir ],
# check with a corrupted pg_control
my $pg_control = $node->data_dir . '/global/pg_control';
-my $size       = (stat($pg_control))[7];
+my $size = (stat($pg_control))[7];
open my $fh, '>', $pg_control or BAIL_OUT($!);
binmode $fh;
diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl
index 11bc805354..f019fe1703 100644
--- a/src/bin/pg_ctl/t/001_start_stop.pl
+++ b/src/bin/pg_ctl/t/001_start_stop.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir       = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
my $tempdir_short = PostgreSQL::Test::Utils::tempdir_short;
program_help_ok('pg_ctl');
diff --git a/src/bin/pg_ctl/t/004_logrotate.pl b/src/bin/pg_ctl/t/004_logrotate.pl
index 10815a60d4..8d48e56ee9 100644
--- a/src/bin/pg_ctl/t/004_logrotate.pl
+++ b/src/bin/pg_ctl/t/004_logrotate.pl
@@ -14,8 +14,8 @@ use Time::HiRes qw(usleep);
sub fetch_file_name
{
my $logfiles = shift;
-	my $format   = shift;
-	my @lines    = split(/\n/, $logfiles);
+ my $format = shift;
+ my @lines = split(/\n/, $logfiles);
my $filename = undef;
foreach my $line (@lines)
{
@@ -33,11 +33,11 @@ sub check_log_pattern
{
local $Test::Builder::Level = $Test::Builder::Level + 1;
-	my $format   = shift;
+ my $format = shift;
my $logfiles = shift;
-	my $pattern  = shift;
-	my $node     = shift;
-	my $lfname   = fetch_file_name($logfiles, $format);
+ my $pattern = shift;
+ my $node = shift;
+ my $lfname = fetch_file_name($logfiles, $format);
my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
@@ -100,8 +100,8 @@ csvlog log/postgresql-.*csv
jsonlog log/postgresql-.*json$|,
'current_logfiles is sane');
-check_log_pattern('stderr',  $current_logfiles, 'division by zero', $node);
-check_log_pattern('csvlog',  $current_logfiles, 'division by zero', $node);
+check_log_pattern('stderr', $current_logfiles, 'division by zero', $node);
+check_log_pattern('csvlog', $current_logfiles, 'division by zero', $node);
check_log_pattern('jsonlog', $current_logfiles, 'division by zero', $node);
# Sleep 2 seconds and ask for log rotation; this should result in
@@ -131,8 +131,8 @@ jsonlog log/postgresql-.*json$|,
# Verify that log output gets to this file, too
$node->psql('postgres', 'fee fi fo fum');
-check_log_pattern('stderr',  $new_current_logfiles, 'syntax error', $node);
-check_log_pattern('csvlog',  $new_current_logfiles, 'syntax error', $node);
+check_log_pattern('stderr', $new_current_logfiles, 'syntax error', $node);
+check_log_pattern('csvlog', $new_current_logfiles, 'syntax error', $node);
check_log_pattern('jsonlog', $new_current_logfiles, 'syntax error', $node);
$node->stop();
diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c
index f97fb1aaff..4fee6e2434 100644
--- a/src/bin/pg_dump/compress_io.c
+++ b/src/bin/pg_dump/compress_io.c
@@ -87,8 +87,8 @@
char *
supports_compression(const pg_compress_specification compression_spec)
{
- const pg_compress_algorithm algorithm = compression_spec.algorithm;
- bool supported = false;
+ const pg_compress_algorithm algorithm = compression_spec.algorithm;
+ bool supported = false;
if (algorithm == PG_COMPRESSION_NONE)
supported = true;
diff --git a/src/bin/pg_dump/compress_lz4.c b/src/bin/pg_dump/compress_lz4.c
index 8d7b28e510..52214b31ee 100644
--- a/src/bin/pg_dump/compress_lz4.c
+++ b/src/bin/pg_dump/compress_lz4.c
@@ -44,8 +44,8 @@ typedef struct LZ4State
LZ4F_preferences_t prefs;
- LZ4F_compressionContext_t ctx;
- LZ4F_decompressionContext_t dtx;
+ LZ4F_compressionContext_t ctx;
+ LZ4F_decompressionContext_t dtx;
/*
* Used by the Stream API's lazy initialization.
@@ -148,8 +148,8 @@ ReadDataFromArchiveLZ4(ArchiveHandle *AH, CompressorState *cs)
char *outbuf;
char *readbuf;
LZ4F_decompressionContext_t ctx = NULL;
- LZ4F_decompressOptions_t dec_opt;
- LZ4F_errorCode_t status;
+ LZ4F_decompressOptions_t dec_opt;
+ LZ4F_errorCode_t status;
memset(&dec_opt, 0, sizeof(dec_opt));
status = LZ4F_createDecompressionContext(&ctx, LZ4F_VERSION);
@@ -651,8 +651,8 @@ LZ4Stream_gets(char *ptr, int size, CompressFileHandle *CFH)
return NULL;
/*
- * Our caller expects the return string to be NULL terminated
- * and we know that ret is greater than zero.
+ * Our caller expects the return string to be NULL terminated and we know
+ * that ret is greater than zero.
*/
ptr[ret - 1] = '\0';
diff --git a/src/bin/pg_dump/compress_zstd.c b/src/bin/pg_dump/compress_zstd.c
index 9fbdc0a87d..82e3310100 100644
--- a/src/bin/pg_dump/compress_zstd.c
+++ b/src/bin/pg_dump/compress_zstd.c
@@ -82,8 +82,8 @@ _ZstdCStreamParams(pg_compress_specification compress)
if (compress.options & PG_COMPRESSION_OPTION_LONG_DISTANCE)
_Zstd_CCtx_setParam_or_die(cstream,
- ZSTD_c_enableLongDistanceMatching,
- compress.long_distance, "long");
+ ZSTD_c_enableLongDistanceMatching,
+ compress.long_distance, "long");
return cstream;
}
diff --git a/src/bin/pg_dump/compress_zstd.h b/src/bin/pg_dump/compress_zstd.h
index 2aaa6b100b..d0ab1351fd 100644
--- a/src/bin/pg_dump/compress_zstd.h
+++ b/src/bin/pg_dump/compress_zstd.h
@@ -18,8 +18,8 @@
#include "compress_io.h"
extern void InitCompressorZstd(CompressorState *cs,
- const pg_compress_specification compression_spec);
+ const pg_compress_specification compression_spec);
extern void InitCompressFileHandleZstd(CompressFileHandle *CFH,
- const pg_compress_specification compression_spec);
+ const pg_compress_specification compression_spec);
-#endif /* COMPRESS_ZSTD_H */
+#endif /* COMPRESS_ZSTD_H */
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index d518349e10..39ebcfec32 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -386,10 +386,11 @@ RestoreArchive(Archive *AHX)
{
if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
{
- char *errmsg = supports_compression(AH->compression_spec);
+ char *errmsg = supports_compression(AH->compression_spec);
+
if (errmsg)
pg_fatal("cannot restore from compressed archive (%s)",
- errmsg);
+ errmsg);
else
break;
}
@@ -2985,11 +2986,11 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
if (!te->hadDumper)
{
/*
- * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then
- * it is considered a data entry. We don't need to check for the
- * BLOBS entry or old-style BLOB COMMENTS, because they will have
- * hadDumper = true ... but we do need to check new-style BLOB ACLs,
- * comments, etc.
+ * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it
+ * is considered a data entry. We don't need to check for the BLOBS
+ * entry or old-style BLOB COMMENTS, because they will have hadDumper
+ * = true ... but we do need to check new-style BLOB ACLs, comments,
+ * etc.
*/
if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
strcmp(te->desc, "BLOB") == 0 ||
@@ -3480,6 +3481,7 @@ _getObjectDescription(PQExpBuffer buf, const TocEntry *te)
{
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
}
+
/*
* These object types require additional decoration. Fortunately, the
* information needed is exactly what's in the DROP command.
@@ -3639,6 +3641,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
initPQExpBuffer(&temp);
_getObjectDescription(&temp, te);
+
/*
* If _getObjectDescription() didn't fill the buffer, then there is no
* owner.
@@ -3802,7 +3805,7 @@ ReadHead(ArchiveHandle *AH)
if (errmsg)
{
pg_log_warning("archive is compressed, but this installation does not support compression (%s) -- no data will be available",
- errmsg);
+ errmsg);
pg_free(errmsg);
}
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index babd23b4eb..db5fb43bae 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -684,10 +684,10 @@ _LoadLOs(ArchiveHandle *AH)
tarClose(AH, th);
/*
- * Once we have found the first LO, stop at the first non-LO
- * entry (which will be 'blobs.toc'). This coding would eat all
- * the rest of the archive if there are no LOs ... but this
- * function shouldn't be called at all in that case.
+ * Once we have found the first LO, stop at the first non-LO entry
+ * (which will be 'blobs.toc'). This coding would eat all the
+ * rest of the archive if there are no LOs ... but this function
+ * shouldn't be called at all in that case.
*/
if (foundLO)
break;
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index f325045f9f..3af97a6039 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -756,9 +756,9 @@ main(int argc, char **argv)
pg_fatal("%s", error_detail);
/*
- * Disable support for zstd workers for now - these are based on threading,
- * and it's unclear how it interacts with parallel dumps on platforms where
- * that relies on threads too (e.g. Windows).
+ * Disable support for zstd workers for now - these are based on
+ * threading, and it's unclear how it interacts with parallel dumps on
+ * platforms where that relies on threads too (e.g. Windows).
*/
if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
@@ -879,8 +879,8 @@ main(int argc, char **argv)
/*
* Dumping LOs is the default for dumps where an inclusion switch is not
* used (an "include everything" dump). -B can be used to exclude LOs
- * from those dumps. -b can be used to include LOs even when an
- * inclusion switch is used.
+ * from those dumps. -b can be used to include LOs even when an inclusion
+ * switch is used.
*
* -s means "schema only" and LOs are data, not schema, so we never
* include LOs when -s is used.
@@ -915,8 +915,8 @@ main(int argc, char **argv)
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
*
- * However, we do need to collect LO information as there may be
- * comments or other information on LOs that we do need to dump out.
+ * However, we do need to collect LO information as there may be comments
+ * or other information on LOs that we do need to dump out.
*/
if (dopt.outputLOs || dopt.binary_upgrade)
getLOs(fout);
@@ -3323,8 +3323,8 @@ dumpDatabase(Archive *fout)
appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
for (int i = 0; i < PQntuples(lo_res); ++i)
{
- Oid oid;
- RelFileNumber relfilenumber;
+ Oid oid;
+ RelFileNumber relfilenumber;
appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n"
"SET relfrozenxid = '%u', relminmxid = '%u'\n"
@@ -3590,8 +3590,8 @@ getLOs(Archive *fout)
loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
/*
- * In binary-upgrade mode for LOs, we do *not* dump out the LO
- * data, as it will be copied by pg_upgrade, which simply copies the
+ * In binary-upgrade mode for LOs, we do *not* dump out the LO data,
+ * as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
* pg_largeobject_metadata, after the dump is restored.
@@ -14828,7 +14828,10 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
if (dopt->no_security_labels)
return;
- /* Security labels are schema not data ... except large object labels are data */
+ /*
+ * Security labels are schema not data ... except large object labels are
+ * data
+ */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
@@ -15161,7 +15164,7 @@ dumpTable(Archive *fout, const TableInfo *tbinfo)
if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL)
{
const char *objtype =
- (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
+ (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
tableAclDumpId =
dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId,
@@ -16632,10 +16635,12 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
{
appendPQExpBufferStr(q,
coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
+
/*
* PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
* indexes. Being able to create this was fixed, but we need to
- * make the index distinct in order to be able to restore the dump.
+ * make the index distinct in order to be able to restore the
+ * dump.
*/
if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
appendPQExpBufferStr(q, " NULLS NOT DISTINCT");
@@ -17857,7 +17862,7 @@ processExtensionTables(Archive *fout, ExtensionInfo extinfo[],
TableInfo *configtbl;
Oid configtbloid = atooid(extconfigarray[j]);
bool dumpobj =
- curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
+ curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
configtbl = findTableByOid(configtbloid);
if (configtbl == NULL)
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index c5647d059b..3627b69e2a 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -949,7 +949,7 @@ static void
dumpRoleMembership(PGconn *conn)
{
PQExpBuffer buf = createPQExpBuffer();
- PQExpBuffer optbuf = createPQExpBuffer();
+ PQExpBuffer optbuf = createPQExpBuffer();
PGresult *res;
int start = 0,
end,
@@ -996,8 +996,8 @@ dumpRoleMembership(PGconn *conn)
/*
* We can't dump these GRANT commands in arbitrary order, because a role
- * that is named as a grantor must already have ADMIN OPTION on the
- * role for which it is granting permissions, except for the bootstrap
+ * that is named as a grantor must already have ADMIN OPTION on the role
+ * for which it is granting permissions, except for the bootstrap
* superuser, who can always be named as the grantor.
*
* We handle this by considering these grants role by role. For each role,
@@ -1005,8 +1005,8 @@ dumpRoleMembership(PGconn *conn)
* superuser. Every time we grant ADMIN OPTION on the role to some user,
* that user also becomes an allowable grantor. We make repeated passes
* over the grants for the role, each time dumping those whose grantors
- * are allowable and which we haven't done yet. Eventually this should
- * let us dump all the grants.
+ * are allowable and which we haven't done yet. Eventually this should let
+ * us dump all the grants.
*/
total = PQntuples(res);
while (start < total)
@@ -1021,7 +1021,7 @@ dumpRoleMembership(PGconn *conn)
/* All memberships for a single role should be adjacent. */
for (end = start; end < total; ++end)
{
- char *otherrole;
+ char *otherrole;
otherrole = PQgetvalue(res, end, 0);
if (strcmp(role, otherrole) != 0)
@@ -1105,7 +1105,7 @@ dumpRoleMembership(PGconn *conn)
appendPQExpBufferStr(optbuf, "ADMIN OPTION");
if (dump_grant_options)
{
- char *inherit_option;
+ char *inherit_option;
if (optbuf->data[0] != '\0')
appendPQExpBufferStr(optbuf, ", ");
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index d66f3b42ea..387c5d3afb 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -53,10 +53,10 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir;
# database and then pg_dump *that* database (or something along
# those lines) to validate that part of the process.
-my $supports_icu = ($ENV{with_icu} eq 'yes');
+my $supports_icu = ($ENV{with_icu} eq 'yes');
my $supports_gzip = check_pg_config("#define HAVE_LIBZ 1");
-my $supports_lz4 = check_pg_config("#define USE_LZ4 1");
-my $supports_zstd = check_pg_config("#define USE_ZSTD 1");
+my $supports_lz4 = check_pg_config("#define USE_LZ4 1");
+my $supports_zstd = check_pg_config("#define USE_ZSTD 1");
my %pgdump_runs = (
binary_upgrade => {
@@ -79,10 +79,10 @@ my %pgdump_runs = (
# Do not use --no-sync to give test coverage for data sync.
compression_gzip_custom => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'gzip',
-		dump_cmd       => [
-			'pg_dump',      '--format=custom',
+ dump_cmd => [
+ 'pg_dump', '--format=custom',
'--compress=1', "--file=$tempdir/compression_gzip_custom.dump",
'postgres',
],
@@ -96,24 +96,24 @@ my %pgdump_runs = (
'pg_restore', '-l', "$tempdir/compression_gzip_custom.dump",
],
expected => qr/Compression: gzip/,
-			name     => 'data content is gzip-compressed'
+ name => 'data content is gzip-compressed'
},
},
# Do not use --no-sync to give test coverage for data sync.
compression_gzip_dir => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'gzip',
-		dump_cmd       => [
-			'pg_dump',                              '--jobs=2',
-			'--format=directory',                   '--compress=gzip:1',
+ dump_cmd => [
+ 'pg_dump', '--jobs=2',
+ '--format=directory', '--compress=gzip:1',
"--file=$tempdir/compression_gzip_dir", 'postgres',
],
# Give coverage for manually compressed blob.toc files during
# restore.
compress_cmd => {
program => $ENV{'GZIP_PROGRAM'},
-			args    => [ '-f', "$tempdir/compression_gzip_dir/blobs.toc", ],
+ args => [ '-f', "$tempdir/compression_gzip_dir/blobs.toc", ],
},
# Verify that only data files were compressed
glob_patterns => [
@@ -128,25 +128,25 @@ my %pgdump_runs = (
},
compression_gzip_plain => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'gzip',
-		dump_cmd       => [
+ dump_cmd => [
'pg_dump', '--format=plain', '-Z1',
"--file=$tempdir/compression_gzip_plain.sql.gz", 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
program => $ENV{'GZIP_PROGRAM'},
-			args    => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ],
+ args => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ],
},
},
# Do not use --no-sync to give test coverage for data sync.
compression_lz4_custom => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'lz4',
-		dump_cmd       => [
-			'pg_dump',        '--format=custom',
+ dump_cmd => [
+ 'pg_dump', '--format=custom',
'--compress=lz4', "--file=$tempdir/compression_lz4_custom.dump",
'postgres',
],
@@ -156,10 +156,8 @@ my %pgdump_runs = (
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
- command => [
- 'pg_restore',
- '-l', "$tempdir/compression_lz4_custom.dump",
- ],
+ command =>
+ [ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ],
expected => qr/Compression: lz4/,
name => 'data content is lz4 compressed'
},
@@ -167,18 +165,18 @@ my %pgdump_runs = (
# Do not use --no-sync to give test coverage for data sync.
compression_lz4_dir => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'lz4',
-		dump_cmd       => [
-			'pg_dump',                             '--jobs=2',
-			'--format=directory',                  '--compress=lz4:1',
+ dump_cmd => [
+ 'pg_dump', '--jobs=2',
+ '--format=directory', '--compress=lz4:1',
"--file=$tempdir/compression_lz4_dir", 'postgres',
],
# Give coverage for manually compressed blob.toc files during
# restore.
compress_cmd => {
program => $ENV{'LZ4'},
-			args    => [
+ args => [
'-z', '-f', '--rm',
"$tempdir/compression_lz4_dir/blobs.toc",
"$tempdir/compression_lz4_dir/blobs.toc.lz4",
@@ -187,7 +185,7 @@ my %pgdump_runs = (
# Verify that data files were compressed
glob_patterns => [
"$tempdir/compression_lz4_dir/toc.dat",
- "$tempdir/compression_lz4_dir/*.dat.lz4",
+ "$tempdir/compression_lz4_dir/*.dat.lz4",
],
restore_cmd => [
'pg_restore', '--jobs=2',
@@ -197,16 +195,16 @@ my %pgdump_runs = (
},
compression_lz4_plain => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'lz4',
-		dump_cmd       => [
+ dump_cmd => [
'pg_dump', '--format=plain', '--compress=lz4',
"--file=$tempdir/compression_lz4_plain.sql.lz4", 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
program => $ENV{'LZ4'},
-			args    => [
+ args => [
'-d', '-f',
"$tempdir/compression_lz4_plain.sql.lz4",
"$tempdir/compression_lz4_plain.sql",
@@ -215,10 +213,10 @@ my %pgdump_runs = (
},
compression_zstd_custom => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'zstd',
-		dump_cmd       => [
-			'pg_dump',         '--format=custom',
+ dump_cmd => [
+ 'pg_dump', '--format=custom',
'--compress=zstd', "--file=$tempdir/compression_zstd_custom.dump",
'postgres',
],
@@ -229,8 +227,7 @@ my %pgdump_runs = (
],
command_like => {
command => [
- 'pg_restore',
- '-l', "$tempdir/compression_zstd_custom.dump",
+ 'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump",
],
expected => qr/Compression: zstd/,
name => 'data content is zstd compressed'
@@ -238,27 +235,27 @@ my %pgdump_runs = (
},
compression_zstd_dir => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'zstd',
-		dump_cmd       => [
-			'pg_dump',                              '--jobs=2',
-			'--format=directory',                   '--compress=zstd:1',
+ dump_cmd => [
+ 'pg_dump', '--jobs=2',
+ '--format=directory', '--compress=zstd:1',
"--file=$tempdir/compression_zstd_dir", 'postgres',
],
# Give coverage for manually compressed blob.toc files during
# restore.
compress_cmd => {
program => $ENV{'ZSTD'},
-			args    => [
- '-z', '-f', '--rm',
- "$tempdir/compression_zstd_dir/blobs.toc",
+ args => [
+ '-z', '-f',
+ '--rm', "$tempdir/compression_zstd_dir/blobs.toc",
"-o", "$tempdir/compression_zstd_dir/blobs.toc.zst",
],
},
# Verify that data files were compressed
glob_patterns => [
- "$tempdir/compression_zstd_dir/toc.dat",
- "$tempdir/compression_zstd_dir/*.dat.zst",
+ "$tempdir/compression_zstd_dir/toc.dat",
+ "$tempdir/compression_zstd_dir/*.dat.zst",
],
restore_cmd => [
'pg_restore', '--jobs=2',
@@ -269,19 +266,19 @@ my %pgdump_runs = (
# Exercise long mode for test coverage
compression_zstd_plain => {
-		test_key       => 'compression',
+ test_key => 'compression',
compile_option => 'zstd',
-		dump_cmd       => [
+ dump_cmd => [
'pg_dump', '--format=plain', '--compress=zstd:long',
"--file=$tempdir/compression_zstd_plain.sql.zst", 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
program => $ENV{'ZSTD'},
-			args    => [
+ args => [
'-d', '-f',
- "$tempdir/compression_zstd_plain.sql.zst",
- "-o", "$tempdir/compression_zstd_plain.sql",
+ "$tempdir/compression_zstd_plain.sql.zst", "-o",
+ "$tempdir/compression_zstd_plain.sql",
],
},
},
@@ -308,9 +305,9 @@ my %pgdump_runs = (
},
column_inserts => {
dump_cmd => [
-			'pg_dump',                            '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/column_inserts.sql", '-a',
-			'--column-inserts',                   'postgres',
+ '--column-inserts', 'postgres',
],
},
createdb => {
@@ -339,7 +336,7 @@ my %pgdump_runs = (
defaults => {
dump_cmd => [
'pg_dump', '--no-sync',
-			'-f',      "$tempdir/defaults.sql",
+ '-f', "$tempdir/defaults.sql",
'postgres',
],
},
@@ -385,9 +382,9 @@ my %pgdump_runs = (
command_like => {
command =>
[ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ],
- expected => $supports_gzip ?
- qr/Compression: gzip/ :
- qr/Compression: none/,
+ expected => $supports_gzip
+ ? qr/Compression: gzip/
+ : qr/Compression: none/,
name => 'data content is gzip-compressed by default if available',
},
},
@@ -399,7 +396,7 @@ my %pgdump_runs = (
defaults_dir_format => {
test_key => 'defaults',
dump_cmd => [
-			'pg_dump',                             '-Fd',
+ 'pg_dump', '-Fd',
"--file=$tempdir/defaults_dir_format", 'postgres',
],
restore_cmd => [
@@ -410,17 +407,15 @@ my %pgdump_runs = (
command_like => {
command =>
[ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ],
- expected => $supports_gzip ?
- qr/Compression: gzip/ :
- qr/Compression: none/,
+ expected => $supports_gzip ? qr/Compression: gzip/
+ : qr/Compression: none/,
name => 'data content is gzip-compressed by default',
},
glob_patterns => [
"$tempdir/defaults_dir_format/toc.dat",
"$tempdir/defaults_dir_format/blobs.toc",
- $supports_gzip ?
- "$tempdir/defaults_dir_format/*.dat.gz" :
- "$tempdir/defaults_dir_format/*.dat",
+ $supports_gzip ? "$tempdir/defaults_dir_format/*.dat.gz"
+ : "$tempdir/defaults_dir_format/*.dat",
],
},
@@ -442,7 +437,7 @@ my %pgdump_runs = (
defaults_tar_format => {
test_key => 'defaults',
dump_cmd => [
-			'pg_dump',                                 '-Ft',
+ 'pg_dump', '-Ft',
"--file=$tempdir/defaults_tar_format.tar", 'postgres',
],
restore_cmd => [
@@ -468,7 +463,8 @@ my %pgdump_runs = (
},
exclude_measurement => {
dump_cmd => [
- 'pg_dump', '--no-sync',
+ 'pg_dump',
+ '--no-sync',
"--file=$tempdir/exclude_measurement.sql",
'--exclude-table-and-children=dump_test.measurement',
'postgres',
@@ -496,9 +492,9 @@ my %pgdump_runs = (
},
inserts => {
dump_cmd => [
-			'pg_dump',                     '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/inserts.sql", '-a',
-			'--inserts',                    'postgres',
+ '--inserts', 'postgres',
],
},
pg_dumpall_globals => {
@@ -534,21 +530,20 @@ my %pgdump_runs = (
},
no_large_objects => {
dump_cmd => [
-			'pg_dump',                              '--no-sync',
-			"--file=$tempdir/no_large_objects.sql", '-B',
-			'postgres',
+ 'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql",
+ '-B', 'postgres',
],
},
no_privs => {
dump_cmd => [
-			'pg_dump',                      '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/no_privs.sql", '-x',
'postgres',
],
},
no_owner => {
dump_cmd => [
-			'pg_dump',                      '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/no_owner.sql", '-O',
'postgres',
],
@@ -630,21 +625,21 @@ my %pgdump_runs = (
},
schema_only => {
dump_cmd => [
-			'pg_dump',                         '--format=plain',
+ 'pg_dump', '--format=plain',
"--file=$tempdir/schema_only.sql", '--no-sync',
-			'-s',                              'postgres',
+ '-s', 'postgres',
],
},
section_pre_data => {
dump_cmd => [
-			'pg_dump',            "--file=$tempdir/section_pre_data.sql",
+ 'pg_dump', "--file=$tempdir/section_pre_data.sql",
'--section=pre-data', '--no-sync',
'postgres',
],
},
section_data => {
dump_cmd => [
-			'pg_dump',        "--file=$tempdir/section_data.sql",
+ 'pg_dump', "--file=$tempdir/section_data.sql",
'--section=data', '--no-sync',
'postgres',
],
@@ -705,38 +700,38 @@ my %pgdump_runs = (
# Tests which target the 'dump_test' schema, specifically.
my %dump_test_schema_runs = (
-	only_dump_test_schema          => 1,
-	only_dump_measurement          => 1,
+ only_dump_test_schema => 1,
+ only_dump_measurement => 1,
test_schema_plus_large_objects => 1,);
# Tests which are considered 'full' dumps by pg_dump, but there
# are flags used to exclude specific items (ACLs, LOs, etc).
my %full_runs = (
-	binary_upgrade           => 1,
-	clean                    => 1,
-	clean_if_exists          => 1,
-	compression              => 1,
-	createdb                 => 1,
-	defaults                 => 1,
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ compression => 1,
+ createdb => 1,
+ defaults => 1,
exclude_dump_test_schema => 1,
-	exclude_test_table       => 1,
-	exclude_test_table_data  => 1,
-	exclude_measurement      => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ exclude_measurement => 1,
exclude_measurement_data => 1,
-	no_toast_compression     => 1,
-	no_large_objects         => 1,
-	no_owner                 => 1,
-	no_privs                 => 1,
-	no_table_access_method   => 1,
-	pg_dumpall_dbprivs       => 1,
-	pg_dumpall_exclude       => 1,
-	schema_only              => 1,);
+ no_toast_compression => 1,
+ no_large_objects => 1,
+ no_owner => 1,
+ no_privs => 1,
+ no_table_access_method => 1,
+ pg_dumpall_dbprivs => 1,
+ pg_dumpall_exclude => 1,
+ schema_only => 1,);
# This is where the actual tests are defined.
my %tests = (
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT' => {
create_order => 14,
-		create_sql   => 'ALTER DEFAULT PRIVILEGES
+ create_sql => 'ALTER DEFAULT PRIVILEGES
FOR ROLE regress_dump_test_role IN SCHEMA dump_test
GRANT SELECT ON TABLES TO regress_dump_test_role;',
regexp => qr/^
@@ -748,15 +743,15 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_privs                 => 1,
-		only_dump_measurement    => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT EXECUTE ON FUNCTIONS'
=> {
create_order => 15,
-		create_sql   => 'ALTER DEFAULT PRIVILEGES
+ create_sql => 'ALTER DEFAULT PRIVILEGES
FOR ROLE regress_dump_test_role IN SCHEMA dump_test
GRANT EXECUTE ON FUNCTIONS TO regress_dump_test_role;',
regexp => qr/^
@@ -768,14 +763,14 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_privs                 => 1,
-		only_dump_measurement    => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE' => {
create_order => 55,
-		create_sql   => 'ALTER DEFAULT PRIVILEGES
+ create_sql => 'ALTER DEFAULT PRIVILEGES
FOR ROLE regress_dump_test_role
REVOKE EXECUTE ON FUNCTIONS FROM PUBLIC;',
regexp => qr/^
@@ -790,7 +785,7 @@ my %tests = (
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE SELECT'
=> {
create_order => 56,
-		create_sql   => 'ALTER DEFAULT PRIVILEGES
+ create_sql => 'ALTER DEFAULT PRIVILEGES
FOR ROLE regress_dump_test_role
REVOKE SELECT ON TABLES FROM regress_dump_test_role;',
regexp => qr/^
@@ -812,29 +807,29 @@ my %tests = (
\QNOREPLICATION NOBYPASSRLS;\E
/xm,
like => {
-		pg_dumpall_dbprivs       => 1,
-		pg_dumpall_globals       => 1,
+ pg_dumpall_dbprivs => 1,
+ pg_dumpall_globals => 1,
pg_dumpall_globals_clean => 1,
-		pg_dumpall_exclude       => 1,
+ pg_dumpall_exclude => 1,
},
},
'ALTER COLLATION test0 OWNER TO' => {
-		regexp    => qr/^\QALTER COLLATION public.test0 OWNER TO \E.+;/m,
+ regexp => qr/^\QALTER COLLATION public.test0 OWNER TO \E.+;/m,
collation => 1,
-		like      => { %full_runs, section_pre_data => 1, },
-		unlike    => { %dump_test_schema_runs, no_owner => 1, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { %dump_test_schema_runs, no_owner => 1, },
},
'ALTER FOREIGN DATA WRAPPER dummy OWNER TO' => {
regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .+;/m,
-		like   => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
unlike => { no_owner => 1, },
},
'ALTER SERVER s1 OWNER TO' => {
regexp => qr/^ALTER SERVER s1 OWNER TO .+;/m,
-		like   => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
unlike => { no_owner => 1, },
},
@@ -847,8 +842,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_owner                 => 1,
-		only_dump_measurement    => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
@@ -861,8 +856,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_owner                 => 1,
-		only_dump_measurement    => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
@@ -897,7 +892,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		only_dump_measurement    => 1,
+ only_dump_measurement => 1,
},
},
@@ -910,37 +905,37 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_owner                 => 1,
-		only_dump_measurement    => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
'ALTER PUBLICATION pub1 OWNER TO' => {
regexp => qr/^ALTER PUBLICATION pub1 OWNER TO .+;/m,
-		like   => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => { no_owner => 1, },
},
'ALTER LARGE OBJECT ... OWNER TO' => {
regexp => qr/^ALTER LARGE OBJECT \d+ OWNER TO .+;/m,
-		like   => {
+ like => {
%full_runs,
-			column_inserts                 => 1,
-			data_only                      => 1,
-			inserts                        => 1,
-			section_pre_data               => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_pre_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
no_large_objects => 1,
-			no_owner         => 1,
+ no_owner => 1,
schema_only => 1,
},
},
'ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO' => {
regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .+;/m,
-		like   => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
unlike => { no_owner => 1, },
},
@@ -950,16 +945,16 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_owner                 => 1,
-		only_dump_measurement    => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
'ALTER SCHEMA dump_test_second_schema OWNER TO' => {
regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .+;/m,
-		like   => {
+ like => {
%full_runs,
-			role             => 1,
+ role => 1,
section_pre_data => 1,
},
unlike => { no_owner => 1, },
@@ -970,14 +965,14 @@ my %tests = (
create_sql =>
'ALTER SCHEMA public OWNER TO "regress_quoted \"" role";',
regexp => qr/^ALTER SCHEMA public OWNER TO .+;/m,
-		like   => {
+ like => {
%full_runs, section_pre_data => 1,
},
unlike => { no_owner => 1, },
},
'ALTER SCHEMA public OWNER TO (w/o ACL changes)' => {
-		database     => 'regress_public_owner',
+ database => 'regress_public_owner',
create_order => 100,
create_sql =>
'ALTER SCHEMA public OWNER TO "regress_quoted \"" role";',
@@ -993,12 +988,12 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_pre_data     => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
@@ -1011,18 +1006,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_post_data    => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'ALTER TABLE (partitioned) ADD CONSTRAINT ... FOREIGN KEY' => {
create_order => 4,
-		create_sql   => 'CREATE TABLE dump_test.test_table_fk (
+ create_sql => 'CREATE TABLE dump_test.test_table_fk (
col1 int references dump_test.test_table)
PARTITION BY RANGE (col1);
CREATE TABLE dump_test.test_table_fk_1
@@ -1036,7 +1031,7 @@ my %tests = (
},
unlike => {
exclude_dump_test_schema => 1,
-		only_dump_measurement    => 1,
+ only_dump_measurement => 1,
},
},
@@ -1051,12 +1046,12 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_pre_data     => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
@@ -1071,12 +1066,12 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_pre_data     => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
@@ -1091,12 +1086,12 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_pre_data     => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
@@ -1111,12 +1106,12 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_pre_data     => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
@@ -1128,9 +1123,9 @@ my %tests = (
/xm,
like => {
%full_runs,
-		role             => 1,
+ role => 1,
section_pre_data => 1,
-		binary_upgrade   => 1,
+ binary_upgrade => 1,
only_dump_measurement => 1,
},
unlike => {
@@ -1149,12 +1144,12 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_post_data    => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
@@ -1178,29 +1173,29 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		only_dump_measurement    => 1,
+ only_dump_measurement => 1,
},
},
'ALTER TABLE test_table OWNER TO' => {
regexp => qr/^\QALTER TABLE dump_test.test_table OWNER TO \E.+;/m,
-		like   => {
+ like => {
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_pre_data     => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
-		no_owner                 => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
+ no_owner => 1,
},
},
'ALTER TABLE test_table ENABLE ROW LEVEL SECURITY' => {
create_order => 23,
-		create_sql   => 'ALTER TABLE dump_test.test_table
+ create_sql => 'ALTER TABLE dump_test.test_table
ENABLE ROW LEVEL SECURITY;',
regexp =>
qr/^\QALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;\E/m,
@@ -1208,12 +1203,12 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
-			section_post_data    => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
-		exclude_test_table       => 1,
-		only_dump_measurement    => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
@@ -1224,8 +1219,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_owner                 => 1,
-		only_dump_measurement    => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
@@ -1239,8 +1234,8 @@ my %tests = (
},
unlike => {
exclude_dump_test_schema => 1,
-		no_owner                 => 1,
-		exclude_measurement      => 1,
+ no_owner => 1,
+ exclude_measurement => 1,
},
},
@@ -1249,7 +1244,7 @@ my %tests = (
qr/^\QALTER TABLE dump_test_second_schema.measurement_y2006m2 OWNER TO \E.+;/m,
like => {
%full_runs,
-		role             => 1,
+ role => 1,
section_pre_data => 1,
only_dump_measurement => 1,
},
@@ -1266,8 +1261,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_owner                 => 1,
-		only_dump_measurement    => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
@@ -1278,8 +1273,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		no_owner                 => 1,
-		only_dump_measurement    => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
@@ -1290,10 +1285,10 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
-		only_dump_test_table     => 1,
-		no_owner                 => 1,
-		role                     => 1,
-		only_dump_measurement    => 1,
+ only_dump_test_table => 1,
+ no_owner => 1,
+ role => 1,
+ only_dump_measurement => 1,
},
},
@@ -1302,12 +1297,12 @@ my %tests = (
create_sql =>
'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');',
regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m,
-		like   => {
+ like => {
%full_runs,
-			column_inserts                 => 1,
-			data_only                      => 1,
-			inserts                        => 1,
-			section_pre_data               => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_pre_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
@@ -1325,39 +1320,38 @@ my %tests = (
/xm,
like => {
%full_runs,
-			column_inserts                 => 1,
-			data_only                      => 1,
-			inserts                        => 1,
-			section_data                   => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
binary_upgrade => 1,
no_large_objects => 1,
-			schema_only      => 1,
+ schema_only => 1,
},
},
'LO create (with no data)' => {
- create_sql =>
- 'SELECT pg_catalog.lo_create(0);',
+ create_sql => 'SELECT pg_catalog.lo_create(0);',
regexp => qr/^
\QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n
\QSELECT pg_catalog.lo_close(0);\E
/xm,
-		like   => {
+ like => {
%full_runs,
-			column_inserts                 => 1,
-			data_only                      => 1,
-			inserts                        => 1,
-			section_data                   => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
- binary_upgrade => 1,
- no_large_objects => 1,
- schema_only => 1,
- section_pre_data => 1,
+ binary_upgrade => 1,
+ no_large_objects => 1,
+ schema_only => 1,
+ section_pre_data => 1,
},
},
@@ -1385,16 +1379,16 @@ my %tests = (
},
'COMMENT ON SCHEMA public IS NULL' => {
-		database     => 'regress_public_owner',
+ database => 'regress_public_owner',
create_order => 100,
-		create_sql   => 'COMMENT ON SCHEMA public IS NULL;',
-		regexp       => qr/^COMMENT ON SCHEMA public IS '';/m,
-		like         => { defaults_public_owner => 1 },
+ create_sql => 'COMMENT ON SCHEMA public IS NULL;',
+ regexp => qr/^COMMENT ON SCHEMA public IS '';/m,
+ like => { defaults_public_owner => 1 },
},
'COMMENT ON TABLE dump_test.test_table' => {
create_order => 36,
-		create_sql   => 'COMMENT ON TABLE dump_test.test_table
+ create_sql => 'COMMENT ON TABLE dump_test.test_table
IS \'comment on table\';',
regexp =>
qr/^\QCOMMENT ON TABLE dump_test.test_table IS 'comment on table';\E/m,
@@ -1402,18 +1396,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON COLUMN dump_test.test_table.col1' => {
create_order => 36,
- create_sql => 'COMMENT ON COLUMN dump_test.test_table.col1
+ create_sql => 'COMMENT ON COLUMN dump_test.test_table.col1
IS \'comment on column\';',
regexp => qr/^
\QCOMMENT ON COLUMN dump_test.test_table.col1 IS 'comment on column';\E
@@ -1422,18 +1416,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON COLUMN dump_test.composite.f1' => {
create_order => 44,
- create_sql => 'COMMENT ON COLUMN dump_test.composite.f1
+ create_sql => 'COMMENT ON COLUMN dump_test.composite.f1
IS \'comment on column of type\';',
regexp => qr/^
\QCOMMENT ON COLUMN dump_test.composite.f1 IS 'comment on column of type';\E
@@ -1442,13 +1436,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON COLUMN dump_test.test_second_table.col1' => {
create_order => 63,
- create_sql => 'COMMENT ON COLUMN dump_test.test_second_table.col1
+ create_sql => 'COMMENT ON COLUMN dump_test.test_second_table.col1
IS \'comment on column col1\';',
regexp => qr/^
\QCOMMENT ON COLUMN dump_test.test_second_table.col1 IS 'comment on column col1';\E
@@ -1457,13 +1451,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON COLUMN dump_test.test_second_table.col2' => {
create_order => 64,
- create_sql => 'COMMENT ON COLUMN dump_test.test_second_table.col2
+ create_sql => 'COMMENT ON COLUMN dump_test.test_second_table.col2
IS \'comment on column col2\';',
regexp => qr/^
\QCOMMENT ON COLUMN dump_test.test_second_table.col2 IS 'comment on column col2';\E
@@ -1472,13 +1466,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON CONVERSION dump_test.test_conversion' => {
create_order => 79,
- create_sql => 'COMMENT ON CONVERSION dump_test.test_conversion
+ create_sql => 'COMMENT ON CONVERSION dump_test.test_conversion
IS \'comment on test conversion\';',
regexp =>
qr/^\QCOMMENT ON CONVERSION dump_test.test_conversion IS 'comment on test conversion';\E/m,
@@ -1486,23 +1480,23 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON COLLATION test0' => {
create_order => 77,
- create_sql => 'COMMENT ON COLLATION test0
+ create_sql => 'COMMENT ON COLLATION test0
IS \'comment on test0 collation\';',
regexp =>
qr/^\QCOMMENT ON COLLATION public.test0 IS 'comment on test0 collation';\E/m,
collation => 1,
- like => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
},
'COMMENT ON LARGE OBJECT ...' => {
create_order => 65,
- create_sql => 'DO $$
+ create_sql => 'DO $$
DECLARE myoid oid;
BEGIN
SELECT loid FROM pg_largeobject INTO myoid;
@@ -1514,10 +1508,10 @@ my %tests = (
/xm,
like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- inserts => 1,
- section_pre_data => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_pre_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
@@ -1528,7 +1522,7 @@ my %tests = (
'COMMENT ON PUBLICATION pub1' => {
create_order => 55,
- create_sql => 'COMMENT ON PUBLICATION pub1
+ create_sql => 'COMMENT ON PUBLICATION pub1
IS \'comment on publication\';',
regexp =>
qr/^COMMENT ON PUBLICATION pub1 IS 'comment on publication';/m,
@@ -1537,7 +1531,7 @@ my %tests = (
'COMMENT ON SUBSCRIPTION sub1' => {
create_order => 55,
- create_sql => 'COMMENT ON SUBSCRIPTION sub1
+ create_sql => 'COMMENT ON SUBSCRIPTION sub1
IS \'comment on subscription\';',
regexp =>
qr/^COMMENT ON SUBSCRIPTION sub1 IS 'comment on subscription';/m,
@@ -1555,7 +1549,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -1570,13 +1564,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1' => {
create_order => 84,
- create_sql => 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1
+ create_sql => 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1
IS \'comment on text search parser\';',
regexp =>
qr/^\QCOMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1 IS 'comment on text search parser';\E/m,
@@ -1584,7 +1578,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -1598,13 +1592,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON TYPE dump_test.planets - ENUM' => {
create_order => 68,
- create_sql => 'COMMENT ON TYPE dump_test.planets
+ create_sql => 'COMMENT ON TYPE dump_test.planets
IS \'comment on enum type\';',
regexp =>
qr/^\QCOMMENT ON TYPE dump_test.planets IS 'comment on enum type';\E/m,
@@ -1612,13 +1606,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON TYPE dump_test.textrange - RANGE' => {
create_order => 69,
- create_sql => 'COMMENT ON TYPE dump_test.textrange
+ create_sql => 'COMMENT ON TYPE dump_test.textrange
IS \'comment on range type\';',
regexp =>
qr/^\QCOMMENT ON TYPE dump_test.textrange IS 'comment on range type';\E/m,
@@ -1626,13 +1620,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON TYPE dump_test.int42 - Regular' => {
create_order => 70,
- create_sql => 'COMMENT ON TYPE dump_test.int42
+ create_sql => 'COMMENT ON TYPE dump_test.int42
IS \'comment on regular type\';',
regexp =>
qr/^\QCOMMENT ON TYPE dump_test.int42 IS 'comment on regular type';\E/m,
@@ -1640,13 +1634,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COMMENT ON TYPE dump_test.undefined - Undefined' => {
create_order => 71,
- create_sql => 'COMMENT ON TYPE dump_test.undefined
+ create_sql => 'COMMENT ON TYPE dump_test.undefined
IS \'comment on undefined type\';',
regexp =>
qr/^\QCOMMENT ON TYPE dump_test.undefined IS 'comment on undefined type';\E/m,
@@ -1654,13 +1648,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'COPY test_table' => {
create_order => 4,
- create_sql => 'INSERT INTO dump_test.test_table (col1) '
+ create_sql => 'INSERT INTO dump_test.test_table (col1) '
. 'SELECT generate_series FROM generate_series(1,9);',
regexp => qr/^
\QCOPY dump_test.test_table (col1, col2, col3, col4) FROM stdin;\E
@@ -1669,17 +1663,17 @@ my %tests = (
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
+ data_only => 1,
only_dump_test_table => 1,
- section_data => 1,
+ section_data => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -1694,16 +1688,16 @@ my %tests = (
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
- exclude_test_table => 1,
+ data_only => 1,
+ exclude_test_table => 1,
exclude_test_table_data => 1,
- section_data => 1,
+ section_data => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -1732,14 +1726,14 @@ my %tests = (
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
+ data_only => 1,
section_data => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -1754,14 +1748,14 @@ my %tests = (
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
+ data_only => 1,
section_data => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -1777,14 +1771,14 @@ my %tests = (
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
+ data_only => 1,
section_data => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -1799,14 +1793,14 @@ my %tests = (
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
+ data_only => 1,
section_data => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -1821,14 +1815,14 @@ my %tests = (
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
+ data_only => 1,
section_data => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -1891,24 +1885,24 @@ my %tests = (
'CREATE ROLE regress_dump_test_role' => {
create_order => 1,
- create_sql => 'CREATE ROLE regress_dump_test_role;',
- regexp => qr/^CREATE ROLE regress_dump_test_role;/m,
- like => {
- pg_dumpall_dbprivs => 1,
- pg_dumpall_exclude => 1,
- pg_dumpall_globals => 1,
+ create_sql => 'CREATE ROLE regress_dump_test_role;',
+ regexp => qr/^CREATE ROLE regress_dump_test_role;/m,
+ like => {
+ pg_dumpall_dbprivs => 1,
+ pg_dumpall_exclude => 1,
+ pg_dumpall_globals => 1,
pg_dumpall_globals_clean => 1,
},
},
'CREATE ROLE regress_quoted...' => {
create_order => 1,
- create_sql => 'CREATE ROLE "regress_quoted \"" role";',
- regexp => qr/^CREATE ROLE "regress_quoted \\"" role";/m,
- like => {
- pg_dumpall_dbprivs => 1,
- pg_dumpall_exclude => 1,
- pg_dumpall_globals => 1,
+ create_sql => 'CREATE ROLE "regress_quoted \"" role";',
+ regexp => qr/^CREATE ROLE "regress_quoted \\"" role";/m,
+ like => {
+ pg_dumpall_dbprivs => 1,
+ pg_dumpall_exclude => 1,
+ pg_dumpall_globals => 1,
pg_dumpall_globals_clean => 1,
},
},
@@ -1924,20 +1918,21 @@ my %tests = (
'CREATE COLLATION test0 FROM "C"' => {
create_order => 76,
- create_sql => 'CREATE COLLATION test0 FROM "C";',
+ create_sql => 'CREATE COLLATION test0 FROM "C";',
regexp =>
qr/CREATE COLLATION public.test0 \(provider = libc, locale = 'C'(, version = '[^']*')?\);/m,
collation => 1,
- like => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
},
'CREATE COLLATION icu_collation' => {
create_order => 76,
- create_sql => "CREATE COLLATION icu_collation (PROVIDER = icu, LOCALE = 'en-US-u-va-posix');",
+ create_sql =>
+ "CREATE COLLATION icu_collation (PROVIDER = icu, LOCALE = 'en-US-u-va-posix');",
regexp =>
qr/CREATE COLLATION public.icu_collation \(provider = icu, locale = 'en-US-u-va-posix'(, version = '[^']*')?\);/m,
icu => 1,
- like => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
},
'CREATE CAST FOR timestamptz' => {
@@ -1958,8 +1953,8 @@ my %tests = (
'CREATE DATABASE dump_test' => {
create_order => 47,
- create_sql => 'CREATE DATABASE dump_test;',
- regexp => qr/^
+ create_sql => 'CREATE DATABASE dump_test;',
+ regexp => qr/^
\QCREATE DATABASE dump_test WITH TEMPLATE = template0 \E
.+;/xm,
like => { pg_dumpall_dbprivs => 1, },
@@ -1986,7 +1981,7 @@ my %tests = (
'CREATE AGGREGATE dump_test.newavg' => {
create_order => 25,
- create_sql => 'CREATE AGGREGATE dump_test.newavg (
+ create_sql => 'CREATE AGGREGATE dump_test.newavg (
sfunc = int4_avg_accum,
basetype = int4,
stype = _int8,
@@ -2006,11 +2001,11 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
exclude_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2024,13 +2019,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE DOMAIN dump_test.us_postal_code' => {
create_order => 29,
- create_sql => 'CREATE DOMAIN dump_test.us_postal_code AS TEXT
+ create_sql => 'CREATE DOMAIN dump_test.us_postal_code AS TEXT
COLLATE "C"
DEFAULT \'10014\'
CHECK(VALUE ~ \'^\d{5}$\' OR
@@ -2049,13 +2044,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE FUNCTION dump_test.pltestlang_call_handler' => {
create_order => 17,
- create_sql => 'CREATE FUNCTION dump_test.pltestlang_call_handler()
+ create_sql => 'CREATE FUNCTION dump_test.pltestlang_call_handler()
RETURNS LANGUAGE_HANDLER AS \'$libdir/plpgsql\',
\'plpgsql_call_handler\' LANGUAGE C;',
regexp => qr/^
@@ -2069,13 +2064,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE FUNCTION dump_test.trigger_func' => {
create_order => 30,
- create_sql => 'CREATE FUNCTION dump_test.trigger_func()
+ create_sql => 'CREATE FUNCTION dump_test.trigger_func()
RETURNS trigger LANGUAGE plpgsql
AS $$ BEGIN RETURN NULL; END;$$;',
regexp => qr/^
@@ -2088,13 +2083,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE FUNCTION dump_test.event_trigger_func' => {
create_order => 32,
- create_sql => 'CREATE FUNCTION dump_test.event_trigger_func()
+ create_sql => 'CREATE FUNCTION dump_test.event_trigger_func()
RETURNS event_trigger LANGUAGE plpgsql
AS $$ BEGIN RETURN; END;$$;',
regexp => qr/^
@@ -2107,7 +2102,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2122,13 +2117,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE OPERATOR CLASS dump_test.op_class' => {
create_order => 74,
- create_sql => 'CREATE OPERATOR CLASS dump_test.op_class
+ create_sql => 'CREATE OPERATOR CLASS dump_test.op_class
FOR TYPE bigint USING btree FAMILY dump_test.op_family
AS STORAGE bigint,
OPERATOR 1 <(bigint,bigint),
@@ -2155,14 +2150,14 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
# verify that a custom operator/opclass/range type is dumped in the right order
'CREATE OPERATOR CLASS dump_test.op_class_custom' => {
create_order => 74,
- create_sql => 'CREATE OPERATOR dump_test.~~ (
+ create_sql => 'CREATE OPERATOR dump_test.~~ (
PROCEDURE = int4eq,
LEFTARG = int,
RIGHTARG = int);
@@ -2194,7 +2189,7 @@ my %tests = (
'CREATE OPERATOR CLASS dump_test.op_class_empty' => {
create_order => 89,
- create_sql => 'CREATE OPERATOR CLASS dump_test.op_class_empty
+ create_sql => 'CREATE OPERATOR CLASS dump_test.op_class_empty
FOR TYPE bigint USING btree FAMILY dump_test.op_family
AS STORAGE bigint;',
regexp => qr/^
@@ -2206,13 +2201,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE EVENT TRIGGER test_event_trigger' => {
create_order => 33,
- create_sql => 'CREATE EVENT TRIGGER test_event_trigger
+ create_sql => 'CREATE EVENT TRIGGER test_event_trigger
ON ddl_command_start
EXECUTE FUNCTION dump_test.event_trigger_func();',
regexp => qr/^
@@ -2225,7 +2220,7 @@ my %tests = (
'CREATE TRIGGER test_trigger' => {
create_order => 31,
- create_sql => 'CREATE TRIGGER test_trigger
+ create_sql => 'CREATE TRIGGER test_trigger
BEFORE INSERT ON dump_test.test_table
FOR EACH ROW WHEN (NEW.col1 > 10)
EXECUTE FUNCTION dump_test.trigger_func();',
@@ -2238,18 +2233,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
- exclude_test_table => 1,
+ exclude_test_table => 1,
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TYPE dump_test.planets AS ENUM' => {
create_order => 37,
- create_sql => 'CREATE TYPE dump_test.planets
+ create_sql => 'CREATE TYPE dump_test.planets
AS ENUM ( \'venus\', \'earth\', \'mars\' );',
regexp => qr/^
\QCREATE TYPE dump_test.planets AS ENUM (\E
@@ -2260,9 +2255,9 @@ my %tests = (
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2281,7 +2276,7 @@ my %tests = (
'CREATE TYPE dump_test.textrange AS RANGE' => {
create_order => 38,
- create_sql => 'CREATE TYPE dump_test.textrange
+ create_sql => 'CREATE TYPE dump_test.textrange
AS RANGE (subtype=text, collation="C");',
regexp => qr/^
\QCREATE TYPE dump_test.textrange AS RANGE (\E
@@ -2293,19 +2288,19 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TYPE dump_test.int42' => {
create_order => 39,
- create_sql => 'CREATE TYPE dump_test.int42;',
- regexp => qr/^\QCREATE TYPE dump_test.int42;\E/m,
+ create_sql => 'CREATE TYPE dump_test.int42;',
+ regexp => qr/^\QCREATE TYPE dump_test.int42;\E/m,
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2320,7 +2315,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2388,7 +2383,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2403,13 +2398,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1' => {
create_order => 82,
- create_sql => 'CREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1
+ create_sql => 'CREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1
(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype);',
regexp => qr/^
\QCREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1 (\E\n
@@ -2422,7 +2417,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2438,13 +2433,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE FUNCTION dump_test.int42_in' => {
create_order => 40,
- create_sql => 'CREATE FUNCTION dump_test.int42_in(cstring)
+ create_sql => 'CREATE FUNCTION dump_test.int42_in(cstring)
RETURNS dump_test.int42 AS \'int4in\'
LANGUAGE internal STRICT IMMUTABLE;',
regexp => qr/^
@@ -2456,13 +2451,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE FUNCTION dump_test.int42_out' => {
create_order => 41,
- create_sql => 'CREATE FUNCTION dump_test.int42_out(dump_test.int42)
+ create_sql => 'CREATE FUNCTION dump_test.int42_out(dump_test.int42)
RETURNS cstring AS \'int4out\'
LANGUAGE internal STRICT IMMUTABLE;',
regexp => qr/^
@@ -2474,7 +2469,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2491,13 +2486,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE PROCEDURE dump_test.ptest1' => {
create_order => 41,
- create_sql => 'CREATE PROCEDURE dump_test.ptest1(a int)
+ create_sql => 'CREATE PROCEDURE dump_test.ptest1(a int)
LANGUAGE SQL AS $$ INSERT INTO dump_test.test_table (col1) VALUES (a) $$;',
regexp => qr/^
\QCREATE PROCEDURE dump_test.ptest1(IN a integer)\E
@@ -2508,13 +2503,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TYPE dump_test.int42 populated' => {
create_order => 42,
- create_sql => 'CREATE TYPE dump_test.int42 (
+ create_sql => 'CREATE TYPE dump_test.int42 (
internallength = 4,
input = dump_test.int42_in,
output = dump_test.int42_out,
@@ -2535,13 +2530,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TYPE dump_test.composite' => {
create_order => 43,
- create_sql => 'CREATE TYPE dump_test.composite AS (
+ create_sql => 'CREATE TYPE dump_test.composite AS (
f1 int,
f2 dump_test.int42
);',
@@ -2555,34 +2550,34 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TYPE dump_test.undefined' => {
create_order => 39,
- create_sql => 'CREATE TYPE dump_test.undefined;',
- regexp => qr/^\QCREATE TYPE dump_test.undefined;\E/m,
+ create_sql => 'CREATE TYPE dump_test.undefined;',
+ regexp => qr/^\QCREATE TYPE dump_test.undefined;\E/m,
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE FOREIGN DATA WRAPPER dummy' => {
create_order => 35,
- create_sql => 'CREATE FOREIGN DATA WRAPPER dummy;',
- regexp => qr/CREATE FOREIGN DATA WRAPPER dummy;/m,
- like => { %full_runs, section_pre_data => 1, },
+ create_sql => 'CREATE FOREIGN DATA WRAPPER dummy;',
+ regexp => qr/CREATE FOREIGN DATA WRAPPER dummy;/m,
+ like => { %full_runs, section_pre_data => 1, },
},
'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy' => {
create_order => 36,
- create_sql => 'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;',
- regexp => qr/CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;/m,
- like => { %full_runs, section_pre_data => 1, },
+ create_sql => 'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;',
+ regexp => qr/CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;/m,
+ like => { %full_runs, section_pre_data => 1, },
},
'CREATE FOREIGN TABLE dump_test.foreign_table SERVER s1' => {
@@ -2603,7 +2598,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -2627,7 +2622,7 @@ my %tests = (
'CREATE LANGUAGE pltestlang' => {
create_order => 18,
- create_sql => 'CREATE LANGUAGE pltestlang
+ create_sql => 'CREATE LANGUAGE pltestlang
HANDLER dump_test.pltestlang_call_handler;',
regexp => qr/^
\QCREATE PROCEDURAL LANGUAGE pltestlang \E
@@ -2639,7 +2634,7 @@ my %tests = (
'CREATE MATERIALIZED VIEW matview' => {
create_order => 20,
- create_sql => 'CREATE MATERIALIZED VIEW dump_test.matview (col1) AS
+ create_sql => 'CREATE MATERIALIZED VIEW dump_test.matview (col1) AS
SELECT col1 FROM dump_test.test_table;',
regexp => qr/^
\QCREATE MATERIALIZED VIEW dump_test.matview AS\E
@@ -2651,13 +2646,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE MATERIALIZED VIEW matview_second' => {
create_order => 21,
- create_sql => 'CREATE MATERIALIZED VIEW
+ create_sql => 'CREATE MATERIALIZED VIEW
dump_test.matview_second (col1) AS
SELECT * FROM dump_test.matview;',
regexp => qr/^
@@ -2670,13 +2665,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE MATERIALIZED VIEW matview_third' => {
create_order => 58,
- create_sql => 'CREATE MATERIALIZED VIEW
+ create_sql => 'CREATE MATERIALIZED VIEW
dump_test.matview_third (col1) AS
SELECT * FROM dump_test.matview_second WITH NO DATA;',
regexp => qr/^
@@ -2689,13 +2684,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE MATERIALIZED VIEW matview_fourth' => {
create_order => 59,
- create_sql => 'CREATE MATERIALIZED VIEW
+ create_sql => 'CREATE MATERIALIZED VIEW
dump_test.matview_fourth (col1) AS
SELECT * FROM dump_test.matview_third WITH NO DATA;',
regexp => qr/^
@@ -2708,13 +2703,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE MATERIALIZED VIEW matview_compression' => {
create_order => 20,
- create_sql => 'CREATE MATERIALIZED VIEW
+ create_sql => 'CREATE MATERIALIZED VIEW
dump_test.matview_compression (col2) AS
SELECT col2 FROM dump_test.test_table;
ALTER MATERIALIZED VIEW dump_test.matview_compression
@@ -2732,14 +2727,14 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_toast_compression => 1,
- only_dump_measurement => 1,
+ no_toast_compression => 1,
+ only_dump_measurement => 1,
},
},
'CREATE POLICY p1 ON test_table' => {
create_order => 22,
- create_sql => 'CREATE POLICY p1 ON dump_test.test_table
+ create_sql => 'CREATE POLICY p1 ON dump_test.test_table
USING (true)
WITH CHECK (true);',
regexp => qr/^
@@ -2750,18 +2745,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'CREATE POLICY p2 ON test_table FOR SELECT' => {
create_order => 24,
- create_sql => 'CREATE POLICY p2 ON dump_test.test_table
+ create_sql => 'CREATE POLICY p2 ON dump_test.test_table
FOR SELECT TO regress_dump_test_role USING (true);',
regexp => qr/^
\QCREATE POLICY p2 ON dump_test.test_table FOR SELECT TO regress_dump_test_role \E
@@ -2771,18 +2766,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'CREATE POLICY p3 ON test_table FOR INSERT' => {
create_order => 25,
- create_sql => 'CREATE POLICY p3 ON dump_test.test_table
+ create_sql => 'CREATE POLICY p3 ON dump_test.test_table
FOR INSERT TO regress_dump_test_role WITH CHECK (true);',
regexp => qr/^
\QCREATE POLICY p3 ON dump_test.test_table FOR INSERT \E
@@ -2792,18 +2787,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'CREATE POLICY p4 ON test_table FOR UPDATE' => {
create_order => 26,
- create_sql => 'CREATE POLICY p4 ON dump_test.test_table FOR UPDATE
+ create_sql => 'CREATE POLICY p4 ON dump_test.test_table FOR UPDATE
TO regress_dump_test_role USING (true) WITH CHECK (true);',
regexp => qr/^
\QCREATE POLICY p4 ON dump_test.test_table FOR UPDATE TO regress_dump_test_role \E
@@ -2813,18 +2808,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'CREATE POLICY p5 ON test_table FOR DELETE' => {
create_order => 27,
- create_sql => 'CREATE POLICY p5 ON dump_test.test_table
+ create_sql => 'CREATE POLICY p5 ON dump_test.test_table
FOR DELETE TO regress_dump_test_role USING (true);',
regexp => qr/^
\QCREATE POLICY p5 ON dump_test.test_table FOR DELETE \E
@@ -2834,12 +2829,12 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
@@ -2855,19 +2850,19 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'CREATE PUBLICATION pub1' => {
create_order => 50,
- create_sql => 'CREATE PUBLICATION pub1;',
- regexp => qr/^
+ create_sql => 'CREATE PUBLICATION pub1;',
+ regexp => qr/^
\QCREATE PUBLICATION pub1 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
@@ -2875,7 +2870,7 @@ my %tests = (
'CREATE PUBLICATION pub2' => {
create_order => 50,
- create_sql => 'CREATE PUBLICATION pub2
+ create_sql => 'CREATE PUBLICATION pub2
FOR ALL TABLES
WITH (publish = \'\');',
regexp => qr/^
@@ -2886,8 +2881,8 @@ my %tests = (
'CREATE PUBLICATION pub3' => {
create_order => 50,
- create_sql => 'CREATE PUBLICATION pub3;',
- regexp => qr/^
+ create_sql => 'CREATE PUBLICATION pub3;',
+ regexp => qr/^
\QCREATE PUBLICATION pub3 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
@@ -2895,8 +2890,8 @@ my %tests = (
'CREATE PUBLICATION pub4' => {
create_order => 50,
- create_sql => 'CREATE PUBLICATION pub4;',
- regexp => qr/^
+ create_sql => 'CREATE PUBLICATION pub4;',
+ regexp => qr/^
\QCREATE PUBLICATION pub4 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
@@ -2904,7 +2899,7 @@ my %tests = (
'CREATE SUBSCRIPTION sub1' => {
create_order => 50,
- create_sql => 'CREATE SUBSCRIPTION sub1
+ create_sql => 'CREATE SUBSCRIPTION sub1
CONNECTION \'dbname=doesnotexist\' PUBLICATION pub1
WITH (connect = false);',
regexp => qr/^
@@ -2915,7 +2910,7 @@ my %tests = (
'CREATE SUBSCRIPTION sub2' => {
create_order => 50,
- create_sql => 'CREATE SUBSCRIPTION sub2
+ create_sql => 'CREATE SUBSCRIPTION sub2
CONNECTION \'dbname=doesnotexist\' PUBLICATION pub1
WITH (connect = false, origin = none);',
regexp => qr/^
@@ -2926,7 +2921,7 @@ my %tests = (
'CREATE SUBSCRIPTION sub3' => {
create_order => 50,
- create_sql => 'CREATE SUBSCRIPTION sub3
+ create_sql => 'CREATE SUBSCRIPTION sub3
CONNECTION \'dbname=doesnotexist\' PUBLICATION pub1
WITH (connect = false, origin = any);',
regexp => qr/^
@@ -2942,10 +2937,10 @@ my %tests = (
regexp => qr/^
\QALTER PUBLICATION pub1 ADD TABLE ONLY dump_test.test_table;\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
+ exclude_test_table => 1,
},
},
@@ -2996,8 +2991,8 @@ my %tests = (
'ALTER PUBLICATION pub3 ADD TABLES IN SCHEMA public' => {
create_order => 52,
- create_sql => 'ALTER PUBLICATION pub3 ADD TABLES IN SCHEMA public;',
- regexp => qr/^
+ create_sql => 'ALTER PUBLICATION pub3 ADD TABLES IN SCHEMA public;',
+ regexp => qr/^
\QALTER PUBLICATION pub3 ADD TABLES IN SCHEMA public;\E
/xm,
like => { %full_runs, section_post_data => 1, },
@@ -3010,10 +3005,10 @@ my %tests = (
regexp => qr/^
\QALTER PUBLICATION pub3 ADD TABLE ONLY dump_test.test_table;\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
+ exclude_test_table => 1,
},
},
@@ -3024,10 +3019,10 @@ my %tests = (
regexp => qr/^
\QALTER PUBLICATION pub4 ADD TABLE ONLY dump_test.test_table WHERE ((col1 > 0));\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
+ exclude_test_table => 1,
},
},
@@ -3052,30 +3047,30 @@ my %tests = (
'CREATE SCHEMA dump_test' => {
create_order => 2,
- create_sql => 'CREATE SCHEMA dump_test;',
- regexp => qr/^CREATE SCHEMA dump_test;/m,
+ create_sql => 'CREATE SCHEMA dump_test;',
+ regexp => qr/^CREATE SCHEMA dump_test;/m,
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE SCHEMA dump_test_second_schema' => {
create_order => 9,
- create_sql => 'CREATE SCHEMA dump_test_second_schema;',
- regexp => qr/^CREATE SCHEMA dump_test_second_schema;/m,
- like => {
+ create_sql => 'CREATE SCHEMA dump_test_second_schema;',
+ regexp => qr/^CREATE SCHEMA dump_test_second_schema;/m,
+ like => {
%full_runs,
- role => 1,
+ role => 1,
section_pre_data => 1,
},
},
'CREATE TABLE test_table' => {
create_order => 3,
- create_sql => 'CREATE TABLE dump_test.test_table (
+ create_sql => 'CREATE TABLE dump_test.test_table (
col1 serial primary key,
col2 text COMPRESSION pglz,
col3 text,
@@ -3099,18 +3094,18 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_compression_method' => {
create_order => 110,
- create_sql => 'CREATE TABLE dump_test.test_compression_method (
+ create_sql => 'CREATE TABLE dump_test.test_compression_method (
col1 text
);',
regexp => qr/^
@@ -3119,13 +3114,11 @@ my %tests = (
\Q);\E
/xm,
like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1,
+ %full_runs, %dump_test_schema_runs, section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -3133,7 +3126,7 @@ my %tests = (
# (de)compression operations
'COPY test_compression_method' => {
create_order => 111,
- create_sql => 'INSERT INTO dump_test.test_compression_method (col1) '
+ create_sql => 'INSERT INTO dump_test.test_compression_method (col1) '
. 'SELECT string_agg(a::text, \'\') FROM generate_series(1,4096) a;',
regexp => qr/^
\QCOPY dump_test.test_compression_method (col1) FROM stdin;\E
@@ -3141,22 +3134,22 @@ my %tests = (
/xm,
like => {
%full_runs,
- data_only => 1,
- section_data => 1,
- only_dump_test_schema => 1,
- test_schema_plus_large_objects => 1,
+ data_only => 1,
+ section_data => 1,
+ only_dump_test_schema => 1,
+ test_schema_plus_large_objects => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE fk_reference_test_table' => {
create_order => 21,
- create_sql => 'CREATE TABLE dump_test.fk_reference_test_table (
+ create_sql => 'CREATE TABLE dump_test.fk_reference_test_table (
col1 int primary key references dump_test.test_table
);',
regexp => qr/^
@@ -3168,13 +3161,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_second_table' => {
create_order => 6,
- create_sql => 'CREATE TABLE dump_test.test_second_table (
+ create_sql => 'CREATE TABLE dump_test.test_second_table (
col1 int,
col2 text
);',
@@ -3188,13 +3181,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_compression' => {
create_order => 3,
- create_sql => 'CREATE TABLE dump_test.test_compression (
+ create_sql => 'CREATE TABLE dump_test.test_compression (
col1 int,
col2 text COMPRESSION lz4
);',
@@ -3212,13 +3205,13 @@ my %tests = (
unlike => {
exclude_dump_test_schema => 1,
no_toast_compression => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE measurement PARTITIONED BY' => {
create_order => 90,
- create_sql => 'CREATE TABLE dump_test.measurement (
+ create_sql => 'CREATE TABLE dump_test.measurement (
city_id serial not null,
logdate date not null,
peaktemp int CHECK (peaktemp >= -460),
@@ -3243,7 +3236,7 @@ my %tests = (
only_dump_measurement => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
exclude_measurement => 1,
},
@@ -3270,8 +3263,8 @@ my %tests = (
like => {
%full_runs,
section_pre_data => 1,
- role => 1,
- binary_upgrade => 1,
+ role => 1,
+ binary_upgrade => 1,
only_dump_measurement => 1,
},
unlike => {
@@ -3281,7 +3274,7 @@ my %tests = (
'Creation of row-level trigger in partitioned table' => {
create_order => 92,
- create_sql => 'CREATE TRIGGER test_trigger
+ create_sql => 'CREATE TRIGGER test_trigger
AFTER INSERT ON dump_test.measurement
FOR EACH ROW EXECUTE PROCEDURE dump_test.trigger_func()',
regexp => qr/^
@@ -3290,7 +3283,8 @@ my %tests = (
\QEXECUTE FUNCTION dump_test.trigger_func();\E
/xm,
like => {
- %full_runs, %dump_test_schema_runs, section_post_data => 1,
+ %full_runs, %dump_test_schema_runs,
+ section_post_data => 1,
only_dump_measurement => 1,
},
unlike => {
@@ -3301,7 +3295,8 @@ my %tests = (
'COPY measurement' => {
create_order => 93,
- create_sql => 'INSERT INTO dump_test.measurement (city_id, logdate, peaktemp, unitsales) '
+ create_sql =>
+ 'INSERT INTO dump_test.measurement (city_id, logdate, peaktemp, unitsales) '
. "VALUES (1, '2006-02-12', 35, 1);",
regexp => qr/^
\QCOPY dump_test_second_schema.measurement_y2006m2 (city_id, logdate, peaktemp, unitsales) FROM stdin;\E
@@ -3310,20 +3305,19 @@ my %tests = (
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
+ data_only => 1,
only_dump_measurement => 1,
- section_data => 1,
+ section_data => 1,
only_dump_test_schema => 1,
role_parallel => 1,
role => 1,
},
unlike => {
- binary_upgrade => 1,
- schema_only => 1,
- exclude_measurement => 1,
- only_dump_test_schema => 1,
+ binary_upgrade => 1,
+ schema_only => 1,
+ exclude_measurement => 1,
+ only_dump_test_schema => 1,
test_schema_plus_large_objects => 1,
- exclude_measurement => 1,
exclude_measurement_data => 1,
},
},
@@ -3350,8 +3345,8 @@ my %tests = (
like => {
%full_runs,
section_post_data => 1,
- role => 1,
- binary_upgrade => 1,
+ role => 1,
+ binary_upgrade => 1,
only_dump_measurement => 1,
},
unlike => {
@@ -3366,8 +3361,8 @@ my %tests = (
like => {
%full_runs,
section_post_data => 1,
- role => 1,
- binary_upgrade => 1,
+ role => 1,
+ binary_upgrade => 1,
only_dump_measurement => 1,
},
unlike => {
@@ -3382,8 +3377,8 @@ my %tests = (
like => {
%full_runs,
section_post_data => 1,
- role => 1,
- binary_upgrade => 1,
+ role => 1,
+ binary_upgrade => 1,
only_dump_measurement => 1,
},
unlike => {
@@ -3394,19 +3389,19 @@ my %tests = (
# We should never see the creation of a trigger on a partition
'Disabled trigger on partition is not created' => {
regexp => qr/CREATE TRIGGER test_trigger.*ON dump_test_second_schema/,
- like => {},
+ like => {},
unlike => { %full_runs, %dump_test_schema_runs },
},
# Triggers on partitions should not be dropped individually
'Triggers on partitions are not dropped' => {
regexp => qr/DROP TRIGGER test_trigger.*ON dump_test_second_schema/,
- like => {}
+ like => {}
},
'CREATE TABLE test_third_table_generated_cols' => {
create_order => 6,
- create_sql => 'CREATE TABLE dump_test.test_third_table (
+ create_sql => 'CREATE TABLE dump_test.test_third_table (
f1 int, junk int,
g1 int generated always as (f1 * 2) stored,
"F3" int,
@@ -3432,7 +3427,7 @@ my %tests = (
'CREATE TABLE test_fourth_table_zero_col' => {
create_order => 6,
- create_sql => 'CREATE TABLE dump_test.test_fourth_table (
+ create_sql => 'CREATE TABLE dump_test.test_fourth_table (
);',
regexp => qr/^
\QCREATE TABLE dump_test.test_fourth_table (\E
@@ -3442,13 +3437,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_fifth_table' => {
create_order => 53,
- create_sql => 'CREATE TABLE dump_test.test_fifth_table (
+ create_sql => 'CREATE TABLE dump_test.test_fifth_table (
col1 integer,
col2 boolean,
col3 boolean,
@@ -3468,13 +3463,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_sixth_table' => {
create_order => 6,
- create_sql => 'CREATE TABLE dump_test.test_sixth_table (
+ create_sql => 'CREATE TABLE dump_test.test_sixth_table (
col1 int,
col2 text,
col3 bytea
@@ -3490,13 +3485,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_seventh_table' => {
create_order => 6,
- create_sql => 'CREATE TABLE dump_test.test_seventh_table (
+ create_sql => 'CREATE TABLE dump_test.test_seventh_table (
col1 int,
col2 text,
col3 bytea
@@ -3512,13 +3507,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_table_identity' => {
create_order => 3,
- create_sql => 'CREATE TABLE dump_test.test_table_identity (
+ create_sql => 'CREATE TABLE dump_test.test_table_identity (
col1 int generated always as identity primary key,
col2 text
);',
@@ -3541,13 +3536,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_table_generated' => {
create_order => 3,
- create_sql => 'CREATE TABLE dump_test.test_table_generated (
+ create_sql => 'CREATE TABLE dump_test.test_table_generated (
col1 int primary key,
col2 int generated always as (col1 * 2) stored
);',
@@ -3561,13 +3556,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_table_generated_child1 (without local columns)' => {
create_order => 4,
- create_sql => 'CREATE TABLE dump_test.test_table_generated_child1 ()
+ create_sql => 'CREATE TABLE dump_test.test_table_generated_child1 ()
INHERITS (dump_test.test_table_generated);',
regexp => qr/^
\QCREATE TABLE dump_test.test_table_generated_child1 (\E\n
@@ -3577,9 +3572,9 @@ my %tests = (
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -3593,7 +3588,7 @@ my %tests = (
'CREATE TABLE test_table_generated_child2 (with local columns)' => {
create_order => 4,
- create_sql => 'CREATE TABLE dump_test.test_table_generated_child2 (
+ create_sql => 'CREATE TABLE dump_test.test_table_generated_child2 (
col1 int,
col2 int
) INHERITS (dump_test.test_table_generated);',
@@ -3607,15 +3602,15 @@ my %tests = (
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE table_with_stats' => {
create_order => 98,
- create_sql => 'CREATE TABLE dump_test.table_index_stats (
+ create_sql => 'CREATE TABLE dump_test.table_index_stats (
col1 int,
col2 int,
col3 int);
@@ -3634,13 +3629,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_inheritance_parent' => {
create_order => 90,
- create_sql => 'CREATE TABLE dump_test.test_inheritance_parent (
+ create_sql => 'CREATE TABLE dump_test.test_inheritance_parent (
col1 int NOT NULL,
col2 int CHECK (col2 >= 42)
);',
@@ -3655,13 +3650,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE TABLE test_inheritance_child' => {
create_order => 91,
- create_sql => 'CREATE TABLE dump_test.test_inheritance_child (
+ create_sql => 'CREATE TABLE dump_test.test_inheritance_child (
col1 int NOT NULL,
CONSTRAINT test_inheritance_child CHECK (col2 >= 142857)
) INHERITS (dump_test.test_inheritance_parent);',
@@ -3676,15 +3671,15 @@ my %tests = (
%full_runs, %dump_test_schema_runs, section_pre_data => 1,
},
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE STATISTICS extended_stats_no_options' => {
create_order => 97,
- create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_no_options
+ create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_no_options
ON col1, col2 FROM dump_test.test_fifth_table',
regexp => qr/^
\QCREATE STATISTICS dump_test.test_ext_stats_no_options ON col1, col2 FROM dump_test.test_fifth_table;\E
@@ -3693,13 +3688,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE STATISTICS extended_stats_options' => {
create_order => 97,
- create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_opts
+ create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_opts
(ndistinct) ON col1, col2 FROM dump_test.test_fifth_table',
regexp => qr/^
\QCREATE STATISTICS dump_test.test_ext_stats_opts (ndistinct) ON col1, col2 FROM dump_test.test_fifth_table;\E
@@ -3708,7 +3703,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -3723,13 +3718,13 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE STATISTICS extended_stats_expression' => {
create_order => 99,
- create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_expr
+ create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_expr
ON (2 * col1) FROM dump_test.test_fifth_table',
regexp => qr/^
\QCREATE STATISTICS dump_test.test_ext_stats_expr ON (2 * col1) FROM dump_test.test_fifth_table;\E
@@ -3738,7 +3733,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -3756,11 +3751,11 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -3772,42 +3767,42 @@ my %tests = (
\QCREATE INDEX measurement_city_id_logdate_idx ON ONLY dump_test.measurement USING\E
/xm,
like => {
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- compression => 1,
- createdb => 1,
- defaults => 1,
- exclude_test_table => 1,
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ compression => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_test_table => 1,
exclude_test_table_data => 1,
- no_toast_compression => 1,
- no_large_objects => 1,
- no_privs => 1,
- no_owner => 1,
- no_table_access_method => 1,
- only_dump_test_schema => 1,
- pg_dumpall_dbprivs => 1,
- pg_dumpall_exclude => 1,
- schema_only => 1,
- section_post_data => 1,
+ no_toast_compression => 1,
+ no_large_objects => 1,
+ no_privs => 1,
+ no_owner => 1,
+ no_table_access_method => 1,
+ only_dump_test_schema => 1,
+ pg_dumpall_dbprivs => 1,
+ pg_dumpall_exclude => 1,
+ schema_only => 1,
+ section_post_data => 1,
test_schema_plus_large_objects => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
exclude_measurement_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- only_dump_test_table => 1,
- pg_dumpall_globals => 1,
+ only_dump_test_table => 1,
+ pg_dumpall_globals => 1,
pg_dumpall_globals_clean => 1,
- role => 1,
- section_pre_data => 1,
- exclude_measurement => 1,
+ role => 1,
+ section_pre_data => 1,
+ exclude_measurement => 1,
},
},
'ALTER TABLE measurement PRIMARY KEY' => {
- all_runs => 1,
- catch_all => 'CREATE ... commands',
+ all_runs => 1,
+ catch_all => 'CREATE ... commands',
create_order => 93,
create_sql =>
'ALTER TABLE dump_test.measurement ADD PRIMARY KEY (city_id, logdate);',
@@ -3823,7 +3818,7 @@ my %tests = (
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_measurement => 1,
+ exclude_measurement => 1,
},
},
@@ -3833,12 +3828,12 @@ my %tests = (
/xm,
like => {
%full_runs,
- role => 1,
+ role => 1,
section_post_data => 1,
only_dump_measurement => 1,
},
unlike => {
- exclude_measurement => 1,
+ exclude_measurement => 1,
},
},
@@ -3848,59 +3843,59 @@ my %tests = (
/xm,
like => {
%full_runs,
- role => 1,
+ role => 1,
section_post_data => 1,
only_dump_measurement => 1,
exclude_measurement_data => 1,
},
unlike => {
- exclude_measurement => 1,
+ exclude_measurement => 1,
},
},
'ALTER INDEX ... ATTACH PARTITION (primary key)' => {
- all_runs => 1,
+ all_runs => 1,
catch_all => 'CREATE ... commands',
- regexp => qr/^
+ regexp => qr/^
\QALTER INDEX dump_test.measurement_pkey ATTACH PARTITION dump_test_second_schema.measurement_y2006m2_pkey\E
/xm,
like => {
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- compression => 1,
- createdb => 1,
- defaults => 1,
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ compression => 1,
+ createdb => 1,
+ defaults => 1,
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_toast_compression => 1,
- no_large_objects => 1,
- no_privs => 1,
- no_owner => 1,
- no_table_access_method => 1,
- pg_dumpall_dbprivs => 1,
- pg_dumpall_exclude => 1,
- role => 1,
- schema_only => 1,
- section_post_data => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_toast_compression => 1,
+ no_large_objects => 1,
+ no_privs => 1,
+ no_owner => 1,
+ no_table_access_method => 1,
+ pg_dumpall_dbprivs => 1,
+ pg_dumpall_exclude => 1,
+ role => 1,
+ schema_only => 1,
+ section_post_data => 1,
+ only_dump_measurement => 1,
exclude_measurement_data => 1,
},
unlike => {
- only_dump_test_schema => 1,
- only_dump_test_table => 1,
- pg_dumpall_globals => 1,
+ only_dump_test_schema => 1,
+ only_dump_test_table => 1,
+ pg_dumpall_globals => 1,
pg_dumpall_globals_clean => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
test_schema_plus_large_objects => 1,
- exclude_measurement => 1,
+ exclude_measurement => 1,
},
},
'CREATE VIEW test_view' => {
create_order => 61,
- create_sql => 'CREATE VIEW dump_test.test_view
+ create_sql => 'CREATE VIEW dump_test.test_view
WITH (check_option = \'local\', security_barrier = true) AS
SELECT col1 FROM dump_test.test_table;',
regexp => qr/^
@@ -3912,7 +3907,7 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
@@ -3926,17 +3921,17 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
# FIXME
'DROP SCHEMA public (for testing without public schema)' => {
- database => 'regress_pg_dump_test',
+ database => 'regress_pg_dump_test',
create_order => 100,
- create_sql => 'DROP SCHEMA public;',
- regexp => qr/^DROP SCHEMA public;/m,
- like => {},
+ create_sql => 'DROP SCHEMA public;',
+ regexp => qr/^DROP SCHEMA public;/m,
+ like => {},
},
'DROP SCHEMA public' => {
@@ -3962,37 +3957,37 @@ my %tests = (
'DROP FUNCTION dump_test.pltestlang_call_handler()' => {
regexp => qr/^DROP FUNCTION dump_test\.pltestlang_call_handler\(\);/m,
- like => { clean => 1, },
+ like => { clean => 1, },
},
'DROP LANGUAGE pltestlang' => {
regexp => qr/^DROP PROCEDURAL LANGUAGE pltestlang;/m,
- like => { clean => 1, },
+ like => { clean => 1, },
},
'DROP SCHEMA dump_test' => {
regexp => qr/^DROP SCHEMA dump_test;/m,
- like => { clean => 1, },
+ like => { clean => 1, },
},
'DROP SCHEMA dump_test_second_schema' => {
regexp => qr/^DROP SCHEMA dump_test_second_schema;/m,
- like => { clean => 1, },
+ like => { clean => 1, },
},
'DROP TABLE test_table' => {
regexp => qr/^DROP TABLE dump_test\.test_table;/m,
- like => { clean => 1, },
+ like => { clean => 1, },
},
'DROP TABLE fk_reference_test_table' => {
regexp => qr/^DROP TABLE dump_test\.fk_reference_test_table;/m,
- like => { clean => 1, },
+ like => { clean => 1, },
},
'DROP TABLE test_second_table' => {
regexp => qr/^DROP TABLE dump_test\.test_second_table;/m,
- like => { clean => 1, },
+ like => { clean => 1, },
},
'DROP EXTENSION IF EXISTS plpgsql' => {
@@ -4011,27 +4006,27 @@ my %tests = (
'DROP LANGUAGE IF EXISTS pltestlang' => {
regexp => qr/^DROP PROCEDURAL LANGUAGE IF EXISTS pltestlang;/m,
- like => { clean_if_exists => 1, },
+ like => { clean_if_exists => 1, },
},
'DROP SCHEMA IF EXISTS dump_test' => {
regexp => qr/^DROP SCHEMA IF EXISTS dump_test;/m,
- like => { clean_if_exists => 1, },
+ like => { clean_if_exists => 1, },
},
'DROP SCHEMA IF EXISTS dump_test_second_schema' => {
regexp => qr/^DROP SCHEMA IF EXISTS dump_test_second_schema;/m,
- like => { clean_if_exists => 1, },
+ like => { clean_if_exists => 1, },
},
'DROP TABLE IF EXISTS test_table' => {
regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_table;/m,
- like => { clean_if_exists => 1, },
+ like => { clean_if_exists => 1, },
},
'DROP TABLE IF EXISTS test_second_table' => {
regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_second_table;/m,
- like => { clean_if_exists => 1, },
+ like => { clean_if_exists => 1, },
},
'DROP ROLE regress_dump_test_role' => {
@@ -4052,14 +4047,14 @@ my %tests = (
'GRANT USAGE ON SCHEMA dump_test_second_schema' => {
create_order => 10,
- create_sql => 'GRANT USAGE ON SCHEMA dump_test_second_schema
+ create_sql => 'GRANT USAGE ON SCHEMA dump_test_second_schema
TO regress_dump_test_role;',
regexp => qr/^
\QGRANT USAGE ON SCHEMA dump_test_second_schema TO regress_dump_test_role;\E
/xm,
like => {
%full_runs,
- role => 1,
+ role => 1,
section_pre_data => 1,
},
unlike => { no_privs => 1, },
@@ -4067,7 +4062,7 @@ my %tests = (
'GRANT USAGE ON FOREIGN DATA WRAPPER dummy' => {
create_order => 85,
- create_sql => 'GRANT USAGE ON FOREIGN DATA WRAPPER dummy
+ create_sql => 'GRANT USAGE ON FOREIGN DATA WRAPPER dummy
TO regress_dump_test_role;',
regexp => qr/^
\QGRANT ALL ON FOREIGN DATA WRAPPER dummy TO regress_dump_test_role;\E
@@ -4078,7 +4073,7 @@ my %tests = (
'GRANT USAGE ON FOREIGN SERVER s1' => {
create_order => 85,
- create_sql => 'GRANT USAGE ON FOREIGN SERVER s1
+ create_sql => 'GRANT USAGE ON FOREIGN SERVER s1
TO regress_dump_test_role;',
regexp => qr/^
\QGRANT ALL ON FOREIGN SERVER s1 TO regress_dump_test_role;\E
@@ -4098,8 +4093,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1,
- only_dump_measurement => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
@@ -4114,8 +4109,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1,
- only_dump_measurement => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
@@ -4130,8 +4125,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1,
- only_dump_measurement => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
@@ -4146,8 +4141,8 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1,
- only_dump_measurement => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
@@ -4163,7 +4158,7 @@ my %tests = (
'GRANT SELECT ON TABLE test_table' => {
create_order => 5,
- create_sql => 'GRANT SELECT ON TABLE dump_test.test_table
+ create_sql => 'GRANT SELECT ON TABLE dump_test.test_table
TO regress_dump_test_role;',
regexp =>
qr/^\QGRANT SELECT ON TABLE dump_test.test_table TO regress_dump_test_role;\E/m,
@@ -4171,19 +4166,19 @@ my %tests = (
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- no_privs => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
'GRANT SELECT ON TABLE measurement' => {
create_order => 91,
- create_sql => 'GRANT SELECT ON
+ create_sql => 'GRANT SELECT ON
TABLE dump_test.measurement
TO regress_dump_test_role;',
regexp =>
@@ -4196,14 +4191,14 @@ my %tests = (
},
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1,
- exclude_measurement => 1,
+ no_privs => 1,
+ exclude_measurement => 1,
},
},
'GRANT SELECT ON TABLE measurement_y2006m2' => {
create_order => 94,
- create_sql => 'GRANT SELECT ON TABLE
+ create_sql => 'GRANT SELECT ON TABLE
dump_test_second_schema.measurement_y2006m2,
dump_test_second_schema.measurement_y2006m3,
dump_test_second_schema.measurement_y2006m4,
@@ -4213,19 +4208,19 @@ my %tests = (
qr/^\QGRANT SELECT ON TABLE dump_test_second_schema.measurement_y2006m2 TO regress_dump_test_role;\E/m,
like => {
%full_runs,
- role => 1,
+ role => 1,
section_pre_data => 1,
only_dump_measurement => 1,
},
unlike => {
no_privs => 1,
- exclude_measurement => 1,
+ exclude_measurement => 1,
},
},
'GRANT ALL ON LARGE OBJECT ...' => {
create_order => 60,
- create_sql => 'DO $$
+ create_sql => 'DO $$
DECLARE myoid oid;
BEGIN
SELECT loid FROM pg_largeobject INTO myoid;
@@ -4237,16 +4232,16 @@ my %tests = (
/xm,
like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- inserts => 1,
- section_pre_data => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_pre_data => 1,
test_schema_plus_large_objects => 1,
- binary_upgrade => 1,
+ binary_upgrade => 1,
},
unlike => {
no_large_objects => 1,
- no_privs => 1,
+ no_privs => 1,
schema_only => 1,
},
},
@@ -4263,14 +4258,14 @@ my %tests = (
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1,
- only_dump_measurement => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
'GRANT EXECUTE ON FUNCTION pg_sleep() TO regress_dump_test_role' => {
create_order => 16,
- create_sql => 'GRANT EXECUTE ON FUNCTION pg_sleep(float8)
+ create_sql => 'GRANT EXECUTE ON FUNCTION pg_sleep(float8)
TO regress_dump_test_role;',
regexp => qr/^
\QGRANT ALL ON FUNCTION pg_catalog.pg_sleep(double precision) TO regress_dump_test_role;\E
@@ -4281,7 +4276,7 @@ my %tests = (
'GRANT SELECT (proname ...) ON TABLE pg_proc TO public' => {
create_order => 46,
- create_sql => 'GRANT SELECT (
+ create_sql => 'GRANT SELECT (
tableoid,
oid,
proname,
@@ -4363,10 +4358,10 @@ my %tests = (
like =>
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -4379,10 +4374,10 @@ my %tests = (
like =>
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1,
- only_dump_measurement => 1,
+ schema_only => 1,
+ only_dump_measurement => 1,
},
},
@@ -4404,8 +4399,8 @@ my %tests = (
'REVOKE CONNECT ON DATABASE dump_test FROM public' => {
create_order => 49,
- create_sql => 'REVOKE CONNECT ON DATABASE dump_test FROM public;',
- regexp => qr/^
+ create_sql => 'REVOKE CONNECT ON DATABASE dump_test FROM public;',
+ regexp => qr/^
\QREVOKE CONNECT,TEMPORARY ON DATABASE dump_test FROM PUBLIC;\E\n
\QGRANT TEMPORARY ON DATABASE dump_test TO PUBLIC;\E\n
\QGRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;\E
@@ -4415,7 +4410,7 @@ my %tests = (
'REVOKE EXECUTE ON FUNCTION pg_sleep() FROM public' => {
create_order => 15,
- create_sql => 'REVOKE EXECUTE ON FUNCTION pg_sleep(float8)
+ create_sql => 'REVOKE EXECUTE ON FUNCTION pg_sleep(float8)
FROM public;',
regexp => qr/^
\QREVOKE ALL ON FUNCTION pg_catalog.pg_sleep(double precision) FROM PUBLIC;\E
@@ -4431,7 +4426,7 @@ my %tests = (
'REVOKE EXECUTE ON FUNCTION pg_stat_reset FROM regress_dump_test_role' =>
{
create_order => 15,
- create_sql => '
+ create_sql => '
ALTER FUNCTION pg_stat_reset OWNER TO regress_dump_test_role;
REVOKE EXECUTE ON FUNCTION pg_stat_reset
FROM regress_dump_test_role;',
@@ -4443,7 +4438,7 @@ my %tests = (
'REVOKE SELECT ON TABLE pg_proc FROM public' => {
create_order => 45,
- create_sql => 'REVOKE SELECT ON TABLE pg_proc FROM public;',
+ create_sql => 'REVOKE SELECT ON TABLE pg_proc FROM public;',
regexp =>
qr/^\QREVOKE SELECT ON TABLE pg_catalog.pg_proc FROM PUBLIC;\E/m,
like => { %full_runs, section_pre_data => 1, },
@@ -4462,14 +4457,14 @@ my %tests = (
'REVOKE USAGE ON LANGUAGE plpgsql FROM public' => {
create_order => 16,
- create_sql => 'REVOKE USAGE ON LANGUAGE plpgsql FROM public;',
- regexp => qr/^REVOKE ALL ON LANGUAGE plpgsql FROM PUBLIC;/m,
- like => {
+ create_sql => 'REVOKE USAGE ON LANGUAGE plpgsql FROM public;',
+ regexp => qr/^REVOKE ALL ON LANGUAGE plpgsql FROM PUBLIC;/m,
+ like => {
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- role => 1,
- section_pre_data => 1,
+ role => 1,
+ section_pre_data => 1,
only_dump_measurement => 1,
},
unlike => { no_privs => 1, },
@@ -4496,7 +4491,7 @@ my %tests = (
# pretty, but seems hard to do better in this framework.
'CREATE TABLE regress_pg_dump_table_am' => {
create_order => 12,
- create_sql => '
+ create_sql => '
CREATE TABLE dump_test.regress_pg_dump_table_am_0() USING heap;
CREATE TABLE dump_test.regress_pg_dump_table_am_1 (col1 int) USING regress_table_am;
CREATE TABLE dump_test.regress_pg_dump_table_am_2() USING heap;',
@@ -4512,13 +4507,13 @@ my %tests = (
unlike => {
exclude_dump_test_schema => 1,
no_table_access_method => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'CREATE MATERIALIZED VIEW regress_pg_dump_matview_am' => {
create_order => 13,
- create_sql => '
+ create_sql => '
CREATE MATERIALIZED VIEW dump_test.regress_pg_dump_matview_am_0 USING heap AS SELECT 1;
CREATE MATERIALIZED VIEW dump_test.regress_pg_dump_matview_am_1
USING regress_table_am AS SELECT count(*) FROM pg_class;
@@ -4536,7 +4531,7 @@ my %tests = (
unlike => {
exclude_dump_test_schema => 1,
no_table_access_method => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
});
@@ -4557,7 +4552,7 @@ $node->psql(
'postgres',
"CREATE COLLATION testing FROM \"C\"; DROP COLLATION testing;",
on_error_stop => 0,
- stderr => \$collation_check_stderr);
+ stderr => \$collation_check_stderr);
if ($collation_check_stderr !~ /ERROR: /)
{
@@ -4765,15 +4760,19 @@ command_fails_like(
foreach my $run (sort keys %pgdump_runs)
{
my $test_key = $run;
- my $run_db = 'postgres';
+ my $run_db = 'postgres';
# Skip command-level tests for gzip/lz4/zstd if the tool is not supported
- if ($pgdump_runs{$run}->{compile_option} &&
- (($pgdump_runs{$run}->{compile_option} eq 'gzip' && !$supports_gzip) ||
- ($pgdump_runs{$run}->{compile_option} eq 'lz4' && !$supports_lz4) ||
- ($pgdump_runs{$run}->{compile_option} eq 'zstd' && !$supports_zstd)))
+ if ($pgdump_runs{$run}->{compile_option}
+ && (($pgdump_runs{$run}->{compile_option} eq 'gzip'
+ && !$supports_gzip)
+ || ($pgdump_runs{$run}->{compile_option} eq 'lz4'
+ && !$supports_lz4)
+ || ($pgdump_runs{$run}->{compile_option} eq 'zstd'
+ && !$supports_zstd)))
{
- note "$run: skipped due to no $pgdump_runs{$run}->{compile_option} support";
+ note
+ "$run: skipped due to no $pgdump_runs{$run}->{compile_option} support";
next;
}
@@ -4800,16 +4799,18 @@ foreach my $run (sort keys %pgdump_runs)
foreach my $glob_pattern (@{$glob_patterns})
{
my @glob_output = glob($glob_pattern);
- is(scalar(@glob_output) > 0, 1, "$run: glob check for $glob_pattern");
+ is(scalar(@glob_output) > 0,
+ 1, "$run: glob check for $glob_pattern");
}
}
if ($pgdump_runs{$run}->{command_like})
{
my $cmd_like = $pgdump_runs{$run}->{command_like};
- $node->command_like(\@{ $cmd_like->{command} },
- $cmd_like->{expected},
- "$run: " . $cmd_like->{name})
+ $node->command_like(
+ \@{ $cmd_like->{command} },
+ $cmd_like->{expected},
+ "$run: " . $cmd_like->{name});
}
if ($pgdump_runs{$run}->{restore_cmd})
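Nearly every pg_dump test hunk above rewrites a line into itself with different column padding: the tokens are identical, and only the whitespace that once pushed the `=>` targets and `=` right-hand sides into a common column is gone. A minimal before/after sketch of the pattern, with invented keys rather than lines taken from the tree:

    # older layout: values padded into one column
    my %like = (
        clean_if_exists => 1,
        role            => 1,
    );

    # tidied layout: one space after each fat comma
    my %like = (
        clean_if_exists => 1,
        role => 1,
    );

The two-column command arrays in the parallel dump/restore tests below get the same treatment: the word order is untouched and only the inter-column padding shrinks.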
diff --git a/src/bin/pg_dump/t/004_pg_dump_parallel.pl b/src/bin/pg_dump/t/004_pg_dump_parallel.pl
index f41c2fa223..c4b461ed87 100644
--- a/src/bin/pg_dump/t/004_pg_dump_parallel.pl
+++ b/src/bin/pg_dump/t/004_pg_dump_parallel.pl
@@ -56,16 +56,16 @@ $node->command_ok(
$node->command_ok(
[
'pg_restore', '-v',
- '-d', $node->connstr($dbname2),
- '-j3', "$backupdir/dump1"
+ '-d', $node->connstr($dbname2),
+ '-j3', "$backupdir/dump1"
],
'parallel restore');
$node->command_ok(
[
- 'pg_dump', '-Fd',
+ 'pg_dump', '-Fd',
'--no-sync', '-j2',
- '-f', "$backupdir/dump2",
+ '-f', "$backupdir/dump2",
'--inserts', $node->connstr($dbname1)
],
'parallel dump as inserts');
@@ -73,8 +73,8 @@ $node->command_ok(
$node->command_ok(
[
'pg_restore', '-v',
- '-d', $node->connstr($dbname3),
- '-j3', "$backupdir/dump2"
+ '-d', $node->connstr($dbname3),
+ '-j3', "$backupdir/dump2"
],
'parallel restore as inserts');
diff --git a/src/bin/pg_dump/t/010_dump_connstr.pl b/src/bin/pg_dump/t/010_dump_connstr.pl
index de55564555..ed86c332ef 100644
--- a/src/bin/pg_dump/t/010_dump_connstr.pl
+++ b/src/bin/pg_dump/t/010_dump_connstr.pl
@@ -15,7 +15,7 @@ if ($PostgreSQL::Test::Utils::is_msys2)
# We're going to use byte sequences that aren't valid UTF-8 strings. Use
# LATIN1, which accepts any byte and has a conversion from each byte to UTF-8.
-$ENV{LC_ALL} = 'C';
+$ENV{LC_ALL} = 'C';
$ENV{PGCLIENTENCODING} = 'LATIN1';
# Create database and user names covering the range of LATIN1
@@ -26,8 +26,8 @@ $ENV{PGCLIENTENCODING} = 'LATIN1';
# The odds of finding something interesting by testing all ASCII letters
# seem too small to justify the cycles of testing a fifth name.
my $dbname1 =
- 'regression'
- . generate_ascii_string(1, 9)
+ 'regression'
+ . generate_ascii_string(1, 9)
. generate_ascii_string(11, 12)
. generate_ascii_string(14, 33)
. (
@@ -37,7 +37,7 @@ my $dbname1 =
. generate_ascii_string(35, 43) # skip ','
. generate_ascii_string(45, 54);
my $dbname2 = 'regression' . generate_ascii_string(55, 65) # skip 'B'-'W'
- . generate_ascii_string(88, 99) # skip 'd'-'w'
+ . generate_ascii_string(88, 99) # skip 'd'-'w'
. generate_ascii_string(120, 149);
my $dbname3 = 'regression' . generate_ascii_string(150, 202);
my $dbname4 = 'regression' . generate_ascii_string(203, 255);
@@ -57,17 +57,17 @@ $node->init(extra =>
# prep pg_hba.conf and pg_ident.conf
$node->run_log(
[
- $ENV{PG_REGRESS}, '--config-auth',
- $node->data_dir, '--user',
+ $ENV{PG_REGRESS}, '--config-auth',
+ $node->data_dir, '--user',
$src_bootstrap_super, '--create-role',
"$username1,$username2,$username3,$username4"
]);
$node->start;
my $backupdir = $node->backup_dir;
-my $discard = "$backupdir/discard.sql";
-my $plain = "$backupdir/plain.sql";
-my $dirfmt = "$backupdir/dirfmt";
+my $discard = "$backupdir/discard.sql";
+my $plain = "$backupdir/plain.sql";
+my $dirfmt = "$backupdir/dirfmt";
$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]);
$node->run_log(
@@ -115,9 +115,9 @@ $node->command_ok(
'pg_dumpall with long ASCII name 4');
$node->command_ok(
[
- 'pg_dumpall', '-U',
+ 'pg_dumpall', '-U',
$src_bootstrap_super, '--no-sync',
- '-r', '-l',
+ '-r', '-l',
'dbname=template1'
],
'pg_dumpall -l accepts connection string');
@@ -146,13 +146,13 @@ $node->command_ok(
'parallel dump');
# recreate $dbname1 for restore test
-$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]);
+$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]);
$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]);
$node->command_ok(
[
- 'pg_restore', '-v', '-d', 'template1',
- '-j2', '-U', $username1, $dirfmt
+ 'pg_restore', '-v', '-d', 'template1',
+ '-j2', '-U', $username1, $dirfmt
],
'parallel restore');
@@ -160,8 +160,8 @@ $node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]);
$node->command_ok(
[
- 'pg_restore', '-C', '-v', '-d',
- 'template1', '-j2', '-U', $username1,
+ 'pg_restore', '-C', '-v', '-d',
+ 'template1', '-j2', '-U', $username1,
$dirfmt
],
'parallel restore with create');
@@ -220,8 +220,8 @@ $cmdline_node->run_log(
{
$result = run_log(
[
- 'psql', '-p', $cmdline_node->port, '-U',
- $restore_super, '-X', '-f', $plain
+ 'psql', '-p', $cmdline_node->port, '-U',
+ $restore_super, '-X', '-f', $plain
],
'2>',
\$stderr);
diff --git a/src/bin/pg_resetwal/t/002_corrupted.pl b/src/bin/pg_resetwal/t/002_corrupted.pl
index 3dd2a4e89f..6d19a1efd5 100644
--- a/src/bin/pg_resetwal/t/002_corrupted.pl
+++ b/src/bin/pg_resetwal/t/002_corrupted.pl
@@ -14,7 +14,7 @@ my $node = PostgreSQL::Test::Cluster->new('main');
$node->init;
my $pg_control = $node->data_dir . '/global/pg_control';
-my $size = (stat($pg_control))[7];
+my $size = (stat($pg_control))[7];
# Read out the head of the file to get PG_CONTROL_VERSION in
# particular.
diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl
index 63490360e5..031594e14e 100644
--- a/src/bin/pg_rewind/t/001_basic.pl
+++ b/src/bin/pg_rewind/t/001_basic.pl
@@ -92,7 +92,7 @@ sub run_test
# step.
command_fails(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync'
@@ -104,10 +104,10 @@ sub run_test
# recovery once.
command_fails(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $primary_pgdata,
- '--no-sync', '--no-ensure-shutdown'
+ '--no-sync', '--no-ensure-shutdown'
],
'pg_rewind --no-ensure-shutdown with running target');
@@ -117,10 +117,10 @@ sub run_test
$node_primary->stop;
command_fails(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $primary_pgdata,
- '--no-sync', '--no-ensure-shutdown'
+ '--no-sync', '--no-ensure-shutdown'
],
'pg_rewind with unexpected running source');
@@ -131,10 +131,10 @@ sub run_test
$node_standby->stop;
command_ok(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $primary_pgdata,
- '--no-sync', '--dry-run'
+ '--no-sync', '--dry-run'
],
'pg_rewind --dry-run');
diff --git a/src/bin/pg_rewind/t/006_options.pl b/src/bin/pg_rewind/t/006_options.pl
index 2d0c5e2f8b..4b6e39a47c 100644
--- a/src/bin/pg_rewind/t/006_options.pl
+++ b/src/bin/pg_rewind/t/006_options.pl
@@ -17,7 +17,7 @@ my $primary_pgdata = PostgreSQL::Test::Utils::tempdir;
my $standby_pgdata = PostgreSQL::Test::Utils::tempdir;
command_fails(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--target-pgdata', $primary_pgdata,
'--source-pgdata', $standby_pgdata,
'extra_arg1'
@@ -27,7 +27,7 @@ command_fails([ 'pg_rewind', '--target-pgdata', $primary_pgdata ],
'no source specified');
command_fails(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--target-pgdata', $primary_pgdata,
'--source-pgdata', $standby_pgdata,
'--source-server', 'incorrect_source'
@@ -35,7 +35,7 @@ command_fails(
'both remote and local sources specified');
command_fails(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--target-pgdata', $primary_pgdata,
'--source-pgdata', $standby_pgdata,
'--write-recovery-conf'
diff --git a/src/bin/pg_rewind/t/007_standby_source.pl b/src/bin/pg_rewind/t/007_standby_source.pl
index 3f813929a6..4fd1ed001c 100644
--- a/src/bin/pg_rewind/t/007_standby_source.pl
+++ b/src/bin/pg_rewind/t/007_standby_source.pl
@@ -124,8 +124,8 @@ copy(
# recovery configuration automatically.
command_ok(
[
- 'pg_rewind', "--debug",
- "--source-server", $node_b->connstr('postgres'),
+ 'pg_rewind', "--debug",
+ "--source-server", $node_b->connstr('postgres'),
"--target-pgdata=$node_c_pgdata", "--no-sync",
"--write-recovery-conf"
],
diff --git a/src/bin/pg_rewind/t/008_min_recovery_point.pl b/src/bin/pg_rewind/t/008_min_recovery_point.pl
index c753a64fdb..d4c89451e6 100644
--- a/src/bin/pg_rewind/t/008_min_recovery_point.pl
+++ b/src/bin/pg_rewind/t/008_min_recovery_point.pl
@@ -132,7 +132,7 @@ $node_2->poll_query_until('postgres',
$node_2->stop('fast');
$node_3->stop('fast');
-my $node_2_pgdata = $node_2->data_dir;
+my $node_2_pgdata = $node_2->data_dir;
my $node_1_connstr = $node_1->connstr;
# Keep a temporary postgresql.conf or it would be overwritten during the rewind.
@@ -142,7 +142,7 @@ copy(
command_ok(
[
- 'pg_rewind', "--source-server=$node_1_connstr",
+ 'pg_rewind', "--source-server=$node_1_connstr",
"--target-pgdata=$node_2_pgdata", "--debug"
],
'run pg_rewind');
diff --git a/src/bin/pg_rewind/t/009_growing_files.pl b/src/bin/pg_rewind/t/009_growing_files.pl
index ed89aba35f..cf60a04ae7 100644
--- a/src/bin/pg_rewind/t/009_growing_files.pl
+++ b/src/bin/pg_rewind/t/009_growing_files.pl
@@ -51,7 +51,7 @@ append_to_file "$standby_pgdata/tst_both_dir/file1", 'a';
# copy operation and the result will be an error.
my $ret = run_log(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync',
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index 373f6dfbf7..4957791e94 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -38,7 +38,7 @@ use Carp;
use Exporter 'import';
use File::Copy;
use File::Path qw(rmtree);
-use IPC::Run qw(run);
+use IPC::Run qw(run);
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::RecursiveCopy;
use PostgreSQL::Test::Utils;
@@ -101,8 +101,8 @@ sub check_query
],
'>', \$stdout, '2>', \$stderr;
- is($result, 1, "$test_name: psql exit code");
- is($stderr, '', "$test_name: psql no stderr");
+ is($result, 1, "$test_name: psql exit code");
+ is($stderr, '', "$test_name: psql no stderr");
is($stdout, $expected_stdout, "$test_name: query result matches");
return;
@@ -111,7 +111,7 @@ sub check_query
sub setup_cluster
{
my $extra_name = shift; # Used to differentiate clusters
- my $extra = shift; # Extra params for initdb
+ my $extra = shift; # Extra params for initdb
# Initialize primary, data checksums are mandatory
$node_primary =
@@ -123,8 +123,8 @@ sub setup_cluster
# minimal permissions enough to rewind from an online source.
$node_primary->init(
allows_streaming => 1,
- extra => $extra,
- auth_extra => [ '--create-role', 'rewind_user' ]);
+ extra => $extra,
+ auth_extra => [ '--create-role', 'rewind_user' ]);
# Set wal_keep_size to prevent WAL segment recycling after enforced
# checkpoints in the tests.
@@ -203,11 +203,11 @@ sub promote_standby
sub run_pg_rewind
{
- my $test_mode = shift;
- my $primary_pgdata = $node_primary->data_dir;
- my $standby_pgdata = $node_standby->data_dir;
+ my $test_mode = shift;
+ my $primary_pgdata = $node_primary->data_dir;
+ my $standby_pgdata = $node_standby->data_dir;
my $standby_connstr = $node_standby->connstr('postgres');
- my $tmp_folder = PostgreSQL::Test::Utils::tempdir;
+ my $tmp_folder = PostgreSQL::Test::Utils::tempdir;
# Append the rewind-specific role to the connection string.
$standby_connstr = "$standby_connstr user=rewind_user";
@@ -269,10 +269,10 @@ sub run_pg_rewind
# recovery configuration automatically.
command_ok(
[
- 'pg_rewind', "--debug",
- "--source-server", $standby_connstr,
+ 'pg_rewind', "--debug",
+ "--source-server", $standby_connstr,
"--target-pgdata=$primary_pgdata", "--no-sync",
- "--write-recovery-conf", "--config-file",
+ "--write-recovery-conf", "--config-file",
"$tmp_folder/primary-postgresql.conf.tmp"
],
'pg_rewind remote');
diff --git a/src/bin/pg_test_fsync/pg_test_fsync.c b/src/bin/pg_test_fsync/pg_test_fsync.c
index 3d5e8f30ab..435df8d808 100644
--- a/src/bin/pg_test_fsync/pg_test_fsync.c
+++ b/src/bin/pg_test_fsync/pg_test_fsync.c
@@ -623,7 +623,7 @@ static void
print_elapse(struct timeval start_t, struct timeval stop_t, int ops)
{
double total_time = (stop_t.tv_sec - start_t.tv_sec) +
- (stop_t.tv_usec - start_t.tv_usec) * 0.000001;
+ (stop_t.tv_usec - start_t.tv_usec) * 0.000001;
double per_second = ops / total_time;
double avg_op_time_us = (total_time / ops) * USECS_SEC;
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 1ff68c5cc6..64024e3b9e 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -105,8 +105,8 @@ check_and_dump_old_cluster(bool live_check)
check_for_isn_and_int8_passing_mismatch(&old_cluster);
/*
- * PG 16 increased the size of the 'aclitem' type, which breaks the on-disk
- * format for existing data.
+ * PG 16 increased the size of the 'aclitem' type, which breaks the
+ * on-disk format for existing data.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1500)
check_for_aclitem_data_type_usage(&old_cluster);
diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c
index 85ed15ae4a..a9988abfe1 100644
--- a/src/bin/pg_upgrade/info.c
+++ b/src/bin/pg_upgrade/info.c
@@ -61,9 +61,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
new_relnum < new_db->rel_arr.nrels)
{
RelInfo *old_rel = (old_relnum < old_db->rel_arr.nrels) ?
- &old_db->rel_arr.rels[old_relnum] : NULL;
+ &old_db->rel_arr.rels[old_relnum] : NULL;
RelInfo *new_rel = (new_relnum < new_db->rel_arr.nrels) ?
- &new_db->rel_arr.rels[new_relnum] : NULL;
+ &new_db->rel_arr.rels[new_relnum] : NULL;
/* handle running off one array before the other */
if (!new_rel)
@@ -302,14 +302,14 @@ get_db_and_rel_infos(ClusterInfo *cluster)
static void
get_template0_info(ClusterInfo *cluster)
{
- PGconn *conn = connectToServer(cluster, "template1");
- DbLocaleInfo *locale;
- PGresult *dbres;
- int i_datencoding;
- int i_datlocprovider;
- int i_datcollate;
- int i_datctype;
- int i_daticulocale;
+ PGconn *conn = connectToServer(cluster, "template1");
+ DbLocaleInfo *locale;
+ PGresult *dbres;
+ int i_datencoding;
+ int i_datlocprovider;
+ int i_datcollate;
+ int i_datctype;
+ int i_daticulocale;
if (GET_MAJOR_VERSION(cluster->major_version) >= 1500)
dbres = executeQueryOrDie(conn,
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 75bab0a04c..4562dafcff 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -379,10 +379,10 @@ setup(char *argv0, bool *live_check)
static void
set_locale_and_encoding(void)
{
- PGconn *conn_new_template1;
- char *datcollate_literal;
- char *datctype_literal;
- char *daticulocale_literal = NULL;
+ PGconn *conn_new_template1;
+ char *datcollate_literal;
+ char *datctype_literal;
+ char *daticulocale_literal = NULL;
DbLocaleInfo *locale = old_cluster.template0;
prep_status("Setting locale and encoding for new cluster");
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 4a7895a756..41fce089d6 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -4,7 +4,7 @@
use strict;
use warnings;
-use Cwd qw(abs_path);
+use Cwd qw(abs_path);
use File::Basename qw(dirname);
use File::Compare;
use File::Find qw(find);
@@ -81,7 +81,7 @@ if ( (defined($ENV{olddump}) && !defined($ENV{oldinstall}))
}
# Paths to the dumps taken during the tests.
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
my $dump1_file = "$tempdir/dump1.sql";
my $dump2_file = "$tempdir/dump2.sql";
@@ -108,7 +108,7 @@ if ($oldnode->pg_version >= 11)
# can test that pg_upgrade copies the locale settings of template0
# from the old to the new cluster.
-my $original_encoding = "6"; # UTF-8
+my $original_encoding = "6"; # UTF-8
my $original_provider = "c";
my $original_locale = "C";
my $original_iculocale = "";
@@ -138,11 +138,12 @@ $oldnode->start;
my $result;
$result = $oldnode->safe_psql(
- 'postgres', "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
+ 'postgres',
+ "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
FROM pg_database WHERE datname='template0'");
-is($result, "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
- "check locales in original cluster"
- );
+is( $result,
+ "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
+ "check locales in original cluster");
# The default location of the source code is the root of this directory.
my $srcdir = abs_path("../../..");
@@ -166,9 +167,9 @@ else
# Create databases with names covering most ASCII bytes. The
# first name exercises backslashes adjacent to double quotes, a
# Windows special case.
- generate_db($oldnode, 'regression\\"\\', 1, 45, '\\\\"\\\\\\');
- generate_db($oldnode, 'regression', 46, 90, '');
- generate_db($oldnode, 'regression', 91, 127, '');
+ generate_db($oldnode, 'regression\\"\\', 1, 45, '\\\\"\\\\\\');
+ generate_db($oldnode, 'regression', 46, 90, '');
+ generate_db($oldnode, 'regression', 91, 127, '');
# Grab any regression options that may be passed down by caller.
my $extra_opts = $ENV{EXTRA_REGRESS_OPTS} || "";
@@ -251,9 +252,9 @@ if (defined($ENV{oldinstall}))
$newnode->command_ok(
[
'psql', '-X',
- '-v', 'ON_ERROR_STOP=1',
- '-c', $upcmds,
- '-d', $oldnode->connstr($updb),
+ '-v', 'ON_ERROR_STOP=1',
+ '-c', $upcmds,
+ '-d', $oldnode->connstr($updb),
],
"ran version adaptation commands for database $updb");
}
@@ -263,7 +264,7 @@ if (defined($ENV{oldinstall}))
# that we need to use pg_dumpall from the new node here.
my @dump_command = (
'pg_dumpall', '--no-sync', '-d', $oldnode->connstr('postgres'),
- '-f', $dump1_file);
+ '-f', $dump1_file);
# --extra-float-digits is needed when upgrading from a version older than 11.
push(@dump_command, '--extra-float-digits', '0')
if ($oldnode->pg_version < 12);
@@ -330,15 +331,14 @@ $oldnode->stop;
command_fails(
[
'pg_upgrade', '--no-sync',
- '-d', $oldnode->data_dir,
- '-D', $newnode->data_dir,
- '-b', $oldbindir . '/does/not/exist/',
- '-B', $newbindir,
- '-s', $newnode->host,
- '-p', $oldnode->port,
- '-P', $newnode->port,
- $mode,
- '--check',
+ '-d', $oldnode->data_dir,
+ '-D', $newnode->data_dir,
+ '-b', $oldbindir . '/does/not/exist/',
+ '-B', $newbindir,
+ '-s', $newnode->host,
+ '-p', $oldnode->port,
+ '-P', $newnode->port,
+ $mode, '--check',
],
'run of pg_upgrade --check for new instance with incorrect binary path');
ok(-d $newnode->data_dir . "/pg_upgrade_output.d",
@@ -348,12 +348,11 @@ rmtree($newnode->data_dir . "/pg_upgrade_output.d");
# --check command works here, cleans up pg_upgrade_output.d.
command_ok(
[
- 'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
- '-D', $newnode->data_dir, '-b', $oldbindir,
- '-B', $newbindir, '-s', $newnode->host,
- '-p', $oldnode->port, '-P', $newnode->port,
- $mode,
- '--check',
+ 'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
+ '-D', $newnode->data_dir, '-b', $oldbindir,
+ '-B', $newbindir, '-s', $newnode->host,
+ '-p', $oldnode->port, '-P', $newnode->port,
+ $mode, '--check',
],
'run of pg_upgrade --check for new instance');
ok(!-d $newnode->data_dir . "/pg_upgrade_output.d",
@@ -362,10 +361,10 @@ ok(!-d $newnode->data_dir . "/pg_upgrade_output.d",
# Actual run, pg_upgrade_output.d is removed at the end.
command_ok(
[
- 'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
- '-D', $newnode->data_dir, '-b', $oldbindir,
- '-B', $newbindir, '-s', $newnode->host,
- '-p', $oldnode->port, '-P', $newnode->port,
+ 'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
+ '-D', $newnode->data_dir, '-b', $oldbindir,
+ '-B', $newbindir, '-s', $newnode->host,
+ '-p', $oldnode->port, '-P', $newnode->port,
$mode,
],
'run of pg_upgrade for new instance');
@@ -396,16 +395,17 @@ if (-d $log_path)
# Test that upgraded cluster has original locale settings.
$result = $newnode->safe_psql(
- 'postgres', "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
+ 'postgres',
+ "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
FROM pg_database WHERE datname='template0'");
-is($result, "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
- "check that locales in new cluster match original cluster"
- );
+is( $result,
+ "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
+ "check that locales in new cluster match original cluster");
# Second dump from the upgraded instance.
@dump_command = (
'pg_dumpall', '--no-sync', '-d', $newnode->connstr('postgres'),
- '-f', $dump2_file);
+ '-f', $dump2_file);
# --extra-float-digits is needed when upgrading from a version older than 11.
push(@dump_command, '--extra-float-digits', '0')
if ($oldnode->pg_version < 12);
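Beyond the padding changes, the 002_pg_upgrade.pl hunks show how the retidied code breaks open a call whose arguments no longer fit on one line: the opening parenthesis stays on the call line and, as the `is( $result,` lines above suggest, keeps a space after it. A small shape sketch, with placeholder variables and a made-up test label rather than names from the test (Test::More's is() assumed in scope):

    # single line while the arguments fit
    is($result, $expected, 'locale check');

    # broken open once they grow; note the space after 'is('
    is( $result,
        "$enc|$provider|$collate|$ctype|$iculocale",
        'locale check');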
diff --git a/src/bin/pg_verifybackup/t/002_algorithm.pl b/src/bin/pg_verifybackup/t/002_algorithm.pl
index 87b8803a33..5b02ea4d55 100644
--- a/src/bin/pg_verifybackup/t/002_algorithm.pl
+++ b/src/bin/pg_verifybackup/t/002_algorithm.pl
@@ -17,7 +17,7 @@ $primary->start;
for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512))
{
my $backup_path = $primary->backup_dir . '/' . $algorithm;
- my @backup = (
+ my @backup = (
'pg_basebackup', '-D', $backup_path,
'--manifest-checksums', $algorithm, '--no-sync', '-cfast');
my @verify = ('pg_verifybackup', '-e', $backup_path);
diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl
index 0c304105c5..4cc3dd05e3 100644
--- a/src/bin/pg_verifybackup/t/003_corruption.pl
+++ b/src/bin/pg_verifybackup/t/003_corruption.pl
@@ -16,7 +16,7 @@ $primary->start;
# Include a user-defined tablespace in the hopes of detecting problems in that
# area.
-my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
+my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
my $source_ts_prefix = $source_ts_path;
$source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
@@ -30,67 +30,67 @@ EOM
my @scenario = (
{
- 'name' => 'extra_file',
+ 'name' => 'extra_file',
'mutilate' => \&mutilate_extra_file,
'fails_like' =>
qr/extra_file.*present on disk but not in the manifest/
},
{
- 'name' => 'extra_tablespace_file',
+ 'name' => 'extra_tablespace_file',
'mutilate' => \&mutilate_extra_tablespace_file,
'fails_like' =>
qr/extra_ts_file.*present on disk but not in the manifest/
},
{
- 'name' => 'missing_file',
+ 'name' => 'missing_file',
'mutilate' => \&mutilate_missing_file,
'fails_like' =>
qr/pg_xact\/0000.*present in the manifest but not on disk/
},
{
- 'name' => 'missing_tablespace',
+ 'name' => 'missing_tablespace',
'mutilate' => \&mutilate_missing_tablespace,
'fails_like' =>
qr/pg_tblspc.*present in the manifest but not on disk/
},
{
- 'name' => 'append_to_file',
- 'mutilate' => \&mutilate_append_to_file,
+ 'name' => 'append_to_file',
+ 'mutilate' => \&mutilate_append_to_file,
'fails_like' => qr/has size \d+ on disk but size \d+ in the manifest/
},
{
- 'name' => 'truncate_file',
- 'mutilate' => \&mutilate_truncate_file,
+ 'name' => 'truncate_file',
+ 'mutilate' => \&mutilate_truncate_file,
'fails_like' => qr/has size 0 on disk but size \d+ in the manifest/
},
{
- 'name' => 'replace_file',
- 'mutilate' => \&mutilate_replace_file,
+ 'name' => 'replace_file',
+ 'mutilate' => \&mutilate_replace_file,
'fails_like' => qr/checksum mismatch for file/
},
{
- 'name' => 'bad_manifest',
- 'mutilate' => \&mutilate_bad_manifest,
+ 'name' => 'bad_manifest',
+ 'mutilate' => \&mutilate_bad_manifest,
'fails_like' => qr/manifest checksum mismatch/
},
{
- 'name' => 'open_file_fails',
- 'mutilate' => \&mutilate_open_file_fails,
- 'fails_like' => qr/could not open file/,
+ 'name' => 'open_file_fails',
+ 'mutilate' => \&mutilate_open_file_fails,
+ 'fails_like' => qr/could not open file/,
'skip_on_windows' => 1
},
{
- 'name' => 'open_directory_fails',
- 'mutilate' => \&mutilate_open_directory_fails,
- 'cleanup' => \&cleanup_open_directory_fails,
- 'fails_like' => qr/could not open directory/,
+ 'name' => 'open_directory_fails',
+ 'mutilate' => \&mutilate_open_directory_fails,
+ 'cleanup' => \&cleanup_open_directory_fails,
+ 'fails_like' => qr/could not open directory/,
'skip_on_windows' => 1
},
{
- 'name' => 'search_directory_fails',
- 'mutilate' => \&mutilate_search_directory_fails,
- 'cleanup' => \&cleanup_search_directory_fails,
- 'fails_like' => qr/could not stat file or directory/,
+ 'name' => 'search_directory_fails',
+ 'mutilate' => \&mutilate_search_directory_fails,
+ 'cleanup' => \&cleanup_search_directory_fails,
+ 'fails_like' => qr/could not stat file or directory/,
'skip_on_windows' => 1
});
@@ -104,7 +104,7 @@ for my $scenario (@scenario)
if $scenario->{'skip_on_windows'} && $windows_os;
# Take a backup and check that it verifies OK.
- my $backup_path = $primary->backup_dir . '/' . $name;
+ my $backup_path = $primary->backup_dir . '/' . $name;
my $backup_ts_path = PostgreSQL::Test::Utils::tempdir_short();
# The tablespace map parameter confuses Msys2, which tries to mangle
# it. Tell it not to.
@@ -228,8 +228,8 @@ sub mutilate_truncate_file
sub mutilate_replace_file
{
my ($backup_path) = @_;
- my $pathname = "$backup_path/PG_VERSION";
- my $contents = slurp_file($pathname);
+ my $pathname = "$backup_path/PG_VERSION";
+ my $contents = slurp_file($pathname);
open(my $fh, '>', $pathname) || die "open $pathname: $!";
print $fh 'q' x length($contents);
close($fh);
diff --git a/src/bin/pg_verifybackup/t/004_options.pl b/src/bin/pg_verifybackup/t/004_options.pl
index 591a6b36be..2aa8352f00 100644
--- a/src/bin/pg_verifybackup/t/004_options.pl
+++ b/src/bin/pg_verifybackup/t/004_options.pl
@@ -108,7 +108,7 @@ unlike(
# Test valid manifest with nonexistent backup directory.
command_fails_like(
[
- 'pg_verifybackup', '-m',
+ 'pg_verifybackup', '-m',
"$backup_path/backup_manifest", "$backup_path/fake"
],
qr/could not open directory/,
diff --git a/src/bin/pg_verifybackup/t/006_encoding.pl b/src/bin/pg_verifybackup/t/006_encoding.pl
index 4cbd1a6051..0b37bda20c 100644
--- a/src/bin/pg_verifybackup/t/006_encoding.pl
+++ b/src/bin/pg_verifybackup/t/006_encoding.pl
@@ -16,8 +16,8 @@ my $backup_path = $primary->backup_dir . '/test_encoding';
$primary->command_ok(
[
'pg_basebackup', '-D',
- $backup_path, '--no-sync',
- '-cfast', '--manifest-force-encode'
+ $backup_path, '--no-sync',
+ '-cfast', '--manifest-force-encode'
],
"backup ok with forced hex encoding");
diff --git a/src/bin/pg_verifybackup/t/007_wal.pl b/src/bin/pg_verifybackup/t/007_wal.pl
index 34ca877d10..89f96f85db 100644
--- a/src/bin/pg_verifybackup/t/007_wal.pl
+++ b/src/bin/pg_verifybackup/t/007_wal.pl
@@ -19,7 +19,7 @@ $primary->command_ok(
"base backup ok");
# Rename pg_wal.
-my $original_pg_wal = $backup_path . '/pg_wal';
+my $original_pg_wal = $backup_path . '/pg_wal';
my $relocated_pg_wal = $primary->backup_dir . '/relocated_pg_wal';
rename($original_pg_wal, $relocated_pg_wal) || die "rename pg_wal: $!";
@@ -46,7 +46,7 @@ my @walfiles = grep { /^[0-9A-F]{24}$/ } slurp_dir($original_pg_wal);
# Replace the contents of one of the files with garbage of equal length.
my $wal_corruption_target = $original_pg_wal . '/' . $walfiles[0];
-my $wal_size = -s $wal_corruption_target;
+my $wal_size = -s $wal_corruption_target;
open(my $fh, '>', $wal_corruption_target)
|| die "open $wal_corruption_target: $!";
print $fh 'w' x $wal_size;
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index 05754bc8ec..1a783d1188 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -16,47 +16,47 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
-my $backup_path = $primary->backup_dir . '/server-backup';
+my $backup_path = $primary->backup_dir . '/server-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'backup_archive' => 'base.tar',
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'backup_archive' => 'base.tar',
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => [ '--compress', 'server-gzip' ],
- 'backup_archive' => 'base.tar.gz',
+ 'backup_flags' => [ '--compress', 'server-gzip' ],
+ 'backup_archive' => 'base.tar.gz',
'decompress_program' => $ENV{'GZIP_PROGRAM'},
- 'decompress_flags' => ['-d'],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => [ '--compress', 'server-lz4' ],
- 'backup_archive' => 'base.tar.lz4',
+ 'backup_flags' => [ '--compress', 'server-lz4' ],
+ 'backup_archive' => 'base.tar.lz4',
'decompress_program' => $ENV{'LZ4'},
- 'decompress_flags' => [ '-d', '-m' ],
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'decompress_flags' => [ '-d', '-m' ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => [ '--compress', 'server-zstd' ],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'server-zstd' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => ['-d'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => [ '--compress', 'server-zstd:level=1,long' ],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'server-zstd:level=1,long' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => ['-d'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
});
for my $tc (@test_configuration)
@@ -74,8 +74,8 @@ for my $tc (@test_configuration)
# Take a server-side backup.
my @backup = (
- 'pg_basebackup', '--no-sync',
- '-cfast', '--target',
+ 'pg_basebackup', '--no-sync',
+ '-cfast', '--target',
"server:$backup_path", '-Xfetch');
push @backup, @{ $tc->{'backup_flags'} };
$primary->command_ok(\@backup,
diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl
index d26064b002..f4d5378555 100644
--- a/src/bin/pg_verifybackup/t/009_extract.pl
+++ b/src/bin/pg_verifybackup/t/009_extract.pl
@@ -17,28 +17,28 @@ $primary->start;
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => [ '--compress', 'server-gzip:5' ],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'backup_flags' => [ '--compress', 'server-gzip:5' ],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => [ '--compress', 'server-lz4:5' ],
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'backup_flags' => [ '--compress', 'server-lz4:5' ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => [ '--compress', 'server-zstd:5' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'backup_flags' => [ '--compress', 'server-zstd:5' ],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'parallel zstd',
- 'backup_flags' => [ '--compress', 'server-zstd:workers=3' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1"),
+ 'backup_flags' => [ '--compress', 'server-zstd:workers=3' ],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1"),
'possibly_unsupported' =>
qr/could not set compression worker count to 3: Unsupported parameter/
});
@@ -46,7 +46,7 @@ my @test_configuration = (
for my $tc (@test_configuration)
{
my $backup_path = $primary->backup_dir . '/' . 'extract_backup';
- my $method = $tc->{'compression_method'};
+ my $method = $tc->{'compression_method'};
SKIP:
{
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index ac51a174d1..44d83e777f 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -15,56 +15,56 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
-my $backup_path = $primary->backup_dir . '/client-backup';
+my $backup_path = $primary->backup_dir . '/client-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'backup_archive' => 'base.tar',
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'backup_archive' => 'base.tar',
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => [ '--compress', 'client-gzip:5' ],
- 'backup_archive' => 'base.tar.gz',
+ 'backup_flags' => [ '--compress', 'client-gzip:5' ],
+ 'backup_archive' => 'base.tar.gz',
'decompress_program' => $ENV{'GZIP_PROGRAM'},
- 'decompress_flags' => ['-d'],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => [ '--compress', 'client-lz4:5' ],
- 'backup_archive' => 'base.tar.lz4',
+ 'backup_flags' => [ '--compress', 'client-lz4:5' ],
+ 'backup_archive' => 'base.tar.lz4',
'decompress_program' => $ENV{'LZ4'},
- 'decompress_flags' => ['-d'],
- 'output_file' => 'base.tar',
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'decompress_flags' => ['-d'],
+ 'output_file' => 'base.tar',
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => [ '--compress', 'client-zstd:5' ],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'client-zstd:5' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => ['-d'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'client-zstd:level=1,long'],
+ 'backup_flags' => [ '--compress', 'client-zstd:level=1,long' ],
'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
+ 'decompress_flags' => ['-d'],
'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'parallel zstd',
- 'backup_flags' => [ '--compress', 'client-zstd:workers=3' ],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'client-zstd:workers=3' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => ['-d'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1"),
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1"),
'possibly_unsupported' =>
qr/could not set compression worker count to 3: Unsupported parameter/
});
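One hunk in 010_client_untar.pl is about bracket spacing rather than column padding: the tidied layout puts spaces just inside the delimiters of an anonymous array holding more than one element and drops them for a single element, so `['--compress', 'client-zstd:level=1,long']` becomes `[ '--compress', 'client-zstd:level=1,long' ]` while `[ '-d' ]` becomes `['-d']`. A sketch of the apparent rule on throwaway data:

    my $multi = [ 'alpha', 'beta' ];   # inner spaces for two or more elements
    my $single = ['alpha'];   # no inner spaces for a single element
    my $empty = [];   # and none for an empty list

This matches the spacing already used by the unchanged multi-element arrays elsewhere in these tests.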
diff --git a/src/bin/pg_waldump/t/002_save_fullpage.pl b/src/bin/pg_waldump/t/002_save_fullpage.pl
index 18a89a26f8..831ffdefef 100644
--- a/src/bin/pg_waldump/t/002_save_fullpage.pl
+++ b/src/bin/pg_waldump/t/002_save_fullpage.pl
@@ -14,7 +14,7 @@ my ($blocksize, $walfile_name);
# Function to extract the LSN from the given block structure
sub get_block_lsn
{
- my $path = shift;
+ my $path = shift;
my $blocksize = shift;
my $block;
@@ -64,16 +64,16 @@ my $relation = $node->safe_psql(
datname = current_database()}
);
-my $walfile = $node->data_dir . '/pg_wal/' . $walfile_name;
+my $walfile = $node->data_dir . '/pg_wal/' . $walfile_name;
my $tmp_folder = PostgreSQL::Test::Utils::tempdir;
ok(-f $walfile, "Got a WAL file");
$node->command_ok(
[
- 'pg_waldump', '--quiet',
+ 'pg_waldump', '--quiet',
'--save-fullpage', "$tmp_folder/raw",
- '--relation', $relation,
+ '--relation', $relation,
$walfile
],
'pg_waldump with --save-fullpage runs');
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 70ed034e70..7dbb2ed6a7 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -4621,7 +4621,7 @@ processXactStats(TState *thread, CState *st, pg_time_usec_t *now,
double latency = 0.0,
lag = 0.0;
bool detailed = progress || throttle_delay || latency_limit ||
- use_log || per_script_stats;
+ use_log || per_script_stats;
if (detailed && !skipped && st->estatus == ESTATUS_NO_ERROR)
{
@@ -6400,7 +6400,7 @@ printResults(StatsData *total,
StatsData *sstats = &sql_script[i].stats;
int64 script_failures = getFailures(sstats);
int64 script_total_cnt =
- sstats->cnt + sstats->skipped + script_failures;
+ sstats->cnt + sstats->skipped + script_failures;
printf("SQL script %d: %s\n"
" - weight: %d (targets %.1f%% of total)\n"
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 99273203f0..363a1ffabd 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -140,7 +140,7 @@ $node->pgbench(
qr{mode: prepared}
],
[
- qr{vacuum}, qr{client 0}, qr{client 1}, qr{sending},
+ qr{vacuum}, qr{client 0}, qr{client 1}, qr{sending},
qr{receiving}, qr{executing}
],
'pgbench select only');
@@ -233,7 +233,7 @@ COMMIT;
# 1. Logging neither with errors nor with statements
$node->append_conf('postgresql.conf',
- "log_min_duration_statement = 0\n"
+ "log_min_duration_statement = 0\n"
. "log_parameter_max_length = 0\n"
. "log_parameter_max_length_on_error = 0");
$node->reload;
@@ -261,7 +261,7 @@ $log = undef;
# 2. Logging truncated parameters on error, full with statements
$node->append_conf('postgresql.conf',
- "log_parameter_max_length = -1\n"
+ "log_parameter_max_length = -1\n"
. "log_parameter_max_length_on_error = 64");
$node->reload;
$node->pgbench(
@@ -302,7 +302,7 @@ $log = undef;
# 3. Logging full parameters on error, truncated with statements
$node->append_conf('postgresql.conf',
- "log_min_duration_statement = -1\n"
+ "log_min_duration_statement = -1\n"
. "log_parameter_max_length = 7\n"
. "log_parameter_max_length_on_error = -1");
$node->reload;
@@ -363,7 +363,7 @@ select :value1::smallint, :value2::smallint;
# Restore default logging config
$node->append_conf('postgresql.conf',
- "log_min_duration_statement = -1\n"
+ "log_min_duration_statement = -1\n"
. "log_parameter_max_length_on_error = 0\n"
. "log_parameter_max_length = -1");
$node->reload;
@@ -438,7 +438,7 @@ $node->pgbench(
qr{command=98.: int 5432\b}, # :random_seed
qr{command=99.: int -9223372036854775808\b}, # min int
qr{command=100.: int 9223372036854775807\b}, # max int
- # pseudorandom permutation tests
+ # pseudorandom permutation tests
qr{command=101.: boolean true\b},
qr{command=102.: boolean true\b},
qr{command=103.: boolean true\b},
@@ -640,7 +640,7 @@ my ($ret, $out, $err) = $node->psql('postgres',
'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val'
);
-ok($ret == 0, "psql seeded_random count ok");
+ok($ret == 0, "psql seeded_random count ok");
ok($err eq '', "psql seeded_random count stderr is empty");
ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/,
"psql seeded_random count uniform");
@@ -734,7 +734,7 @@ SELECT 5432 AS fail UNION SELECT 5433 ORDER BY 1 \gset
$node->pgbench(
'-t 1', 0,
[ qr{type: .*/001_pgbench_aset}, qr{processed: 1/1} ],
- [ qr{command=3.: int 8\b}, qr{command=4.: int 7\b} ],
+ [ qr{command=3.: int 8\b}, qr{command=4.: int 7\b} ],
'pgbench aset command',
{
'001_pgbench_aset' => q{
@@ -886,7 +886,7 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
# SHELL
[
- 'shell bad command', 2,
+ 'shell bad command', 2,
[qr{\(shell\) .* meta-command failed}], q{\shell no-such-command}
],
[
@@ -905,11 +905,11 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
# SET
[
- 'set syntax error', 1,
+ 'set syntax error', 1,
[qr{syntax error in command "set"}], q{\set i 1 +}
],
[
- 'set no such function', 1,
+ 'set no such function', 1,
[qr{unexpected function name}], q{\set i noSuchFunction()}
],
[
@@ -931,11 +931,11 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
q{\set i least(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)}
],
[
- 'set empty random range', 2,
+ 'set empty random range', 2,
[qr{empty range given to random}], q{\set i random(5,3)}
],
[
- 'set random range too large', 2,
+ 'set random range too large', 2,
[qr{random range is too large}], q{\set i random(:minint, :maxint)}
],
[
@@ -963,21 +963,21 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
q{\set i random_zipfian(0, 10, 1000000)}
],
[
- 'set non numeric value', 2,
+ 'set non numeric value', 2,
[qr{malformed variable "foo" value: "bla"}], q{\set i :foo + 1}
],
- [ 'set no expression', 1, [qr{syntax error}], q{\set i} ],
+ [ 'set no expression', 1, [qr{syntax error}], q{\set i} ],
[ 'set missing argument', 1, [qr{missing argument}i], q{\set} ],
[
- 'set not a bool', 2,
+ 'set not a bool', 2,
[qr{cannot coerce double to boolean}], q{\set b NOT 0.0}
],
[
- 'set not an int', 2,
+ 'set not an int', 2,
[qr{cannot coerce boolean to int}], q{\set i TRUE + 2}
],
[
- 'set not a double', 2,
+ 'set not a double', 2,
[qr{cannot coerce boolean to double}], q{\set d ln(TRUE)}
],
[
@@ -987,26 +987,26 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
q{\set i CASE TRUE THEN 1 ELSE 0 END}
],
[
- 'set random error', 2,
+ 'set random error', 2,
[qr{cannot coerce boolean to int}], q{\set b random(FALSE, TRUE)}
],
[
- 'set number of args mismatch', 1,
+ 'set number of args mismatch', 1,
[qr{unexpected number of arguments}], q{\set d ln(1.0, 2.0))}
],
[
- 'set at least one arg', 1,
+ 'set at least one arg', 1,
[qr{at least one argument expected}], q{\set i greatest())}
],
# SET: ARITHMETIC OVERFLOW DETECTION
[
- 'set double to int overflow', 2,
+ 'set double to int overflow', 2,
[qr{double to int overflow for 100}], q{\set i int(1E32)}
],
[
'set bigint add overflow', 2,
- [qr{int add out}], q{\set i (1<<62) + (1<<62)}
+ [qr{int add out}], q{\set i (1<<62) + (1<<62)}
],
[
'set bigint sub overflow',
@@ -1023,22 +1023,22 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
# SETSHELL
[
- 'setshell not an int', 2,
+ 'setshell not an int', 2,
[qr{command must return an integer}], q{\setshell i echo -n one}
],
[ 'setshell missing arg', 1, [qr{missing argument }], q{\setshell var} ],
[
- 'setshell no such command', 2,
+ 'setshell no such command', 2,
[qr{could not read result }], q{\setshell var no-such-command}
],
# SLEEP
[
- 'sleep undefined variable', 2,
+ 'sleep undefined variable', 2,
[qr{sleep: undefined variable}], q{\sleep :nosuchvariable}
],
[
- 'sleep too many args', 1,
+ 'sleep too many args', 1,
[qr{too many arguments}], q{\sleep too many args}
],
[
@@ -1046,18 +1046,18 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
[ qr{missing argument}, qr{\\sleep} ], q{\sleep}
],
[
- 'sleep unknown unit', 1,
+ 'sleep unknown unit', 1,
[qr{unrecognized time unit}], q{\sleep 1 week}
],
# MISC
[
- 'misc invalid backslash command', 1,
+ 'misc invalid backslash command', 1,
[qr{invalid command .* "nosuchcommand"}], q{\nosuchcommand}
],
[ 'misc empty script', 1, [qr{empty command list for script}], q{} ],
[
- 'bad boolean', 2,
+ 'bad boolean', 2,
[qr{malformed variable.*trueXXX}], q{\set b :badtrue or true}
],
[
@@ -1069,21 +1069,21 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
# GSET
[
- 'gset no row', 2,
+ 'gset no row', 2,
[qr{expected one row, got 0\b}], q{SELECT WHERE FALSE \gset}
],
[ 'gset alone', 1, [qr{gset must follow an SQL command}], q{\gset} ],
[
- 'gset no SQL', 1,
+ 'gset no SQL', 1,
[qr{gset must follow an SQL command}], q{\set i +1
\gset}
],
[
'gset too many arguments', 1,
- [qr{too many arguments}], q{SELECT 1 \gset a b}
+ [qr{too many arguments}], q{SELECT 1 \gset a b}
],
[
- 'gset after gset', 1,
+ 'gset after gset', 1,
[qr{gset must follow an SQL command}], q{SELECT 1 AS i \gset
\gset}
],
@@ -1094,7 +1094,7 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
q{DROP TABLE IF EXISTS no_such_table \gset}
],
[
- 'gset bad default name', 2,
+ 'gset bad default name', 2,
[qr{error storing into variable \?column\?}], q{SELECT 1 \gset}
],
[
@@ -1234,7 +1234,7 @@ $node->pgbench(
# Test the concurrent update in the table row and deadlocks.
$node->safe_psql('postgres',
- 'CREATE UNLOGGED TABLE first_client_table (value integer); '
+ 'CREATE UNLOGGED TABLE first_client_table (value integer); '
. 'CREATE UNLOGGED TABLE xy (x integer, y integer); '
. 'INSERT INTO xy VALUES (1, 2);');
@@ -1245,7 +1245,7 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=repeatable\\ read";
# Check that we have a serialization error and the same random value of the
# delta variable in the next try
my $err_pattern =
- "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
+ "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
. "client \\2 got an error in command 3 \\(SQL\\) of script 0; "
. "ERROR: could not serialize access due to concurrent update\\b.*"
. "\\1";
@@ -1331,7 +1331,7 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=read\\ committed";
# Check that we have a deadlock error
$err_pattern =
- "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
+ "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
. "ERROR: deadlock detected\\b";
$node->pgbench(
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index d59d2ab6da..0ec54fbb03 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -128,7 +128,7 @@ my @options = (
'invalid progress', '--progress=0',
[qr{-P/--progress must be in range}]
],
- [ 'invalid rate', '--rate=0.0', [qr{invalid rate limit}] ],
+ [ 'invalid rate', '--rate=0.0', [qr{invalid rate limit}] ],
[ 'invalid latency', '--latency-limit=0.0', [qr{invalid latency limit}] ],
[
'invalid sampling rate', '--sampling-rate=0',
@@ -144,7 +144,7 @@ my @options = (
'-b se@0 -b si@0 -b tpcb@0',
[qr{weight must not be zero}]
],
- [ 'init vs run', '-i -S', [qr{cannot be used in initialization}] ],
+ [ 'init vs run', '-i -S', [qr{cannot be used in initialization}] ],
[ 'run vs init', '-S -F 90', [qr{cannot be used in benchmarking}] ],
[ 'ambiguous builtin', '-b s', [qr{ambiguous}] ],
[
@@ -257,7 +257,7 @@ pgbench(
[qr{^$}],
[
qr{Available builtin scripts:}, qr{tpcb-like},
- qr{simple-update}, qr{select-only}
+ qr{simple-update}, qr{select-only}
],
'pgbench builtin list');
@@ -268,7 +268,7 @@ pgbench(
[qr{^$}],
[
qr{select-only: }, qr{SELECT abalance FROM pgbench_accounts WHERE},
- qr{(?!UPDATE)}, qr{(?!INSERT)}
+ qr{(?!UPDATE)}, qr{(?!INSERT)}
],
'pgbench builtin listing');
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index ab3f4e4920..511debbe81 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -4511,7 +4511,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
/* header line width in expanded mode */
else if (strcmp(param, "xheader_width") == 0)
{
- if (! value)
+ if (!value)
;
else if (pg_strcasecmp(value, "full") == 0)
popt->topt.expanded_header_width_type = PRINT_XHEADER_FULL;
@@ -5063,15 +5063,16 @@ pset_value_string(const char *param, printQueryOpt *popt)
else if (strcmp(param, "xheader_width") == 0)
{
if (popt->topt.expanded_header_width_type == PRINT_XHEADER_FULL)
- return(pstrdup("full"));
+ return pstrdup("full");
else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_COLUMN)
- return(pstrdup("column"));
+ return pstrdup("column");
else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_PAGE)
- return(pstrdup("page"));
+ return pstrdup("page");
else
{
/* must be PRINT_XHEADER_EXACT_WIDTH */
- char wbuff[32];
+ char wbuff[32];
+
snprintf(wbuff, sizeof(wbuff), "%d",
popt->topt.expanded_header_exact_width);
return pstrdup(wbuff);
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index c0e6e8e6ed..5973df2e39 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -1432,7 +1432,7 @@ ExecQueryAndProcessResults(const char *query,
INSTR_TIME_SET_ZERO(before);
if (pset.bind_flag)
- success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char * const *) pset.bind_params, NULL, NULL, 0);
+ success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char *const *) pset.bind_params, NULL, NULL, 0);
else
success = PQsendQuery(pset.db, query);
diff --git a/src/bin/psql/create_help.pl b/src/bin/psql/create_help.pl
index 1d5366db16..0809db4151 100644
--- a/src/bin/psql/create_help.pl
+++ b/src/bin/psql/create_help.pl
@@ -23,18 +23,18 @@ use strict;
use warnings;
use Getopt::Long;
-my $docdir = '';
-my $outdir = '.';
-my $depfile = '';
+my $docdir = '';
+my $outdir = '.';
+my $depfile = '';
my $hfilebasename = '';
GetOptions(
- 'docdir=s' => \$docdir,
- 'outdir=s' => \$outdir,
+ 'docdir=s' => \$docdir,
+ 'outdir=s' => \$outdir,
'basename=s' => \$hfilebasename,
- 'depfile=s' => \$depfile,) or die "$0: wrong arguments";
+ 'depfile=s' => \$depfile,) or die "$0: wrong arguments";
-$docdir or die "$0: missing required argument: docdir\n";
+$docdir or die "$0: missing required argument: docdir\n";
$hfilebasename or die "$0: missing required argument: basename\n";
my $hfile = $hfilebasename . '.h';
@@ -163,11 +163,11 @@ foreach my $file (sort readdir $dh)
foreach my $cmdname (@cmdnames)
{
$entries{$cmdname} = {
- cmdid => $cmdid,
- cmddesc => $cmddesc,
+ cmdid => $cmdid,
+ cmddesc => $cmddesc,
cmdsynopsis => $cmdsynopsis,
- params => \@params,
- nl_count => $nl_count
+ params => \@params,
+ nl_count => $nl_count
};
$maxlen =
($maxlen >= length $cmdname) ? $maxlen : length $cmdname;
@@ -182,7 +182,7 @@ foreach my $file (sort readdir $dh)
foreach (sort keys %entries)
{
my $prefix = "\t" x 5 . ' ';
- my $id = $_;
+ my $id = $_;
$id =~ s/ /_/g;
my $synopsis = "\"$entries{$_}{cmdsynopsis}\"";
$synopsis =~ s/\\n/\\n"\n$prefix"/g;
diff --git a/src/bin/psql/crosstabview.c b/src/bin/psql/crosstabview.c
index 67fcdb49dd..e1ad0e61d9 100644
--- a/src/bin/psql/crosstabview.c
+++ b/src/bin/psql/crosstabview.c
@@ -532,7 +532,7 @@ avlInsertNode(avl_tree *tree, avl_node **node, pivot_field field)
if (current == tree->end)
{
avl_node *new_node = (avl_node *)
- pg_malloc(sizeof(avl_node));
+ pg_malloc(sizeof(avl_node));
new_node->height = 1;
new_node->field = field;
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index ab4279ed58..9325a46b8f 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -1160,8 +1160,8 @@ permissionsList(const char *pattern, bool showSystem)
return true;
error_return:
- termPQExpBuffer(&buf);
- return false;
+ termPQExpBuffer(&buf);
+ return false;
}
diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h
index 73d4b393bc..1106954236 100644
--- a/src/bin/psql/settings.h
+++ b/src/bin/psql/settings.h
@@ -96,7 +96,8 @@ typedef struct _psqlSettings
char *gset_prefix; /* one-shot prefix argument for \gset */
bool gdesc_flag; /* one-shot request to describe query result */
bool gexec_flag; /* one-shot request to execute query result */
- bool bind_flag; /* one-shot request to use extended query protocol */
+ bool bind_flag; /* one-shot request to use extended query
+ * protocol */
int bind_nparams; /* number of parameters */
char **bind_params; /* parameters for extended query protocol call */
bool crosstab_flag; /* one-shot request to crosstab result */
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 596746de17..9ac27db212 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -22,7 +22,7 @@ sub psql_like
my ($ret, $stdout, $stderr) = $node->psql('postgres', $sql);
- is($ret, 0, "$test_name: exit code 0");
+ is($ret, 0, "$test_name: exit code 0");
is($stderr, '', "$test_name: no stderr");
like($stdout, $expected_stdout, "$test_name: matches");
@@ -69,9 +69,9 @@ max_wal_senders = 4
});
$node->start;
-psql_like($node, '\copyright', qr/Copyright/, '\copyright');
-psql_like($node, '\help', qr/ALTER/, '\help without arguments');
-psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument');
+psql_like($node, '\copyright', qr/Copyright/, '\copyright');
+psql_like($node, '\help', qr/ALTER/, '\help without arguments');
+psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument');
# Test clean handling of unsupported replication command responses
psql_fails_like(
@@ -132,7 +132,7 @@ NOTIFY foo, 'bar';",
# test behavior and output on server crash
my ($ret, $out, $err) = $node->psql('postgres',
- "SELECT 'before' AS running;\n"
+ "SELECT 'before' AS running;\n"
. "SELECT pg_terminate_backend(pg_backend_pid());\n"
. "SELECT 'AFTER' AS not_running;\n");
@@ -216,9 +216,9 @@ $node->safe_psql('postgres', "CREATE TABLE tab_psql_single (a int);");
# Tests with ON_ERROR_STOP.
$node->command_ok(
[
- 'psql', '-X',
- '--single-transaction', '-v',
- 'ON_ERROR_STOP=1', '-c',
+ 'psql', '-X',
+ '--single-transaction', '-v',
+ 'ON_ERROR_STOP=1', '-c',
'INSERT INTO tab_psql_single VALUES (1)', '-c',
'INSERT INTO tab_psql_single VALUES (2)'
],
@@ -231,9 +231,9 @@ is($row_count, '2',
$node->command_fails(
[
- 'psql', '-X',
- '--single-transaction', '-v',
- 'ON_ERROR_STOP=1', '-c',
+ 'psql', '-X',
+ '--single-transaction', '-v',
+ 'ON_ERROR_STOP=1', '-c',
'INSERT INTO tab_psql_single VALUES (3)', '-c',
"\\copy tab_psql_single FROM '$tempdir/nonexistent'"
],
@@ -245,15 +245,15 @@ is($row_count, '2',
);
# Tests mixing files and commands.
-my $copy_sql_file = "$tempdir/tab_copy.sql";
+my $copy_sql_file = "$tempdir/tab_copy.sql";
my $insert_sql_file = "$tempdir/tab_insert.sql";
append_to_file($copy_sql_file,
"\\copy tab_psql_single FROM '$tempdir/nonexistent';");
append_to_file($insert_sql_file, 'INSERT INTO tab_psql_single VALUES (4);');
$node->command_ok(
[
- 'psql', '-X', '--single-transaction', '-v',
- 'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
+ 'psql', '-X', '--single-transaction', '-v',
+ 'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
$insert_sql_file
],
'ON_ERROR_STOP, --single-transaction and multiple -f switches');
@@ -265,8 +265,8 @@ is($row_count, '4',
$node->command_fails(
[
- 'psql', '-X', '--single-transaction', '-v',
- 'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
+ 'psql', '-X', '--single-transaction', '-v',
+ 'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
$copy_sql_file
],
'ON_ERROR_STOP, --single-transaction and multiple -f switches, error');
@@ -281,10 +281,10 @@ is($row_count, '4',
# transaction commits.
$node->command_fails(
[
- 'psql', '-X',
+ 'psql', '-X',
'--single-transaction', '-f',
- $insert_sql_file, '-f',
- $insert_sql_file, '-c',
+ $insert_sql_file, '-f',
+ $insert_sql_file, '-c',
"\\copy tab_psql_single FROM '$tempdir/nonexistent'"
],
'no ON_ERROR_STOP, --single-transaction and multiple -f/-c switches');
@@ -298,8 +298,8 @@ is($row_count, '6',
# returns a success and the transaction commits.
$node->command_ok(
[
- 'psql', '-X', '--single-transaction', '-f',
- $insert_sql_file, '-f', $insert_sql_file, '-f',
+ 'psql', '-X', '--single-transaction', '-f',
+ $insert_sql_file, '-f', $insert_sql_file, '-f',
$copy_sql_file
],
'no ON_ERROR_STOP, --single-transaction and multiple -f switches');
@@ -313,10 +313,10 @@ is($row_count, '8',
# the transaction commit even if there is a failure in-between.
$node->command_ok(
[
- 'psql', '-X',
- '--single-transaction', '-c',
+ 'psql', '-X',
+ '--single-transaction', '-c',
'INSERT INTO tab_psql_single VALUES (5)', '-f',
- $copy_sql_file, '-c',
+ $copy_sql_file, '-c',
'INSERT INTO tab_psql_single VALUES (6)'
],
'no ON_ERROR_STOP, --single-transaction and multiple -c switches');
@@ -348,16 +348,12 @@ psql_like(
qr/1\|value\|2022-07-04 00:00:00
2|test|2022-07-03 00:00:00
3|test|2022-07-05 00:00:00/,
- '\copy from w