author     Robert Haas  2024-01-02 16:56:02 +0000
committer  Robert Haas  2024-01-02 17:05:41 +0000
commit     0d9937d1185629a94bf43dd0768c8f4eb835c9f0 (patch)
tree       3a4b5db3c8f3430daf7eda0f55672f7fbda6fcf3
parent     5c430f9dc559ecd3bda8bebf4854f3f8d8bd86f3 (diff)
Fix typos in comments and in one isolation test.
Dagfinn Ilmari Mannsåker, reviewed by Shubham Khanna. Some subtractions by me.

Discussion: http://postgr.es/m/[email protected]
-rw-r--r--  contrib/bloom/bloom.h                          |  2
-rw-r--r--  contrib/pgcrypto/expected/pgp-compression.out  |  2
-rw-r--r--  contrib/pgcrypto/openssl.c                     |  2
-rw-r--r--  contrib/pgcrypto/sql/pgp-compression.sql       |  2
-rw-r--r--  contrib/postgres_fdw/expected/postgres_fdw.out |  2
-rw-r--r--  contrib/postgres_fdw/sql/postgres_fdw.sql      |  2
-rw-r--r--  src/backend/access/brin/brin.c                 |  6
-rw-r--r--  src/backend/access/common/heaptuple.c          |  2
-rw-r--r--  src/backend/access/nbtree/nbtree.c             |  2
-rw-r--r--  src/backend/catalog/namespace.c                |  2
-rw-r--r--  src/backend/catalog/pg_constraint.c            |  2
-rw-r--r--  src/backend/commands/event_trigger.c           |  2
-rw-r--r--  src/backend/executor/execMain.c                |  2
-rw-r--r--  src/backend/optimizer/plan/initsplan.c         |  4
-rw-r--r--  src/backend/utils/adt/rangetypes.c             |  2
-rw-r--r--  src/backend/utils/sort/tuplesortvariants.c     | 10
-rw-r--r--  src/backend/utils/time/combocid.c              |  2
-rw-r--r--  src/bin/pg_rewind/t/001_basic.pl               |  2
-rw-r--r--  src/bin/pg_rewind/t/004_pg_xlog_symlink.pl     |  2
-rw-r--r--  src/bin/pg_rewind/t/007_standby_source.pl      |  2
-rw-r--r--  src/bin/pg_rewind/t/009_growing_files.pl       |  2
-rw-r--r--  src/include/pg_config_manual.h                 |  2
-rw-r--r--  src/test/isolation/specs/stats.spec            | 16
-rw-r--r--  src/test/regress/expected/boolean.out          |  2
-rw-r--r--  src/test/regress/expected/brin_multi.out       |  4
-rw-r--r--  src/test/regress/expected/join.out             |  2
-rw-r--r--  src/test/regress/sql/boolean.sql               |  2
-rw-r--r--  src/test/regress/sql/brin_multi.sql            |  4
-rw-r--r--  src/test/regress/sql/join.sql                  |  2
29 files changed, 45 insertions, 45 deletions
diff --git a/contrib/bloom/bloom.h b/contrib/bloom/bloom.h
index 330811ec608..7c4407b9ece 100644
--- a/contrib/bloom/bloom.h
+++ b/contrib/bloom/bloom.h
@@ -127,7 +127,7 @@ typedef struct BloomMetaPageData
FreeBlockNumberArray notFullPage;
} BloomMetaPageData;
-/* Magic number to distinguish bloom pages among anothers */
+/* Magic number to distinguish bloom pages from others */
#define BLOOM_MAGICK_NUMBER (0xDBAC0DED)
/* Number of blocks numbers fit in BloomMetaPageData */
diff --git a/contrib/pgcrypto/expected/pgp-compression.out b/contrib/pgcrypto/expected/pgp-compression.out
index d4c57feba30..67e2dce897a 100644
--- a/contrib/pgcrypto/expected/pgp-compression.out
+++ b/contrib/pgcrypto/expected/pgp-compression.out
@@ -60,7 +60,7 @@ WITH random_string AS
-- This generates a random string of 16366 bytes. This is chosen
-- as random so that it does not get compressed, and the decompression
-- would work on a string with the same length as the origin, making the
- -- test behavior more predictible. lpad() ensures that the generated
+ -- test behavior more predictable. lpad() ensures that the generated
-- hexadecimal value is completed by extra zero characters if random()
-- has generated a value strictly lower than 16.
SELECT string_agg(decode(lpad(to_hex((random()*256)::int), 2, '0'), 'hex'), '') as bytes
diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c
index 4a913bd04f7..8259de5e393 100644
--- a/contrib/pgcrypto/openssl.c
+++ b/contrib/pgcrypto/openssl.c
@@ -460,7 +460,7 @@ bf_init(PX_Cipher *c, const uint8 *key, unsigned klen, const uint8 *iv)
/*
* Test if key len is supported. BF_set_key silently cut large keys and it
- * could be a problem when user transfer crypted data from one server to
+ * could be a problem when user transfer encrypted data from one server to
* another.
*/
diff --git a/contrib/pgcrypto/sql/pgp-compression.sql b/contrib/pgcrypto/sql/pgp-compression.sql
index 87c59c6cabc..82080e4389c 100644
--- a/contrib/pgcrypto/sql/pgp-compression.sql
+++ b/contrib/pgcrypto/sql/pgp-compression.sql
@@ -36,7 +36,7 @@ WITH random_string AS
-- This generates a random string of 16366 bytes. This is chosen
-- as random so that it does not get compressed, and the decompression
-- would work on a string with the same length as the origin, making the
- -- test behavior more predictible. lpad() ensures that the generated
+ -- test behavior more predictable. lpad() ensures that the generated
-- hexadecimal value is completed by extra zero characters if random()
-- has generated a value strictly lower than 16.
SELECT string_agg(decode(lpad(to_hex((random()*256)::int), 2, '0'), 'hex'), '') as bytes
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index c988745b926..d83f6ae8cbc 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -4819,7 +4819,7 @@ SELECT * FROM ft2 ftupper WHERE
925 | 5 | 00925 | Mon Jan 26 00:00:00 1970 PST | Mon Jan 26 00:00:00 1970 | 5 | 5 | foo
(10 rows)
--- EXISTS should be propogated to the highest upper inner join
+-- EXISTS should be propagated to the highest upper inner join
EXPLAIN (verbose, costs off)
SELECT ft2.*, ft4.* FROM ft2 INNER JOIN
(SELECT * FROM ft4 WHERE EXISTS (
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index cb405407028..90c8fa4b705 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -1399,7 +1399,7 @@ SELECT * FROM ft2 ftupper WHERE
AND ftupper.c1 > 900
ORDER BY ftupper.c1 LIMIT 10;
--- EXISTS should be propogated to the highest upper inner join
+-- EXISTS should be propagated to the highest upper inner join
EXPLAIN (verbose, costs off)
SELECT ft2.*, ft4.* FROM ft2 INNER JOIN
(SELECT * FROM ft4 WHERE EXISTS (
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index dfa34f49a4f..6f1f5518977 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -348,7 +348,7 @@ brininsert(Relation idxRel, Datum *values, bool *nulls,
bool autosummarize = BrinGetAutoSummarize(idxRel);
/*
- * If firt time through in this statement, initialize the insert state
+ * If first time through in this statement, initialize the insert state
* that we keep for all the inserts in the command.
*/
if (!bistate)
@@ -1042,7 +1042,7 @@ brinbuildCallbackParallel(Relation index,
/*
* If we're in a block that belongs to a different range, summarize what
* we've got and start afresh. Note the scan might have skipped many
- * pages, if they were devoid of live tuples; we do not create emptry BRIN
+ * pages, if they were devoid of live tuples; we do not create empty BRIN
* ranges here - the leader is responsible for filling them in.
*
* Unlike serial builds, parallel index builds allow synchronized seqscans
@@ -2149,7 +2149,7 @@ union_tuples(BrinDesc *bdesc, BrinMemTuple *a, BrinTuple *b)
* brin_vacuum_scan
* Do a complete scan of the index during VACUUM.
*
- * This routine scans the complete index looking for uncatalogued index pages,
+ * This routine scans the complete index looking for uncataloged index pages,
* i.e. those that might have been lost due to a crash after index extension
* and such.
*/
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index c52d40dce0f..88fb9e34453 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -85,7 +85,7 @@
((att)->attstorage != TYPSTORAGE_PLAIN)
/*
- * Setup for cacheing pass-by-ref missing attributes in a way that survives
+ * Setup for caching pass-by-ref missing attributes in a way that survives
* tupleDesc destruction.
*/
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index dd6dc0971bc..c95b52eac49 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -158,7 +158,7 @@ btbuildempty(Relation index)
Page metapage;
/*
- * Initalize the metapage.
+ * Initialize the metapage.
*
* Regular index build bypasses the buffer manager and uses smgr functions
* directly, with an smgrimmedsync() call at the end. That makes sense
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 37a69e9023f..3f777693ae9 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -4218,7 +4218,7 @@ cachedNamespacePath(const char *searchPath, Oid roleid)
entry = spcache_insert(searchPath, roleid);
/*
- * An OOM may have resulted in a cache entry with mising 'oidlist' or
+ * An OOM may have resulted in a cache entry with missing 'oidlist' or
* 'finalPath', so just compute whatever is missing.
*/
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index e9d4d6006ef..b0730c99af1 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -1290,7 +1290,7 @@ get_relation_constraint_attnos(Oid relid, const char *conname,
/*
* Return the OID of the constraint enforced by the given index in the
- * given relation; or InvalidOid if no such index is catalogued.
+ * given relation; or InvalidOid if no such index is cataloged.
*
* Much like get_constraint_index, this function is concerned only with the
* one constraint that "owns" the given index. Therefore, constraints of
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index bf47b0f6e26..35d5508f4a7 100644
--- a/src/backend/commands/event_trigger.c
+++ b/src/backend/commands/event_trigger.c
@@ -387,7 +387,7 @@ SetDatatabaseHasLoginEventTriggers(void)
HeapTuple tuple;
/*
- * Use shared lock to prevent a conflit with EventTriggerOnLogin() trying
+ * Use shared lock to prevent a conflict with EventTriggerOnLogin() trying
* to reset pg_database.dathasloginevt flag. Note, this lock doesn't
* effectively blocks database or other objection. It's just custom lock
* tag used to prevent multiple backends changing
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 4c5a7bbf620..9539377139b 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1849,7 +1849,7 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
econtext->ecxt_scantuple = slot;
/*
- * As in case of the catalogued constraints, we treat a NULL result as
+ * As in case of the cataloged constraints, we treat a NULL result as
* success here, not a failure.
*/
success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 8295e7753db..b0f9e21474f 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -1928,8 +1928,8 @@ deconstruct_distribute_oj_quals(PlannerInfo *root,
* jtitems list to be ordered that way.
*
* We first strip out all the nullingrels bits corresponding to
- * commutating joins below this one, and then successively put them
- * back as we crawl up the join stack.
+ * commuting joins below this one, and then successively put them back
+ * as we crawl up the join stack.
*/
quals = jtitem->oj_joinclauses;
if (!bms_is_empty(joins_below))
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 24bad529239..d3fc88ec2db 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -2608,7 +2608,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, const RangeType *r, Datum
* values into a range object. They are modeled after heaptuple.c's
* heap_compute_data_size() and heap_fill_tuple(), but we need not handle
* null values here. TYPE_IS_PACKABLE must test the same conditions as
- * heaptuple.c's ATT_IS_PACKABLE macro. See the comments thare for more
+ * heaptuple.c's ATT_IS_PACKABLE macro. See the comments there for more
* details.
*/
diff --git a/src/backend/utils/sort/tuplesortvariants.c b/src/backend/utils/sort/tuplesortvariants.c
index 27425880a5f..1aa2a3bb5bd 100644
--- a/src/backend/utils/sort/tuplesortvariants.c
+++ b/src/backend/utils/sort/tuplesortvariants.c
@@ -93,7 +93,7 @@ static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
static void freestate_cluster(Tuplesortstate *state);
/*
- * Data struture pointed by "TuplesortPublic.arg" for the CLUSTER case. Set by
+ * Data structure pointed by "TuplesortPublic.arg" for the CLUSTER case. Set by
* the tuplesort_begin_cluster.
*/
typedef struct
@@ -105,7 +105,7 @@ typedef struct
} TuplesortClusterArg;
/*
- * Data struture pointed by "TuplesortPublic.arg" for the IndexTuple case.
+ * Data structure pointed by "TuplesortPublic.arg" for the IndexTuple case.
* Set by tuplesort_begin_index_xxx and used only by the IndexTuple routines.
*/
typedef struct
@@ -115,7 +115,7 @@ typedef struct
} TuplesortIndexArg;
/*
- * Data struture pointed by "TuplesortPublic.arg" for the index_btree subcase.
+ * Data structure pointed by "TuplesortPublic.arg" for the index_btree subcase.
*/
typedef struct
{
@@ -126,7 +126,7 @@ typedef struct
} TuplesortIndexBTreeArg;
/*
- * Data struture pointed by "TuplesortPublic.arg" for the index_hash subcase.
+ * Data structure pointed by "TuplesortPublic.arg" for the index_hash subcase.
*/
typedef struct
{
@@ -138,7 +138,7 @@ typedef struct
} TuplesortIndexHashArg;
/*
- * Data struture pointed by "TuplesortPublic.arg" for the Datum case.
+ * Data structure pointed by "TuplesortPublic.arg" for the Datum case.
* Set by tuplesort_begin_datum and used only by the DatumTuple routines.
*/
typedef struct
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index 0e94bc93f74..192d9c1efc3 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -4,7 +4,7 @@
* Combo command ID support routines
*
* Before version 8.3, HeapTupleHeaderData had separate fields for cmin
- * and cmax. To reduce the header size, cmin and cmax are now overlayed
+ * and cmax. To reduce the header size, cmin and cmax are now overlaid
* in the same field in the header. That usually works because you rarely
* insert and delete a tuple in the same transaction, and we don't need
* either field to remain valid after the originating transaction exits.
diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl
index 842f6c7fbe4..54cd00ca04d 100644
--- a/src/bin/pg_rewind/t/001_basic.pl
+++ b/src/bin/pg_rewind/t/001_basic.pl
@@ -60,7 +60,7 @@ sub run_test
# Insert a row in the old primary. This causes the primary and standby
# to have "diverged", it's no longer possible to just apply the
- # standy's logs over primary directory - you need to rewind.
+ # standby's logs over primary directory - you need to rewind.
primary_psql("INSERT INTO tbl1 VALUES ('in primary, after promotion')");
# Also insert a new row in the standby, which won't be present in the
diff --git a/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl b/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
index 7d1bb65cae2..ad085d41ad0 100644
--- a/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
+++ b/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
@@ -52,7 +52,7 @@ sub run_test
# Insert a row in the old primary. This causes the primary and standby
# to have "diverged", it's no longer possible to just apply the
- # standy's logs over primary directory - you need to rewind.
+ # standby's logs over primary directory - you need to rewind.
primary_psql("INSERT INTO tbl1 VALUES ('in primary, after promotion')");
# Also insert a new row in the standby, which won't be present in the
diff --git a/src/bin/pg_rewind/t/007_standby_source.pl b/src/bin/pg_rewind/t/007_standby_source.pl
index fab84a4bbb0..47e88571984 100644
--- a/src/bin/pg_rewind/t/007_standby_source.pl
+++ b/src/bin/pg_rewind/t/007_standby_source.pl
@@ -86,7 +86,7 @@ $node_c->promote;
# Insert a row in A. This causes A/B and C to have "diverged", so that it's
-# no longer possible to just apply the standy's logs over primary directory
+# no longer possible to just apply the standby's logs over primary directory
# - you need to rewind.
$node_a->safe_psql('postgres',
"INSERT INTO tbl1 VALUES ('in A, after C was promoted')");
diff --git a/src/bin/pg_rewind/t/009_growing_files.pl b/src/bin/pg_rewind/t/009_growing_files.pl
index 016f7736e77..c456a387b2f 100644
--- a/src/bin/pg_rewind/t/009_growing_files.pl
+++ b/src/bin/pg_rewind/t/009_growing_files.pl
@@ -28,7 +28,7 @@ primary_psql('CHECKPOINT');
RewindTest::promote_standby();
# Insert a row in the old primary. This causes the primary and standby to have
-# "diverged", it's no longer possible to just apply the standy's logs over
+# "diverged", it's no longer possible to just apply the standby's logs over
# primary directory - you need to rewind. Also insert a new row in the
# standby, which won't be present in the old primary.
primary_psql("INSERT INTO tbl1 VALUES ('in primary, after promotion')");
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index 16c383ba7f7..fd537329660 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -337,7 +337,7 @@
/*
* Define this to force Bitmapset reallocation on each modification. Helps
- * to find hangling pointers to Bitmapset's.
+ * to find dangling pointers to Bitmapset's.
*/
/* #define REALLOCATE_BITMAPSETS */
diff --git a/src/test/isolation/specs/stats.spec b/src/test/isolation/specs/stats.spec
index 5b922d788cc..a7daf2a49aa 100644
--- a/src/test/isolation/specs/stats.spec
+++ b/src/test/isolation/specs/stats.spec
@@ -543,10 +543,10 @@ permutation
s1_table_insert
s1_begin
s1_table_update_k1 # should *not* be counted, different rel
- s1_table_update_k1 # dito
+ s1_table_update_k1 # ditto
s1_table_truncate
s1_table_insert_k1 # should be counted
- s1_table_update_k1 # dito
+ s1_table_update_k1 # ditto
s1_prepare_a
s1_commit_prepared_a
s1_ff
@@ -557,10 +557,10 @@ permutation
s1_table_insert
s1_begin
s1_table_update_k1 # should *not* be counted, different rel
- s1_table_update_k1 # dito
+ s1_table_update_k1 # ditto
s1_table_truncate
s1_table_insert_k1 # should be counted
- s1_table_update_k1 # dito
+ s1_table_update_k1 # ditto
s1_prepare_a
s1_ff # flush out non-transactional stats, might happen anyway
s2_commit_prepared_a
@@ -572,10 +572,10 @@ permutation
s1_table_insert
s1_begin
s1_table_update_k1 # should be counted
- s1_table_update_k1 # dito
+ s1_table_update_k1 # ditto
s1_table_truncate
s1_table_insert_k1 # should *not* be counted, different rel
- s1_table_update_k1 # dito
+ s1_table_update_k1 # ditto
s1_prepare_a
s1_rollback_prepared_a
s1_ff
@@ -586,10 +586,10 @@ permutation
s1_table_insert
s1_begin
s1_table_update_k1 # should be counted
- s1_table_update_k1 # dito
+ s1_table_update_k1 # ditto
s1_table_truncate
s1_table_insert_k1 # should *not* be counted, different rel
- s1_table_update_k1 # dito
+ s1_table_update_k1 # ditto
s1_prepare_a
s2_rollback_prepared_a
s1_ff s2_ff
diff --git a/src/test/regress/expected/boolean.out b/src/test/regress/expected/boolean.out
index ee9c244bf8e..57d251eea76 100644
--- a/src/test/regress/expected/boolean.out
+++ b/src/test/regress/expected/boolean.out
@@ -486,7 +486,7 @@ FROM booltbl3 ORDER BY o;
-- Test to make sure short-circuiting and NULL handling is
-- correct. Use a table as source to prevent constant simplification
--- to interfer.
+-- from interfering.
CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
INSERT INTO booltbl4 VALUES (false, true, null);
\pset null '(null)'
diff --git a/src/test/regress/expected/brin_multi.out b/src/test/regress/expected/brin_multi.out
index 7df42865daa..ae9ce9d8ecf 100644
--- a/src/test/regress/expected/brin_multi.out
+++ b/src/test/regress/expected/brin_multi.out
@@ -826,11 +826,11 @@ RESET enable_seqscan;
-- test overflows during CREATE INDEX with extreme timestamp values
CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ);
SET datestyle TO iso;
--- values close to timetamp minimum
+-- values close to timestamp minimum
INSERT INTO brin_timestamp_test
SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval
FROM generate_series(1,30) s(i);
--- values close to timetamp maximum
+-- values close to timestamp maximum
INSERT INTO brin_timestamp_test
SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval
FROM generate_series(1,30) s(i);
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 7c6f3cb15c9..ffc60eb21d0 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -6957,7 +6957,7 @@ WHERE q0.a = 1;
(7 rows)
--
----- Only one side is unqiue
+---- Only one side is unique
--select * from sl t1, sl t2 where t1.a = t2.a and t1.b = 1;
--select * from sl t1, sl t2 where t1.a = t2.a and t2.b = 1;
--
diff --git a/src/test/regress/sql/boolean.sql b/src/test/regress/sql/boolean.sql
index bc9937d6920..5b9dcd23172 100644
--- a/src/test/regress/sql/boolean.sql
+++ b/src/test/regress/sql/boolean.sql
@@ -227,7 +227,7 @@ FROM booltbl3 ORDER BY o;
-- Test to make sure short-circuiting and NULL handling is
-- correct. Use a table as source to prevent constant simplification
--- to interfer.
+-- from interfering.
CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
INSERT INTO booltbl4 VALUES (false, true, null);
\pset null '(null)'
diff --git a/src/test/regress/sql/brin_multi.sql b/src/test/regress/sql/brin_multi.sql
index c5a84845841..55349b4e1fd 100644
--- a/src/test/regress/sql/brin_multi.sql
+++ b/src/test/regress/sql/brin_multi.sql
@@ -592,12 +592,12 @@ CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ);
SET datestyle TO iso;
--- values close to timetamp minimum
+-- values close to timestamp minimum
INSERT INTO brin_timestamp_test
SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval
FROM generate_series(1,30) s(i);
--- values close to timetamp maximum
+-- values close to timestamp maximum
INSERT INTO brin_timestamp_test
SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval
FROM generate_series(1,30) s(i);
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index 6d368562646..f51f39eee79 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -2656,7 +2656,7 @@ SELECT * FROM
WHERE q0.a = 1;
--
----- Only one side is unqiue
+---- Only one side is unique
--select * from sl t1, sl t2 where t1.a = t2.a and t1.b = 1;
--select * from sl t1, sl t2 where t1.a = t2.a and t2.b = 1;
--