author      Bruce Momjian                               2014-05-06 15:26:26 +0000
committer   Bruce Momjian                               2014-05-06 15:26:26 +0000
commit      2616a5d300e5bb5a2838d2a065afa3740e08727f (patch)
tree        5939408c63409abda810217fe812749a5da7345b
parent      e0070a6858cfcd2c4129dfa93bc042d6d86732c8 (diff)
Remove tabs after spaces in C comments
This was not changed in HEAD, but will be done later as part of a
pgindent run. Future pgindent runs will also do this.
Report by Tom Lane
Backpatch through all supported branches, but not HEAD
623 files changed, 3206 insertions, 3206 deletions
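The change is purely mechanical: in every hunk below, the removed and added comment lines differ only in whitespace, a tab following a sentence-ending period being replaced by spaces, which is why the diffstat shows exactly as many insertions as deletions. A minimal sketch of that substitution in C (illustrative only, assuming the tab directly follows the period; the real cleanup was done with pgindent's tooling, and fix_line is a hypothetical helper, not PostgreSQL code):

    /*
     * Hypothetical sketch: replace a tab that follows a sentence-ending
     * period with two spaces (pgindent's comment convention).  Illustrative
     * only; it ignores the "only inside C comments" restriction that the
     * real cleanup honored.
     */
    #include <stdio.h>

    static void
    fix_line(const char *in, char *out)
    {
        while (*in)
        {
            if (in[0] == '.' && in[1] == '\t')
            {
                *out++ = '.';
                *out++ = ' ';
                *out++ = ' ';
                in += 2;
            }
            else
                *out++ = *in++;
        }
        *out = '\0';
    }

    int
    main(void)
    {
        char    buf[128];

        /* " * in pg_type.<tab>This ..." becomes " * in pg_type.  This ..." */
        fix_line(" * in pg_type.\tThis might be less than sizeof(Interval)", buf);
        puts(buf);
        return 0;
    }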
diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c index 137a5fcd7f4..de77bfec8aa 100644 --- a/contrib/btree_gist/btree_interval.c +++ b/contrib/btree_gist/btree_interval.c @@ -93,7 +93,7 @@ gbt_intv_dist(const void *a, const void *b) /* * INTERVALSIZE should be the actual size-on-disk of an Interval, as shown - * in pg_type. This might be less than sizeof(Interval) if the compiler + * in pg_type. This might be less than sizeof(Interval) if the compiler * insists on adding alignment padding at the end of the struct. */ #define INTERVALSIZE 16 diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c index 2d3f5bc8e33..979186b805a 100644 --- a/contrib/cube/cube.c +++ b/contrib/cube/cube.c @@ -564,7 +564,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS) rt_cube_size(datum_r, &size_r); /* - * Now split up the regions between the two seeds. An important property + * Now split up the regions between the two seeds. An important property * of this split algorithm is that the split vector v has the indices of * items to be split in order in its left and right vectors. We exploit * this property by doing a merge in the code that actually splits the @@ -580,7 +580,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS) { /* * If we've already decided where to place this item, just put it on - * the right list. Otherwise, we need to figure out which page needs + * the right list. Otherwise, we need to figure out which page needs * the least enlargement in order to store the item. */ diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 98316a93185..521dfc768c5 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -1987,7 +1987,7 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk * Build sql statement to look up tuple of interest, ie, the one matching * src_pkattvals. We used to use "SELECT *" here, but it's simpler to * generate a result tuple that matches the table's physical structure, - * with NULLs for any dropped columns. Otherwise we have to deal with two + * with NULLs for any dropped columns. Otherwise we have to deal with two * different tupdescs and everything's very confusing. */ appendStringInfoString(&buf, "SELECT "); @@ -2213,7 +2213,7 @@ dblink_security_check(PGconn *conn, remoteConn *rconn) } /* - * For non-superusers, insist that the connstr specify a password. This + * For non-superusers, insist that the connstr specify a password. This * prevents a password from being picked up from .pgpass, a service file, * the environment, etc. We don't want the postgres user's passwords * to be accessible to non-superusers. diff --git a/contrib/earthdistance/earthdistance.c b/contrib/earthdistance/earthdistance.c index 2f344a70116..28130312c8c 100644 --- a/contrib/earthdistance/earthdistance.c +++ b/contrib/earthdistance/earthdistance.c @@ -91,7 +91,7 @@ geo_distance_internal(Point *pt1, Point *pt2) * distance between the points in miles on earth's surface * * If float8 is passed-by-value, the oldstyle version-0 calling convention - * is unportable, so we use version-1. However, if it's passed-by-reference, + * is unportable, so we use version-1. However, if it's passed-by-reference, * continue to use oldstyle. This is just because we'd like earthdistance * to serve as a canary for any unintentional breakage of version-0 functions * with float8 results. 
diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index 4eb87e651bc..cb210e507c6 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -518,7 +518,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel, /* * Now estimate the number of rows returned by the scan after applying the - * baserestrictinfo quals. This is pretty bogus too, since the planner + * baserestrictinfo quals. This is pretty bogus too, since the planner * will have no stats about the relation, but it's better than nothing. */ nrows = ntuples * @@ -534,7 +534,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel, baserel->rows = nrows; /* - * Now estimate costs. We estimate costs almost the same way as + * Now estimate costs. We estimate costs almost the same way as * cost_seqscan(), thus assuming that I/O costs are equivalent to a * regular table file of the same size. However, we take per-tuple CPU * costs as 10x of a seqscan, to account for the cost of parsing records. diff --git a/contrib/fuzzystrmatch/levenshtein.c b/contrib/fuzzystrmatch/levenshtein.c index a84c46a4a40..72ad1ddf790 100644 --- a/contrib/fuzzystrmatch/levenshtein.c +++ b/contrib/fuzzystrmatch/levenshtein.c @@ -50,7 +50,7 @@ static int levenshtein_internal(text *s, text *t, * array. * * If max_d >= 0, we only need to provide an accurate answer when that answer - * is less than or equal to the bound. From any cell in the matrix, there is + * is less than or equal to the bound. From any cell in the matrix, there is * theoretical "minimum residual distance" from that cell to the last column * of the final row. This minimum residual distance is zero when the * untransformed portions of the strings are of equal length (because we might @@ -141,7 +141,7 @@ levenshtein_internal(text *s, text *t, stop_column = m + 1; /* - * If max_d >= 0, determine whether the bound is impossibly tight. If so, + * If max_d >= 0, determine whether the bound is impossibly tight. If so, * return max_d + 1 immediately. Otherwise, determine whether it's tight * enough to limit the computation we must perform. If so, figure out * initial stop column. @@ -168,7 +168,7 @@ levenshtein_internal(text *s, text *t, * need to fill in. If the string is growing, the theoretical * minimum distance already incorporates the cost of deleting the * number of characters necessary to make the two strings equal in - * length. Each additional deletion forces another insertion, so + * length. Each additional deletion forces another insertion, so * the best-case total cost increases by ins_c + del_c. If the * string is shrinking, the minimum theoretical cost assumes no * excess deletions; that is, we're starting no futher right than @@ -246,7 +246,7 @@ levenshtein_internal(text *s, text *t, /* * The main loop fills in curr, but curr[0] needs a special case: to * transform the first 0 characters of s into the first j characters - * of t, we must perform j insertions. However, if start_column > 0, + * of t, we must perform j insertions. However, if start_column > 0, * this special case does not apply. */ if (start_column == 0) diff --git a/contrib/hstore/hstore.h b/contrib/hstore/hstore.h index 4e55f6e5e28..df5a75377d8 100644 --- a/contrib/hstore/hstore.h +++ b/contrib/hstore/hstore.h @@ -12,7 +12,7 @@ * HEntry: there is one of these for each key _and_ value in an hstore * * the position offset points to the _end_ so that we can get the length - * by subtraction from the previous entry. 
the ISFIRST flag lets us tell + * by subtraction from the previous entry. the ISFIRST flag lets us tell * whether there is a previous entry. */ typedef struct diff --git a/contrib/hstore/hstore_gin.c b/contrib/hstore/hstore_gin.c index 2007801cf0c..d55674c79f9 100644 --- a/contrib/hstore/hstore_gin.c +++ b/contrib/hstore/hstore_gin.c @@ -13,7 +13,7 @@ /* * When using a GIN index for hstore, we choose to index both keys and values. * The storage format is "text" values, with K, V, or N prepended to the string - * to indicate key, value, or null values. (As of 9.1 it might be better to + * to indicate key, value, or null values. (As of 9.1 it might be better to * store null values as nulls, but we'll keep it this way for on-disk * compatibility.) */ @@ -168,7 +168,7 @@ gin_consistent_hstore(PG_FUNCTION_ARGS) { /* * Index doesn't have information about correspondence of keys and - * values, so we need recheck. However, if not all the keys are + * values, so we need recheck. However, if not all the keys are * present, we can fail at once. */ *recheck = true; diff --git a/contrib/intarray/_int_bool.c b/contrib/intarray/_int_bool.c index 62294d1caba..43cc0d42d5c 100644 --- a/contrib/intarray/_int_bool.c +++ b/contrib/intarray/_int_bool.c @@ -355,7 +355,7 @@ gin_bool_consistent(QUERYTYPE *query, bool *check) return FALSE; /* - * Set up data for checkcondition_gin. This must agree with the query + * Set up data for checkcondition_gin. This must agree with the query * extraction code in ginint4_queryextract. */ gcv.first = items; diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c index 0a173bfcb66..fe7e9b12d6b 100644 --- a/contrib/intarray/_int_gist.c +++ b/contrib/intarray/_int_gist.c @@ -482,7 +482,7 @@ g_int_picksplit(PG_FUNCTION_ARGS) qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost); /* - * Now split up the regions between the two seeds. An important property + * Now split up the regions between the two seeds. An important property * of this split algorithm is that the split vector v has the indices of * items to be split in order in its left and right vectors. We exploit * this property by doing a merge in the code that actually splits the @@ -500,7 +500,7 @@ g_int_picksplit(PG_FUNCTION_ARGS) /* * If we've already decided where to place this item, just put it on - * the right list. Otherwise, we need to figure out which page needs + * the right list. Otherwise, we need to figure out which page needs * the least enlargement in order to store the item. */ diff --git a/contrib/intarray/_int_tool.c b/contrib/intarray/_int_tool.c index 94df98608de..9a33765a912 100644 --- a/contrib/intarray/_int_tool.c +++ b/contrib/intarray/_int_tool.c @@ -184,7 +184,7 @@ rt__int_size(ArrayType *a, float *size) *size = (float) ARRNELEMS(a); } -/* Sort the given data (len >= 2). Return true if any duplicates found */ +/* Sort the given data (len >= 2). Return true if any duplicates found */ bool isort(int4 *a, int len) { @@ -196,7 +196,7 @@ isort(int4 *a, int len) bool r = FALSE; /* - * We use a simple insertion sort. While this is O(N^2) in the worst + * We use a simple insertion sort. While this is O(N^2) in the worst * case, it's quite fast if the input is already sorted or nearly so. * Also, for not-too-large inputs it's faster than more complex methods * anyhow. 
diff --git a/contrib/ltree/ltree_op.c b/contrib/ltree/ltree_op.c index 2b048d4b7c5..cfd6514a298 100644 --- a/contrib/ltree/ltree_op.c +++ b/contrib/ltree/ltree_op.c @@ -612,7 +612,7 @@ ltreeparentsel(PG_FUNCTION_ARGS) /* * If the histogram is large enough, see what fraction of it the * constant is "<@" to, and assume that's representative of the - * non-MCV population. Otherwise use the default selectivity for the + * non-MCV population. Otherwise use the default selectivity for the * non-MCV population. */ selec = histogram_selectivity(&vardata, &contproc, diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c index 05c06ccc684..5f79b4710e2 100644 --- a/contrib/oid2name/oid2name.c +++ b/contrib/oid2name/oid2name.c @@ -430,7 +430,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet) } /* - * Dump all databases. There are no system objects to worry about. + * Dump all databases. There are no system objects to worry about. */ void sql_exec_dumpalldbs(PGconn *conn, struct options * opts) diff --git a/contrib/pg_archivecleanup/pg_archivecleanup.c b/contrib/pg_archivecleanup/pg_archivecleanup.c index 84d80f215c5..7c308c3aa03 100644 --- a/contrib/pg_archivecleanup/pg_archivecleanup.c +++ b/contrib/pg_archivecleanup/pg_archivecleanup.c @@ -102,7 +102,7 @@ CleanupPriorWALFiles(void) { /* * We ignore the timeline part of the XLOG segment identifiers in - * deciding whether a segment is still needed. This ensures that + * deciding whether a segment is still needed. This ensures that * we won't prematurely remove a segment from a parent timeline. * We could probably be a little more proactive about removing * segments of non-parent timelines, but that would be a whole lot diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 590512fdcdb..468282ec997 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -948,7 +948,7 @@ pgss_memsize(void) * caller must hold an exclusive lock on pgss->lock * * Note: despite needing exclusive lock, it's not an error for the target - * entry to already exist. This is because pgss_store releases and + * entry to already exist. This is because pgss_store releases and * reacquires lock after failing to find a match; so someone else could * have made the entry while we waited to get exclusive lock. */ diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c index b328a09f41f..315edd130f5 100644 --- a/contrib/pg_trgm/trgm_gist.c +++ b/contrib/pg_trgm/trgm_gist.c @@ -201,7 +201,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS) /* * Store both the strategy number and extracted trigrams in cache, because - * trigram extraction is relatively CPU-expensive. We must include + * trigram extraction is relatively CPU-expensive. We must include * strategy number because trigram extraction depends on strategy. */ if (cache == NULL || strategy != *((StrategyNumber *) cache) || diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c index 8e0db7750ff..c10ae48db5a 100644 --- a/contrib/pg_upgrade/controldata.c +++ b/contrib/pg_upgrade/controldata.c @@ -25,7 +25,7 @@ * pg_control data. pg_resetxlog cannot be run while the server is running * so we use pg_controldata; pg_controldata doesn't provide all the fields * we need to actually perform the upgrade, but it provides enough for - * check mode. We do not implement pg_resetxlog -n because it is hard to + * check mode. 
We do not implement pg_resetxlog -n because it is hard to * return valid xid data for a running server. */ void diff --git a/contrib/pg_upgrade/dump.c b/contrib/pg_upgrade/dump.c index 167e5502c3e..4ef3e2bd3d1 100644 --- a/contrib/pg_upgrade/dump.c +++ b/contrib/pg_upgrade/dump.c @@ -33,7 +33,7 @@ generate_old_dump(void) * split_old_dump * * This function splits pg_dumpall output into global values and - * database creation, and per-db schemas. This allows us to create + * database creation, and per-db schemas. This allows us to create * the support functions between restoring these two parts of the * dump. We split on the first "\connect " after a CREATE ROLE * username match; this is where the per-db restore starts. diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c index 02804629f38..9b5a84d7e43 100644 --- a/contrib/pg_upgrade/exec.c +++ b/contrib/pg_upgrade/exec.c @@ -147,7 +147,7 @@ win32_check_directory_write_permissions(void) * * This function validates the given cluster directory - we search for a * small set of subdirectories that we expect to find in a valid $PGDATA - * directory. If any of the subdirectories are missing (or secured against + * directory. If any of the subdirectories are missing (or secured against * us) we display an error message and exit() * */ @@ -187,7 +187,7 @@ check_data_dir(const char *pg_data) * check_bin_dir() * * This function searches for the executables that we expect to find - * in the binaries directory. If we find that a required executable + * in the binaries directory. If we find that a required executable * is missing (or secured against us), we display an error message and * exit(). */ diff --git a/contrib/pg_upgrade/page.c b/contrib/pg_upgrade/page.c index 22a587f4b19..d1bd668592a 100644 --- a/contrib/pg_upgrade/page.c +++ b/contrib/pg_upgrade/page.c @@ -28,11 +28,11 @@ static pageCnvCtx *loadConverterPlugin( * the PageLayoutVersion of the new cluster. If the versions differ, this * function loads a converter plugin and returns a pointer to a pageCnvCtx * object (in *result) that knows how to convert pages from the old format - * to the new format. If the versions are identical, this function just + * to the new format. If the versions are identical, this function just * returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion * is not required. * - * If successful this function sets *result and returns NULL. If an error + * If successful this function sets *result and returns NULL. If an error * occurs, this function returns an error message in the form of an null-terminated * string. */ @@ -122,7 +122,7 @@ getPageVersion(uint16 *version, const char *pathName) * This function loads a page-converter plugin library and grabs a * pointer to each of the (interesting) functions provided by that * plugin. The name of the plugin library is derived from the given - * newPageVersion and oldPageVersion. If a plugin is found, this + * newPageVersion and oldPageVersion. If a plugin is found, this * function returns a pointer to a pageCnvCtx object (which will contain * a collection of plugin function pointers). If the required plugin * is not found, this function returns NULL. 
diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c index e8b48a4fab0..b294d7b677c 100644 --- a/contrib/pg_upgrade/pg_upgrade.c +++ b/contrib/pg_upgrade/pg_upgrade.c @@ -18,7 +18,7 @@ * FYI, while pg_class.oid and pg_class.relfilenode are intially the same * in a cluster, but they can diverge due to CLUSTER, REINDEX, or VACUUM * FULL. The new cluster will have matching pg_class.oid and - * pg_class.relfilenode values and be based on the old oid value. This can + * pg_class.relfilenode values and be based on the old oid value. This can * cause the old and new pg_class.relfilenode values to differ. In summary, * old and new pg_class.oid and new pg_class.relfilenode will have the * same value, and old pg_class.relfilenode might differ. diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c index 9a0a3ac18d9..9e350c9741f 100644 --- a/contrib/pg_upgrade/relfilenode.c +++ b/contrib/pg_upgrade/relfilenode.c @@ -79,7 +79,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, /* * get_pg_database_relfilenode() * - * Retrieves the relfilenode for a few system-catalog tables. We need these + * Retrieves the relfilenode for a few system-catalog tables. We need these * relfilenodes later in the upgrade process. */ void diff --git a/contrib/pg_upgrade/util.c b/contrib/pg_upgrade/util.c index f6582f5e385..27dcd22a9b3 100644 --- a/contrib/pg_upgrade/util.c +++ b/contrib/pg_upgrade/util.c @@ -266,7 +266,7 @@ pg_putenv(const char *var, const char *val) /* * Do not free envstr because it becomes part of the environment on - * some operating systems. See port/unsetenv.c::unsetenv. + * some operating systems. See port/unsetenv.c::unsetenv. */ #else SetEnvironmentVariableA(var, val); diff --git a/contrib/pgcrypto/crypt-des.c b/contrib/pgcrypto/crypt-des.c index 10ba6646915..9c5e334ff56 100644 --- a/contrib/pgcrypto/crypt-des.c +++ b/contrib/pgcrypto/crypt-des.c @@ -29,7 +29,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/crypt-gensalt.c b/contrib/pgcrypto/crypt-gensalt.c index 84bf27bedb9..bf6759bf6e6 100644 --- a/contrib/pgcrypto/crypt-gensalt.c +++ b/contrib/pgcrypto/crypt-gensalt.c @@ -9,7 +9,7 @@ * entirely in crypt_blowfish.c. * * Put bcrypt generator also here as crypt-blowfish.c - * may not be compiled always. -- marko + * may not be compiled always. -- marko */ #include "postgres.h" diff --git a/contrib/pgcrypto/fortuna.c b/contrib/pgcrypto/fortuna.c index 47380a812a2..7ab888fb981 100644 --- a/contrib/pgcrypto/fortuna.c +++ b/contrib/pgcrypto/fortuna.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -53,7 +53,7 @@ /* * There is some confusion about whether and how to carry forward - * the state of the pools. Seems like original Fortuna does not + * the state of the pools. Seems like original Fortuna does not * do it, resetting hash after each request. I guess expecting * feeding to happen more often that requesting. This is absolutely * unsuitable for pgcrypto, as nothing asynchronous happens here. @@ -77,7 +77,7 @@ * How many pools. * * Original Fortuna uses 32 pools, that means 32'th pool is - * used not earlier than in 13th year. This is a waste in + * used not earlier than in 13th year. This is a waste in * pgcrypto, as we have very low-frequancy seeding. Here * is preferable to have all entropy usable in reasonable time. * @@ -296,7 +296,7 @@ reseed(FState *st) } /* - * Pick a random pool. This uses key bytes as random source. + * Pick a random pool. This uses key bytes as random source. */ static unsigned get_rand_pool(FState *st) diff --git a/contrib/pgcrypto/fortuna.h b/contrib/pgcrypto/fortuna.h index 2e49f8aab83..bf9f4768d16 100644 --- a/contrib/pgcrypto/fortuna.h +++ b/contrib/pgcrypto/fortuna.h @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c index de240763229..1bb90faa75d 100644 --- a/contrib/pgcrypto/imath.c +++ b/contrib/pgcrypto/imath.c @@ -21,7 +21,7 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE @@ -211,7 +211,7 @@ static int s_vcmp(mp_int a, int v); static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a, mp_size size_b); -/* Unsigned magnitude subtraction. Assumes dc is big enough. */ +/* Unsigned magnitude subtraction. Assumes dc is big enough. */ static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a, mp_size size_b); @@ -2275,7 +2275,7 @@ mp_error_string(mp_result res) /* }}} */ /*------------------------------------------------------------------------*/ -/* Private functions for internal use. These make assumptions. */ +/* Private functions for internal use. These make assumptions. 
*/ /* {{{ s_alloc(num) */ diff --git a/contrib/pgcrypto/imath.h b/contrib/pgcrypto/imath.h index f2b02d0cd7f..f993dbe4726 100644 --- a/contrib/pgcrypto/imath.h +++ b/contrib/pgcrypto/imath.h @@ -20,7 +20,7 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE diff --git a/contrib/pgcrypto/internal-sha2.c b/contrib/pgcrypto/internal-sha2.c index 912effb1412..55ec7e16bd9 100644 --- a/contrib/pgcrypto/internal-sha2.c +++ b/contrib/pgcrypto/internal-sha2.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/internal.c b/contrib/pgcrypto/internal.c index 6c334ff6d6a..d2de9c3c2da 100644 --- a/contrib/pgcrypto/internal.c +++ b/contrib/pgcrypto/internal.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c index 5980cb0e002..7410970f147 100644 --- a/contrib/pgcrypto/mbuf.c +++ b/contrib/pgcrypto/mbuf.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/mbuf.h b/contrib/pgcrypto/mbuf.h index 37d2db53371..7a25fa3882a 100644 --- a/contrib/pgcrypto/mbuf.h +++ b/contrib/pgcrypto/mbuf.h @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/md5.c b/contrib/pgcrypto/md5.c index b5071fba430..85a97a0bce4 100644 --- a/contrib/pgcrypto/md5.c +++ b/contrib/pgcrypto/md5.c @@ -19,7 +19,7 @@ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/md5.h b/contrib/pgcrypto/md5.h index 03b9ab58ba6..07d08c134d4 100644 --- a/contrib/pgcrypto/md5.h +++ b/contrib/pgcrypto/md5.h @@ -20,7 +20,7 @@ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c index 0b1bf746275..0c39b5721fc 100644 --- a/contrib/pgcrypto/openssl.c +++ b/contrib/pgcrypto/openssl.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgcrypto.c b/contrib/pgcrypto/pgcrypto.c index 1da3afcd1d8..be227ab5c37 100644 --- a/contrib/pgcrypto/pgcrypto.c +++ b/contrib/pgcrypto/pgcrypto.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgcrypto.h b/contrib/pgcrypto/pgcrypto.h index 6284ba2406a..f89c0ca423a 100644 --- a/contrib/pgcrypto/pgcrypto.h +++ b/contrib/pgcrypto/pgcrypto.h @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-armor.c b/contrib/pgcrypto/pgp-armor.c index 87adf911259..700dc368b71 100644 --- a/contrib/pgcrypto/pgp-armor.c +++ b/contrib/pgcrypto/pgp-armor.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-cfb.c b/contrib/pgcrypto/pgp-cfb.c index d9939ee11e9..77d208c1136 100644 --- a/contrib/pgcrypto/pgp-cfb.c +++ b/contrib/pgcrypto/pgp-cfb.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -90,7 +90,7 @@ pgp_cfb_free(PGP_CFB *ctx) } /* - * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC) + * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC) */ static int mix_encrypt_normal(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst) diff --git a/contrib/pgcrypto/pgp-compress.c b/contrib/pgcrypto/pgp-compress.c index db0c5dbadff..384bd69b89e 100644 --- a/contrib/pgcrypto/pgp-compress.c +++ b/contrib/pgcrypto/pgp-compress.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c index 2063e8c319e..e03ee7f5f02 100644 --- a/contrib/pgcrypto/pgp-decrypt.c +++ b/contrib/pgcrypto/pgp-decrypt.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-encrypt.c b/contrib/pgcrypto/pgp-encrypt.c index 48eb3f42af0..2320c7574b5 100644 --- a/contrib/pgcrypto/pgp-encrypt.c +++ b/contrib/pgcrypto/pgp-encrypt.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-info.c b/contrib/pgcrypto/pgp-info.c index b75266f18c1..9bfbbe6d0c9 100644 --- a/contrib/pgcrypto/pgp-info.c +++ b/contrib/pgcrypto/pgp-info.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-mpi-internal.c b/contrib/pgcrypto/pgp-mpi-internal.c index d0e5830fe03..ea0620d4852 100644 --- a/contrib/pgcrypto/pgp-mpi-internal.c +++ b/contrib/pgcrypto/pgp-mpi-internal.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -147,7 +147,7 @@ bn_to_mpi(mpz_t *bn) * * Until I research it further, I just mimic gpg behaviour. * It has a special mapping table, for values <= 5120, - * above that it uses 'arbitrary high number'. Following + * above that it uses 'arbitrary high number'. Following * algorihm hovers 10-70 bits above gpg values. And for * larger p, it uses gpg's algorihm. * diff --git a/contrib/pgcrypto/pgp-mpi-openssl.c b/contrib/pgcrypto/pgp-mpi-openssl.c index ed41e1151c3..87936d0d5f6 100644 --- a/contrib/pgcrypto/pgp-mpi-openssl.c +++ b/contrib/pgcrypto/pgp-mpi-openssl.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -82,7 +82,7 @@ bn_to_mpi(BIGNUM *bn) * * Until I research it further, I just mimic gpg behaviour. * It has a special mapping table, for values <= 5120, - * above that it uses 'arbitrary high number'. Following + * above that it uses 'arbitrary high number'. Following * algorihm hovers 10-70 bits above gpg values. And for * larger p, it uses gpg's algorihm. * diff --git a/contrib/pgcrypto/pgp-mpi.c b/contrib/pgcrypto/pgp-mpi.c index 0daca3cda48..6306a908f73 100644 --- a/contrib/pgcrypto/pgp-mpi.c +++ b/contrib/pgcrypto/pgp-mpi.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c index 046228cdde9..f3185eb3035 100644 --- a/contrib/pgcrypto/pgp-pgsql.c +++ b/contrib/pgcrypto/pgp-pgsql.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -93,7 +93,7 @@ add_block_entropy(PX_MD *md, text *data) } /* - * Mix user data into RNG. 
It is for user own interests to have + * Mix user data into RNG. It is for user own interests to have * RNG state shuffled. */ static void @@ -310,7 +310,7 @@ set_arg(PGP_Context *ctx, char *key, char *val, } /* - * Find next word. Handle ',' and '=' as words. Skip whitespace. + * Find next word. Handle ',' and '=' as words. Skip whitespace. * Put word info into res_p, res_len. * Returns ptr to next word. */ diff --git a/contrib/pgcrypto/pgp-pubdec.c b/contrib/pgcrypto/pgp-pubdec.c index fe5fae0c42c..7ed86b8f348 100644 --- a/contrib/pgcrypto/pgp-pubdec.c +++ b/contrib/pgcrypto/pgp-pubdec.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-pubenc.c b/contrib/pgcrypto/pgp-pubenc.c index 0d1be7d2b78..9e3bd2403af 100644 --- a/contrib/pgcrypto/pgp-pubenc.c +++ b/contrib/pgcrypto/pgp-pubenc.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-pubkey.c b/contrib/pgcrypto/pgp-pubkey.c index abab0581235..f898d72ae99 100644 --- a/contrib/pgcrypto/pgp-pubkey.c +++ b/contrib/pgcrypto/pgp-pubkey.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp-s2k.c b/contrib/pgcrypto/pgp-s2k.c index 40c05a1a96e..5f47e79f1d3 100644 --- a/contrib/pgcrypto/pgp-s2k.c +++ b/contrib/pgcrypto/pgp-s2k.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp.c b/contrib/pgcrypto/pgp.c index a17ca93e0df..e2e6c241697 100644 --- a/contrib/pgcrypto/pgp.c +++ b/contrib/pgcrypto/pgp.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h index 7ae01ccc4db..84dfc1929e6 100644 --- a/contrib/pgcrypto/pgp.h +++ b/contrib/pgcrypto/pgp.h @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/px-crypt.c b/contrib/pgcrypto/px-crypt.c index 79c97cce938..f66eb43c08c 100644 --- a/contrib/pgcrypto/px-crypt.c +++ b/contrib/pgcrypto/px-crypt.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/px-crypt.h b/contrib/pgcrypto/px-crypt.h index 7dde9ab77b5..24daee743c9 100644 --- a/contrib/pgcrypto/px-crypt.h +++ b/contrib/pgcrypto/px-crypt.h @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/px-hmac.c b/contrib/pgcrypto/px-hmac.c index 8db79233d95..06e5148f1b4 100644 --- a/contrib/pgcrypto/px-hmac.c +++ b/contrib/pgcrypto/px-hmac.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c index f1a7bbdb6e9..24aa8cc2cca 100644 --- a/contrib/pgcrypto/px.c +++ b/contrib/pgcrypto/px.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h index fe716929e58..488e62ac232 100644 --- a/contrib/pgcrypto/px.h +++ b/contrib/pgcrypto/px.h @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/random.c b/contrib/pgcrypto/random.c index 393a0be983a..3f092ca3461 100644 --- a/contrib/pgcrypto/random.c +++ b/contrib/pgcrypto/random.c @@ -17,7 +17,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/rijndael.c b/contrib/pgcrypto/rijndael.c index 5651d03750e..4adbcc1f916 100644 --- a/contrib/pgcrypto/rijndael.c +++ b/contrib/pgcrypto/rijndael.c @@ -7,12 +7,12 @@ /* RIJNDAEL by Joan Daemen and Vincent Rijmen */ /* */ /* which is a candidate algorithm in the Advanced Encryption Standard */ -/* programme of the US National Institute of Standards and Technology. */ +/* programme of the US National Institute of Standards and Technology. */ /* */ /* Copyright in this implementation is held by Dr B R Gladman but I */ /* hereby give permission for its free direct or derivative use subject */ /* to acknowledgment of its origin and compliance with any conditions */ -/* that the originators of the algorithm place on its exploitation. */ +/* that the originators of the algorithm place on its exploitation. */ /* */ /* Dr Brian Gladman ([email protected]) 14th January 1999 */ @@ -188,7 +188,7 @@ gen_tabs(void) /* rijndael specification is in big endian format with */ /* bit 0 as the most significant bit. In the remainder */ /* of the specification the bits are numbered from the */ - /* least significant end of a byte. */ + /* least significant end of a byte. */ for (i = 0; i < 256; ++i) { diff --git a/contrib/pgcrypto/rijndael.h b/contrib/pgcrypto/rijndael.h index fb30e46c144..e536c61a6fb 100644 --- a/contrib/pgcrypto/rijndael.h +++ b/contrib/pgcrypto/rijndael.h @@ -8,12 +8,12 @@ /* RIJNDAEL by Joan Daemen and Vincent Rijmen */ /* */ /* which is a candidate algorithm in the Advanced Encryption Standard */ -/* programme of the US National Institute of Standards and Technology. */ +/* programme of the US National Institute of Standards and Technology. */ /* */ /* Copyright in this implementation is held by Dr B R Gladman but I */ /* hereby give permission for its free direct or derivative use subject */ /* to acknowledgment of its origin and compliance with any conditions */ -/* that the originators of the algorithm place on its exploitation. */ +/* that the originators of the algorithm place on its exploitation. */ /* */ /* Dr Brian Gladman ([email protected]) 14th January 1999 */ diff --git a/contrib/pgcrypto/sha1.c b/contrib/pgcrypto/sha1.c index 4ee4f24559b..314d245a7db 100644 --- a/contrib/pgcrypto/sha1.c +++ b/contrib/pgcrypto/sha1.c @@ -19,7 +19,7 @@ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/sha1.h b/contrib/pgcrypto/sha1.h index 3e0931efbc8..5532ca160d3 100644 --- a/contrib/pgcrypto/sha1.h +++ b/contrib/pgcrypto/sha1.h @@ -20,7 +20,7 @@ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/sha2.c b/contrib/pgcrypto/sha2.c index bde1f61b311..231f9dfbb0e 100644 --- a/contrib/pgcrypto/sha2.c +++ b/contrib/pgcrypto/sha2.c @@ -22,7 +22,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgcrypto/sha2.h b/contrib/pgcrypto/sha2.h index df77a7a6596..501f0e04463 100644 --- a/contrib/pgcrypto/sha2.h +++ b/contrib/pgcrypto/sha2.h @@ -23,7 +23,7 @@ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index e5ddd870910..47af30add5e 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -299,7 +299,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) /* * To avoid physically reading the table twice, try to do the - * free-space scan in parallel with the heap scan. However, + * free-space scan in parallel with the heap scan. However, * heap_getnext may find no tuples on a given page, so we cannot * simply examine the pages returned by the heap scan. */ diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c index 3d05cc505ce..34ecb836164 100644 --- a/contrib/spi/timetravel.c +++ b/contrib/spi/timetravel.c @@ -47,17 +47,17 @@ static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans); /* * timetravel () -- - * 1. IF an update affects tuple with stop_date eq INFINITY + * 1. 
IF an update affects tuple with stop_date eq INFINITY * then form (and return) new tuple with start_date eq current date * and stop_date eq INFINITY [ and update_user eq current user ] * and all other column values as in new tuple, and insert tuple * with old data and stop_date eq current date * ELSE - skip update of tuple. - * 2. IF a delete affects tuple with stop_date eq INFINITY + * 2. IF a delete affects tuple with stop_date eq INFINITY * then insert the same tuple with stop_date eq current date * [ and delete_user eq current user ] * ELSE - skip deletion of tuple. - * 3. On INSERT, if start_date is NULL then current date will be + * 3. On INSERT, if start_date is NULL then current date will be * inserted, if stop_date is NULL then INFINITY will be inserted. * [ and insert_user eq current user, update_user and delete_user * eq NULL ] diff --git a/contrib/sslinfo/sslinfo.c b/contrib/sslinfo/sslinfo.c index b5fd7ec987d..1cb24b571e7 100644 --- a/contrib/sslinfo/sslinfo.c +++ b/contrib/sslinfo/sslinfo.c @@ -132,7 +132,7 @@ ssl_client_serial(PG_FUNCTION_ARGS) * current database encoding if possible. Any invalid characters are * replaced by question marks. * - * Parameter: str - OpenSSL ASN1_STRING structure. Memory management + * Parameter: str - OpenSSL ASN1_STRING structure. Memory management * of this structure is the responsibility of the caller. * * Returns Datum, which can be directly returned from a C language SQL diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 75d599aab9a..ca060fabbd6 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -21,7 +21,7 @@ * tuptoaster.c. * * This change will break any code that assumes it needn't detoast values - * that have been put into a tuple but never sent to disk. Hopefully there + * that have been put into a tuple but never sent to disk. Hopefully there * are few such places. * * Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since @@ -388,7 +388,7 @@ nocachegetattr(HeapTuple tuple, /* * Otherwise, check for non-fixed-length attrs up to and including - * target. If there aren't any, it's safe to cheaply initialize the + * target. If there aren't any, it's safe to cheaply initialize the * cached offsets for these attrs. */ if (HeapTupleHasVarWidth(tuple)) @@ -455,7 +455,7 @@ nocachegetattr(HeapTuple tuple, * * Note - This loop is a little tricky. For each non-null attribute, * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no + * then advance over the attr based on its length. Nulls have no * storage and no alignment padding either. We can use/set * attcacheoff until we reach either a null or a var-width attribute. */ @@ -550,7 +550,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) /* * cmin and cmax are now both aliases for the same field, which - * can in fact also be a combo command id. XXX perhaps we should + * can in fact also be a combo command id. XXX perhaps we should * return the "real" cmin or cmax if possible, that is if we are * inside the originating transaction? */ @@ -710,7 +710,7 @@ heap_form_tuple(TupleDesc tupleDescriptor, len += data_len; /* - * Allocate and zero the space needed. Note that the tuple body and + * Allocate and zero the space needed. Note that the tuple body and * HeapTupleData management structure are allocated in one chunk.
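The one-chunk layout described in that comment, and performed by the palloc0(HEAPTUPLESIZE + len) call just below, can be sketched in standalone C. DemoTuple and demo_form_tuple are invented names, and calloc stands in for palloc0; this is an illustration, not the PostgreSQL code:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for the header-plus-body layout described above. */
    typedef struct DemoTuple
    {
        size_t  t_len;      /* length of the tuple body */
        char   *t_data;     /* points just past this struct, same allocation */
    } DemoTuple;

    static DemoTuple *
    demo_form_tuple(const char *body, size_t body_len)
    {
        /* one zeroed chunk covers both the management struct and the body */
        DemoTuple  *tup = calloc(1, sizeof(DemoTuple) + body_len);

        if (tup == NULL)
            return NULL;
        tup->t_len = body_len;
        tup->t_data = (char *) tup + sizeof(DemoTuple);
        memcpy(tup->t_data, body, body_len);
        return tup;     /* a single free(tup) releases header and body */
    }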
*/ tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len); diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index 2b518c53464..65c77cf41ec 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor, /* * If value is stored EXTERNAL, must fetch it so we are not depending - * on outside storage. This should be improved someday. + * on outside storage. This should be improved someday. */ if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i]))) { @@ -281,7 +281,7 @@ nocache_index_getattr(IndexTuple tup, /* * Otherwise, check for non-fixed-length attrs up to and including - * target. If there aren't any, it's safe to cheaply initialize the + * target. If there aren't any, it's safe to cheaply initialize the * cached offsets for these attrs. */ if (IndexTupleHasVarwidths(tup)) @@ -348,7 +348,7 @@ nocache_index_getattr(IndexTuple tup, * * Note - This loop is a little tricky. For each non-null attribute, * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no + * then advance over the attr based on its length. Nulls have no * storage and no alignment padding either. We can use/set * attcacheoff until we reach either a null or a var-width attribute. */ diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index afce6d5edfa..b959d460303 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -166,7 +166,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) * or some similar function; it does not contain a full set of fields. * The targetlist will be NIL when executing a utility function that does * not have a plan. If the targetlist isn't NIL then it is a Query node's - * targetlist; it is up to us to ignore resjunk columns in it. The formats[] + * targetlist; it is up to us to ignore resjunk columns in it. The formats[] * array pointer might be NULL (if we are doing Describe on a prepared stmt); * send zeroes for the format codes in that case. */ diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 465742556f5..e4d0380e1cd 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -466,7 +466,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val, * Add a new string reloption * * "validator" is an optional function pointer that can be used to test the - * validity of the values. It must elog(ERROR) when the argument string is + * validity of the values. It must elog(ERROR) when the argument string is * not acceptable for the variable. Note that the default value must pass * the validation. */ @@ -533,7 +533,7 @@ add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val, * Note that this is not responsible for determining whether the options * are valid, but it does check that namespaces for all the options given are * listed in validnsps. The NULL namespace is always valid and need not be - * explicitly listed. Passing a NULL pointer means that only the NULL + * explicitly listed. Passing a NULL pointer means that only the NULL * namespace is valid. * * Both oldOptions and the result are text arrays (or NULL for "default"), @@ -809,7 +809,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions) * is returned.
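A minimal sketch of the validator contract spelled out in the add_string_reloption comment above, assuming the signature shown in that hunk; the option name "demo_mode", its legal values, and the use of _PG_init for registration are all hypothetical:

    #include "postgres.h"
    #include "access/reloptions.h"

    /* Hypothetical validator: must elog(ERROR) for unacceptable strings,
     * and the default value ("off" below) must pass. */
    static void
    validate_demo_mode(char *value)
    {
        if (value != NULL &&
            strcmp(value, "on") != 0 && strcmp(value, "off") != 0)
            elog(ERROR, "invalid value for \"demo_mode\": \"%s\"", value);
    }

    void
    _PG_init(void)
    {
        /* signature as shown in the hunk above; kind chosen arbitrarily */
        add_string_reloption(RELOPT_KIND_HEAP, "demo_mode",
                             "hypothetical example option", "off",
                             validate_demo_mode);
    }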
* * Note: values of type int, bool and real are allocated as part of the - * returned array. Values of type string are allocated separately and must + * returned array. Values of type string are allocated separately and must * be freed by the caller. */ relopt_value * diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c index 34e5f114404..94a7f87b377 100644 --- a/src/backend/access/common/tupconvert.c +++ b/src/backend/access/common/tupconvert.c @@ -5,7 +5,7 @@ * * These functions provide conversion between rowtypes that are logically * equivalent but might have columns in a different order or different sets - * of dropped columns. There is some overlap of functionality with the + * of dropped columns. There is some overlap of functionality with the * executor's "junkfilter" routines, but these functions work on bare * HeapTuples rather than TupleTableSlots. * diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index 014fc3babfd..5ed8e7c00ad 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -533,7 +533,7 @@ TupleDescInitEntryCollation(TupleDesc desc, * Given a relation schema (list of ColumnDef nodes), build a TupleDesc. * * Note: the default assumption is no OIDs; caller may modify the returned - * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in + * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in * later on. */ TupleDesc diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c index 2de58604eee..ef3ec45fd71 100644 --- a/src/backend/access/gin/ginarrayproc.c +++ b/src/backend/access/gin/ginarrayproc.c @@ -197,7 +197,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS) /* * Must have all elements in check[] true; no discrimination - * against nulls here. This is because array_contain_compare and + * against nulls here. This is because array_contain_compare and * array_eq handle nulls differently ... */ res = true; diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c index 9e5bab194de..7dab03f8fc2 100644 --- a/src/backend/access/gin/ginbulk.c +++ b/src/backend/access/gin/ginbulk.c @@ -187,7 +187,7 @@ ginInsertBAEntry(BuildAccumulator *accum, * Since the entries are being inserted into a balanced binary tree, you * might think that the order of insertion wouldn't be critical, but it turns * out that inserting the entries in sorted order results in a lot of - * rebalancing operations and is slow. To prevent this, we attempt to insert + * rebalancing operations and is slow. To prevent this, we attempt to insert * the nodes in an order that will produce a nearly-balanced tree if the input * is in fact sorted. * diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c index 97cd755e3ef..ba7408f4b9a 100644 --- a/src/backend/access/gin/ginentrypage.c +++ b/src/backend/access/gin/ginentrypage.c @@ -164,7 +164,7 @@ GinShortenTuple(IndexTuple itup, uint32 nipd) * Form a non-leaf entry tuple by copying the key data from the given tuple, * which can be either a leaf or non-leaf entry tuple. * - * Any posting list in the source tuple is not copied. The specified child + * Any posting list in the source tuple is not copied. The specified child * block number is inserted into t_tid. 
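The insertion order the ginbulk.c comment above alludes to can be illustrated with a middle-element-first recursion: if the input is sorted, each subrange's midpoint becomes a subtree root, which keeps the binary tree nearly balanced and avoids the rebalancing storm. A self-contained sketch (not the actual GIN code):

    /* Illustrative only: insert midpoints first so that sorted input
     * produces a nearly balanced binary tree. */
    static void
    insert_balanced(const int *sorted, int lo, int hi, void (*insert)(int))
    {
        if (lo > hi)
            return;

        int     mid = lo + (hi - lo) / 2;

        insert(sorted[mid]);                    /* subtree root first */
        insert_balanced(sorted, lo, mid - 1, insert);
        insert_balanced(sorted, mid + 1, hi, insert);
    }

Called as insert_balanced(keys, 0, n - 1, insert_fn) on sorted input, the sequence visits midpoints before their subranges, which is the property the comment relies on.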
*/ static IndexTuple diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index cc01deac3ec..8742bc50823 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -444,7 +444,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) * Create temporary index tuples for a single indexable item (one index column * for the heap tuple specified by ht_ctid), and append them to the array * in *collector. They will subsequently be written out using - * ginHeapTupleFastInsert. Note that to guarantee consistent state, all + * ginHeapTupleFastInsert. Note that to guarantee consistent state, all * temp tuples for a given heap tuple must be written in one call to * ginHeapTupleFastInsert. */ @@ -713,7 +713,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, * * This can be called concurrently by multiple backends, so it must cope. * On first glance it looks completely not concurrent-safe and not crash-safe - * either. The reason it's okay is that multiple insertion of the same entry + * either. The reason it's okay is that multiple insertion of the same entry * is detected and treated as a no-op by gininsert.c. If we crash after * posting entries to the main index and before removing them from the * pending list, it's okay because when we redo the posting later on, nothing @@ -767,7 +767,7 @@ ginInsertCleanup(GinState *ginstate, LockBuffer(metabuffer, GIN_UNLOCK); /* - * Initialize. All temporary space will be in opCtx + * Initialize. All temporary space will be in opCtx */ opCtx = AllocSetContextCreate(CurrentMemoryContext, "GIN insert cleanup temporary context", @@ -861,7 +861,7 @@ ginInsertCleanup(GinState *ginstate, /* * While we left the page unlocked, more stuff might have gotten - * added to it. If so, process those entries immediately. There + * added to it. If so, process those entries immediately. There * shouldn't be very many, so we don't worry about the fact that * we're doing this with exclusive lock. Insertion algorithm * guarantees that inserted row(s) will not continue on next page. diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index 4fe87eb46af..5d414c8a575 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -166,7 +166,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry, /* * Collects TIDs into scanEntry->matchBitmap for all heap tuples that - * match the search entry. This supports three different match modes: + * match the search entry. This supports three different match modes: * * 1. Partial-match support: scan from current point until the * comparePartialFn says we're done. @@ -262,7 +262,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, /* * In ALL mode, we are not interested in null items, so we can * stop if we get to a null-item placeholder (which will be the - * last entry for a given attnum). We do want to include NULL_KEY + * last entry for a given attnum). We do want to include NULL_KEY * and EMPTY_ITEM entries, though. */ if (icategory == GIN_CAT_NULL_ITEM) @@ -960,14 +960,14 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast, * that exact TID, or a lossy reference to the same page. * * This logic works only if a keyGetItem stream can never contain both - * exact and lossy pointers for the same page. Else we could have a + * exact and lossy pointers for the same page. Else we could have a * case like * * stream 1 stream 2 - * ... ... * 42/6 42/7 * 50/1 42/0xffff - * ... ...
+ * ... ... * * We would conclude that 42/6 is not a match and advance stream 1, * thus never detecting the match to the lossy pointer in stream 2. @@ -996,7 +996,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast, break; /* - * No hit. Update myAdvancePast to this TID, so that on the next pass + * No hit. Update myAdvancePast to this TID, so that on the next pass * we'll move to the next possible entry. */ myAdvancePast = *item; @@ -1512,10 +1512,10 @@ gingetbitmap(PG_FUNCTION_ARGS) /* * First, scan the pending list and collect any matching entries into the - * bitmap. After we scan a pending item, some other backend could post it + * bitmap. After we scan a pending item, some other backend could post it * into the main index, and so we might visit it a second time during the * main scan. This is okay because we'll just re-set the same bit in the - * bitmap. (The possibility of duplicate visits is a major reason why GIN + * bitmap. (The possibility of duplicate visits is a major reason why GIN * can't support the amgettuple API, however.) Note that it would not do * to scan the main index before the pending list, since concurrent * cleanup could then make us miss entries entirely. diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 3e32af94a96..2976414f9fc 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -97,7 +97,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems) * Adds array of item pointers to tuple's posting list, or * creates posting tree and tuple pointing to tree in case * of not enough space. Max size of tuple is defined in - * GinFormTuple(). Returns a new, modified index tuple. + * GinFormTuple(). Returns a new, modified index tuple. * items[] must be in sorted order with no duplicates. */ static IndexTuple diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c index f8d54b1b462..b2944cfbb31 100644 --- a/src/backend/access/gin/ginscan.c +++ b/src/backend/access/gin/ginscan.c @@ -388,7 +388,7 @@ ginNewScanKey(IndexScanDesc scan) /* * If the index is version 0, it may be missing null and placeholder * entries, which would render searches for nulls and full-index scans - * unreliable. Throw an error if so. + * unreliable. Throw an error if so. */ if (hasNullQuery && !so->isVoidRes) { diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c index ba142bc874d..493deebb91a 100644 --- a/src/backend/access/gin/ginutil.c +++ b/src/backend/access/gin/ginutil.c @@ -438,7 +438,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum, * If there's more than one key, sort and unique-ify. * * XXX Using qsort here is notationally painful, and the overhead is - * pretty bad too. For small numbers of keys it'd likely be better to use + * pretty bad too. For small numbers of keys it'd likely be better to use * a simple insertion sort. */ if (*nentries > 1) diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index 6b781c5d356..0d58d4830e6 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -692,7 +692,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record) /* * In normal operation, shiftList() takes exclusive lock on all the - * pages-to-be-deleted simultaneously. During replay, however, it should + * pages-to-be-deleted simultaneously. During replay, however, it should * be all right to lock them one at a time. 
This is dependent on the fact * that we are deleting pages from the head of the list, and that readers * share-lock the next page before releasing the one they are on. So we diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 7172f210286..2b34ce3d09a 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -216,7 +216,7 @@ gistbuildCallback(Relation index, /* * Since we already have the index relation locked, we call gistdoinsert * directly. Normal access method calls dispatch through gistinsert, - * which locks the relation for write. This is the right thing to do if + * which locks the relation for write. This is the right thing to do if * you're inserting single tups, but not when you're initializing the * whole index at once. * @@ -1495,7 +1495,7 @@ initGISTstate(GISTSTATE *giststate, Relation index) /* * If the index column has a specified collation, we should honor that * while doing comparisons. However, we may have a collatable storage - * type for a noncollatable indexed data type. If there's no index + * type for a noncollatable indexed data type. If there's no index * collation then specify default collation in case the support * functions need collation. This is harmless if the support * functions don't care about collation, so we just do it diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 1aba6868447..6cf6a1f9d56 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -32,7 +32,7 @@ * * On success return for a heap tuple, *recheck_p is set to indicate * whether recheck is needed. We recheck if any of the consistent() functions - * request it. recheck is not interesting when examining a non-leaf entry, + * request it. recheck is not interesting when examining a non-leaf entry, * since we must visit the lower index page if there's any doubt. * * If we are doing an ordered scan, so->distances[] is filled with distance @@ -63,7 +63,7 @@ gistindex_keytest(IndexScanDesc scan, /* * If it's a leftover invalid tuple from pre-9.1, treat it as a match with - * minimum possible distances. This means we'll always follow it to the + * minimum possible distances. This means we'll always follow it to the * referenced page. */ if (GistTupleIsInvalid(tuple)) @@ -225,7 +225,7 @@ gistindex_keytest(IndexScanDesc scan, * ntids: if not NULL, gistgetbitmap's output tuple counter * * If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap - * tuples should be reported directly into the bitmap. If they are NULL, + * tuples should be reported directly into the bitmap. If they are NULL, * we're doing a plain or ordered indexscan. For a plain indexscan, heap * tuple TIDs are returned into so->pageData[]. For an ordered indexscan, * heap tuple TIDs are pushed into individual search queue items. diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 92405283d02..870450b1445 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -58,7 +58,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg) /* * If new item is heap tuple, it goes to front of chain; otherwise insert * it before the first index-page item, so that index pages are visited in - * LIFO order, ensuring depth-first search of index pages. See comments + * LIFO order, ensuring depth-first search of index pages. See comments * in gist_private.h. 
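A sketch of the chain discipline just described in gistscan.c: heap tuples go to the front, while index-page items are inserted before the first existing index-page item, so pages come back out in LIFO order and the search proceeds depth-first. The ChainItem type and names are invented:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct ChainItem
    {
        bool              is_heap;
        struct ChainItem *next;
    } ChainItem;

    /* Illustrative only: returns the new head of the chain. */
    static ChainItem *
    chain_add(ChainItem *head, ChainItem *item)
    {
        if (item->is_heap)
        {
            item->next = head;              /* heap tuples go to the front */
            return item;
        }

        ChainItem **p = &head;

        while (*p != NULL && (*p)->is_heap)
            p = &(*p)->next;                /* skip the leading heap tuples */
        item->next = *p;                    /* before first index-page item: LIFO */
        *p = item;
        return head;
    }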
*/ if (GISTSearchItemIsHeap(*newitem)) diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c index 67741b6ca7a..ed061afdd47 100644 --- a/src/backend/access/gist/gistsplit.c +++ b/src/backend/access/gist/gistsplit.c @@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec, * Recompute unions of left- and right-side subkeys after a page split, * ignoring any tuples that are marked in spl->spl_dontcare[]. * - * Note: we always recompute union keys for all index columns. In some cases + * Note: we always recompute union keys for all index columns. In some cases * this might represent duplicate work for the leftmost column(s), but it's * not safe to assume that "zero penalty to move a tuple" means "the union * key doesn't change at all". Penalty functions aren't 100% accurate. @@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec, /* * Remove tuples that are marked don't-cares from the tuple index array a[] - * of length *len. This is applied separately to the spl_left and spl_right + * of length *len. This is applied separately to the spl_left and spl_right * arrays. */ static void @@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare) /* * Place a single don't-care tuple into either the left or right side of the * split, according to which has least penalty for merging the tuple into - * the previously-computed union keys. We need consider only columns starting + * the previously-computed union keys. We need consider only columns starting * at attno. */ static void @@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, /* * There is only one previously defined union, so we just choose swap - * or not by lowest penalty for that side. We can only get here if a + * or not by lowest penalty for that side. We can only get here if a * secondary split happened to have all NULLs in its column in the * tuples that the outer recursion level had assigned to one side. * (Note that the null checks in gistSplitByKey don't prevent the @@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec sv->spl_rdatum = v->spl_rattr[attno]; /* - * Let the opclass-specific PickSplit method do its thing. Note that at + * Let the opclass-specific PickSplit method do its thing. Note that at * this point we know there are no null keys in the entryvec. */ FunctionCall2Coll(&giststate->picksplitFn[attno], diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index 806f229800f..92c8c6c583a 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -432,7 +432,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ { /* * New best penalty for column. Tentatively select this tuple - * as the target, and record the best penalty. Then reset the + * as the target, and record the best penalty. Then reset the * next column's penalty to "unknown" (and indirectly, the * same for all the ones to its right). This will force us to * adopt this tuple's penalty values as the best for all the @@ -448,7 +448,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ { /* * The current tuple is exactly as good for this column as the - * best tuple seen so far. The next iteration of this loop + * best tuple seen so far. The next iteration of this loop * will compare the next column. 
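Read as a whole, the gistchoose loop described above computes a lexicographic minimum over per-column penalties: a strictly better column adopts the candidate tuple, a strictly worse one keeps the incumbent, and a tie moves on to the next column. A standalone sketch under that reading (NCOLS and the array layout are assumptions):

    #define NCOLS 3     /* hypothetical number of index columns */

    /* Illustrative only: pick the tuple whose per-column penalties are
     * lexicographically smallest. */
    static int
    choose_least_penalty(float penalty[][NCOLS], int ntuples)
    {
        int     best = 0;

        for (int i = 1; i < ntuples; i++)
        {
            for (int col = 0; col < NCOLS; col++)
            {
                if (penalty[i][col] < penalty[best][col])
                {
                    best = i;   /* new best for this column: adopt tuple */
                    break;      /* later columns restart from this tuple */
                }
                if (penalty[i][col] > penalty[best][col])
                    break;      /* strictly worse: keep the current best */
                /* equal for this column: compare the next column */
            }
        }
        return best;
    }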
*/ } @@ -623,7 +623,7 @@ gistcheckpage(Relation rel, Buffer buf) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. */ if (PageIsNew(page)) diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index 50c6270b005..deaadc594bb 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -53,7 +53,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS) stats->estimated_count = info->estimated_count; /* - * XXX the above is wrong if index is partial. Would it be OK to just + * XXX the above is wrong if index is partial. Would it be OK to just * return NULL, or is there work we must do below? */ } diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index 4cb29b2bb45..720ecc548a2 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -76,7 +76,7 @@ hashbuild(PG_FUNCTION_ARGS) * (assuming their hash codes are pretty random) there will be no locality * of access to the index, and if the index is bigger than available RAM * then we'll thrash horribly. To prevent that scenario, we can sort the - * tuples by (expected) bucket number. However, such a sort is useless + * tuples by (expected) bucket number. However, such a sort is useless * overhead when the index does fit in RAM. We choose to sort if the * initial index size exceeds NBuffers. * @@ -246,7 +246,7 @@ hashgettuple(PG_FUNCTION_ARGS) /* * An insertion into the current index page could have happened while * we didn't have read lock on it. Re-find our position by looking - * for the TID we previously returned. (Because we hold share lock on + * for the TID we previously returned. (Because we hold share lock on * the bucket, no deletions or splits could have occurred; therefore * we can expect that the TID still exists in the current index page, * at an offset >= where we were.) @@ -524,7 +524,7 @@ hashbulkdelete(PG_FUNCTION_ARGS) /* * Read the metapage to fetch original bucket and tuple counts. Also, we * keep a copy of the last-seen metapage so that we can use its - * hashm_spares[] values to compute bucket page addresses. This is a bit + * hashm_spares[] values to compute bucket page addresses. This is a bit * hokey but perfectly safe, since the interesting entries in the spares * array cannot change under us; and it beats rereading the metapage for * each bucket. @@ -655,7 +655,7 @@ loop_top: { /* * Otherwise, our count is untrustworthy since we may have - * double-scanned tuples in split buckets. Proceed by dead-reckoning. + * double-scanned tuples in split buckets. Proceed by dead-reckoning. * (Note: we still return estimated_count = false, because using this * count is better than not updating reltuples at all.) */ diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index 897bf9c1ac9..1b876a77f31 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -11,7 +11,7 @@ * src/backend/access/hash/hashfunc.c * * NOTES - * These functions are stored in pg_amproc. For each operator class + * These functions are stored in pg_amproc. For each operator class * defined for hash indexes, they compute the hash value of the argument. 
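The all-zero-page defense mentioned for gistcheckpage above (the same check reappears for _hash_checkpage later in this patch) hinges on one observation: a newly extended, never-initialized page reads back as all zeroes, and that also passes the read-time header sanity check, so callers must detect it explicitly. PostgreSQL's PageIsNew test reduces to the pd_upper field being zero; a hedged standalone sketch with an invented header type:

    #include <stdbool.h>
    #include <stdint.h>

    /* Minimal stand-in for a page header; only the field the newness test
     * consults is shown here. The real PageHeaderData has more fields. */
    typedef struct DemoPageHeader
    {
        uint16_t pd_lower;
        uint16_t pd_upper;  /* 0 on an all-zero, never-initialized page */
    } DemoPageHeader;

    /* An all-zero page must be reported as an error by index code rather
     * than interpreted, as the comments above describe. */
    static bool
    demo_page_is_new(const void *page)
    {
        return ((const DemoPageHeader *) page)->pd_upper == 0;
    }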
* * Additional hash functions appear in /utils/adt/ files for various @@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS) /* * Note: this is currently identical in behavior to hashvarlena, but keep * it as a separate function in case we someday want to do something - * different in non-C locales. (See also hashbpchar, if so.) + * different in non-C locales. (See also hashbpchar, if so.) */ result = hash_any((unsigned char *) VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key)); @@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS) * * This allows some parallelism. Read-after-writes are good at doubling * the number of bits affected, so the goal of mixing pulls in the opposite - * direction from the goal of parallelism. I did what I could. Rotates + * direction from the goal of parallelism. I did what I could. Rotates * seem to cost as much as shifts on every machine I could lay my hands on, * and rotates are much kinder to the top and bottom bits, so I used rotates. *---------- @@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS) * substantial performance increase since final() does not need to * do well in reverse, but it does need to affect all output bits. * mix(), on the other hand, does not need to affect all output - * bits (affecting 32 bits is enough). The original hash function had + * bits (affecting 32 bits is enough). The original hash function had * a single mixing operation that had to satisfy both sets of requirements * and was slower as a result. *---------- @@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS) * k : the key (the unaligned variable-length array of bytes) * len : the length of the key, counting by bytes * - * Returns a uint32 value. Every bit of the key affects every bit of + * Returns a uint32 value. Every bit of the key affects every bit of * the return value. Every 1-bit and 2-bit delta achieves avalanche. * About 6*len+35 instructions. The best hash table sizes are powers * of 2. There is no need to do mod a prime (mod is sooo slow!). diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index ae8b2b1cfd4..f9bc621c4b4 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -81,7 +81,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) * * Add an overflow page to the bucket whose last page is pointed to by 'buf'. * - * On entry, the caller must hold a pin but no lock on 'buf'. The pin is + * On entry, the caller must hold a pin but no lock on 'buf'. The pin is * dropped before exiting (we assume the caller is not interested in 'buf' * anymore). The returned overflow page will be pinned and write-locked; * it is guaranteed to be empty. @@ -90,12 +90,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) * That buffer is returned in the same state. * * The caller must hold at least share lock on the bucket, to ensure that - * no one else tries to compact the bucket meanwhile. This guarantees that + * no one else tries to compact the bucket meanwhile. This guarantees that * 'buf' won't stop being part of the bucket while it's unlocked. * * NB: since this could be executed concurrently by multiple processes, * one should not assume that the returned overflow page will be the - * immediate successor of the originally passed 'buf'. Additional overflow + * immediate successor of the originally passed 'buf'. Additional overflow * pages might have been added to the bucket chain in between.
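Stepping back to the hashfunc.c commentary above: the rotate-based mixing it discusses is Bob Jenkins' public-domain lookup3 design. A sketch of the rot/mix pair follows; the rotation constants are quoted from that code and should be treated as an assumption here, not as text from this diff:

    #include <stdint.h>

    /* Rotate a 32-bit value left by k bits. */
    #define rot(x, k) (((uint32_t) (x) << (k)) | ((uint32_t) (x) >> (32 - (k))))

    /* One mixing round over three 32-bit accumulators a, b, c. The round
     * is reversible, which is what lets input-bit differences keep
     * propagating instead of cancelling out. */
    #define mix(a, b, c) \
    do { \
        a -= c;  a ^= rot(c,  4);  c += b; \
        b -= a;  b ^= rot(a,  6);  a += c; \
        c -= b;  c ^= rot(b,  8);  b += a; \
        a -= c;  a ^= rot(c, 16);  c += b; \
        b -= a;  b ^= rot(a, 19);  a += c; \
        c -= b;  c ^= rot(b,  4);  b += a; \
    } while (0)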
*/ Buffer @@ -158,7 +158,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf) /* * _hash_getovflpage() * - * Find an available overflow page and return it. The returned buffer + * Find an available overflow page and return it. The returned buffer * is pinned and write-locked, and has had _hash_pageinit() applied, * but it is caller's responsibility to fill the special space. * @@ -254,7 +254,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf) * We create the new bitmap page with all pages marked "in use". * Actually two pages in the new bitmap's range will exist * immediately: the bitmap page itself, and the following page which - * is the one we return to the caller. Both of these are correctly + * is the one we return to the caller. Both of these are correctly * marked "in use". Subsequent pages do not exist yet, but it is * convenient to pre-mark them as "in use" too. */ @@ -285,7 +285,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf) metap->hashm_spares[splitnum]++; /* - * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * Adjust hashm_firstfree to avoid redundant searches. But don't risk * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) @@ -314,7 +314,7 @@ found: blkno = bitno_to_blkno(metap, bit); /* - * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * Adjust hashm_firstfree to avoid redundant searches. But don't risk * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) @@ -495,7 +495,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, /* * _hash_initbitmap() * - * Initialize a new bitmap page. The metapage has a write-lock upon + * Initialize a new bitmap page. The metapage has a write-lock upon * entering the function, and must be written by caller after return. * * 'blkno' is the block number of the new bitmap page. diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index fe991cf27bb..62e7755634b 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -52,7 +52,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf, * of the locking rules). However, we can skip taking lmgr locks when the * index is local to the current backend (ie, either temp or new in the * current transaction). No one else can see it, so there's no reason to - * take locks. We still take buffer-level locks, but not lmgr locks. + * take locks. We still take buffer-level locks, but not lmgr locks. */ #define USELOCKING(rel) (!RELATION_IS_LOCAL(rel)) @@ -139,7 +139,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags) * * This must be used only to fetch pages that are known to be before * the index's filesystem EOF, but are to be filled from scratch. - * _hash_pageinit() is applied automatically. Otherwise it has + * _hash_pageinit() is applied automatically. Otherwise it has * effects similar to _hash_getbuf() with access = HASH_WRITE. * * When this routine returns, a write lock is set on the @@ -347,7 +347,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum) /* * Determine the target fill factor (in tuples per bucket) for this index. * The idea is to make the fill factor correspond to pages about as full - * as the user-settable fillfactor parameter says. We can compute it + * as the user-settable fillfactor parameter says. We can compute it * exactly since the index datatype (i.e. 
uint32 hash key) is fixed-width. */ data_width = sizeof(uint32); @@ -380,7 +380,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum) /* * We initialize the metapage, the first N bucket pages, and the first * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend() - * calls to occur. This ensures that the smgr level has the right idea of + * calls to occur. This ensures that the smgr level has the right idea of * the physical index length. */ metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum); @@ -516,9 +516,9 @@ _hash_expandtable(Relation rel, Buffer metabuf) * Note: deadlock should be impossible here. Our own backend could only be * holding bucket sharelocks due to stopped indexscans; those will not * block other holders of the page-zero lock, who are only interested in - * acquiring bucket sharelocks themselves. Exclusive bucket locks are + * acquiring bucket sharelocks themselves. Exclusive bucket locks are * only taken here and in hashbulkdelete, and neither of these operations - * needs any additional locks to complete. (If, due to some flaw in this + * needs any additional locks to complete. (If, due to some flaw in this * reasoning, we manage to deadlock anyway, it's okay to error out; the * index will be left in a consistent state.) */ @@ -560,7 +560,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * Determine which bucket is to be split, and attempt to lock the old - * bucket. If we can't get the lock, give up. + * bucket. If we can't get the lock, give up. * * The lock protects us against other backends, but not against our own * backend. Must check for active scans separately. @@ -618,7 +618,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) } /* - * Okay to proceed with split. Update the metapage bucket mapping info. + * Okay to proceed with split. Update the metapage bucket mapping info. * * Since we are scribbling on the metapage data right in the shared * buffer, any failure in this next little bit leaves us with a big @@ -656,7 +656,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) * Copy bucket mapping info now; this saves re-accessing the meta page * inside _hash_splitbucket's inner loop. Note that once we drop the * split lock, other splits could begin, so these values might be out of - * date before _hash_splitbucket finishes. That's okay, since all it + * date before _hash_splitbucket finishes. That's okay, since all it * needs is to tell which of these two buckets to map hashkeys into. */ maxbucket = metap->hashm_maxbucket; @@ -897,7 +897,7 @@ _hash_splitbucket(Relation rel, /* * We're at the end of the old bucket chain, so we're done partitioning - * the tuples. Before quitting, call _hash_squeezebucket to ensure the + * the tuples. Before quitting, call _hash_squeezebucket to ensure the * tuples remaining in the old bucket (including the overflow pages) are * packed as tightly as possible. The new bucket is already tight. */ diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c index bf42be103f1..a3e9ac10212 100644 --- a/src/backend/access/hash/hashsearch.c +++ b/src/backend/access/hash/hashsearch.c @@ -251,7 +251,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) * _hash_step() -- step to the next valid item in a scan in the bucket. * * If no valid record exists in the requested direction, return - * false. Else, return true and set the hashso_curpos for the + * false. Else, return true and set the hashso_curpos for the * scan to the right thing. 
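The maxbucket/highmask/lowmask triple copied in _hash_expandtable above answers one question: which bucket a hash key belongs to while the table is only partially doubled. A standalone sketch modeled on PostgreSQL's _hash_hashkey2bucket, with uint32_t standing in for the backend's uint32:

    #include <stdint.h>

    /* Sketch of the bucket mapping: highmask covers the doubled table,
     * lowmask the pre-split table size. */
    static uint32_t
    hashkey2bucket(uint32_t hashkey,
                   uint32_t maxbucket, uint32_t highmask, uint32_t lowmask)
    {
        uint32_t    bucket = hashkey & highmask;

        if (bucket > maxbucket)
            bucket &= lowmask;      /* bucket not split yet: wrap down */
        return bucket;
    }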
* * 'bufP' points to the current buffer, which is pinned and read-locked. diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c index dbb9c3f39b4..bd60574f32a 100644 --- a/src/backend/access/hash/hashsort.c +++ b/src/backend/access/hash/hashsort.c @@ -8,7 +8,7 @@ * thrashing. We use tuplesort.c to sort the given index tuples into order. * * Note: if the number of rows in the table has been underestimated, - * bucket splits may occur during the index build. In that case we'd + * bucket splits may occur during the index build. In that case we'd * be inserting into two or more buckets for each possible masked-off * hash code value. That's no big problem though, since we'll still have * plenty of locality of access. @@ -52,7 +52,7 @@ _h_spoolinit(Relation index, uint32 num_buckets) hspool->index = index; /* - * Determine the bitmask for hash code values. Since there are currently + * Determine the bitmask for hash code values. Since there are currently * num_buckets buckets in the index, the appropriate mask can be computed * as follows. * diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index 6283f4a82b5..50a8aa47c57 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -161,7 +161,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. */ if (PageIsNew(page)) @@ -281,7 +281,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull) * * Returns the offset of the first index entry having hashkey >= hash_value, * or the page's max offset plus one if hash_value is greater than all - * existing hash keys in the page. This is the appropriate place to start + * existing hash keys in the page. This is the appropriate place to start * a search, or to insert a new item. */ OffsetNumber diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 479853d99a1..0105825e14a 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -108,7 +108,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) * while the scan is in progress will be invisible to my snapshot anyway. * (That is not true when using a non-MVCC snapshot. However, we couldn't * guarantee to return tuples added after scan start anyway, since they - * might go into pages we already scanned. To guarantee consistent + * might go into pages we already scanned. To guarantee consistent * results for a non-MVCC snapshot, the caller must hold some higher-level * lock that ensures the interesting tuple(s) won't change.) */ @@ -116,7 +116,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) /* * If the table is large relative to NBuffers, use a bulk-read access - * strategy and enable synchronized scanning (see syncscan.c). Although + * strategy and enable synchronized scanning (see syncscan.c). Although * the thresholds for these features could be different, we make them the * same so that there are only two behaviors to tune rather than four. * (However, some callers need to be able to disable one or both of these @@ -245,7 +245,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) /* * We must hold share lock on the buffer content while examining tuple - * visibility. 
Afterwards, however, the tuples we have found to be + * visibility. Afterwards, however, the tuples we have found to be * visible are guaranteed good as long as we hold the buffer pin. */ LockBuffer(buffer, BUFFER_LOCK_SHARE); @@ -1675,7 +1675,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot, * possibly uncommitted version. * * *tid is both an input and an output parameter: it is updated to - * show the latest version of the row. Note that it will not be changed + * show the latest version of the row. Note that it will not be changed * if no version of the row passes the snapshot test. */ void @@ -1794,7 +1794,7 @@ heap_get_latest_tid(Relation relation, * * This is called after we have waited for the XMAX transaction to terminate. * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will - * be set on exit. If the transaction committed, we set the XMAX_COMMITTED + * be set on exit. If the transaction committed, we set the XMAX_COMMITTED * hint bit if possible --- but beware that that may not yet be possible, * if the transaction committed asynchronously. Hence callers should look * only at XMAX_INVALID. @@ -1867,7 +1867,7 @@ FreeBulkInsertState(BulkInsertState bistate) * The return value is the OID assigned to the tuple (either here or by the * caller), or InvalidOid if no OID. The header fields of *tup are updated * to match the stored tuple; in particular tup->t_self receives the actual - * TID where the tuple was stored. But note that any toasting of fields + * TID where the tuple was stored. But note that any toasting of fields * within the tuple data is NOT reflected into *tup. */ Oid @@ -1888,7 +1888,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, /* * If the object id of this tuple has already been assigned, trust the - * caller. There are a couple of ways this can happen. At initial db + * caller. There are a couple of ways this can happen. At initial db * creation, the backend program sets oids for tuples. When we define * an index, we set the oid. Finally, in the future, we may allow * users to set their own object ids in order to support a persistent @@ -2186,10 +2186,10 @@ l1: /* * You might think the multixact is necessarily done here, but not * so: it could have surviving members, namely our own xact or - * other subxacts of this backend. It is legal for us to delete + * other subxacts of this backend. It is legal for us to delete * the tuple in either case, however (the latter case is * essentially a situation of upgrading our former shared lock to - * exclusive). We don't bother changing the on-disk hint bits + * exclusive). We don't bother changing the on-disk hint bits * since we are about to overwrite the xmax altogether. */ } @@ -2259,7 +2259,7 @@ l1: /* * If this transaction commits, the tuple will become DEAD sooner or * later. Set flag that this page is a candidate for pruning once our xid - * falls below the OldestXmin horizon. If the transaction finally aborts, + * falls below the OldestXmin horizon. If the transaction finally aborts, * the subsequent page pruning will be a no-op and the hint will be * cleared. */ @@ -2360,7 +2360,7 @@ l1: * * This routine may be used to delete a tuple when concurrent updates of * the target tuple are not expected (for example, because we have a lock - * on the relation associated with the tuple). Any failure is reported + * on the relation associated with the tuple). Any failure is reported * via ereport(). 
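The single-threshold policy described in the initscan comment above fits in a few lines; the quarter-of-shared-buffers cutoff and all names here are assumptions for illustration, not the backend's code:

    #include <stdbool.h>

    /* Illustrative only: one cutoff drives both the bulk-read strategy and
     * synchronized scanning, so there are two behaviors to tune, not four. */
    static void
    choose_scan_behaviors(unsigned nblocks, unsigned shared_buffers,
                          bool *allow_strat, bool *allow_sync)
    {
        *allow_strat = *allow_sync = (nblocks > shared_buffers / 4);
    }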
*/ void @@ -2456,7 +2456,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, /* * Fetch the list of attributes to be checked for HOT update. This is * wasted effort if we fail to update or have to put the new tuple on a - * different page. But we must compute the list before obtaining buffer + * different page. But we must compute the list before obtaining buffer * lock --- in the worst case, if we are doing an update on one of the * relevant system catalogs, we could deadlock if we try to fetch the list * later. In any case, the relcache caches the data so this is usually @@ -2544,10 +2544,10 @@ l2: /* * You might think the multixact is necessarily done here, but not * so: it could have surviving members, namely our own xact or - * other subxacts of this backend. It is legal for us to update + * other subxacts of this backend. It is legal for us to update * the tuple in either case, however (the latter case is * essentially a situation of upgrading our former shared lock to - * exclusive). We don't bother changing the on-disk hint bits + * exclusive). We don't bother changing the on-disk hint bits * since we are about to overwrite the xmax altogether. */ } @@ -2643,7 +2643,7 @@ l2: * If the toaster needs to be activated, OR if the new tuple will not fit * on the same page as the old, then we need to release the content lock * (but not the pin!) on the old tuple's buffer while we are off doing - * TOAST and/or table-file-extension work. We must mark the old tuple to + * TOAST and/or table-file-extension work. We must mark the old tuple to * show that it's already being updated, else other processes may try to * update it themselves. * @@ -2708,7 +2708,7 @@ l2: * there's more free now than before. * * What's more, if we need to get a new page, we will need to acquire - * buffer locks on both old and new pages. To avoid deadlock against + * buffer locks on both old and new pages. To avoid deadlock against * some other backend trying to get the same two locks in the other * order, we must be consistent about the order we get the locks in. * We use the rule "lock the lower-numbered page of the relation @@ -2766,7 +2766,7 @@ l2: /* * At this point newbuf and buffer are both pinned and locked, and newbuf - * has enough space for the new tuple. If they are the same buffer, only + * has enough space for the new tuple. If they are the same buffer, only * one pin is held. */ @@ -2774,7 +2774,7 @@ l2: { /* * Since the new tuple is going into the same page, we might be able - * to do a HOT update. Check if any of the index columns have been + * to do a HOT update. Check if any of the index columns have been * changed. If not, then HOT update is possible. */ if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup)) @@ -2792,13 +2792,13 @@ l2: /* * If this transaction commits, the old tuple will become DEAD sooner or * later. Set flag that this page is a candidate for pruning once our xid - * falls below the OldestXmin horizon. If the transaction finally aborts, + * falls below the OldestXmin horizon. If the transaction finally aborts, * the subsequent page pruning will be a no-op and the hint will be * cleared. * * XXX Should we set hint on newbuf as well? If the transaction aborts, * there would be a prunable tuple in the newbuf; but for now we choose - * not to optimize for aborts. Note that heap_xlog_update must be kept in + * not to optimize for aborts. Note that heap_xlog_update must be kept in * sync if this decision changes. 
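The rule quoted above, "lock the lower-numbered page of the relation first", is the standard deadlock-avoidance ordering: if every backend acquires the two page locks in the same order, no cycle of waiters can form. A sketch assuming the bufmgr API and two distinct buffers:

    #include "postgres.h"
    #include "storage/bufmgr.h"

    /* Sketch: acquire exclusive content locks on two distinct buffers in
     * block-number order. */
    static void
    lock_buffer_pair(Buffer a, Buffer b)
    {
        if (BufferGetBlockNumber(a) < BufferGetBlockNumber(b))
        {
            LockBuffer(a, BUFFER_LOCK_EXCLUSIVE);
            LockBuffer(b, BUFFER_LOCK_EXCLUSIVE);
        }
        else
        {
            LockBuffer(b, BUFFER_LOCK_EXCLUSIVE);
            LockBuffer(a, BUFFER_LOCK_EXCLUSIVE);
        }
    }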
*/ PageSetPrunable(page, xid); @@ -2962,7 +2962,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, /* * Extract the corresponding values. XXX this is pretty inefficient if - * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a + * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a * single heap_deform_tuple call on each tuple, instead? But that doesn't * work for system columns ... */ @@ -2985,7 +2985,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, /* * We do simple binary comparison of the two datums. This may be overly * strict because there can be multiple binary representations for the - * same logical value. But we should be OK as long as there are no false + * same logical value. But we should be OK as long as there are no false * positives. Using a type-specific equality operator is messy because * there could be multiple notions of equality in different operator * classes; furthermore, we cannot safely invoke user-defined functions @@ -3041,7 +3041,7 @@ HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs, * * This routine may be used to update a tuple when concurrent updates of * the target tuple are not expected (for example, because we have a lock - * on the relation associated with the tuple). Any failure is reported + * on the relation associated with the tuple). Any failure is reported * via ereport(). */ void @@ -3123,7 +3123,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup) * waiter gets the tuple, potentially leading to indefinite starvation of * some waiters. The possibility of share-locking makes the problem much * worse --- a steady stream of share-lockers can easily block an exclusive - * locker forever. To provide more reliable semantics about who gets a + * locker forever. To provide more reliable semantics about who gets a * tuple-level lock first, we use the standard lock manager. The protocol * for waiting for a tuple-level lock is really * LockTuple() @@ -3131,7 +3131,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup) * mark tuple as locked by me * UnlockTuple() * When there are multiple waiters, arbitration of who is to get the lock next - * is provided by LockTuple(). However, at most one tuple-level lock will + * is provided by LockTuple(). However, at most one tuple-level lock will * be held or awaited per backend at any time, so we don't risk overflow * of the lock table. Note that incoming share-lockers are required to * do LockTuple as well, if there is any conflict, to ensure that they don't @@ -3273,7 +3273,7 @@ l3: /* * You might think the multixact is necessarily done here, but not * so: it could have surviving members, namely our own xact or - * other subxacts of this backend. It is legal for us to lock the + * other subxacts of this backend. It is legal for us to lock the * tuple in either case, however. We don't bother changing the * on-disk hint bits since we are about to overwrite the xmax * altogether. @@ -3431,7 +3431,7 @@ l3: /* * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax * as running, but it finished before - * TransactionIdIsInProgress() got to run. Treat it like + * TransactionIdIsInProgress() got to run. Treat it like * there's no locker in the tuple. */ } @@ -3467,8 +3467,8 @@ l3: MarkBufferDirty(*buffer); /* - * XLOG stuff. You might think that we don't need an XLOG record because - * there is no state change worth restoring after a crash. You would be + * XLOG stuff. 
You might think that we don't need an XLOG record because + * there is no state change worth restoring after a crash. You would be * wrong however: we have just written either a TransactionId or a * MultiXactId that may never have been seen on disk before, and we need * to make sure that there are XLOG entries covering those ID numbers. @@ -3530,7 +3530,7 @@ l3: * heap_inplace_update - update a tuple "in place" (ie, overwrite it) * * Overwriting violates both MVCC and transactional safety, so the uses - * of this function in Postgres are extremely limited. Nonetheless we + * of this function in Postgres are extremely limited. Nonetheless we * find some places to use it. * * The tuple cannot change size, and therefore it's reasonable to assume @@ -3684,7 +3684,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, /* * When we release shared lock, it's possible for someone else to change * xmax before we get the lock back, so repeat the check after acquiring - * exclusive lock. (We don't need this pushup for xmin, because only + * exclusive lock. (We don't need this pushup for xmin, because only * VACUUM could be interested in changing an existing tuple's xmin, and * there's only one VACUUM allowed on a table at a time.) */ @@ -3829,7 +3829,7 @@ heap_restrpos(HeapScanDesc scan) else { /* - * If we reached end of scan, rs_inited will now be false. We must + * If we reached end of scan, rs_inited will now be false. We must * reset it to true to keep heapgettup from doing the wrong thing. */ scan->rs_inited = true; @@ -4013,7 +4013,7 @@ log_heap_clean(Relation reln, Buffer buffer, } /* - * Perform XLogInsert for a heap-freeze operation. Caller must already + * Perform XLogInsert for a heap-freeze operation. Caller must already * have modified the buffer and marked it dirty. */ XLogRecPtr @@ -4056,7 +4056,7 @@ log_heap_freeze(Relation reln, Buffer buffer, } /* - * Perform XLogInsert for a heap-update operation. Caller must already + * Perform XLogInsert for a heap-update operation. Caller must already * have modified the buffer(s) and marked them dirty. */ static XLogRecPtr @@ -4135,7 +4135,7 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from, * for writing the page to disk after calling this routine. * * Note: all current callers build pages in private memory and write them - * directly to smgr, rather than using bufmgr. Therefore there is no need + * directly to smgr, rather than using bufmgr. Therefore there is no need * to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within * the critical section. * @@ -4617,7 +4617,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update) /* * In normal operation, it is important to lock the two pages in * page-number order, to avoid possible deadlocks against other update - * operations going the other way. However, during WAL replay there can + * operations going the other way. However, during WAL replay there can * be no other update happening, so we don't need to worry about that. 
But * we *do* need to worry that we don't expose an inconsistent state to Hot * Standby queries --- so the original page can't be unlocked before we've diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 72a69e52b02..173e82a3d90 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -117,7 +117,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock, * NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the * same buffer we select for insertion of the new tuple (this could only * happen if space is freed in that page after heap_update finds there's not - * enough there). In that case, the page will be pinned and locked only once. + * enough there). In that case, the page will be pinned and locked only once. * * We normally use FSM to help us find free space. However, * if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to @@ -134,7 +134,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock, * for additional constraints needed for safe usage of this behavior.) * * The caller can also provide a BulkInsertState object to optimize many - * insertions into the same relation. This keeps a pin on the current + * insertions into the same relation. This keeps a pin on the current * insertion target page (to save pin/unpin cycles) and also passes a * BULKWRITE buffer selection strategy object to the buffer manager. * Passing NULL for bistate selects the default behavior. @@ -187,7 +187,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * We first try to put the tuple on the same page we last inserted a tuple - * on, as cached in the BulkInsertState or relcache entry. If that + * on, as cached in the BulkInsertState or relcache entry. If that * doesn't work, we ask the Free Space Map to locate a suitable page. * Since the FSM's info might be out of date, we have to be prepared to * loop around and retry multiple times. (To ensure this isn't an infinite @@ -219,7 +219,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * If the FSM knows nothing of the rel, try the last page before we - * give up and extend. This avoids one-tuple-per-page syndrome during + * give up and extend. This avoids one-tuple-per-page syndrome during * bootstrapping or in a recently-started system. */ if (targetBlock == InvalidBlockNumber) @@ -280,7 +280,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Not enough space, so we must give up our page locks and pin (if - * any) and prepare to look elsewhere. We don't care which order we + * any) and prepare to look elsewhere. We don't care which order we * unlock the two buffers in, so this can be slightly simpler than the * code above. */ @@ -322,7 +322,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * XXX This does an lseek - rather expensive - but at the moment it is the - * only way to accurately determine how many blocks are in a relation. Is + * only way to accurately determine how many blocks are in a relation. Is * it worth keeping an accurate file length in shared memory someplace, * rather than relying on the kernel to do it for us? */ @@ -342,7 +342,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Release the file-extension lock; it's now OK for someone else to extend - * the relation some more.
Note that we cannot release this lock before * we have buffer lock on the new page, or we risk a race condition * against vacuumlazy.c --- see comments therein. */ diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 1eec8454717..b5c719b922d 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -100,7 +100,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin) * Checking free space here is questionable since we aren't holding any * lock on the buffer; in the worst case we could get a bogus answer. It's * unlikely to be *seriously* wrong, though, since reading either pd_lower - * or pd_upper is probably atomic. Avoiding taking a lock seems more + * or pd_upper is probably atomic. Avoiding taking a lock seems more * important than sometimes getting a wrong answer in what is after all * just a heuristic estimate. */ @@ -319,8 +319,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin, * OldestXmin is the cutoff XID used to identify dead tuples. * * We don't actually change the page here, except perhaps for hint-bit updates - * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in - * prstate showing the changes to be made. Items to be redirected are added + * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in + * prstate showing the changes to be made. Items to be redirected are added * to the redirected[] array (two entries per redirection); items to be set to * LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED * state are added to nowunused[]. @@ -362,7 +362,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, * We need this primarily to handle aborted HOT updates, that is, * XMIN_INVALID heap-only tuples. Those might not be linked to by * any chain, since the parent tuple might be re-updated before - * any pruning occurs. So we have to be able to reap them + * any pruning occurs. So we have to be able to reap them * separately from chain-pruning. (Note that * HeapTupleHeaderIsHotUpdated will never return true for an * XMIN_INVALID tuple, so this code will work even when there were @@ -549,7 +549,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, /* * If the root entry had been a normal tuple, we are deleting it, so - * count it in the result. But changing a redirect (even to DEAD + * count it in the result. But changing a redirect (even to DEAD * state) doesn't count. */ if (ItemIdIsNormal(rootlp)) @@ -638,7 +638,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum) * buffer, and is inside a critical section. * * This is split out because it is also used by heap_xlog_clean() - * to replay the WAL record when needed after a crash. Note that the + * to replay the WAL record when needed after a crash. Note that the * arguments are identical to those of log_heap_clean(). */ void diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index e56140950af..37fe306a80b 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -10,7 +10,7 @@ * * The caller is responsible for creating the new heap, all catalog * changes, supplying the tuples to be written to the new heap, and - * rebuilding indexes. The caller must hold AccessExclusiveLock on the + * rebuilding indexes. 
The caller must hold AccessExclusiveLock on the * target table, because we assume no one else is writing into it. * * To use the facility: @@ -43,7 +43,7 @@ * to substitute the correct ctid instead. * * For each ctid reference from A -> B, we might encounter either A first - * or B first. (Note that a tuple in the middle of a chain is both A and B + * or B first. (Note that a tuple in the middle of a chain is both A and B * of different pairs.) * * If we encounter A first, we'll store the tuple in the unresolved_tups @@ -58,11 +58,11 @@ * and can write A immediately with the correct ctid. * * Entries in the hash tables can be removed as soon as the later tuple - * is encountered. That helps to keep the memory usage down. At the end, + * is encountered. That helps to keep the memory usage down. At the end, * both tables are usually empty; we should have encountered both A and B * of each pair. However, it's possible for A to be RECENTLY_DEAD and B * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test - * for deadness using OldestXmin is not exact. In such a case we might + * for deadness using OldestXmin is not exact. In such a case we might * encounter B first, and skip it, and find A later. Then A would be added * to unresolved_tups, and stay there until end of the rewrite. Since * this case is very unusual, we don't worry about the memory usage. @@ -78,7 +78,7 @@ * of CLUSTERing on an unchanging key column, we'll see all the versions * of a given tuple together anyway, and so the peak memory usage is only * proportional to the number of RECENTLY_DEAD versions of a single row, not - * in the whole table. Note that if we do fail halfway through a CLUSTER, + * in the whole table. Note that if we do fail halfway through a CLUSTER, * the old table is still valid, so failure is not catastrophic. * * We can't use the normal heap_insert function to insert into the new @@ -277,7 +277,7 @@ end_heap_rewrite(RewriteState state) } /* - * If the rel is WAL-logged, must fsync before commit. We use heap_sync + * If the rel is WAL-logged, must fsync before commit. We use heap_sync * to ensure that the toast table gets fsync'd too. * * It's obvious that we must do this when not WAL-logging. It's less @@ -337,7 +337,7 @@ rewrite_heap_tuple(RewriteState state, * very-old xmin or xmax, so that future VACUUM effort can be saved. * * Note we abuse heap_freeze_tuple() a bit here, since it's expecting to - * be given a pointer to a tuple in a disk buffer. It happens though that + * be given a pointer to a tuple in a disk buffer. It happens though that * we can get the right things to happen by passing InvalidBuffer for the * buffer. */ @@ -548,7 +548,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple) } /* - * Insert a tuple to the new relation. This has to track heap_insert + * Insert a tuple to the new relation. This has to track heap_insert * and its subsidiary functions! * * t_self of the tuple is set to the new TID of the tuple. If t_ctid of the diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c index 957d1a12f30..30efe89dfc6 100644 --- a/src/backend/access/heap/syncscan.c +++ b/src/backend/access/heap/syncscan.c @@ -4,7 +4,7 @@ * heap scan synchronization support * * When multiple backends run a sequential scan on the same table, we try - * to keep them synchronized to reduce the overall I/O needed. The goal is + * to keep them synchronized to reduce the overall I/O needed. 
The goal is * to read each page into shared buffer cache only once, and let all backends * that take part in the shared scan process the page before it falls out of * the cache. @@ -26,7 +26,7 @@ * don't want such queries to slow down others. * * There can realistically only be a few large sequential scans on different - * tables in progress at any time. Therefore we just keep the scan positions + * tables in progress at any time. Therefore we just keep the scan positions * in a small LRU list which we scan every time we need to look up or update a * scan position. The whole mechanism is only applied for tables exceeding * a threshold size (but that is not the concern of this module). @@ -245,7 +245,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set) * relation, or 0 if no valid location is found. * * We expect the caller has just done RelationGetNumberOfBlocks(), and - * so that number is passed in rather than computing it again. The result + * so that number is passed in rather than computing it again. The result * is guaranteed less than relnblocks (assuming that's > 0). */ BlockNumber diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index a63cf404599..0ce52d9e474 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -550,7 +550,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * We took care of UPDATE above, so any external value we find * still in the tuple must be someone else's we cannot reuse. * Fetch it back (without decompression, unless we are forcing - * PLAIN storage). If necessary, we'll push it out as a new + * PLAIN storage). If necessary, we'll push it out as a new * external value below. */ if (VARATT_IS_EXTERNAL(new_value)) @@ -693,7 +693,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, /* * Second we look for attributes of attstorage 'x' or 'e' that are still - * inline. But skip this if there's no toast table to push them to. + * inline. But skip this if there's no toast table to push them to. */ while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen && @@ -803,7 +803,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, } /* - * Finally we store attributes of type 'm' externally. At this point we + * Finally we store attributes of type 'm' externally. At this point we * increase the target tuple size, so that 'm' attributes aren't stored * externally unless really necessary. */ @@ -1415,7 +1415,7 @@ toast_save_datum(Relation rel, Datum value, heap_insert(toastrel, toasttup, mycid, options, NULL); /* - * Create the index entry. We cheat a little here by not using + * Create the index entry. We cheat a little here by not using * FormIndexDatum: this relies on the knowledge that the index columns * are the same as the initial columns of the table. * diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index f03e88afdcc..915dad78d43 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -44,7 +44,7 @@ * * At the end of a scan, the AM's endscan routine undoes the locking, * but does *not* call IndexScanEnd --- the higher-level index_endscan - * routine does that. (We can't do it in the AM because index_endscan + * routine does that. (We can't do it in the AM because index_endscan * still needs to touch the IndexScanDesc after calling the AM.) 
* * Because of this, the AM does not have a choice whether to call @@ -184,7 +184,7 @@ BuildIndexValueDescription(Relation indexRelation, * at rd_opcintype not the index tupdesc. * * Note: this is a bit shaky for opclasses that have pseudotype - * input types such as ANYARRAY or RECORD. Currently, the + * input types such as ANYARRAY or RECORD. Currently, the * typoutput functions associated with the pseudotypes will work * okay, but we might have to try harder in future. */ @@ -410,7 +410,7 @@ systable_endscan(SysScanDesc sysscan) * index order. Also, for largely historical reasons, the index to use * is opened and locked by the caller, not here. * - * Currently we do not support non-index-based scans here. (In principle + * Currently we do not support non-index-based scans here. (In principle * we could do a heapscan and sort, but the uses are in places that * probably don't need to still work with corrupted catalog indexes.) * For the moment, therefore, these functions are merely the thinnest of diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 31d705c0561..4b7e4346c42 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -80,7 +80,7 @@ * * Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there * to check that we don't try to scan or do retail insertions into an index - * that is currently being rebuilt or pending rebuild. This helps to catch + * that is currently being rebuilt or pending rebuild. This helps to catch * things that don't work when reindexing system catalogs. The assertion * doesn't prevent the actual rebuild because we don't use RELATION_CHECKS * when calling the index AM's ambuild routine, and there is no reason for @@ -138,7 +138,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation, * index_open - open an index relation by relation OID * * If lockmode is not "NoLock", the specified kind of lock is - * obtained on the index. (Generally, NoLock should only be + * obtained on the index. (Generally, NoLock should only be * used if the caller knows it has some appropriate lock on the * index already.) * @@ -403,7 +403,7 @@ index_markpos(IndexScanDesc scan) * returnable tuple in each HOT chain, and so restoring the prior state at the * granularity of the index AM is sufficient. Since the only current user * of mark/restore functionality is nodeMergejoin.c, this effectively means - * that merge-join plans only work for MVCC snapshots. This could be fixed + * that merge-join plans only work for MVCC snapshots. This could be fixed * if necessary, but for now it seems unimportant. * ---------------- */ @@ -428,7 +428,7 @@ index_restrpos(IndexScanDesc scan) * index_getnext - get the next heap tuple from a scan * * The result is the next heap tuple satisfying the scan keys and the - * snapshot, or NULL if no more matching tuples exist. On success, + * snapshot, or NULL if no more matching tuples exist. On success, * the buffer containing the heap tuple is pinned (the pin will be dropped * at the next index_getnext or index_endscan). * @@ -466,7 +466,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) { /* * We are resuming scan of a HOT chain after having returned an - * earlier member. Must still hold pin on current heap page. + * earlier member. Must still hold pin on current heap page. 
*/ Assert(BufferIsValid(scan->xs_cbuf)); Assert(ItemPointerGetBlockNumber(tid) == @@ -588,7 +588,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) /* * The xmin should match the previous xmax value, else chain is - * broken. (Note: this test is not optional because it protects + * broken. (Note: this test is not optional because it protects * us against the case where the prior chain member's xmax aborted * since we looked at it.) */ @@ -798,7 +798,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info, * particular indexed attribute are those with both types equal to * the index opclass' opcintype (note that this is subtly different * from the indexed attribute's own type: it may be a binary-compatible - * type instead). Only the default functions are stored in relcache + * type instead). Only the default functions are stored in relcache * entries --- access methods can use the syscache to look up non-default * functions. * @@ -832,7 +832,7 @@ index_getprocid(Relation irel, * index_getprocinfo * * This routine allows index AMs to keep fmgr lookup info for - * support procs in the relcache. As above, only the "default" + * support procs in the relcache. As above, only the "default" * functions for any particular indexed attribute are cached. * * Note: the return value points into cached data that will be lost during diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c index 23f2b61fe90..c3f8caab274 100644 --- a/src/backend/access/nbtree/nbtcompare.c +++ b/src/backend/access/nbtree/nbtcompare.c @@ -25,7 +25,7 @@ * Although any negative int32 (except INT_MIN) is acceptable for reporting * "<", and any positive int32 is acceptable for reporting ">", routines * that work on 32-bit or wider datatypes can't just return "a - b". - * That could overflow and give the wrong answer. Also, one must not + * That could overflow and give the wrong answer. Also, one must not * return INT_MIN to report "<", since some callers will negate the result. * * NOTE: it is critical that the comparison function impose a total order diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 735291f342e..c25da51cf8c 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -89,7 +89,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel); * and btinsert. By here, itup is filled in, including the TID. * * If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this - * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or + * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or * UNIQUE_CHECK_EXISTING) it will throw error for a duplicate. * For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and * don't actually insert. @@ -128,7 +128,7 @@ top: * If the page was split between the time that we surrendered our read * lock and acquired our write lock, then this page may no longer be the * right place for the key we want to insert. In this case, we need to - * move right in the tree. See Lehman and Yao for an excruciatingly + * move right in the tree. See Lehman and Yao for an excruciatingly * precise description. */ buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE); @@ -208,7 +208,7 @@ top: * is the first tuple on the next page. * * Returns InvalidTransactionId if there is no conflict, else an xact ID - * we must wait for to see if it commits a conflicting tuple. If an actual + * we must wait for to see if it commits a conflicting tuple. 
If an actual * conflict is detected, no return --- just ereport(). * * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return @@ -290,7 +290,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, /* * If we are doing a recheck, we expect to find the tuple we - * are rechecking. It's not a duplicate, but we have to keep + * are rechecking. It's not a duplicate, but we have to keep * scanning. */ if (checkUnique == UNIQUE_CHECK_EXISTING && @@ -471,7 +471,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * If the new key is equal to one or more existing keys, we can * legitimately place it anywhere in the series of equal keys --- in fact, * if the new key is equal to the page's "high key" we can place it on - * the next page. If it is equal to the high key, and there's not room + * the next page. If it is equal to the high key, and there's not room * to insert the new tuple on the current page without splitting, then * we can move right hoping to find more free space and avoid a split. * (We should not move right indefinitely, however, since that leads to @@ -483,7 +483,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * removing any LP_DEAD tuples. * * On entry, *buf and *offsetptr point to the first legal position - * where the new tuple could be inserted. The caller should hold an + * where the new tuple could be inserted. The caller should hold an * exclusive lock on *buf. *offsetptr can also be set to * InvalidOffsetNumber, in which case the function will search for the * right location within the page if needed. On exit, they point to the @@ -549,7 +549,7 @@ _bt_findinsertloc(Relation rel, * on every insert. We implement "get tired" as a random choice, * since stopping after scanning a fixed number of pages wouldn't work * well (we'd never reach the right-hand side of previously split - * pages). Currently the probability of moving right is set at 0.99, + * pages). Currently the probability of moving right is set at 0.99, * which may seem too high to change the behavior much, but it does an * excellent job of preventing O(N^2) behavior with many equal keys. *---------- @@ -650,7 +650,7 @@ _bt_findinsertloc(Relation rel, * + updates the metapage if a true root or fast root is split. * * On entry, we must have the right buffer in which to do the - * insertion, and the buffer must be pinned and write-locked. On return, + * insertion, and the buffer must be pinned and write-locked. On return, * we will have dropped both the pin and the lock on the buffer. * * The locking interactions in this code are critical. You should @@ -916,7 +916,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, * origpage is the original page to be split. leftpage is a temporary * buffer that receives the left-sibling data, which will be copied back * into origpage on success. rightpage is the new page that receives the - * right-sibling data. If we fail before reaching the critical section, + * right-sibling data. If we fail before reaching the critical section, * origpage hasn't been modified and leftpage is only workspace. In * principle we shouldn't need to worry about rightpage either, because it * hasn't been linked into the btree page structure; but to avoid leaving @@ -1132,7 +1132,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, * page. If you're confused, imagine that page A splits to A B and * then again, yielding A C B, while vacuum is in progress. 
Tuples * originally in A could now be in either B or C, hence vacuum must - * examine both pages. But if D, our right sibling, has a different + * examine both pages. But if D, our right sibling, has a different * cycleid then it could not contain any tuples that were in A when * the vacuum started. */ @@ -1354,7 +1354,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, * * We return the index of the first existing tuple that should go on the * righthand page, plus a boolean indicating whether the new tuple goes on - * the left or right page. The bool is necessary to disambiguate the case + * the left or right page. The bool is necessary to disambiguate the case * where firstright == newitemoff. */ static OffsetNumber @@ -1590,7 +1590,7 @@ _bt_checksplitloc(FindSplitData *state, * * On entry, buf and rbuf are the left and right split pages, which we * still hold write locks on per the L&Y algorithm. We release the - * write locks once we have write lock on the parent page. (Any sooner, + * write locks once we have write lock on the parent page. (Any sooner, * and it'd be possible for some other process to try to split or delete * one of these pages, and get confused because it cannot find the downlink.) * @@ -1613,7 +1613,7 @@ _bt_insert_parent(Relation rel, * Here we have to do something Lehman and Yao don't talk about: deal with * a root split and construction of a new root. If our stack is empty * then we have just split a node on what had been the root level when we - * descended the tree. If it was still the root then we perform a + * descended the tree. If it was still the root then we perform a * new-root construction. If it *wasn't* the root anymore, search to find * the next higher level that someone constructed meanwhile, and find the * right place to insert as for the normal case. @@ -1763,7 +1763,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access) /* * These loops will check every item on the page --- but in an * order that's attuned to the probability of where it actually - * is. Scan to the right first, then to the left. + * is. Scan to the right first, then to the left. */ for (offnum = start; offnum <= maxoff; diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index d782d55aa47..f92ef73c728 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -12,7 +12,7 @@ * src/backend/access/nbtree/nbtpage.c * * NOTES - * Postgres btree pages look like ordinary relation pages. The opaque + * Postgres btree pages look like ordinary relation pages. The opaque * data at high addresses includes pointers to left and right siblings * and flag data describing page state. The first page in a btree, page * zero, is special -- it stores meta-information describing the tree. @@ -57,7 +57,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) metaopaque->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not essential + * Set pd_lower just past the end of the metadata. This is not essential * but it makes the page look compressible to xlog.c. */ ((PageHeader) page)->pd_lower = @@ -75,7 +75,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) * * The access type parameter (BT_READ or BT_WRITE) controls whether * a new root page will be created or not. If access = BT_READ, - * and no root page exists, we just return InvalidBuffer. For + * and no root page exists, we just return InvalidBuffer. 
For * BT_WRITE, we try to create the root page if it doesn't exist. * NOTE that the returned root page will have only a read lock set * on it even if access = BT_WRITE! @@ -192,7 +192,7 @@ _bt_getroot(Relation rel, int access) /* * Metadata initialized by someone else. In order to guarantee no * deadlocks, we have to release the metadata page and start all - * over again. (Is that really true? But it's hardly worth trying + * over again. (Is that really true? But it's hardly worth trying * to optimize this case.) */ _bt_relbuf(rel, metabuf); @@ -257,7 +257,7 @@ _bt_getroot(Relation rel, int access) CacheInvalidateRelcache(rel); /* - * swap root write lock for read lock. There is no danger of anyone + * swap root write lock for read lock. There is no danger of anyone * else accessing the new root page while it's unlocked, since no one * else knows where it is yet. */ @@ -325,7 +325,7 @@ _bt_getroot(Relation rel, int access) * By the time we acquire lock on the root page, it might have been split and * not be the true root anymore. This is okay for the present uses of this * routine; we only really need to be able to move up at least one tree level - * from whatever non-root page we were at. If we ever do need to lock the + * from whatever non-root page we were at. If we ever do need to lock the * one true root page, we could loop here, re-reading the metapage on each * failure. (Note that it wouldn't do to hold the lock on the metapage while * moving to the root --- that'd deadlock against any concurrent root split.) @@ -424,7 +424,7 @@ _bt_checkpage(Relation rel, Buffer buf) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. */ if (PageIsNew(page)) @@ -491,7 +491,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX /* * _bt_getbuf() -- Get a buffer by block number for read or write. * - * blkno == P_NEW means to get an unallocated index page. The page + * blkno == P_NEW means to get an unallocated index page. The page * will be initialized before returning it. * * When this routine returns, the appropriate lock is set on the @@ -522,7 +522,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access) * First see if the FSM knows of any free pages. * * We can't trust the FSM's report unreservedly; we have to check that - * the page is still free. (For example, an already-free page could + * the page is still free. (For example, an already-free page could * have been re-used between the time the last VACUUM scanned it and * the time the VACUUM made its FSM updates.) * @@ -701,7 +701,7 @@ _bt_page_recyclable(Page page) /* * Delete item(s) from a btree page. * - * This must only be used for deleting leaf items. Deleting an item on a + * This must only be used for deleting leaf items. Deleting an item on a * non-leaf page has to be done as part of an atomic action that includes * deleting the page it points to. * @@ -769,7 +769,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, /* * The target-offsets array is not in the buffer, but pretend that it - * is. When XLogInsert stores the whole buffer, the offsets array + * is. When XLogInsert stores the whole buffer, the offsets array * need not be stored too. 
*/ if (nitems > 0) @@ -1008,7 +1008,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack) BTPageOpaque opaque; /* - * We can never delete rightmost pages nor root pages. While at it, check + * We can never delete rightmost pages nor root pages. While at it, check * that page is not already deleted and is empty. */ page = BufferGetPage(buf); @@ -1080,7 +1080,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack) /* * During WAL recovery, we can't use _bt_search (for one reason, * it might invoke user-defined comparison functions that expect - * facilities not available in recovery mode). Instead, just set + * facilities not available in recovery mode). Instead, just set * up a dummy stack pointing to the left end of the parent tree * level, from which _bt_getstackbuf will walk right to the parent * page. Painful, but we don't care too much about performance in @@ -1115,7 +1115,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack) * target page. The sibling that was current a moment ago could have * split, so we may have to move right. This search could fail if either * the sibling or the target page was deleted by someone else meanwhile; - * if so, give up. (Right now, that should never happen, since page + * if so, give up. (Right now, that should never happen, since page * deletion is only done in VACUUM and there shouldn't be multiple VACUUMs * concurrently on the same table.) */ @@ -1144,7 +1144,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack) lbuf = InvalidBuffer; /* - * Next write-lock the target page itself. It should be okay to take just + * Next write-lock the target page itself. It should be okay to take just * a write lock not a superexclusive lock, since no scans would stop on an * empty page. */ @@ -1266,7 +1266,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack) /* * Check that the parent-page index items we're about to delete/overwrite - * contain what we expect. This can fail if the index has become corrupt + * contain what we expect. This can fail if the index has become corrupt * for some reason. We want to throw any error before entering the * critical section --- otherwise it'd be a PANIC. * diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 0926c807c1c..4398c34222c 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -154,7 +154,7 @@ btbuild(PG_FUNCTION_ARGS) /* * If we are reindexing a pre-existing index, it is critical to send out a * relcache invalidation SI message to ensure all backends re-read the - * index metapage. We expect that the caller will ensure that happens + * index metapage. We expect that the caller will ensure that happens * (typically as a side effect of updating index stats, but it must happen * even if the stats don't change!) */ @@ -219,7 +219,7 @@ btbuildempty(PG_FUNCTION_ARGS) metapage = (Page) palloc(BLCKSZ); _bt_initmetapage(metapage, P_NONE, 0); - /* Write the page. If archiving/streaming, XLOG it. */ + /* Write the page. If archiving/streaming, XLOG it. */ smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE, (char *) metapage, true); if (XLogIsNeeded()) @@ -292,11 +292,11 @@ btgettuple(PG_FUNCTION_ARGS) if (scan->kill_prior_tuple) { /* - * Yes, remember it for later. (We'll deal with all such tuples + * Yes, remember it for later. (We'll deal with all such tuples * at once right before leaving the index page.) 
The test for * numKilled overrun is not just paranoia: if the caller reverses * direction in the indexscan then the same item might get entered - * multiple times. It's not worth trying to optimize that, so we + * multiple times. It's not worth trying to optimize that, so we * don't detect it, but instead just forget any excess entries. */ if (so->killedItems == NULL) @@ -862,7 +862,7 @@ restart: vstate->lastBlockLocked = blkno; /* - * Check whether we need to recurse back to earlier pages. What we + * Check whether we need to recurse back to earlier pages. What we * are concerned about is a page split that happened since we started * the vacuum scan. If the split moved some tuples to a lower page * then we might have missed 'em. If so, set up for tail recursion. diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 5c00fac533b..dfb8eeb6ff8 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -50,7 +50,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir); * * NOTE that the returned buffer is read-locked regardless of the access * parameter. However, access = BT_WRITE will allow an empty root page - * to be created and returned. When access = BT_READ, an empty index + * to be created and returned. When access = BT_READ, an empty index * will result in *bufP being set to InvalidBuffer. */ BTStack @@ -227,7 +227,7 @@ _bt_moveright(Relation rel, * (or leaf keys > given scankey when nextkey is true). * * This procedure is not responsible for walking right, it just examines - * the given page. _bt_binsrch() has no lock or refcount side effects + * the given page. _bt_binsrch() has no lock or refcount side effects * on the buffer. */ OffsetNumber @@ -359,7 +359,7 @@ _bt_compare(Relation rel, /* * The scan key is set up with the attribute number associated with each * term in the key. It is important that, if the index is multi-key, the - * scan contain the first k key attributes, and that they be in order. If + * scan contain the first k key attributes, and that they be in order. If * you think about how multi-key ordering works, you'll understand why * this is. * @@ -398,7 +398,7 @@ _bt_compare(Relation rel, /* * The sk_func needs to be passed the index value as left arg and * the sk_argument as right arg (they might be of different - * types). Since it is convenient for callers to think of + * types). Since it is convenient for callers to think of * _bt_compare as comparing the scankey to the index item, we have * to flip the sign of the comparison result. (Unless it's a DESC * column, in which case we *don't* flip the sign.) @@ -427,7 +427,7 @@ _bt_compare(Relation rel, * _bt_first() -- Find the first item in a scan. * * We need to be clever about the direction of scan, the search - * conditions, and the tree ordering. We find the first item (or, + * conditions, and the tree ordering. We find the first item (or, * if backwards scan, the last item) in the tree that satisfies the * qualifications in the scan key. On success exit, the page containing * the current index tuple is pinned but not locked, and data about @@ -480,7 +480,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * We want to identify the keys that can be used as starting boundaries; * these are =, >, or >= keys for a forward scan or =, <, <= keys for * a backwards scan. We can use keys for multiple attributes so long as - * the prior attributes had only =, >= (resp. =, <=) keys. 
Once we accept + * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept * a > or < boundary or find an attribute with no boundary (which can be * thought of as the same as "> -infinity"), we can't use keys for any * attributes to its right, because it would break our simplistic notion @@ -643,7 +643,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * even if the row comparison is of ">" or "<" type, because the * condition applied to all but the last row member is effectively * ">=" or "<=", and so the extra keys don't break the positioning - * scheme. But, by the same token, if we aren't able to use all + * scheme. But, by the same token, if we aren't able to use all * the row members, then the part of the row comparison that we * did use has to be treated as just a ">=" or "<=" condition, and * so we'd better adjust strat_total accordingly. @@ -762,7 +762,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) /* * Find first item >= scankey, then back up one to arrive at last - * item < scankey. (Note: this positioning strategy is only used + * item < scankey. (Note: this positioning strategy is only used * for a backward scan, so that is always the correct starting * position.) */ @@ -811,7 +811,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) case BTGreaterEqualStrategyNumber: /* - * Find first item >= scankey. (This is only used for forward + * Find first item >= scankey. (This is only used for forward * scans.) */ nextkey = false; @@ -889,7 +889,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * * The actually desired starting point is either this item or the prior * one, or in the end-of-page case it's the first item on the next page or - * the last item on this page. Adjust the starting offset if needed. (If + * the last item on this page. Adjust the starting offset if needed. (If * this results in an offset before the first item or after the last one, * _bt_readpage will report no items found, and then we'll step to the * next page as needed.) @@ -1173,7 +1173,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir) * than the walk-right case because of the possibility that the page * to our left splits while we are in flight to it, plus the * possibility that the page we were on gets deleted after we leave - * it. See nbtree/README for details. + * it. See nbtree/README for details. */ for (;;) { @@ -1268,7 +1268,7 @@ _bt_walk_left(Relation rel, Buffer buf) * anymore, not that its left sibling got split more than four times. * * Note that it is correct to test P_ISDELETED not P_IGNORE here, - * because half-dead pages are still in the sibling chain. Caller + * because half-dead pages are still in the sibling chain. Caller * must reject half-dead pages if wanted. */ tries = 0; @@ -1294,7 +1294,7 @@ _bt_walk_left(Relation rel, Buffer buf) if (P_ISDELETED(opaque)) { /* - * It was deleted. Move right to first nondeleted page (there + * It was deleted. Move right to first nondeleted page (there * must be one); that is the page that has acquired the deleted * one's keyspace, so stepping left from it will take us where we * want to be. @@ -1338,7 +1338,7 @@ _bt_walk_left(Relation rel, Buffer buf) * _bt_get_endpoint() -- Find the first or last page on a given tree level * * If the index is empty, we will return InvalidBuffer; any other failure - * condition causes ereport(). We will not return a dead page. + * condition causes ereport(). We will not return a dead page. * * The returned buffer is pinned and read-locked. 
*/ diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 93a928c66b2..0a61e09a08b 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -7,7 +7,7 @@ * * We use tuplesort.c to sort the given index tuples into order. * Then we scan the index tuples in order and build the btree pages - * for each level. We load source tuples into leaf-level pages. + * for each level. We load source tuples into leaf-level pages. * Whenever we fill a page at one level, we add a link to it to its * parent level (starting a new parent level if necessary). When * done, we write out each final page on each level, adding it to @@ -42,11 +42,11 @@ * * Since the index will never be used unless it is completely built, * from a crash-recovery point of view there is no need to WAL-log the - * steps of the build. After completing the index build, we can just sync + * steps of the build. After completing the index build, we can just sync * the whole file to disk using smgrimmedsync() before exiting this module. * This can be seen to be sufficient for crash recovery by considering that * it's effectively equivalent to what would happen if a CHECKPOINT occurred - * just after the index build. However, it is clearly not sufficient if the + * just after the index build. However, it is clearly not sufficient if the * DBA is using the WAL log for PITR or replication purposes, since another * machine would not be able to reconstruct the index from WAL. Therefore, * we log the completed index pages to WAL if and only if WAL archiving is @@ -88,7 +88,7 @@ struct BTSpool }; /* - * Status record for a btree page being built. We have one of these + * Status record for a btree page being built. We have one of these * for each active tree level. * * The reason we need to store a copy of the minimum key is that we'll @@ -157,7 +157,7 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead) * We size the sort area as maintenance_work_mem rather than work_mem to * speed index creation. This should be OK since a single backend can't * run multiple index creations in parallel. Note that creation of a - * unique index actually requires two BTSpool objects. We expect that the + * unique index actually requires two BTSpool objects. We expect that the * second one (for dead tuples) won't get very full, so we give it only * work_mem. */ @@ -296,7 +296,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) } /* - * Now write the page. There's no need for smgr to schedule an fsync for + * Now write the page. There's no need for smgr to schedule an fsync for * this write; we'll do it ourselves before ending the build. */ if (blkno == wstate->btws_pages_written) @@ -421,14 +421,14 @@ _bt_sortaddtup(Page page, * A leaf page being built looks like: * * +----------------+---------------------------------+ - * | PageHeaderData | linp0 linp1 linp2 ... | + * | PageHeaderData | linp0 linp1 linp2 ... | * +-----------+----+---------------------------------+ * | ... linpN | | * +-----------+--------------------------------------+ * | ^ last | * | | * +-------------+------------------------------------+ - * | | itemN ... | + * | | itemN ... | * +-------------+------------------+-----------------+ * | ... item3 item2 item1 | "special space" | * +--------------------------------+-----------------+ @@ -489,9 +489,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) "or use full text indexing."))); /* - * Check to see if page is "full". 
It's definitely full if the item won't + * Check to see if page is "full". It's definitely full if the item won't * fit. Otherwise, compare to the target freespace derived from the - * fillfactor. However, we must put at least two items on each page, so + * fillfactor. However, we must put at least two items on each page, so * disregard fillfactor if we don't have that many. */ if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY)) @@ -564,7 +564,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) } /* - * Write out the old page. We never need to touch it again, so we can + * Write out the old page. We never need to touch it again, so we can * free the opage workspace too. */ _bt_blwritepage(wstate, opage, oblkno); @@ -801,7 +801,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) /* * If the index is WAL-logged, we must fsync it down to disk before it's - * safe to commit the transaction. (For a non-WAL-logged index we don't + * safe to commit the transaction. (For a non-WAL-logged index we don't * care since the index will be uninteresting after a crash anyway.) * * It's obvious that we must do this when not WAL-logging the build. It's diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index f87eadcdec2..fe3ca62193c 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -95,7 +95,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup) * comparison data ultimately used must match the key datatypes. * * The result cannot be used with _bt_compare(), unless comparison - * data is first stored into the key entries. Currently this + * data is first stored into the key entries. Currently this * routine is only called by nbtsort.c and tuplesort.c, which have * their own comparison routines. */ @@ -166,7 +166,7 @@ _bt_freestack(BTStack stack) * _bt_preprocess_keys() -- Preprocess scan keys * * The caller-supplied search-type keys (in scan->keyData[]) are copied to - * so->keyData[] with possible transformation. scan->numberOfKeys is + * so->keyData[] with possible transformation. scan->numberOfKeys is * the number of input keys, so->numberOfKeys gets the number of output * keys (possibly less, never greater). * @@ -177,7 +177,7 @@ _bt_freestack(BTStack stack) * so that the index sorts in the desired direction. * * One key purpose of this routine is to discover how many scan keys - * must be satisfied to continue the scan. It also attempts to eliminate + * must be satisfied to continue the scan. It also attempts to eliminate * redundant keys and detect contradictory keys. (If the index opfamily * provides incomplete sets of cross-type operators, we may fail to detect * redundant or contradictory keys, but we can survive that.) @@ -209,7 +209,7 @@ _bt_freestack(BTStack stack) * that's the only one returned. (So, we return either a single = key, * or one or two boundary-condition keys for each attr.) However, if we * cannot compare two keys for lack of a suitable cross-type operator, - * we cannot eliminate either. If there are two such keys of the same + * we cannot eliminate either. If there are two such keys of the same * operator strategy, the second one is just pushed into the output array * without further processing here. We may also emit both >/>= or both * </<= keys if we can't compare them. 
The logic about required keys still @@ -406,7 +406,7 @@ _bt_preprocess_keys(IndexScanDesc scan) /* * Emit the cleaned-up keys into the outkeys[] array, and then - * mark them if they are required. They are required (possibly + * mark them if they are required. They are required (possibly * only in one direction) if all attrs before this one had "=". */ for (j = BTMaxStrategyNumber; --j >= 0;) @@ -504,7 +504,7 @@ _bt_preprocess_keys(IndexScanDesc scan) * and amoplefttype/amoprighttype equal to the two argument datatypes. * * If the opfamily doesn't supply a complete set of cross-type operators we - * may not be able to make the comparison. If we can make the comparison + * may not be able to make the comparison. If we can make the comparison * we store the operator result in *result and return TRUE. We return FALSE * if the comparison could not be made. * @@ -530,7 +530,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, StrategyNumber strat; /* - * First, deal with cases where one or both args are NULL. This should + * First, deal with cases where one or both args are NULL. This should * only happen when the scankeys represent IS NULL/NOT NULL conditions. */ if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL) @@ -670,7 +670,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, * * Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a * NULL comparison value. Since all btree operators are assumed strict, - * a NULL means that the qual cannot be satisfied. We return TRUE if the + * a NULL means that the qual cannot be satisfied. We return TRUE if the * comparison value isn't NULL, or FALSE if the scan should be abandoned. * * This function is applied to the *input* scankey structure; therefore @@ -699,7 +699,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * --- we can treat IS NULL as an equality operator for purposes of search * strategy. * - * Likewise, "x IS NOT NULL" is supported. We treat that as either "less + * Likewise, "x IS NOT NULL" is supported. We treat that as either "less * than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS * FIRST index. * @@ -771,7 +771,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * Mark a scankey as "required to continue the scan". * * Depending on the operator type, the key may be required for both scan - * directions or just one. Also, if the key is a row comparison header, + * directions or just one. Also, if the key is a row comparison header, * we have to mark the appropriate subsidiary ScanKeys as required. In * such cases, the first subsidiary key is required, but subsequent ones * are required only as long as they correspond to successive index columns @@ -783,7 +783,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * scribbling on a data structure belonging to the index AM's caller, not on * our private copy. This should be OK because the marking will not change * from scan to scan within a query, and so we'd just re-mark the same way - * anyway on a rescan. Something to keep an eye on though. + * anyway on a rescan. Something to keep an eye on though. */ static void _bt_mark_scankey_required(ScanKey skey) @@ -967,7 +967,7 @@ _bt_checkkeys(IndexScanDesc scan, /* * Since NULLs are sorted before non-NULLs, we know we have * reached the lower limit of the range of values for this - * index attr. On a backward scan, we can stop if this qual + * index attr. On a backward scan, we can stop if this qual * is one of the "must match" subset. 
On a forward scan, * however, we should keep going. */ @@ -980,8 +980,8 @@ _bt_checkkeys(IndexScanDesc scan, /* * Since NULLs are sorted after non-NULLs, we know we have * reached the upper limit of the range of values for this - * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. On a backward scan, + * index attr. On a forward scan, we can stop if this qual is + * one of the "must match" subset. On a backward scan, * however, we should keep going. */ if ((key->sk_flags & SK_BT_REQFWD) && @@ -1072,7 +1072,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, * Since NULLs are sorted before non-NULLs, we know we have * reached the lower limit of the range of values for this * index attr. On a backward scan, we can stop if this qual is - * one of the "must match" subset. On a forward scan, + * one of the "must match" subset. On a forward scan, * however, we should keep going. */ if ((subkey->sk_flags & SK_BT_REQBKWD) && @@ -1085,7 +1085,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, * Since NULLs are sorted after non-NULLs, we know we have * reached the upper limit of the range of values for this * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. On a backward scan, + * one of the "must match" subset. On a backward scan, * however, we should keep going. */ if ((subkey->sk_flags & SK_BT_REQFWD) && @@ -1103,7 +1103,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, { /* * Unlike the simple-scankey case, this isn't a disallowed case. - * But it can never match. If all the earlier row comparison + * But it can never match. If all the earlier row comparison * columns are required for the scan direction, we can stop the * scan, because there can't be another tuple that will succeed. */ @@ -1168,7 +1168,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Tuple fails this qual. If it's a required qual for the current * scan direction, then we can conclude no further tuples will pass, - * either. Note we have to look at the deciding column, not + * either. Note we have to look at the deciding column, not * necessarily the first or last column of the row condition. */ if ((subkey->sk_flags & SK_BT_REQFWD) && @@ -1194,7 +1194,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, * is sufficient for setting LP_DEAD status (which is only a hint). * * We match items by heap TID before assuming they are the right ones to - * delete. We cope with cases where items have moved right due to insertions. + * delete. We cope with cases where items have moved right due to insertions. * If an item has moved off the current page due to a split, we'll fail to * find it and do nothing (this is not an error case --- we assume the item * will eventually get marked in a future indexscan). Note that because we @@ -1280,8 +1280,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock) /* * The following routines manage a shared-memory area in which we track * assignment of "vacuum cycle IDs" to currently-active btree vacuuming - * operations. There is a single counter which increments each time we - * start a vacuum to assign it a cycle ID. Since multiple vacuums could + * operations. There is a single counter which increments each time we + * start a vacuum to assign it a cycle ID. 
Since multiple vacuums could * be active concurrently, we have to track the cycle ID for each active * vacuum; this requires at most MaxBackends entries (usually far fewer). * We assume at most one vacuum can be active for a given index. diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index f0fe8a3892e..7c72d4ce3d0 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -130,7 +130,7 @@ forget_matching_deletion(RelFileNode node, BlockNumber delblk) * in correct itemno sequence, but physically the opposite order from the * original, because we insert them in the opposite of itemno order. This * does not matter in any current btree code, but it's something to keep an - * eye on. Is it worth changing just on general principles? See also the + * eye on. Is it worth changing just on general principles? See also the * notes in btree_xlog_split(). */ static void @@ -181,7 +181,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn, pageop->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not essential + * Set pd_lower just past the end of the metadata. This is not essential * but it makes the page look compressible to xlog.c. */ ((PageHeader) metapg)->pd_lower = @@ -392,7 +392,7 @@ btree_xlog_split(bool onleft, bool isroot, /* * Remove the items from the left page that were copied to the - * right page. Also remove the old high key, if any. (We must + * right page. Also remove the old high key, if any. (We must * remove everything before trying to insert any items, else * we risk not having enough space.) */ diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index b8703aac91d..562ade85835 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -11,15 +11,15 @@ * log can be broken into relatively small, independent segments. * * XLOG interactions: this module generates an XLOG record whenever a new - * CLOG page is initialized to zeroes. Other writes of CLOG come from + * CLOG page is initialized to zeroes. Other writes of CLOG come from * recording of transaction commit or abort in xact.c, which generates its * own XLOG records for these events and will re-perform the status update - * on redo; so we need make no additional XLOG entry here. For synchronous + * on redo; so we need make no additional XLOG entry here. For synchronous * transaction commits, the XLOG is guaranteed flushed through the XLOG commit * record before we are called to log a commit, so the WAL rule "write xlog * before data" is satisfied automatically. However, for async commits we * must track the latest LSN affecting each CLOG page, so that we can flush - * XLOG that far and satisfy the WAL rule. We don't have to worry about this + * XLOG that far and satisfy the WAL rule. We don't have to worry about this * for aborts (whether sync or async), since the post-crash assumption would * be that such transactions failed anyway. * @@ -104,7 +104,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids, * in the tree of xid. In various cases nsubxids may be zero. * * lsn must be the WAL location of the commit record when recording an async - * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the + * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the * caller guarantees the commit record is already flushed in that case. It * should be InvalidXLogRecPtr for abort cases, too. 
* diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 9e4ecdc3bba..c940e1d2b1e 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -4,15 +4,15 @@ * PostgreSQL multi-transaction-log manager * * The pg_multixact manager is a pg_clog-like manager that stores an array - * of TransactionIds for each MultiXactId. It is a fundamental part of the - * shared-row-lock implementation. A share-locked tuple stores a + * of TransactionIds for each MultiXactId. It is a fundamental part of the + * shared-row-lock implementation. A share-locked tuple stores a * MultiXactId in its Xmax, and a transaction that needs to wait for the * tuple to be unlocked can sleep on the potentially-several TransactionIds * that compose the MultiXactId. * * We use two SLRU areas, one for storing the offsets at which the data * starts for each MultiXactId in the other one. This trick allows us to - * store variable length arrays of TransactionIds. (We could alternatively + * store variable length arrays of TransactionIds. (We could alternatively * use one area containing counts and TransactionIds, with valid MultiXactId * values pointing at slots containing counts; but that way seems less robust * since it would get completely confused if someone inquired about a bogus @@ -32,7 +32,7 @@ * * Like clog.c, and unlike subtrans.c, we have to preserve state across * crashes and ensure that MXID and offset numbering increases monotonically - * across a crash. We do this in the same way as it's done for transaction + * across a crash. We do this in the same way as it's done for transaction * IDs: the WAL record is guaranteed to contain evidence of every MXID we * could need to worry about, and we just make sure that at the end of * replay, the next-MXID and next-offset counters are at least as large as @@ -64,13 +64,13 @@ /* - * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is + * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is * used everywhere else in Postgres. * * Note: because both MultiXactOffsets and TransactionIds are 32 bits and * wrap around at 0xFFFFFFFF, MultiXact page numbering also wraps around at * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE, and segment numbering at - * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no + * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no * explicit notice of that fact in this module, except when comparing segment * and page numbers in TruncateMultiXact * (see MultiXact{Offset,Member}PagePrecedes). @@ -101,7 +101,7 @@ static SlruCtlData MultiXactMemberCtlData; #define MultiXactMemberCtl (&MultiXactMemberCtlData) /* - * MultiXact state shared across all backends. All this state is protected + * MultiXact state shared across all backends. All this state is protected * by MultiXactGenLock. (We also use MultiXactOffsetControlLock and * MultiXactMemberControlLock to guard accesses to the two sets of SLRU * buffers. For concurrency's sake, we avoid holding more than one of these @@ -343,7 +343,7 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid) /* * Determine which of the members of the MultiXactId are still running, * and use them to create a new one. (Removing dead members is just an - * optimization, but a useful one. Note we have the same race condition + * optimization, but a useful one. Note we have the same race condition * here as above: j could be 0 at the end of the loop.) 
@@ -408,7 +408,7 @@ MultiXactIdIsRunning(MultiXactId multi)
 
 	/*
 	 * This could be made faster by having another entry point in procarray.c,
-	 * walking the PGPROC array only once for all the members.	But in most
+	 * walking the PGPROC array only once for all the members.  But in most
 	 * cases nmembers should be small enough that it doesn't much matter.
 	 */
 	for (i = 0; i < nmembers; i++)
@@ -527,7 +527,7 @@ MultiXactIdSetOldestMember(void)
  * The value to set is the oldest of nextMXact and all the valid per-backend
  * OldestMemberMXactId[] entries.  Because of the locking we do, we can be
  * certain that no subsequent call to MultiXactIdSetOldestMember can set
- * an OldestMemberMXactId[] entry older than what we compute here.	Therefore
+ * an OldestMemberMXactId[] entry older than what we compute here.  Therefore
  * there is no live transaction, now or later, that can be a member of any
  * MultiXactId older than the OldestVisibleMXactId we compute here.
  */
@@ -698,7 +698,7 @@ CreateMultiXactId(int nxids, TransactionId *xids)
 	 * heap_lock_tuple() to have put it there, and heap_lock_tuple() generates
 	 * an XLOG record that must follow ours.  The normal LSN interlock between
 	 * the data page and that XLOG record will ensure that our XLOG record
-	 * reaches disk first.	If the SLRU members/offsets data reaches disk
+	 * reaches disk first.  If the SLRU members/offsets data reaches disk
 	 * sooner than the XLOG record, we do not care because we'll overwrite it
 	 * with zeroes unless the XLOG record is there too; see notes at top of
 	 * this file.
@@ -805,7 +805,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
 * GetNewMultiXactId
 *		Get the next MultiXactId.
 *
- * Also, reserve the needed amount of space in the "members" area.	The
+ * Also, reserve the needed amount of space in the "members" area.  The
 * starting offset of the reserved space is returned in *offset.
 *
 * This may generate XLOG records for expansion of the offsets and/or members
@@ -874,7 +874,7 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
 	 * until after file extension has succeeded!
 	 *
 	 * We don't care about MultiXactId wraparound here; it will be handled by
-	 * the next iteration.	But note that nextMXact may be InvalidMultiXactId
+	 * the next iteration.  But note that nextMXact may be InvalidMultiXactId
 	 * after this routine exits, so anyone else looking at the variable must
 	 * be prepared to deal with that.  Similarly, nextOffset may be zero, but
 	 * we won't use that as the actual start offset of the next multixact.
@@ -942,7 +942,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
 	 * SLRU data if we did try to examine it.
 	 *
 	 * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
-	 * seen, it implies undetected ID wraparound has occurred.	We just
+	 * seen, it implies undetected ID wraparound has occurred.  We just
 	 * silently assume that such an ID is no longer running.
 	 *
 	 * Shared lock is enough here since we aren't modifying any global state.
@@ -958,7 +958,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
 
 	/*
 	 * Acquire the shared lock just long enough to grab the current counter
-	 * values.	We may need both nextMXact and nextOffset; see below.
+	 * values.  We may need both nextMXact and nextOffset; see below.
 	 */
 	LWLockAcquire(MultiXactGenLock, LW_SHARED);
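The MultiXactIdIsRunning() comment above deliberately accepts one ProcArray probe per member instead of a single pass over the PGPROC array. A sketch of the loop it describes, with members/nmembers as returned by GetMultiXactIdMembers() (error paths omitted):

	for (i = 0; i < nmembers; i++)
	{
		/* one ProcArray check per member; cheap while nmembers is small */
		if (TransactionIdIsInProgress(members[i]))
		{
			pfree(members);
			return true;
		}
	}

	pfree(members);
	return false;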
@@ -976,12 +976,12 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
 	/*
 	 * Find out the offset at which we need to start reading MultiXactMembers
-	 * and the number of members in the multixact.	We determine the latter as
+	 * and the number of members in the multixact.  We determine the latter as
 	 * the difference between this multixact's starting offset and the next
 	 * one's.  However, there are some corner cases to worry about:
 	 *
 	 * 1. This multixact may be the latest one created, in which case there is
-	 * no next one to look at.	In this case the nextOffset value we just
+	 * no next one to look at.  In this case the nextOffset value we just
 	 * saved is the correct endpoint.
 	 *
 	 * 2. The next multixact may still be in process of being filled in: that
@@ -992,11 +992,11 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
 	 * (because we are careful to pre-zero offset pages).  Because
 	 * GetNewMultiXactId will never return zero as the starting offset for a
 	 * multixact, when we read zero as the next multixact's offset, we know we
-	 * have this case.	We sleep for a bit and try again.
+	 * have this case.  We sleep for a bit and try again.
 	 *
 	 * 3. Because GetNewMultiXactId increments offset zero to offset one to
 	 * handle case #2, there is an ambiguity near the point of offset
-	 * wraparound.	If we see next multixact's offset is one, is that our
+	 * wraparound.  If we see next multixact's offset is one, is that our
 	 * multixact's actual endpoint, or did it end at zero with a subsequent
 	 * increment?  We handle this using the knowledge that if the zero'th
 	 * member slot wasn't filled, it'll contain zero, and zero isn't a valid
@@ -1388,7 +1388,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info,
 
 /*
- * Initialization of shared memory for MultiXact.	We use two SLRU areas,
+ * Initialization of shared memory for MultiXact.  We use two SLRU areas,
  * thus double memory.  Also, reserve space for the shared MultiXactState
  * struct and the per-backend MultiXactId arrays (two of those, too).
  */
 Size
@@ -1448,7 +1448,7 @@ MultiXactShmemInit(void)
 
 /*
 * This func must be called ONCE on system install.  It creates the initial
- * MultiXact segments.	(The MultiXacts directories are assumed to have been
+ * MultiXact segments.  (The MultiXacts directories are assumed to have been
 * created by initdb, and MultiXactShmemInit must have been called already.)
 */
 void
@@ -1568,7 +1568,7 @@ TrimMultiXact(void)
 	MultiXactOffsetCtl->shared->latest_page_number = pageno;
 
 	/*
-	 * Zero out the remainder of the current offsets page.	See notes in
+	 * Zero out the remainder of the current offsets page.  See notes in
 	 * StartupCLOG() for motivation.
 	 */
 	entryno = MultiXactIdToOffsetEntry(multi);
@@ -1598,7 +1598,7 @@ TrimMultiXact(void)
 	MultiXactMemberCtl->shared->latest_page_number = pageno;
 
 	/*
-	 * Zero out the remainder of the current members page.	See notes in
+	 * Zero out the remainder of the current members page.  See notes in
 	 * TrimCLOG() for motivation.
 	 */
 	entryno = MXOffsetToMemberEntry(offset);
@@ -1799,7 +1799,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
 * Remove all MultiXactOffset and MultiXactMember segments before the oldest
 * ones still of interest.
 *
- * This is called only during checkpoints.	We assume no more than one
+ * This is called only during checkpoints.  We assume no more than one
 * backend does this at a time.
 *
 * XXX do we have any issues with needing to checkpoint here?
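Corner cases 1-3 in the GetMultiXactIdMembers() comment above condense to a small piece of control flow. A sketch under the same assumptions, with a hypothetical read_next_offset() standing in for the offsets-SLRU lookup; the zero'th-member disambiguation for case 3 is omitted here:

	if (multi + 1 == nextMXact)
		length = nextOffset - offset;	/* case 1: we are the latest one */
	else
	{
		MultiXactOffset nextMXOffset = read_next_offset(multi + 1);

		while (nextMXOffset == 0)		/* case 2: still being filled in */
		{
			pg_usleep(1000L);			/* sleep for a bit and try again */
			nextMXOffset = read_next_offset(multi + 1);
		}
		length = nextMXOffset - offset;
	}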
@@ -1860,7 +1860,7 @@ TruncateMultiXact(void)
 		return;
 
 	/*
-	 * We need to determine where to truncate MultiXactMember.	If we found a
+	 * We need to determine where to truncate MultiXactMember.  If we found a
 	 * valid oldest MultiXactId, read its starting offset; otherwise we use
 	 * the nextOffset value we saved above.
 	 */
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 69e5245beba..3e92903f938 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -15,7 +15,7 @@
 *
 * We use a control LWLock to protect the shared data structures, plus
 * per-buffer LWLocks that synchronize I/O for each buffer.  The control lock
- * must be held to examine or modify any shared state.	A process that is
+ * must be held to examine or modify any shared state.  A process that is
 * reading in or writing out a page buffer does not hold the control lock,
 * only the per-buffer lock for the buffer it is working on.
 *
@@ -34,7 +34,7 @@
 * could have happened while we didn't have the lock).
 *
 * As with the regular buffer manager, it is possible for another process
- * to re-dirty a page that is currently being written out.	This is handled
+ * to re-dirty a page that is currently being written out.  This is handled
 * by re-setting the page's page_dirty flag.
 *
 *
@@ -96,7 +96,7 @@ typedef struct SlruFlushData *SlruFlush;
 * page_lru_count entries to be "reset" to lower values than they should have,
 * in case a process is delayed while it executes this macro.  With care in
 * SlruSelectLRUPage(), this does little harm, and in any case the absolute
- * worst possible consequence is a nonoptimal choice of page to evict.	The
+ * worst possible consequence is a nonoptimal choice of page to evict.  The
 * gain from allowing concurrent reads of SLRU pages seems worth it.
 */
 #define SlruRecentlyUsed(shared, slotno) \
@@ -483,7 +483,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
 *
 * NOTE: only one write attempt is made here.  Hence, it is possible that
 * the page is still dirty at exit (if someone else re-dirtied it during
- * the write).	However, we *do* attempt a fresh write even if the page
+ * the write).  However, we *do* attempt a fresh write even if the page
 * is already being written; this is for checkpoints.
 *
 * Control lock must be held at entry, and will be held at exit.
@@ -592,7 +592,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
 	 * In a crash-and-restart situation, it's possible for us to receive
 	 * commands to set the commit status of transactions whose bits are in
 	 * already-truncated segments of the commit log (see notes in
-	 * SlruPhysicalWritePage).	Hence, if we are InRecovery, allow the case
+	 * SlruPhysicalWritePage).  Hence, if we are InRecovery, allow the case
 	 * where the file doesn't exist, and return zeroes instead.
 	 */
 	fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
@@ -1123,7 +1123,7 @@ restart:;
 	/*
 	 * Hmm, we have (or may have) I/O operations acting on the page, so
 	 * we've got to wait for them to finish and then start again. This is
-	 * the same logic as in SlruSelectLRUPage.	(XXX if page is dirty,
+	 * the same logic as in SlruSelectLRUPage.  (XXX if page is dirty,
 	 * wouldn't it be OK to just discard it without writing it? For now,
 	 * keep the logic the same as it was.)
 	 */
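For reference, the SlruRecentlyUsed macro named in the hunk above is short; the version below is reconstructed from memory and should be read as a sketch, not the exact source. The unlocked read-then-write is what lets two concurrent bumps "lose" an update, and per the comment the worst case is merely a suboptimal eviction choice:

#define SlruRecentlyUsed(shared, slotno) \
	do { \
		int		new_lru_count = (shared)->cur_lru_count; \
		if (new_lru_count != (shared)->page_lru_count[slotno]) \
		{ \
			(shared)->cur_lru_count = ++new_lru_count; \
			(shared)->page_lru_count[slotno] = new_lru_count; \
		} \
	} while (0)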
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index deef99aeb22..dff85c6b04d 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -5,7 +5,7 @@
 *
 * The pg_subtrans manager is a pg_clog-like manager that stores the parent
 * transaction Id for each transaction.  It is a fundamental part of the
- * nested transactions impleme