diff options
author | Bruce Momjian | 2012-06-10 19:20:04 +0000 |
---|---|---|
committer | Bruce Momjian | 2012-06-10 19:20:04 +0000 |
commit | 927d61eeff78363ea3938c818d07e511ebaf75cf (patch) | |
tree | 2f0bcecf53327f76272a8ce690fa62505520fab9 | |
parent | 60801944fa105252b48ea5688d47dfc05c695042 (diff) |
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
494 files changed, 7504 insertions, 7207 deletions
diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c index e48ea489dc..ad333b6644 100644 --- a/contrib/auto_explain/auto_explain.c +++ b/contrib/auto_explain/auto_explain.c @@ -23,7 +23,7 @@ static int auto_explain_log_min_duration = -1; /* msec or -1 */ static bool auto_explain_log_analyze = false; static bool auto_explain_log_verbose = false; static bool auto_explain_log_buffers = false; -static bool auto_explain_log_timing = false; +static bool auto_explain_log_timing = false; static int auto_explain_log_format = EXPLAIN_FORMAT_TEXT; static bool auto_explain_log_nested_statements = false; diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 71acb35af6..1e62d8091a 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -1140,7 +1140,7 @@ storeHandler(PGresult *res, const PGdataValue *columns, * strings and add null termination. As a micro-optimization, allocate * all the strings with one palloc. */ - pbuflen = nfields; /* count the null terminators themselves */ + pbuflen = nfields; /* count the null terminators themselves */ for (i = 0; i < nfields; i++) { int len = columns[i].len; diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index 66fd0e62cc..e3b9223b3e 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -109,17 +109,17 @@ PG_FUNCTION_INFO_V1(file_fdw_validator); * FDW callback routines */ static void fileGetForeignRelSize(PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid); + RelOptInfo *baserel, + Oid foreigntableid); static void fileGetForeignPaths(PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid); + RelOptInfo *baserel, + Oid foreigntableid); static ForeignScan *fileGetForeignPlan(PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid, - ForeignPath *best_path, - List *tlist, - List *scan_clauses); + RelOptInfo *baserel, + Oid foreigntableid, + ForeignPath *best_path, + List *tlist, + List *scan_clauses); static void fileExplainForeignScan(ForeignScanState *node, ExplainState *es); static void fileBeginForeignScan(ForeignScanState *node, int eflags); static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node); @@ -141,7 +141,7 @@ static void estimate_size(PlannerInfo *root, RelOptInfo *baserel, static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel, FileFdwPlanState *fdw_private, Cost *startup_cost, Cost *total_cost); -static int file_acquire_sample_rows(Relation onerel, int elevel, +static int file_acquire_sample_rows(Relation onerel, int elevel, HeapTuple *rows, int targrows, double *totalrows, double *totaldeadrows); @@ -180,7 +180,7 @@ file_fdw_validator(PG_FUNCTION_ARGS) List *options_list = untransformRelOptions(PG_GETARG_DATUM(0)); Oid catalog = PG_GETARG_OID(1); char *filename = NULL; - DefElem *force_not_null = NULL; + DefElem *force_not_null = NULL; List *other_options = NIL; ListCell *cell; @@ -233,7 +233,7 @@ file_fdw_validator(PG_FUNCTION_ARGS) buf.len > 0 ? 
errhint("Valid options in this context are: %s", buf.data) - : errhint("There are no valid options in this context."))); + : errhint("There are no valid options in this context."))); } /* @@ -393,13 +393,13 @@ get_file_fdw_attribute_options(Oid relid) options = GetForeignColumnOptions(relid, attnum); foreach(lc, options) { - DefElem *def = (DefElem *) lfirst(lc); + DefElem *def = (DefElem *) lfirst(lc); if (strcmp(def->defname, "force_not_null") == 0) { if (defGetBoolean(def)) { - char *attname = pstrdup(NameStr(attr->attname)); + char *attname = pstrdup(NameStr(attr->attname)); fnncolumns = lappend(fnncolumns, makeString(attname)); } @@ -429,8 +429,8 @@ fileGetForeignRelSize(PlannerInfo *root, FileFdwPlanState *fdw_private; /* - * Fetch options. We only need filename at this point, but we might - * as well get everything and not need to re-fetch it later in planning. + * Fetch options. We only need filename at this point, but we might as + * well get everything and not need to re-fetch it later in planning. */ fdw_private = (FileFdwPlanState *) palloc(sizeof(FileFdwPlanState)); fileGetOptions(foreigntableid, @@ -468,13 +468,14 @@ fileGetForeignPaths(PlannerInfo *root, baserel->rows, startup_cost, total_cost, - NIL, /* no pathkeys */ - NULL, /* no outer rel either */ - NIL)); /* no fdw_private data */ + NIL, /* no pathkeys */ + NULL, /* no outer rel either */ + NIL)); /* no fdw_private data */ /* * If data file was sorted, and we knew it somehow, we could insert - * appropriate pathkeys into the ForeignPath node to tell the planner that. + * appropriate pathkeys into the ForeignPath node to tell the planner + * that. */ } @@ -505,8 +506,8 @@ fileGetForeignPlan(PlannerInfo *root, return make_foreignscan(tlist, scan_clauses, scan_relid, - NIL, /* no expressions to evaluate */ - NIL); /* no private state either */ + NIL, /* no expressions to evaluate */ + NIL); /* no private state either */ } /* @@ -665,14 +666,14 @@ fileAnalyzeForeignTable(Relation relation, { char *filename; List *options; - struct stat stat_buf; + struct stat stat_buf; /* Fetch options of foreign table */ fileGetOptions(RelationGetRelid(relation), &filename, &options); /* - * Get size of the file. (XXX if we fail here, would it be better to - * just return false to skip analyzing the table?) + * Get size of the file. (XXX if we fail here, would it be better to just + * return false to skip analyzing the table?) */ if (stat(filename, &stat_buf) < 0) ereport(ERROR, @@ -746,7 +747,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel, * planner's idea of the relation width; which is bogus if not all * columns are being read, not to mention that the text representation * of a row probably isn't the same size as its internal - * representation. Possibly we could do something better, but the + * representation. Possibly we could do something better, but the * real answer to anyone who complains is "ANALYZE" ... */ int tuple_width; @@ -811,7 +812,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel, * which must have at least targrows entries. * The actual number of rows selected is returned as the function result. * We also count the total number of rows in the file and return it into - * *totalrows. Note that *totaldeadrows is always set to 0. + * *totalrows. Note that *totaldeadrows is always set to 0. * * Note that the returned list of rows is not always in order by physical * position in the file. 
Therefore, correlation estimates derived later @@ -824,7 +825,7 @@ file_acquire_sample_rows(Relation onerel, int elevel, double *totalrows, double *totaldeadrows) { int numrows = 0; - double rowstoskip = -1; /* -1 means not set yet */ + double rowstoskip = -1; /* -1 means not set yet */ double rstate; TupleDesc tupDesc; Datum *values; @@ -853,8 +854,8 @@ file_acquire_sample_rows(Relation onerel, int elevel, cstate = BeginCopyFrom(onerel, filename, NIL, options); /* - * Use per-tuple memory context to prevent leak of memory used to read rows - * from the file with Copy routines. + * Use per-tuple memory context to prevent leak of memory used to read + * rows from the file with Copy routines. */ tupcontext = AllocSetContextCreate(CurrentMemoryContext, "file_fdw temporary context", @@ -912,10 +913,10 @@ file_acquire_sample_rows(Relation onerel, int elevel, if (rowstoskip <= 0) { /* - * Found a suitable tuple, so save it, replacing one - * old tuple at random + * Found a suitable tuple, so save it, replacing one old tuple + * at random */ - int k = (int) (targrows * anl_random_fract()); + int k = (int) (targrows * anl_random_fract()); Assert(k >= 0 && k < targrows); heap_freetuple(rows[k]); diff --git a/contrib/pg_archivecleanup/pg_archivecleanup.c b/contrib/pg_archivecleanup/pg_archivecleanup.c index 20977805c8..a226101bbc 100644 --- a/contrib/pg_archivecleanup/pg_archivecleanup.c +++ b/contrib/pg_archivecleanup/pg_archivecleanup.c @@ -37,7 +37,7 @@ const char *progname; /* Options and defaults */ bool debug = false; /* are we debugging? */ bool dryrun = false; /* are we performing a dry-run operation? */ -char *additional_ext = NULL; /* Extension to remove from filenames */ +char *additional_ext = NULL; /* Extension to remove from filenames */ char *archiveLocation; /* where to find the archive? */ char *restartWALFileName; /* the file from which we can restart restore */ @@ -136,12 +136,13 @@ CleanupPriorWALFiles(void) * they were originally written, in case this worries you. */ if (strlen(walfile) == XLOG_DATA_FNAME_LEN && - strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN && + strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN && strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0) { - /* - * Use the original file name again now, including any extension - * that might have been chopped off before testing the sequence. + /* + * Use the original file name again now, including any + * extension that might have been chopped off before testing + * the sequence. */ snprintf(WALFilePath, MAXPGPATH, "%s/%s", archiveLocation, xlde->d_name); @@ -150,7 +151,7 @@ CleanupPriorWALFiles(void) { /* * Prints the name of the file to be removed and skips the - * actual removal. The regular printout is so that the + * actual removal. The regular printout is so that the * user can pipe the output into some other program. 
*/ printf("%s\n", WALFilePath); @@ -298,7 +299,8 @@ main(int argc, char **argv) dryrun = true; break; case 'x': - additional_ext = optarg; /* Extension to remove from xlogfile names */ + additional_ext = optarg; /* Extension to remove from + * xlogfile names */ break; default: fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 06869fa344..aa11c144d6 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -103,19 +103,19 @@ typedef struct Counters int64 calls; /* # of times executed */ double total_time; /* total execution time, in msec */ int64 rows; /* total # of retrieved or affected rows */ - int64 shared_blks_hit; /* # of shared buffer hits */ + int64 shared_blks_hit; /* # of shared buffer hits */ int64 shared_blks_read; /* # of shared disk blocks read */ int64 shared_blks_dirtied; /* # of shared disk blocks dirtied */ int64 shared_blks_written; /* # of shared disk blocks written */ - int64 local_blks_hit; /* # of local buffer hits */ - int64 local_blks_read; /* # of local disk blocks read */ + int64 local_blks_hit; /* # of local buffer hits */ + int64 local_blks_read; /* # of local disk blocks read */ int64 local_blks_dirtied; /* # of local disk blocks dirtied */ int64 local_blks_written; /* # of local disk blocks written */ - int64 temp_blks_read; /* # of temp blocks read */ + int64 temp_blks_read; /* # of temp blocks read */ int64 temp_blks_written; /* # of temp blocks written */ - double blk_read_time; /* time spent reading, in msec */ - double blk_write_time; /* time spent writing, in msec */ - double usage; /* usage factor */ + double blk_read_time; /* time spent reading, in msec */ + double blk_write_time; /* time spent writing, in msec */ + double usage; /* usage factor */ } Counters; /* @@ -140,7 +140,7 @@ typedef struct pgssSharedState { LWLockId lock; /* protects hashtable search/modification */ int query_size; /* max query length in bytes */ - double cur_median_usage; /* current median usage in hashtable */ + double cur_median_usage; /* current median usage in hashtable */ } pgssSharedState; /* @@ -150,7 +150,7 @@ typedef struct pgssLocationLen { int location; /* start offset in query text */ int length; /* length in bytes, or -1 to ignore */ -} pgssLocationLen; +} pgssLocationLen; /* * Working state for computing a query jumble and producing a normalized @@ -172,7 +172,7 @@ typedef struct pgssJumbleState /* Current number of valid entries in clocations array */ int clocations_count; -} pgssJumbleState; +} pgssJumbleState; /*---- Local variables ----*/ @@ -248,21 +248,21 @@ static uint32 pgss_hash_string(const char *str); static void pgss_store(const char *query, uint32 queryId, double total_time, uint64 rows, const BufferUsage *bufusage, - pgssJumbleState * jstate); + pgssJumbleState *jstate); static Size pgss_memsize(void); static pgssEntry *entry_alloc(pgssHashKey *key, const char *query, - int query_len, bool sticky); + int query_len, bool sticky); static void entry_dealloc(void); static void entry_reset(void); -static void AppendJumble(pgssJumbleState * jstate, +static void AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size); -static void JumbleQuery(pgssJumbleState * jstate, Query *query); -static void JumbleRangeTable(pgssJumbleState * jstate, List *rtable); -static void JumbleExpr(pgssJumbleState * jstate, Node *node); -static void 
RecordConstLocation(pgssJumbleState * jstate, int location); -static char *generate_normalized_query(pgssJumbleState * jstate, const char *query, +static void JumbleQuery(pgssJumbleState *jstate, Query *query); +static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable); +static void JumbleExpr(pgssJumbleState *jstate, Node *node); +static void RecordConstLocation(pgssJumbleState *jstate, int location); +static char *generate_normalized_query(pgssJumbleState *jstate, const char *query, int *query_len_p, int encoding); -static void fill_in_constant_lengths(pgssJumbleState * jstate, const char *query); +static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query); static int comp_location(const void *a, const void *b); @@ -513,8 +513,8 @@ pgss_shmem_startup(void) FreeFile(file); /* - * Remove the file so it's not included in backups/replication - * slaves, etc. A new file will be written on next shutdown. + * Remove the file so it's not included in backups/replication slaves, + * etc. A new file will be written on next shutdown. */ unlink(PGSS_DUMP_FILE); @@ -600,7 +600,7 @@ error: ereport(LOG, (errcode_for_file_access(), errmsg("could not write pg_stat_statement file \"%s\": %m", - PGSS_DUMP_FILE ".tmp"))); + PGSS_DUMP_FILE ".tmp"))); if (file) FreeFile(file); unlink(PGSS_DUMP_FILE ".tmp"); @@ -626,8 +626,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) * the statement contains an optimizable statement for which a queryId * could be derived (such as EXPLAIN or DECLARE CURSOR). For such cases, * runtime control will first go through ProcessUtility and then the - * executor, and we don't want the executor hooks to do anything, since - * we are already measuring the statement's costs at the utility level. + * executor, and we don't want the executor hooks to do anything, since we + * are already measuring the statement's costs at the utility level. */ if (query->utilityStmt) { @@ -768,7 +768,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) pgss_store(queryDesc->sourceText, queryId, - queryDesc->totaltime->total * 1000.0, /* convert to msec */ + queryDesc->totaltime->total * 1000.0, /* convert to msec */ queryDesc->estate->es_processed, &queryDesc->totaltime->bufusage, NULL); @@ -789,10 +789,9 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString, DestReceiver *dest, char *completionTag) { /* - * If it's an EXECUTE statement, we don't track it and don't increment - * the nesting level. This allows the cycles to be charged to the - * underlying PREPARE instead (by the Executor hooks), which is much more - * useful. + * If it's an EXECUTE statement, we don't track it and don't increment the + * nesting level. This allows the cycles to be charged to the underlying + * PREPARE instead (by the Executor hooks), which is much more useful. * * We also don't track execution of PREPARE. If we did, we would get one * hash table entry for the PREPARE (with hash calculated from the query @@ -942,7 +941,7 @@ static void pgss_store(const char *query, uint32 queryId, double total_time, uint64 rows, const BufferUsage *bufusage, - pgssJumbleState * jstate) + pgssJumbleState *jstate) { pgssHashKey key; pgssEntry *entry; @@ -1355,7 +1354,7 @@ entry_reset(void) * the current jumble. 
*/ static void -AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size) +AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size) { unsigned char *jumble = jstate->jumble; Size jumble_len = jstate->jumble_len; @@ -1404,7 +1403,7 @@ AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size) * of information). */ static void -JumbleQuery(pgssJumbleState * jstate, Query *query) +JumbleQuery(pgssJumbleState *jstate, Query *query) { Assert(IsA(query, Query)); Assert(query->utilityStmt == NULL); @@ -1431,7 +1430,7 @@ JumbleQuery(pgssJumbleState * jstate, Query *query) * Jumble a range table */ static void -JumbleRangeTable(pgssJumbleState * jstate, List *rtable) +JumbleRangeTable(pgssJumbleState *jstate, List *rtable) { ListCell *lc; @@ -1485,11 +1484,11 @@ JumbleRangeTable(pgssJumbleState * jstate, List *rtable) * * Note: the reason we don't simply use expression_tree_walker() is that the * point of that function is to support tree walkers that don't care about - * most tree node types, but here we care about all types. We should complain + * most tree node types, but here we care about all types. We should complain * about any unrecognized node type. */ static void -JumbleExpr(pgssJumbleState * jstate, Node *node) +JumbleExpr(pgssJumbleState *jstate, Node *node) { ListCell *temp; @@ -1874,7 +1873,7 @@ JumbleExpr(pgssJumbleState * jstate, Node *node) * that is currently being walked. */ static void -RecordConstLocation(pgssJumbleState * jstate, int location) +RecordConstLocation(pgssJumbleState *jstate, int location) { /* -1 indicates unknown or undefined location */ if (location >= 0) @@ -1909,7 +1908,7 @@ RecordConstLocation(pgssJumbleState * jstate, int location) * Returns a palloc'd string, which is not necessarily null-terminated. */ static char * -generate_normalized_query(pgssJumbleState * jstate, const char *query, +generate_normalized_query(pgssJumbleState *jstate, const char *query, int *query_len_p, int encoding) { char *norm_query; @@ -2007,7 +2006,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query, * a problem. * * Duplicate constant pointers are possible, and will have their lengths - * marked as '-1', so that they are later ignored. (Actually, we assume the + * marked as '-1', so that they are later ignored. (Actually, we assume the * lengths were initialized as -1 to start with, and don't change them here.) * * N.B. There is an assumption that a '-' character at a Const location begins @@ -2015,7 +2014,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query, * reason for a constant to start with a '-'. */ static void -fill_in_constant_lengths(pgssJumbleState * jstate, const char *query) +fill_in_constant_lengths(pgssJumbleState *jstate, const char *query) { pgssLocationLen *locs; core_yyscan_t yyscanner; diff --git a/contrib/pg_test_fsync/pg_test_fsync.c b/contrib/pg_test_fsync/pg_test_fsync.c index 7f92bc8818..9fe2301e41 100644 --- a/contrib/pg_test_fsync/pg_test_fsync.c +++ b/contrib/pg_test_fsync/pg_test_fsync.c @@ -29,7 +29,7 @@ /* These are macros to avoid timing the function call overhead. 
*/ #ifndef WIN32 -#define START_TIMER \ +#define START_TIMER \ do { \ alarm_triggered = false; \ alarm(secs_per_test); \ @@ -37,7 +37,7 @@ do { \ } while (0) #else /* WIN32 doesn't support alarm, so we create a thread and sleep there */ -#define START_TIMER \ +#define START_TIMER \ do { \ alarm_triggered = false; \ if (CreateThread(NULL, 0, process_alarm, NULL, 0, NULL) == \ @@ -55,7 +55,7 @@ do { \ gettimeofday(&stop_t, NULL); \ print_elapse(start_t, stop_t, ops); \ } while (0) - + static const char *progname; @@ -77,6 +77,7 @@ static void test_sync(int writes_per_op); static void test_open_syncs(void); static void test_open_sync(const char *msg, int writes_size); static void test_file_descriptor_sync(void); + #ifndef WIN32 static void process_alarm(int sig); #else diff --git a/contrib/pg_test_timing/pg_test_timing.c b/contrib/pg_test_timing/pg_test_timing.c index 4e43694338..b3f98abe5c 100644 --- a/contrib/pg_test_timing/pg_test_timing.c +++ b/contrib/pg_test_timing/pg_test_timing.c @@ -1,7 +1,7 @@ /* * pg_test_timing.c - * tests overhead of timing calls and their monotonicity: that - * they always move forward + * tests overhead of timing calls and their monotonicity: that + * they always move forward */ #include "postgres_fe.h" @@ -35,8 +35,8 @@ handle_args(int argc, char *argv[]) {"duration", required_argument, NULL, 'd'}, {NULL, 0, NULL, 0} }; - int option; /* Command line option */ - int optindex = 0; /* used by getopt_long */ + int option; /* Command line option */ + int optindex = 0; /* used by getopt_long */ if (argc > 1) { @@ -87,7 +87,7 @@ handle_args(int argc, char *argv[]) else { fprintf(stderr, - "%s: duration must be a positive integer (duration is \"%d\")\n", + "%s: duration must be a positive integer (duration is \"%d\")\n", progname, test_duration); fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); @@ -98,16 +98,22 @@ handle_args(int argc, char *argv[]) static void test_timing(int32 duration) { - uint64 total_time; - int64 time_elapsed = 0; - uint64 loop_count = 0; - uint64 prev, cur; - int32 diff, i, bits, found; - - instr_time start_time, end_time, temp; + uint64 total_time; + int64 time_elapsed = 0; + uint64 loop_count = 0; + uint64 prev, + cur; + int32 diff, + i, + bits, + found; + + instr_time start_time, + end_time, + temp; static int64 histogram[32]; - char buf[100]; + char buf[100]; total_time = duration > 0 ? duration * 1000000 : 0; @@ -146,7 +152,7 @@ test_timing(int32 duration) INSTR_TIME_SUBTRACT(end_time, start_time); printf("Per loop time including overhead: %0.2f nsec\n", - INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count); + INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count); printf("Histogram of timing durations:\n"); printf("%9s: %10s %9s\n", "< usec", "count", "percent"); diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c index 57bce01207..d59c8eb670 100644 --- a/contrib/pg_trgm/trgm_gist.c +++ b/contrib/pg_trgm/trgm_gist.c @@ -199,9 +199,9 @@ gtrgm_consistent(PG_FUNCTION_ARGS) * trigram extraction is relatively CPU-expensive. We must include * strategy number because trigram extraction depends on strategy. * - * The cached structure contains the strategy number, then the input - * query (starting at a MAXALIGN boundary), then the TRGM value (also - * starting at a MAXALIGN boundary). + * The cached structure contains the strategy number, then the input query + * (starting at a MAXALIGN boundary), then the TRGM value (also starting + * at a MAXALIGN boundary). 
*/ if (cache == NULL || strategy != *((StrategyNumber *) cache) || @@ -341,8 +341,7 @@ gtrgm_distance(PG_FUNCTION_ARGS) char *cache = (char *) fcinfo->flinfo->fn_extra; /* - * Cache the generated trigrams across multiple calls with the same - * query. + * Cache the generated trigrams across multiple calls with the same query. */ if (cache == NULL || VARSIZE(cache) != querysize || diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c index 2669c09658..eed4a1eba7 100644 --- a/contrib/pg_upgrade/check.c +++ b/contrib/pg_upgrade/check.c @@ -168,7 +168,7 @@ issue_warnings(char *sequence_script_file_name) SYSTEMQUOTE "\"%s/psql\" --echo-queries " "--set ON_ERROR_STOP=on " "--no-psqlrc --port %d --username \"%s\" " - "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE, + "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE, new_cluster.bindir, new_cluster.port, os_info.user, sequence_script_file_name, UTILITY_LOG_FILE); unlink(sequence_script_file_name); @@ -204,7 +204,7 @@ output_completion_banner(char *analyze_script_file_name, else pg_log(PG_REPORT, "Optimizer statistics and free space information are not transferred\n" - "by pg_upgrade so, once you start the new server, consider running:\n" + "by pg_upgrade so, once you start the new server, consider running:\n" " %s\n\n", analyze_script_file_name); pg_log(PG_REPORT, @@ -238,7 +238,8 @@ check_cluster_versions(void) /* * We can't allow downgrading because we use the target pg_dumpall, and - * pg_dumpall cannot operate on new database versions, only older versions. + * pg_dumpall cannot operate on new database versions, only older + * versions. */ if (old_cluster.major_version > new_cluster.major_version) pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n"); @@ -402,31 +403,31 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) #endif fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %swith increasing accuracy. When it is done, your system will%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %shave the default level of optimizer statistics.%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo\n\n"); fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo\n\n"); fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %sthis script and run:%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %s vacuumdb --all %s%s\n", ECHO_QUOTE, - /* Did we copy the free space files? */ - (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ? - "--analyze-only" : "--analyze", ECHO_QUOTE); + /* Did we copy the free space files? */ + (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ? 
+ "--analyze-only" : "--analyze", ECHO_QUOTE); fprintf(script, "echo\n\n"); #ifndef WIN32 @@ -441,15 +442,15 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) #endif fprintf(script, "echo %sGenerating minimal optimizer statistics (1 target)%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %s--------------------------------------------------%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "vacuumdb --all --analyze-only\n"); fprintf(script, "echo\n"); fprintf(script, "echo %sThe server is now available with minimal optimizer statistics.%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %sQuery performance will be optimal once this script completes.%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo\n\n"); #ifndef WIN32 @@ -462,9 +463,9 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) #endif fprintf(script, "echo %sGenerating medium optimizer statistics (10 targets)%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %s---------------------------------------------------%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "vacuumdb --all --analyze-only\n"); fprintf(script, "echo\n\n"); @@ -475,17 +476,17 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) #endif fprintf(script, "echo %sGenerating default (full) optimizer statistics (100 targets?)%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %s-------------------------------------------------------------%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "vacuumdb --all %s\n", - /* Did we copy the free space files? */ - (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ? - "--analyze-only" : "--analyze"); + /* Did we copy the free space files? */ + (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ? + "--analyze-only" : "--analyze"); fprintf(script, "echo\n\n"); fprintf(script, "echo %sDone%s\n", - ECHO_QUOTE, ECHO_QUOTE); + ECHO_QUOTE, ECHO_QUOTE); fclose(script); @@ -716,8 +717,8 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster) pg_log(PG_REPORT, "fatal\n"); pg_log(PG_FATAL, "Your installation contains \"contrib/isn\" functions which rely on the\n" - "bigint data type. Your old and new clusters pass bigint values\n" - "differently so this cluster cannot currently be upgraded. You can\n" + "bigint data type. Your old and new clusters pass bigint values\n" + "differently so this cluster cannot currently be upgraded. You can\n" "manually upgrade databases that use \"contrib/isn\" facilities and remove\n" "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n" "the problem functions is in the file:\n" @@ -764,9 +765,9 @@ check_for_reg_data_type_usage(ClusterInfo *cluster) PGconn *conn = connectToServer(cluster, active_db->db_name); /* - * While several relkinds don't store any data, e.g. views, they - * can be used to define data types of other columns, so we - * check all relkinds. + * While several relkinds don't store any data, e.g. views, they can + * be used to define data types of other columns, so we check all + * relkinds. 
*/ res = executeQueryOrDie(conn, "SELECT n.nspname, c.relname, a.attname " @@ -777,16 +778,16 @@ check_for_reg_data_type_usage(ClusterInfo *cluster) " NOT a.attisdropped AND " " a.atttypid IN ( " " 'pg_catalog.regproc'::pg_catalog.regtype, " - " 'pg_catalog.regprocedure'::pg_catalog.regtype, " + " 'pg_catalog.regprocedure'::pg_catalog.regtype, " " 'pg_catalog.regoper'::pg_catalog.regtype, " - " 'pg_catalog.regoperator'::pg_catalog.regtype, " + " 'pg_catalog.regoperator'::pg_catalog.regtype, " /* regclass.oid is preserved, so 'regclass' is OK */ /* regtype.oid is preserved, so 'regtype' is OK */ - " 'pg_catalog.regconfig'::pg_catalog.regtype, " - " 'pg_catalog.regdictionary'::pg_catalog.regtype) AND " - " c.relnamespace = n.oid AND " - " n.nspname != 'pg_catalog' AND " - " n.nspname != 'information_schema'"); + " 'pg_catalog.regconfig'::pg_catalog.regtype, " + " 'pg_catalog.regdictionary'::pg_catalog.regtype) AND " + " c.relnamespace = n.oid AND " + " n.nspname != 'pg_catalog' AND " + " n.nspname != 'information_schema'"); ntups = PQntuples(res); i_nspname = PQfnumber(res, "nspname"); @@ -822,8 +823,8 @@ check_for_reg_data_type_usage(ClusterInfo *cluster) pg_log(PG_REPORT, "fatal\n"); pg_log(PG_FATAL, "Your installation contains one of the reg* data types in user tables.\n" - "These data types reference system OIDs that are not preserved by\n" - "pg_upgrade, so this cluster cannot currently be upgraded. You can\n" + "These data types reference system OIDs that are not preserved by\n" + "pg_upgrade, so this cluster cannot currently be upgraded. You can\n" "remove the problem tables and restart the upgrade. A list of the problem\n" "columns is in the file:\n" " %s\n\n", output_path); @@ -836,9 +837,11 @@ check_for_reg_data_type_usage(ClusterInfo *cluster) static void get_bin_version(ClusterInfo *cluster) { - char cmd[MAXPGPATH], cmd_output[MAX_STRING]; + char cmd[MAXPGPATH], + cmd_output[MAX_STRING]; FILE *output; - int pre_dot, post_dot; + int pre_dot, + post_dot; snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir); @@ -858,4 +861,3 @@ get_bin_version(ClusterInfo *cluster) cluster->bin_version = (pre_dot * 100 + post_dot) * 100; } - diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c index e01280db9e..6bffe549e5 100644 --- a/contrib/pg_upgrade/controldata.c +++ b/contrib/pg_upgrade/controldata.c @@ -129,6 +129,7 @@ get_control_data(ClusterInfo *cluster, bool live_check) pg_log(PG_VERBOSE, "%s", bufin); #ifdef WIN32 + /* * Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does * work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a @@ -506,7 +507,7 @@ check_control_data(ControlData *oldctrl, * This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose */ pg_log(PG_FATAL, - "You will need to rebuild the new server with configure option\n" + "You will need to rebuild the new server with configure option\n" "--disable-integer-datetimes or get server binaries built with those\n" "options.\n"); } @@ -531,6 +532,6 @@ disable_old_cluster(void) pg_log(PG_REPORT, "\n" "If you want to start the old cluster, you will need to remove\n" "the \".old\" suffix from %s/global/pg_control.old.\n" - "Because \"link\" mode was used, the old cluster cannot be safely\n" - "started once the new cluster has been started.\n\n", old_cluster.pgdata); + "Because \"link\" mode was used, the old cluster cannot be safely\n" + "started once the new cluster has been started.\n\n", old_cluster.pgdata); } diff --git a/contrib/pg_upgrade/exec.c 
b/contrib/pg_upgrade/exec.c index 68cf0795aa..9e63bd5856 100644 --- a/contrib/pg_upgrade/exec.c +++ b/contrib/pg_upgrade/exec.c @@ -18,8 +18,9 @@ static void check_data_dir(const char *pg_data); static void check_bin_dir(ClusterInfo *cluster); static void validate_exec(const char *dir, const char *cmdName); + #ifdef WIN32 -static int win32_check_directory_write_permissions(void); +static int win32_check_directory_write_permissions(void); #endif @@ -64,7 +65,7 @@ exec_prog(bool throw_error, bool is_priv, pg_log(throw_error ? PG_FATAL : PG_REPORT, "Consult the last few lines of \"%s\" for\n" "the probable cause of the failure.\n", - log_file); + log_file); return 1; } @@ -142,12 +143,12 @@ verify_directories(void) static int win32_check_directory_write_permissions(void) { - int fd; + int fd; /* - * We open a file we would normally create anyway. We do this even in - * 'check' mode, which isn't ideal, but this is the best we can do. - */ + * We open a file we would normally create anyway. We do this even in + * 'check' mode, which isn't ideal, but this is the best we can do. + */ if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0) return -1; close(fd); @@ -184,7 +185,7 @@ check_data_dir(const char *pg_data) struct stat statBuf; snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data, - /* Win32 can't stat() a directory with a trailing slash. */ + /* Win32 can't stat() a directory with a trailing slash. */ *requiredSubdirs[subdirnum] ? "/" : "", requiredSubdirs[subdirnum]); diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c index 0276636e03..1dd3722142 100644 --- a/contrib/pg_upgrade/file.c +++ b/contrib/pg_upgrade/file.c @@ -233,7 +233,7 @@ copy_file(const char *srcfile, const char *dstfile, bool force) * large number of times. */ int -load_directory(const char *dirname, struct dirent ***namelist) +load_directory(const char *dirname, struct dirent *** namelist) { DIR *dirdesc; struct dirent *direntry; @@ -251,7 +251,7 @@ load_directory(const char *dirname, struct dirent ***namelist) count++; *namelist = (struct dirent **) realloc((void *) (*namelist), - (size_t) ((name_num + 1) * sizeof(struct dirent *))); + (size_t) ((name_num + 1) * sizeof(struct dirent *))); if (*namelist == NULL) { @@ -314,7 +314,6 @@ win32_pghardlink(const char *src, const char *dst) else return 0; } - #endif @@ -322,13 +321,11 @@ win32_pghardlink(const char *src, const char *dst) FILE * fopen_priv(const char *path, const char *mode) { - mode_t old_umask = umask(S_IRWXG | S_IRWXO); - FILE *fp; + mode_t old_umask = umask(S_IRWXG | S_IRWXO); + FILE *fp; fp = fopen(path, mode); umask(old_umask); return fp; } - - diff --git a/contrib/pg_upgrade/function.c b/contrib/pg_upgrade/function.c index b4b17badb2..77bd3a0359 100644 --- a/contrib/pg_upgrade/function.c +++ b/contrib/pg_upgrade/function.c @@ -133,7 +133,7 @@ get_loadable_libraries(void) int totaltups; int dbnum; bool found_public_plpython_handler = false; - + ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *)); totaltups = 0; @@ -144,10 +144,10 @@ get_loadable_libraries(void) PGconn *conn = connectToServer(&old_cluster, active_db->db_name); /* - * Fetch all libraries referenced in this DB. We can't exclude - * the "pg_catalog" schema because, while such functions are not - * explicitly dumped by pg_dump, they do reference implicit objects - * that pg_dump does dump, e.g. CREATE LANGUAGE plperl. + * Fetch all libraries referenced in this DB. 
We can't exclude the + * "pg_catalog" schema because, while such functions are not + * explicitly dumped by pg_dump, they do reference implicit objects + * that pg_dump does dump, e.g. CREATE LANGUAGE plperl. */ ress[dbnum] = executeQueryOrDie(conn, "SELECT DISTINCT probin " @@ -158,26 +158,26 @@ get_loadable_libraries(void) FirstNormalObjectId); totaltups += PQntuples(ress[dbnum]); - /* - * Systems that install plpython before 8.1 have - * plpython_call_handler() defined in the "public" schema, causing - * pg_dumpall to dump it. However that function still references - * "plpython" (no "2"), so it throws an error on restore. This code - * checks for the problem function, reports affected databases to the - * user and explains how to remove them. - * 8.1 git commit: e0dedd0559f005d60c69c9772163e69c204bac69 - * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php - * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php - */ + /* + * Systems that install plpython before 8.1 have + * plpython_call_handler() defined in the "public" schema, causing + * pg_dumpall to dump it. However that function still references + * "plpython" (no "2"), so it throws an error on restore. This code + * checks for the problem function, reports affected databases to the + * user and explains how to remove them. 8.1 git commit: + * e0dedd0559f005d60c69c9772163e69c204bac69 + * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php + * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php + */ if (GET_MAJOR_VERSION(old_cluster.major_version) < 901) { - PGresult *res; + PGresult *res; res = executeQueryOrDie(conn, "SELECT 1 " - "FROM pg_catalog.pg_proc JOIN pg_namespace " - " ON pronamespace = pg_namespace.oid " - "WHERE proname = 'plpython_call_handler' AND " + "FROM pg_catalog.pg_proc JOIN pg_namespace " + " ON pronamespace = pg_namespace.oid " + "WHERE proname = 'plpython_call_handler' AND " "nspname = 'public' AND " "prolang = 13 /* C */ AND " "probin = '$libdir/plpython' AND " @@ -188,23 +188,23 @@ get_loadable_libraries(void) if (!found_public_plpython_handler) { pg_log(PG_WARNING, - "\nThe old cluster has a \"plpython_call_handler\" function defined\n" - "in the \"public\" schema which is a duplicate of the one defined\n" - "in the \"pg_catalog\" schema. You can confirm this by executing\n" - "in psql:\n" - "\n" - " \\df *.plpython_call_handler\n" - "\n" - "The \"public\" schema version of this function was created by a\n" - "pre-8.1 install of plpython, and must be removed for pg_upgrade\n" - "to complete because it references a now-obsolete \"plpython\"\n" - "shared object file. You can remove the \"public\" schema version\n" - "of this function by running the following command:\n" - "\n" - " DROP FUNCTION public.plpython_call_handler()\n" - "\n" - "in each affected database:\n" - "\n"); + "\nThe old cluster has a \"plpython_call_handler\" function defined\n" + "in the \"public\" schema which is a duplicate of the one defined\n" + "in the \"pg_catalog\" schema. You can confirm this by executing\n" + "in psql:\n" + "\n" + " \\df *.plpython_call_handler\n" + "\n" + "The \"public\" schema version of this function was created by a\n" + "pre-8.1 install of plpython, and must be removed for pg_upgrade\n" + "to complete because it references a now-obsolete \"plpython\"\n" + "shared object file. 
You can remove the \"public\" schema version\n" + "of this function by running the following command:\n" + "\n" + " DROP FUNCTION public.plpython_call_handler()\n" + "\n" + "in each affected database:\n" + "\n"); } pg_log(PG_WARNING, " %s\n", active_db->db_name); found_public_plpython_handler = true; @@ -217,9 +217,9 @@ get_loadable_libraries(void) if (found_public_plpython_handler) pg_log(PG_FATAL, - "Remove the problem functions from the old cluster to continue.\n"); - - totaltups++; /* reserve for pg_upgrade_support */ + "Remove the problem functions from the old cluster to continue.\n"); + + totaltups++; /* reserve for pg_upgrade_support */ /* Allocate what's certainly enough space */ os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *)); @@ -293,17 +293,17 @@ check_loadable_libraries(void) PGresult *res; /* - * In Postgres 9.0, Python 3 support was added, and to do that, a - * plpython2u language was created with library name plpython2.so - * as a symbolic link to plpython.so. In Postgres 9.1, only the - * plpython2.so library was created, and both plpythonu and - * plpython2u pointing to it. For this reason, any reference to - * library name "plpython" in an old PG <= 9.1 cluster must look - * for "plpython2" in the new cluster. + * In Postgres 9.0, Python 3 support was added, and to do that, a + * plpython2u language was created with library name plpython2.so as a + * symbolic link to plpython.so. In Postgres 9.1, only the + * plpython2.so library was created, and both plpythonu and plpython2u + * pointing to it. For this reason, any reference to library name + * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in + * the new cluster. * - * For this case, we could check pg_pltemplate, but that only works - * for languages, and does not help with function shared objects, - * so we just do a general fix. + * For this case, we could check pg_pltemplate, but that only works + * for languages, and does not help with function shared objects, so + * we just do a general fix. */ if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 && strcmp(lib, "$libdir/plpython") == 0) @@ -325,7 +325,7 @@ check_loadable_libraries(void) /* exit and report missing support library with special message */ if (strcmp(lib, PG_UPGRADE_SUPPORT) == 0) pg_log(PG_FATAL, - "The pg_upgrade_support module must be created and installed in the new cluster.\n"); + "The pg_upgrade_support module must be created and installed in the new cluster.\n"); if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) pg_log(PG_FATAL, "Could not open file \"%s\": %s\n", diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c index 5b2b9eb28c..74b13e782d 100644 --- a/contrib/pg_upgrade/info.c +++ b/contrib/pg_upgrade/info.c @@ -57,12 +57,12 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, old_db->db_name, old_rel->reloid, new_rel->reloid); /* - * TOAST table names initially match the heap pg_class oid. - * In pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, - * TOAST table names change during ALTER TABLE ALTER COLUMN SET TYPE. - * In >= 9.0, TOAST relation names always use heap table oids, hence - * we cannot check relation names when upgrading from pre-9.0. - * Clusters upgraded to 9.0 will get matching TOAST names. + * TOAST table names initially match the heap pg_class oid. In + * pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST + * table names change during ALTER TABLE ALTER COLUMN SET TYPE. 
In >= + * 9.0, TOAST relation names always use heap table oids, hence we + * cannot check relation names when upgrading from pre-9.0. Clusters + * upgraded to 9.0 will get matching TOAST names. */ if (strcmp(old_rel->nspname, new_rel->nspname) != 0 || ((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 || @@ -194,16 +194,16 @@ get_db_infos(ClusterInfo *cluster) char query[QUERY_ALLOC]; snprintf(query, sizeof(query), - "SELECT d.oid, d.datname, %s " - "FROM pg_catalog.pg_database d " - " LEFT OUTER JOIN pg_catalog.pg_tablespace t " - " ON d.dattablespace = t.oid " - "WHERE d.datallowconn = true " + "SELECT d.oid, d.datname, %s " + "FROM pg_catalog.pg_database d " + " LEFT OUTER JOIN pg_catalog.pg_tablespace t " + " ON d.dattablespace = t.oid " + "WHERE d.datallowconn = true " /* we don't preserve pg_database.oid so we sort by name */ - "ORDER BY 2", + "ORDER BY 2", /* 9.2 removed the spclocation column */ - (GET_MAJOR_VERSION(cluster->major_version) <= 901) ? - "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation"); + (GET_MAJOR_VERSION(cluster->major_version) <= 901) ? + "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation"); res = executeQueryOrDie(conn, "%s", query); @@ -276,7 +276,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) " LEFT OUTER JOIN pg_catalog.pg_tablespace t " " ON c.reltablespace = t.oid " "WHERE relkind IN ('r','t', 'i'%s) AND " - /* exclude possible orphaned temp tables */ + /* exclude possible orphaned temp tables */ " ((n.nspname !~ '^pg_temp_' AND " " n.nspname !~ '^pg_toast_temp_' AND " " n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND " diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c index 66a70cac8a..ccf00434d3 100644 --- a/contrib/pg_upgrade/option.c +++ b/contrib/pg_upgrade/option.c @@ -56,10 +56,10 @@ parseCommandLine(int argc, char *argv[]) int option; /* Command line option */ int optindex = 0; /* used by getopt_long */ int os_user_effective_id; - FILE *fp; - char **filename; + FILE *fp; + char **filename; time_t run_time = time(NULL); - + user_opts.transfer_mode = TRANSFER_MODE_COPY; os_info.progname = get_progname(argv[0]); @@ -138,11 +138,11 @@ parseCommandLine(int argc, char *argv[]) new_cluster.pgopts = pg_strdup(optarg); break; - /* - * Someday, the port number option could be removed and - * passed using -o/-O, but that requires postmaster -C - * to be supported on all old/new versions. - */ + /* + * Someday, the port number option could be removed and passed + * using -o/-O, but that requires postmaster -C to be + * supported on all old/new versions. + */ case 'p': if ((old_cluster.port = atoi(optarg)) <= 0) { @@ -196,21 +196,21 @@ parseCommandLine(int argc, char *argv[]) /* Start with newline because we might be appending to a file. 
*/ fprintf(fp, "\n" "-----------------------------------------------------------------\n" - " pg_upgrade run on %s" - "-----------------------------------------------------------------\n\n", - ctime(&run_time)); + " pg_upgrade run on %s" + "-----------------------------------------------------------------\n\n", + ctime(&run_time)); fclose(fp); } /* Get values from env if not already set */ check_required_directory(&old_cluster.bindir, "PGBINOLD", "-b", - "old cluster binaries reside"); + "old cluster binaries reside"); check_required_directory(&new_cluster.bindir, "PGBINNEW", "-B", - "new cluster binaries reside"); + "new cluster binaries reside"); check_required_directory(&old_cluster.pgdata, "PGDATAOLD", "-d", - "old cluster data resides"); + "old cluster data resides"); check_required_directory(&new_cluster.pgdata, "PGDATANEW", "-D", - "new cluster data resides"); + "new cluster data resides"); } @@ -285,7 +285,7 @@ or\n"), old_cluster.port, new_cluster.port, os_info.user); */ static void check_required_directory(char **dirpath, char *envVarName, - char *cmdLineOption, char *description) + char *cmdLineOption, char *description) { if (*dirpath == NULL || strlen(*dirpath) == 0) { @@ -322,8 +322,10 @@ void adjust_data_dir(ClusterInfo *cluster) { char filename[MAXPGPATH]; - char cmd[MAXPGPATH], cmd_output[MAX_STRING]; - FILE *fp, *output; + char cmd[MAXPGPATH], + cmd_output[MAX_STRING]; + FILE *fp, + *output; /* If there is no postgresql.conf, it can't be a config-only dir */ snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig); @@ -345,10 +347,9 @@ adjust_data_dir(ClusterInfo *cluster) CLUSTER_NAME(cluster)); /* - * We don't have a data directory yet, so we can't check the PG - * version, so this might fail --- only works for PG 9.2+. If this - * fails, pg_upgrade will fail anyway because the data files will not - * be found. + * We don't have a data directory yet, so we can't check the PG version, + * so this might fail --- only works for PG 9.2+. If this fails, + * pg_upgrade will fail anyway because the data files will not be found. */ snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory", cluster->bindir, cluster->pgconfig); @@ -356,7 +357,7 @@ adjust_data_dir(ClusterInfo *cluster) if ((output = popen(cmd, "r")) == NULL || fgets(cmd_output, sizeof(cmd_output), output) == NULL) pg_log(PG_FATAL, "Could not get data directory using %s: %s\n", - cmd, getErrorText(errno)); + cmd, getErrorText(errno)); pclose(output); diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c index 465ecdd6b3..3537fc2bd0 100644 --- a/contrib/pg_upgrade/pg_upgrade.c +++ b/contrib/pg_upgrade/pg_upgrade.c @@ -55,7 +55,7 @@ ClusterInfo old_cluster, new_cluster; OSInfo os_info; -char *output_files[] = { +char *output_files[] = { SERVER_LOG_FILE, #ifdef WIN32 /* unique file for pg_ctl start */ @@ -122,11 +122,10 @@ main(int argc, char **argv) stop_postmaster(false); /* - * Most failures happen in create_new_objects(), which has - * completed at this point. We do this here because it is just - * before linking, which will link the old and new cluster data - * files, preventing the old cluster from being safely started - * once the new cluster is started. + * Most failures happen in create_new_objects(), which has completed at + * this point. We do this here because it is just before linking, which + * will link the old and new cluster data files, preventing the old + * cluster from being safely started once the new cluster is started. 
*/ if (user_opts.transfer_mode == TRANSFER_MODE_LINK) disable_old_cluster(); @@ -215,8 +214,8 @@ prepare_new_cluster(void) exec_prog(true, true, UTILITY_LOG_FILE, SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" " "--all --analyze %s >> \"%s\" 2>&1" SYSTEMQUOTE, - new_cluster.bindir, new_cluster.port, os_info.user, - log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE); + new_cluster.bindir, new_cluster.port, os_info.user, + log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE); check_ok(); /* @@ -229,8 +228,8 @@ prepare_new_cluster(void) exec_prog(true, true, UTILITY_LOG_FILE, SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" " "--all --freeze %s >> \"%s\" 2>&1" SYSTEMQUOTE, - new_cluster.bindir, new_cluster.port, os_info.user, - log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE); + new_cluster.bindir, new_cluster.port, os_info.user, + log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE); check_ok(); get_pg_database_relfilenode(&new_cluster); @@ -252,8 +251,8 @@ prepare_new_databases(void) /* * Install support functions in the global-object restore database to - * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template - * database so objects we add into 'template1' are not propogated. They + * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template + * database so objects we add into 'template1' are not propogated. They * are removed on pg_upgrade exit. */ install_support_functions_in_new_db("template1"); @@ -267,7 +266,7 @@ prepare_new_databases(void) exec_prog(true, true, RESTORE_LOG_FILE, SYSTEMQUOTE "\"%s/psql\" --echo-queries " "--set ON_ERROR_STOP=on " - /* --no-psqlrc prevents AUTOCOMMIT=off */ + /* --no-psqlrc prevents AUTOCOMMIT=off */ "--no-psqlrc --port %d --username \"%s\" " "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE, new_cluster.bindir, new_cluster.port, os_info.user, @@ -453,13 +452,13 @@ set_frozenxids(void) static void cleanup(void) { - + fclose(log_opts.internal); /* Remove dump and log files? */ if (!log_opts.retain) { - char **filename; + char **filename; for (filename = output_files; *filename != NULL; filename++) unlink(*filename); diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h index 26aa7bb1d2..d12590ac6b 100644 --- a/contrib/pg_upgrade/pg_upgrade.h +++ b/contrib/pg_upgrade/pg_upgrade.h @@ -75,7 +75,7 @@ extern char *output_files[]; #define RM_CMD "rm -f" #define RMDIR_CMD "rm -rf" #define SCRIPT_EXT "sh" -#define ECHO_QUOTE "'" +#define ECHO_QUOTE "'" #else #define pg_copy_file CopyFile #define pg_mv_file pgrename @@ -85,7 +85,7 @@ extern char *output_files[]; #define RMDIR_CMD "RMDIR /s/q" #define SCRIPT_EXT "bat" #define EXE_EXT ".exe" -#define ECHO_QUOTE "" +#define ECHO_QUOTE "" #endif #define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \ @@ -98,7 +98,7 @@ extern char *output_files[]; /* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1 development */ #define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251 /* - * Visibility map changed with this 9.2 commit, + * Visibility map changed with this 9.2 commit, * 8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version. 
*/ #define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031 @@ -114,7 +114,7 @@ typedef struct Oid reloid; /* relation oid */ Oid relfilenode; /* relation relfile node */ /* relation tablespace path, or "" for the cluster default */ - char tablespace[MAXPGPATH]; + char tablespace[MAXPGPATH]; } RelInfo; typedef struct @@ -222,9 +222,11 @@ typedef struct ControlData controldata; /* pg_control information */ DbInfoArr dbarr; /* dbinfos array */ char *pgdata; /* pathname for cluster's $PGDATA directory */ - char *pgconfig; /* pathname for cluster's config file directory */ + char *pgconfig; /* pathname for cluster's config file + * directory */ char *bindir; /* pathname for cluster's executable directory */ - char *pgopts; /* options to pass to the server, like pg_ctl -o */ + char *pgopts; /* options to pass to the server, like pg_ctl + * -o */ unsigned short port; /* port number where postmaster is waiting */ uint32 major_version; /* PG_VERSION of cluster */ char major_version_str[64]; /* string PG_VERSION of cluster */ @@ -291,8 +293,8 @@ void check_old_cluster(bool live_check, void check_new_cluster(void); void report_clusters_compatible(void); void issue_warnings(char *sequence_script_file_name); -void output_completion_banner(char *analyze_script_file_name, - char *deletion_script_file_name); +void output_completion_banner(char *analyze_script_file_name, + char *deletion_script_file_name); void check_cluster_versions(void); void check_cluster_compatibility(bool live_check); void create_script_for_old_cluster_deletion(char **deletion_script_file_name); @@ -314,9 +316,10 @@ void split_old_dump(void); /* exec.c */ -int exec_prog(bool throw_error, bool is_priv, - const char *log_file, const char *cmd, ...) - __attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5))); +int +exec_prog(bool throw_error, bool is_priv, + const char *log_file, const char *cmd,...) +__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5))); void verify_directories(void); bool is_server_running(const char *datadir); @@ -353,14 +356,14 @@ const char *setupPageConverter(pageCnvCtx **result); typedef void *pageCnvCtx; #endif -int load_directory(const char *dirname, struct dirent ***namelist); +int load_directory(const char *dirname, struct dirent *** namelist); const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src, const char *dst, bool force); const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src, const char *dst); void check_hard_link(void); -FILE *fopen_priv(const char *path, const char *mode); +FILE *fopen_priv(const char *path, const char *mode); /* function.c */ @@ -399,8 +402,9 @@ void init_tablespaces(void); /* server.c */ PGconn *connectToServer(ClusterInfo *cluster, const char *db_name); -PGresult *executeQueryOrDie(PGconn *conn, const char *fmt, ...) - __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); +PGresult * +executeQueryOrDie(PGconn *conn, const char *fmt,...) +__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); void start_postmaster(ClusterInfo *cluster); void stop_postmaster(bool fast); @@ -413,12 +417,15 @@ void check_pghost_envvar(void); char *quote_identifier(const char *s); int get_user_info(char **user_name); void check_ok(void); -void report_status(eLogType type, const char *fmt, ...) - __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); -void pg_log(eLogType type, char *fmt, ...) - __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); -void prep_status(const char *fmt, ...) 
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); +void +report_status(eLogType type, const char *fmt,...) +__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); +void +pg_log(eLogType type, char *fmt,...) +__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); +void +prep_status(const char *fmt,...) +__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); void check_ok(void); char *pg_strdup(const char *s); void *pg_malloc(int size); diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c index 45d6c5415b..3509585de7 100644 --- a/contrib/pg_upgrade/relfilenode.c +++ b/contrib/pg_upgrade/relfilenode.c @@ -34,26 +34,28 @@ const char * transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata) { - int old_dbnum, new_dbnum; + int old_dbnum, + new_dbnum; const char *msg = NULL; prep_status("%s user relation files\n", - user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying"); + user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying"); /* Scan the old cluster databases and transfer their files */ for (old_dbnum = new_dbnum = 0; old_dbnum < old_db_arr->ndbs; old_dbnum++, new_dbnum++) { - DbInfo *old_db = &old_db_arr->dbs[old_dbnum], *new_db = NULL; + DbInfo *old_db = &old_db_arr->dbs[old_dbnum], + *new_db = NULL; FileNameMap *mappings; int n_maps; pageCnvCtx *pageConverter = NULL; /* - * Advance past any databases that exist in the new cluster - * but not in the old, e.g. "postgres". (The user might - * have removed the 'postgres' database from the old cluster.) + * Advance past any databases that exist in the new cluster but not in + * the old, e.g. "postgres". (The user might have removed the + * 'postgres' database from the old cluster.) */ for (; new_dbnum < new_db_arr->ndbs; new_dbnum++) { @@ -83,8 +85,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, } } - prep_status(" "); /* in case nothing printed; pass a space so gcc - * doesn't complain about empty format + prep_status(" "); /* in case nothing printed; pass a space so + * gcc doesn't complain about empty format * string */ check_ok(); @@ -137,14 +139,14 @@ transfer_single_new_db(pageCnvCtx *pageConverter, int mapnum; int fileno; bool vm_crashsafe_change = false; - + old_dir[0] = '\0'; /* Do not copy non-crashsafe vm files for binaries that assume crashsafety */ if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER && new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER) vm_crashsafe_change = true; - + for (mapnum = 0; mapnum < size; mapnum++) { char old_file[MAXPGPATH]; @@ -190,8 +192,8 @@ transfer_single_new_db(pageCnvCtx *pageConverter, for (fileno = 0; fileno < numFiles; fileno++) { - char *vm_offset = strstr(namelist[fileno]->d_name, "_vm"); - bool is_vm_file = false; + char *vm_offset = strstr(namelist[fileno]->d_name, "_vm"); + bool is_vm_file = false; /* Is a visibility map file? 
(name ends with _vm) */ if (vm_offset && strlen(vm_offset) == strlen("_vm")) diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c index f557453df2..f83d6fa866 100644 --- a/contrib/pg_upgrade/server.c +++ b/contrib/pg_upgrade/server.c @@ -161,7 +161,7 @@ start_postmaster(ClusterInfo *cluster) snprintf(cmd, sizeof(cmd), SYSTEMQUOTE "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" " "-o \"-p %d %s %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE, - cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port, + cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port, (cluster->controldata.cat_ver >= BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" : "-c autovacuum=off -c autovacuum_freeze_max_age=2000000000", @@ -172,11 +172,11 @@ start_postmaster(ClusterInfo *cluster) * it might supply a reason for the failure. */ pg_ctl_return = exec_prog(false, true, - /* pass both file names if the differ */ - (strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ? - SERVER_LOG_FILE : - SERVER_LOG_FILE " or " SERVER_START_LOG_FILE, - "%s", cmd); + /* pass both file names if the differ */ + (strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ? + SERVER_LOG_FILE : + SERVER_LOG_FILE " or " SERVER_START_LOG_FILE, + "%s", cmd); /* Check to see if we can connect to the server; if not, report it. */ if ((conn = get_db_conn(cluster, "template1")) == NULL || @@ -211,14 +211,14 @@ stop_postmaster(bool fast) else if (os_info.running_cluster == &new_cluster) cluster = &new_cluster; else - return; /* no cluster running */ + return; /* no cluster running */ snprintf(cmd, sizeof(cmd), SYSTEMQUOTE "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" " "%s stop >> \"%s\" 2>&1" SYSTEMQUOTE, cluster->bindir, cluster->pgconfig, cluster->pgopts ? cluster->pgopts : "", - fast ? "-m fast" : "", SERVER_STOP_LOG_FILE); + fast ? "-m fast" : "", SERVER_STOP_LOG_FILE); exec_prog(fast ? false : true, true, SERVER_STOP_LOG_FILE, "%s", cmd); diff --git a/contrib/pg_upgrade/tablespace.c b/contrib/pg_upgrade/tablespace.c index 6b61f4bac1..b783b6251e 100644 --- a/contrib/pg_upgrade/tablespace.c +++ b/contrib/pg_upgrade/tablespace.c @@ -52,8 +52,8 @@ get_tablespace_paths(void) "WHERE spcname != 'pg_default' AND " " spcname != 'pg_global'", /* 9.2 removed the spclocation column */ - (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ? - "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation"); + (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ? 
+ "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation"); res = executeQueryOrDie(conn, "%s", query); diff --git a/contrib/pg_upgrade/version_old_8_3.c b/contrib/pg_upgrade/version_old_8_3.c index 542425c7c9..b681c0984e 100644 --- a/contrib/pg_upgrade/version_old_8_3.c +++ b/contrib/pg_upgrade/version_old_8_3.c @@ -60,10 +60,10 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster) " NOT a.attisdropped AND " " a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND " " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ + /* exclude possible orphaned temp tables */ " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); + " n.nspname !~ '^pg_toast_temp_' AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema')"); ntups = PQntuples(res); i_nspname = PQfnumber(res, "nspname"); @@ -98,9 +98,9 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster) pg_log(PG_REPORT, "fatal\n"); pg_log(PG_FATAL, "Your installation contains the \"name\" data type in user tables. This\n" - "data type changed its internal alignment between your old and new\n" + "data type changed its internal alignment between your old and new\n" "clusters so this cluster cannot currently be upgraded. You can remove\n" - "the problem tables and restart the upgrade. A list of the problem\n" + "the problem tables and restart the upgrade. A list of the problem\n" "columns is in the file:\n" " %s\n\n", output_path); } @@ -150,10 +150,10 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster) " NOT a.attisdropped AND " " a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND " " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ + /* exclude possible orphaned temp tables */ " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); + " n.nspname !~ '^pg_toast_temp_' AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema')"); ntups = PQntuples(res); i_nspname = PQfnumber(res, "nspname"); @@ -189,7 +189,7 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster) pg_log(PG_FATAL, "Your installation contains the \"tsquery\" data type. This data type\n" "added a new internal field between your old and new clusters so this\n" - "cluster cannot currently be upgraded. You can remove the problem\n" + "cluster cannot currently be upgraded. You can remove the problem\n" "columns and restart the upgrade. A list of the problem columns is in the\n" "file:\n" " %s\n\n", output_path); @@ -328,10 +328,10 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode) " NOT a.attisdropped AND " " a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND " " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ + /* exclude possible orphaned temp tables */ " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); + " n.nspname !~ '^pg_toast_temp_' AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema')"); /* * This macro is used below to avoid reindexing indexes already rebuilt @@ -527,7 +527,7 @@ old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode) "must be reindexed with the REINDEX command. 
The file:\n" " %s\n" "when executed by psql by the database superuser will recreate all invalid\n" - "indexes; until then, none of these indexes will be used.\n\n", + "indexes; until then, none of these indexes will be used.\n\n", output_path); } else @@ -648,10 +648,10 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster, pg_log(PG_WARNING, "\n" "Your installation contains indexes using \"bpchar_pattern_ops\". These\n" "indexes have different internal formats between your old and new clusters\n" - "so they must be reindexed with the REINDEX command. The file:\n" + "so they must be reindexed with the REINDEX command. The file:\n" " %s\n" "when executed by psql by the database superuser will recreate all invalid\n" - "indexes; until then, none of these indexes will be used.\n\n", + "indexes; until then, none of these indexes will be used.\n\n", output_path); } else @@ -699,10 +699,10 @@ old_8_3_create_sequence_script(ClusterInfo *cluster) " pg_catalog.pg_namespace n " "WHERE c.relkind = 'S' AND " " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ + /* exclude possible orphaned temp tables */ " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); + " n.nspname !~ '^pg_toast_temp_' AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema')"); ntups = PQntuples(res); i_nspname = PQfnumber(res, "nspname"); diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c index b0e699187b..25fb15a847 100644 --- a/contrib/pgbench/pgbench.c +++ b/contrib/pgbench/pgbench.c @@ -66,7 +66,7 @@ typedef struct win32_pthread *pthread_t; typedef int pthread_attr_t; -static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg); +static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); static int pthread_join(pthread_t th, void **thread_return); #elif defined(ENABLE_THREAD_SAFETY) /* Use platform-dependent pthread capability */ @@ -84,7 +84,7 @@ static int pthread_join(pthread_t th, void **thread_return); typedef struct fork_pthread *pthread_t; typedef int pthread_attr_t; -static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg); +static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); static int pthread_join(pthread_t th, void **thread_return); #endif @@ -198,7 +198,7 @@ typedef struct instr_time start_time; /* thread start time */ instr_time *exec_elapsed; /* time spent executing cmds (per Command) */ int *exec_count; /* number of cmd executions (per Command) */ - unsigned short random_state[3]; /* separate randomness for each thread */ + unsigned short random_state[3]; /* separate randomness for each thread */ } TState; #define INVALID_THREAD ((pthread_t) 0) @@ -1075,7 +1075,7 @@ top: /* * getrand() neeeds to be able to subtract max from min and add - * one the result without overflowing. Since we know max > min, + * one the result without overflowing. Since we know max > min, * we can detect overflow just by checking for a negative result. * But we must check both that the subtraction doesn't overflow, * and that adding one to the result doesn't overflow either. @@ -1267,10 +1267,11 @@ init(void) * versions. Since pgbench has never pretended to be fully TPC-B * compliant anyway, we stick with the historical behavior. 
*/ - struct ddlinfo { - char *table; - char *cols; - int declare_fillfactor; + struct ddlinfo + { + char *table; + char *cols; + int declare_fillfactor; }; struct ddlinfo DDLs[] = { { @@ -1321,15 +1322,16 @@ init(void) /* Construct new create table statement. */ opts[0] = '\0'; if (ddl->declare_fillfactor) - snprintf(opts+strlen(opts), 256-strlen(opts), - " with (fillfactor=%d)", fillfactor); + snprintf(opts + strlen(opts), 256 - strlen(opts), + " with (fillfactor=%d)", fillfactor); if (tablespace != NULL) { - char *escape_tablespace; + char *escape_tablespace; + escape_tablespace = PQescapeIdentifier(con, tablespace, strlen(tablespace)); - snprintf(opts+strlen(opts), 256-strlen(opts), - " tablespace %s", escape_tablespace); + snprintf(opts + strlen(opts), 256 - strlen(opts), + " tablespace %s", escape_tablespace); PQfreemem(escape_tablespace); } snprintf(buffer, 256, "create%s table %s(%s)%s", @@ -1404,17 +1406,18 @@ init(void) fprintf(stderr, "set primary key...\n"); for (i = 0; i < lengthof(DDLAFTERs); i++) { - char buffer[256]; + char buffer[256]; strncpy(buffer, DDLAFTERs[i], 256); if (index_tablespace != NULL) { - char *escape_tablespace; + char *escape_tablespace; + escape_tablespace = PQescapeIdentifier(con, index_tablespace, strlen(index_tablespace)); - snprintf(buffer+strlen(buffer), 256-strlen(buffer), - " using index tablespace %s", escape_tablespace); + snprintf(buffer + strlen(buffer), 256 - strlen(buffer), + " using index tablespace %s", escape_tablespace); PQfreemem(escape_tablespace); } @@ -1861,10 +1864,10 @@ main(int argc, char **argv) int i; static struct option long_options[] = { - {"index-tablespace", required_argument, NULL, 3}, - {"tablespace", required_argument, NULL, 2}, - {"unlogged-tables", no_argument, &unlogged_tables, 1}, - {NULL, 0, NULL, 0} + {"index-tablespace", required_argument, NULL, 3}, + {"tablespace", required_argument, NULL, 2}, + {"unlogged-tables", no_argument, &unlogged_tables, 1}, + {NULL, 0, NULL, 0} }; #ifdef HAVE_GETRLIMIT @@ -2065,10 +2068,10 @@ main(int argc, char **argv) case 0: /* This covers long options which take no argument. */ break; - case 2: /* tablespace */ + case 2: /* tablespace */ tablespace = optarg; break; - case 3: /* index-tablespace */ + case 3: /* index-tablespace */ index_tablespace = optarg; break; default: @@ -2571,7 +2574,7 @@ typedef struct fork_pthread static int pthread_create(pthread_t *thread, - pthread_attr_t * attr, + pthread_attr_t *attr, void *(*start_routine) (void *), void *arg) { @@ -2687,7 +2690,7 @@ win32_pthread_run(void *arg) static int pthread_create(pthread_t *thread, - pthread_attr_t * attr, + pthread_attr_t *attr, void *(*start_routine) (void *), void *arg) { diff --git a/contrib/pgcrypto/crypt-md5.c b/contrib/pgcrypto/crypt-md5.c index 6c7a2b329e..2a5cd70208 100644 --- a/contrib/pgcrypto/crypt-md5.c +++ b/contrib/pgcrypto/crypt-md5.c @@ -34,8 +34,8 @@ char * px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen) { static char *magic = "$1$"; /* This string is magic for this algorithm. 
- * Having it this way, we can get better - * later on */ + * Having it this way, we can get better later + * on */ static char *p; static const char *sp, *ep; diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h index 610b7fad78..80e8624460 100644 --- a/contrib/pgcrypto/px.h +++ b/contrib/pgcrypto/px.h @@ -204,8 +204,9 @@ const char *px_resolve_alias(const PX_Alias *aliases, const char *name); void px_set_debug_handler(void (*handler) (const char *)); #ifdef PX_DEBUG -void px_debug(const char *fmt, ...) - __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); +void +px_debug(const char *fmt,...) +__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); #else #define px_debug(...) #endif diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c index 9f2ec1f210..d4fc8a0fd6 100644 --- a/contrib/pgstattuple/pgstatindex.c +++ b/contrib/pgstattuple/pgstatindex.c @@ -95,7 +95,7 @@ pgstatindex(PG_FUNCTION_ARGS) BlockNumber nblocks; BlockNumber blkno; BTIndexStat indexStat; - BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD); + BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD); if (!superuser()) ereport(ERROR, @@ -160,7 +160,7 @@ pgstatindex(PG_FUNCTION_ARGS) CHECK_FOR_INTERRUPTS(); /* Read and lock buffer */ - buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy); + buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); page = BufferGetPage(buffer); diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index c9be8c92e4..2b62b78506 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -62,7 +62,7 @@ typedef struct pgstattuple_type } pgstattuple_type; typedef void (*pgstat_page) (pgstattuple_type *, Relation, BlockNumber, - BufferAccessStrategy); + BufferAccessStrategy); static Datum build_pgstattuple_type(pgstattuple_type *stat, FunctionCallInfo fcinfo); diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c index 0c395c42a3..5a4246752a 100644 --- a/contrib/sepgsql/database.c +++ b/contrib/sepgsql/database.c @@ -32,19 +32,19 @@ void sepgsql_database_post_create(Oid databaseId, const char *dtemplate) { Relation rel; - ScanKeyData skey; - SysScanDesc sscan; + ScanKeyData skey; + SysScanDesc sscan; HeapTuple tuple; char *tcontext; char *ncontext; char audit_name[NAMEDATALEN + 20]; - ObjectAddress object; - Form_pg_database datForm; + ObjectAddress object; + Form_pg_database datForm; /* - * Oid of the source database is not saved in pg_database catalog, - * so we collect its identifier using contextual information. - * If NULL, its default is "template1" according to createdb(). + * Oid of the source database is not saved in pg_database catalog, so we + * collect its identifier using contextual information. If NULL, its + * default is "template1" according to createdb(). */ if (!dtemplate) dtemplate = "template1"; @@ -56,6 +56,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate) tcontext = sepgsql_get_label(object.classId, object.objectId, object.objectSubId); + /* * check db_database:{getattr} permission */ @@ -67,11 +68,11 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate) true); /* - * Compute a default security label of the newly created database - * based on a pair of security label of client and source database. 
+ * Compute a default security label of the newly created database based on + * a pair of security label of client and source database. * - * XXX - uncoming version of libselinux supports to take object - * name to handle special treatment on default security label. + * XXX - uncoming version of libselinux supports to take object name to + * handle special treatment on default security label. */ rel = heap_open(DatabaseRelationId, AccessShareLock); @@ -91,6 +92,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate) ncontext = sepgsql_compute_create(sepgsql_get_client_label(), tcontext, SEPG_CLASS_DB_DATABASE); + /* * check db_database:{create} permission */ @@ -126,8 +128,8 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate) void sepgsql_database_drop(Oid databaseId) { - ObjectAddress object; - char *audit_name; + ObjectAddress object; + char *audit_name; /* * check db_database:{drop} permission @@ -153,8 +155,8 @@ sepgsql_database_drop(Oid databaseId) void sepgsql_database_relabel(Oid databaseId, const char *seclabel) { - ObjectAddress object; - char *audit_name; + ObjectAddress object; + char *audit_name; object.classId = DatabaseRelationId; object.objectId = databaseId; @@ -170,6 +172,7 @@ sepgsql_database_relabel(Oid databaseId, const char *seclabel) SEPG_DB_DATABASE__RELABELFROM, audit_name, true); + /* * check db_database:{relabelto} permission */ diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c index 17aa41cf4e..47a1087417 100644 --- a/contrib/sepgsql/dml.c +++ b/contrib/sepgsql/dml.c @@ -150,7 +150,7 @@ check_relation_privileges(Oid relOid, uint32 required, bool abort) { - ObjectAddress object; + ObjectAddress object; char *audit_name; Bitmapset *columns; int index; diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c index ffa078677c..914519109c 100644 --- a/contrib/sepgsql/hooks.c +++ b/contrib/sepgsql/hooks.c @@ -52,9 +52,9 @@ typedef struct * command. Elsewhere (including the case of default) NULL. */ const char *createdb_dtemplate; -} sepgsql_context_info_t; +} sepgsql_context_info_t; -static sepgsql_context_info_t sepgsql_context_info; +static sepgsql_context_info_t sepgsql_context_info; /* * GUC: sepgsql.permissive = (on|off) @@ -101,7 +101,7 @@ sepgsql_object_access(ObjectAccessType access, { case DatabaseRelationId: sepgsql_database_post_create(objectId, - sepgsql_context_info.createdb_dtemplate); + sepgsql_context_info.createdb_dtemplate); break; case NamespaceRelationId: @@ -115,9 +115,8 @@ sepgsql_object_access(ObjectAccessType access, * All cases we want to apply permission checks on * creation of a new relation are invocation of the * heap_create_with_catalog via DefineRelation or - * OpenIntoRel. - * Elsewhere, we need neither assignment of security - * label nor permission checks. + * OpenIntoRel. Elsewhere, we need neither assignment + * of security label nor permission checks. */ switch (sepgsql_context_info.cmdtype) { @@ -150,12 +149,12 @@ sepgsql_object_access(ObjectAccessType access, case OAT_DROP: { - ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg; + ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg; /* - * No need to apply permission checks on object deletion - * due to internal cleanups; such as removal of temporary - * database object on session closed. + * No need to apply permission checks on object deletion due + * to internal cleanups; such as removal of temporary database + * object on session closed. 
*/ if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0) break; @@ -219,13 +218,13 @@ sepgsql_exec_check_perms(List *rangeTabls, bool abort) /* * sepgsql_executor_start * - * It saves contextual information during ExecutorStart to distinguish + * It saves contextual information during ExecutorStart to distinguish * a case with/without permission checks later. */ static void sepgsql_executor_start(QueryDesc *queryDesc, int eflags) { - sepgsql_context_info_t saved_context_info = sepgsql_context_info; + sepgsql_context_info_t saved_context_info = sepgsql_context_info; PG_TRY(); { @@ -270,28 +269,29 @@ sepgsql_utility_command(Node *parsetree, DestReceiver *dest, char *completionTag) { - sepgsql_context_info_t saved_context_info = sepgsql_context_info; - ListCell *cell; + sepgsql_context_info_t saved_context_info = sepgsql_context_info; + ListCell *cell; PG_TRY(); { /* * Check command tag to avoid nefarious operations, and save the - * current contextual information to determine whether we should - * apply permission checks here, or not. + * current contextual information to determine whether we should apply + * permission checks here, or not. */ sepgsql_context_info.cmdtype = nodeTag(parsetree); switch (nodeTag(parsetree)) { case T_CreatedbStmt: + /* * We hope to reference name of the source database, but it * does not appear in system catalog. So, we save it here. */ - foreach (cell, ((CreatedbStmt *) parsetree)->options) + foreach(cell, ((CreatedbStmt *) parsetree)->options) { - DefElem *defel = (DefElem *) lfirst(cell); + DefElem *defel = (DefElem *) lfirst(cell); if (strcmp(defel->defname, "template") == 0) { @@ -303,6 +303,7 @@ sepgsql_utility_command(Node *parsetree, break; case T_LoadStmt: + /* * We reject LOAD command across the board on enforcing mode, * because a binary module can arbitrarily override hooks. @@ -315,6 +316,7 @@ sepgsql_utility_command(Node *parsetree, } break; default: + /* * Right now we don't check any other utility commands, * because it needs more detailed information to make access diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c index 85f4efe072..23577b5844 100644 --- a/contrib/sepgsql/label.c +++ b/contrib/sepgsql/label.c @@ -58,17 +58,18 @@ static fmgr_hook_type next_fmgr_hook = NULL; * we use the list client_label_pending of pending_label to keep track of which * labels were set during the (sub-)transactions. 
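 *
 * [Editor's sketch, not part of this commit.] The lifecycle implemented by
 * the routines below is roughly:
 *
 *		sepgsql_set_client_label()  -> lappend a pending_label {subid, label}
 *		sepgsql_xact_callback()     -> on commit, strdup llast()'s label into
 *									   TopMemoryContext as client_label_committed;
 *									   the pending list goes away either way
 *		sepgsql_subxact_callback()  -> on subxact abort, delete entries whose
 *									   subid matches the aborted subtransaction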
*/ -static char *client_label_peer = NULL; /* set by getpeercon(3) */ -static List *client_label_pending = NIL; /* pending list being set by - * sepgsql_setcon() */ -static char *client_label_committed = NULL; /* set by sepgsql_setcon(), - * and already committed */ -static char *client_label_func = NULL; /* set by trusted procedure */ - -typedef struct { - SubTransactionId subid; - char *label; -} pending_label; +static char *client_label_peer = NULL; /* set by getpeercon(3) */ +static List *client_label_pending = NIL; /* pending list being set by + * sepgsql_setcon() */ +static char *client_label_committed = NULL; /* set by sepgsql_setcon(), + * and already committed */ +static char *client_label_func = NULL; /* set by trusted procedure */ + +typedef struct +{ + SubTransactionId subid; + char *label; +} pending_label; /* * sepgsql_get_client_label @@ -87,7 +88,7 @@ sepgsql_get_client_label(void) /* uncommitted sepgsql_setcon() value */ if (client_label_pending) { - pending_label *plabel = llast(client_label_pending); + pending_label *plabel = llast(client_label_pending); if (plabel->label) return plabel->label; @@ -104,16 +105,16 @@ sepgsql_get_client_label(void) * sepgsql_set_client_label * * This routine tries to switch the current security label of the client, and - * checks related permissions. The supplied new label shall be added to the + * checks related permissions. The supplied new label shall be added to the * client_label_pending list, then saved at transaction-commit time to ensure * transaction-awareness. */ static void sepgsql_set_client_label(const char *new_label) { - const char *tcontext; - MemoryContext oldcxt; - pending_label *plabel; + const char *tcontext; + MemoryContext oldcxt; + pending_label *plabel; /* Reset to the initial client label, if NULL */ if (!new_label) @@ -140,9 +141,10 @@ sepgsql_set_client_label(const char *new_label) SEPG_PROCESS__DYNTRANSITION, NULL, true); + /* - * Append the supplied new_label on the pending list until - * the current transaction is committed. + * Append the supplied new_label on the pending list until the current + * transaction is committed. */ oldcxt = MemoryContextSwitchTo(CurTransactionContext); @@ -158,7 +160,7 @@ sepgsql_set_client_label(const char *new_label) /* * sepgsql_xact_callback * - * A callback routine of transaction commit/abort/prepare. Commmit or abort + * A callback routine of transaction commit/abort/prepare. Commmit or abort * changes in the client_label_pending list. */ static void @@ -168,8 +170,8 @@ sepgsql_xact_callback(XactEvent event, void *arg) { if (client_label_pending != NIL) { - pending_label *plabel = llast(client_label_pending); - char *new_label; + pending_label *plabel = llast(client_label_pending); + char *new_label; if (plabel->label) new_label = MemoryContextStrdup(TopMemoryContext, @@ -181,10 +183,11 @@ sepgsql_xact_callback(XactEvent event, void *arg) pfree(client_label_committed); client_label_committed = new_label; + /* - * XXX - Note that items of client_label_pending are allocated - * on CurTransactionContext, thus, all acquired memory region - * shall be released implicitly. + * XXX - Note that items of client_label_pending are allocated on + * CurTransactionContext, thus, all acquired memory region shall + * be released implicitly. 
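 *
 * [Editor's aside, not part of this commit.] This relies on the usual
 * PostgreSQL memory-context idiom, visible in sepgsql_set_client_label()
 * above:
 *
 *		oldcxt = MemoryContextSwitchTo(CurTransactionContext);
 *		plabel = palloc(sizeof(pending_label));	/* freed at transaction end */
 *		...
 *		MemoryContextSwitchTo(oldcxt);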
*/ client_label_pending = NIL; } @@ -212,7 +215,8 @@ sepgsql_subxact_callback(SubXactEvent event, SubTransactionId mySubid, prev = NULL; for (cell = list_head(client_label_pending); cell; cell = next) { - pending_label *plabel = lfirst(cell); + pending_label *plabel = lfirst(cell); + next = lnext(cell); if (plabel->subid == mySubid) @@ -272,7 +276,7 @@ sepgsql_client_auth(Port *port, int status) static bool sepgsql_needs_fmgr_hook(Oid functionId) { - ObjectAddress object; + ObjectAddress object; if (next_needs_fmgr_hook && (*next_needs_fmgr_hook) (functionId)) @@ -340,8 +344,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event, /* * process:transition permission between old and new label, - * when user tries to switch security label of the client - * on execution of trusted procedure. + * when user tries to switch security label of the client on + * execution of trusted procedure. */ if (stack->new_label) sepgsql_avc_check_perms_label(stack->new_label, diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c index 1efbc906c6..b68314d878 100644 --- a/contrib/sepgsql/proc.c +++ b/contrib/sepgsql/proc.c @@ -42,9 +42,9 @@ sepgsql_proc_post_create(Oid functionId) char *tcontext; char *ncontext; int i; - StringInfoData audit_name; - ObjectAddress object; - Form_pg_proc proForm; + StringInfoData audit_name; + ObjectAddress object; + Form_pg_proc proForm; /* * Fetch namespace of the new procedure. Because pg_proc entry is not @@ -77,6 +77,7 @@ sepgsql_proc_post_create(Oid functionId) SEPG_DB_SCHEMA__ADD_NAME, getObjectDescription(&object), true); + /* * XXX - db_language:{implement} also should be checked here */ @@ -97,9 +98,10 @@ sepgsql_proc_post_create(Oid functionId) */ initStringInfo(&audit_name); appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname)); - for (i=0; i < proForm->pronargs; i++) + for (i = 0; i < proForm->pronargs; i++) { - Oid typeoid = proForm->proargtypes.values[i]; + Oid typeoid = proForm->proargtypes.values[i]; + if (i > 0) appendStringInfoChar(&audit_name, ','); appendStringInfoString(&audit_name, format_type_be(typeoid)); @@ -111,6 +113,7 @@ sepgsql_proc_post_create(Oid functionId) SEPG_DB_PROCEDURE__CREATE, audit_name.data, true); + /* * Assign the default security label on a new procedure */ @@ -138,8 +141,8 @@ sepgsql_proc_post_create(Oid functionId) void sepgsql_proc_drop(Oid functionId) { - ObjectAddress object; - char *audit_name; + ObjectAddress object; + char *audit_name; /* * check db_schema:{remove_name} permission @@ -156,19 +159,19 @@ sepgsql_proc_drop(Oid functionId) true); pfree(audit_name); - /* - * check db_procedure:{drop} permission - */ + /* + * check db_procedure:{drop} permission + */ object.classId = ProcedureRelationId; object.objectId = functionId; object.objectSubId = 0; audit_name = getObjectDescription(&object); - sepgsql_avc_check_perms(&object, - SEPG_CLASS_DB_PROCEDURE, - SEPG_DB_PROCEDURE__DROP, - audit_name, - true); + sepgsql_avc_check_perms(&object, + SEPG_CLASS_DB_PROCEDURE, + SEPG_DB_PROCEDURE__DROP, + audit_name, + true); pfree(audit_name); } @@ -181,8 +184,8 @@ sepgsql_proc_drop(Oid functionId) void sepgsql_proc_relabel(Oid functionId, const char *seclabel) { - ObjectAddress object; - char *audit_name; + ObjectAddress object; + char *audit_name; object.classId = ProcedureRelationId; object.objectId = functionId; @@ -198,6 +201,7 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel) SEPG_DB_PROCEDURE__RELABELFROM, audit_name, true); + /* * check db_procedure:{relabelto} permission */ diff --git 
a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c index 259be49268..e759a7d98e 100644 --- a/contrib/sepgsql/relation.c +++ b/contrib/sepgsql/relation.c @@ -44,9 +44,9 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum) char *scontext; char *tcontext; char *ncontext; - char audit_name[2*NAMEDATALEN + 20]; + char audit_name[2 * NAMEDATALEN + 20]; ObjectAddress object; - Form_pg_attribute attForm; + Form_pg_attribute attForm; /* * Only attributes within regular relation have individual security @@ -84,6 +84,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum) tcontext = sepgsql_get_label(RelationRelationId, relOid, 0); ncontext = sepgsql_compute_create(scontext, tcontext, SEPG_CLASS_DB_COLUMN); + /* * check db_column:{create} permission */ @@ -118,8 +119,8 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum) void sepgsql_attribute_drop(Oid relOid, AttrNumber attnum) { - ObjectAddress object; - char *audit_name; + ObjectAddress object; + char *audit_name; if (get_rel_relkind(relOid) != RELKIND_RELATION) return; @@ -151,7 +152,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum, const char *seclabel) { ObjectAddress object; - char *audit_name; + char *audit_name; if (get_rel_relkind(relOid) != RELKIND_RELATION) ereport(ERROR, @@ -172,6 +173,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum, SEPG_DB_COLUMN__RELABELFROM, audit_name, true); + /* * check db_column:{relabelto} permission */ @@ -203,7 +205,7 @@ sepgsql_relation_post_create(Oid relOid) char *tcontext; /* schema */ char *rcontext; /* relation */ char *ccontext; /* column */ - char audit_name[2*NAMEDATALEN + 20]; + char audit_name[2 * NAMEDATALEN + 20]; /* * Fetch catalog record of the new relation. Because pg_class entry is not @@ -254,6 +256,7 @@ sepgsql_relation_post_create(Oid relOid) SEPG_DB_SCHEMA__ADD_NAME, getObjectDescription(&object), true); + /* * Compute a default security label when we create a new relation object * under the specified namespace. 
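/*
 * [Editor's illustrative sketch, not part of this commit; the class and
 * permission constants vary per object type, and SEPG_DB_TABLE__CREATE is
 * assumed here for concreteness.] The create-time pattern these sepgsql
 * hunks keep reformatting is the same three steps each time: compute a
 * default label, check a permission against it, then assign it:
 *
 *		ncontext = sepgsql_compute_create(scontext, tcontext,
 *										  SEPG_CLASS_DB_TABLE);
 *		sepgsql_avc_check_perms_label(ncontext, SEPG_CLASS_DB_TABLE,
 *									  SEPG_DB_TABLE__CREATE, audit_name, true);
 *		SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, ncontext);
 */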
@@ -273,6 +276,7 @@ sepgsql_relation_post_create(Oid relOid) SEPG_DB_DATABASE__CREATE, audit_name, true); + /* * Assign the default security label on the new relation */ @@ -288,10 +292,10 @@ sepgsql_relation_post_create(Oid relOid) if (classForm->relkind == RELKIND_RELATION) { Relation arel; - ScanKeyData akey; - SysScanDesc ascan; + ScanKeyData akey; + SysScanDesc ascan; HeapTuple atup; - Form_pg_attribute attForm; + Form_pg_attribute attForm; arel = heap_open(AttributeRelationId, AccessShareLock); @@ -315,6 +319,7 @@ sepgsql_relation_post_create(Oid relOid) ccontext = sepgsql_compute_create(scontext, rcontext, SEPG_CLASS_DB_COLUMN); + /* * check db_column:{create} permission */ @@ -348,10 +353,10 @@ out: void sepgsql_relation_drop(Oid relOid) { - ObjectAddress object; - char *audit_name; - uint16_t tclass = 0; - char relkind; + ObjectAddress object; + char *audit_name; + uint16_t tclass = 0; + char relkind; relkind = get_rel_relkind(relOid); if (relkind == RELKIND_RELATION) @@ -398,13 +403,13 @@ sepgsql_relation_drop(Oid relOid) */ if (relkind == RELKIND_RELATION) { - Form_pg_attribute attForm; + Form_pg_attribute attForm; CatCList *attrList; HeapTuple atttup; int i; attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid)); - for (i=0; i < attrList->n_members; i++) + for (i = 0; i < attrList->n_members; i++) { atttup = &attrList->members[i]->tuple; attForm = (Form_pg_attribute) GETSTRUCT(atttup); @@ -436,7 +441,7 @@ sepgsql_relation_drop(Oid relOid) void sepgsql_relation_relabel(Oid relOid, const char *seclabel) { - ObjectAddress object; + ObjectAddress object; char *audit_name; char relkind; uint16_t tclass = 0; @@ -468,6 +473,7 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel) SEPG_DB_TABLE__RELABELFROM, audit_name, true); + /* * check db_xxx:{relabelto} permission */ diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c index 31d60efe18..230449dc4b 100644 --- a/contrib/sepgsql/schema.c +++ b/contrib/sepgsql/schema.c @@ -35,22 +35,22 @@ void sepgsql_schema_post_create(Oid namespaceId) { Relation rel; - ScanKeyData skey; - SysScanDesc sscan; + ScanKeyData skey; + SysScanDesc sscan; HeapTuple tuple; char *tcontext; char *ncontext; char audit_name[NAMEDATALEN + 20]; - ObjectAddress object; - Form_pg_namespace nspForm; + ObjectAddress object; + Form_pg_namespace nspForm; /* * Compute a default security label when we create a new schema object * under the working database. * - * XXX - uncoming version of libselinux supports to take object - * name to handle special treatment on default security label; - * such as special label on "pg_temp" schema. + * XXX - uncoming version of libselinux supports to take object name to + * handle special treatment on default security label; such as special + * label on "pg_temp" schema. 
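 *
 * [Editor's note, not part of this commit; the API name is an assumption
 * based on the libselinux development tree of that era.] The interface
 * alluded to is along the lines of:
 *
 *		security_compute_create_name(scon, tcon, tclass, objname, &newcon);
 *
 * which would allow a schema named "pg_temp" to receive a distinct
 * default label.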
*/ rel = heap_open(NamespaceRelationId, AccessShareLock); @@ -71,6 +71,7 @@ sepgsql_schema_post_create(Oid namespaceId) ncontext = sepgsql_compute_create(sepgsql_get_client_label(), tcontext, SEPG_CLASS_DB_SCHEMA); + /* * check db_schema:{create} */ @@ -104,8 +105,8 @@ sepgsql_schema_post_create(Oid namespaceId) void sepgsql_schema_drop(Oid namespaceId) { - ObjectAddress object; - char *audit_name; + ObjectAddress object; + char *audit_name; /* * check db_schema:{drop} permission @@ -116,7 +117,7 @@ sepgsql_schema_drop(Oid namespaceId) audit_name = getObjectDescription(&object); sepgsql_avc_check_perms(&object, - SEPG_CLASS_DB_SCHEMA, + SEPG_CLASS_DB_SCHEMA, SEPG_DB_SCHEMA__DROP, audit_name, true); @@ -132,8 +133,8 @@ sepgsql_schema_drop(Oid namespaceId) void sepgsql_schema_relabel(Oid namespaceId, const char *seclabel) { - ObjectAddress object; - char *audit_name; + ObjectAddress object; + char *audit_name; object.classId = NamespaceRelationId; object.objectId = namespaceId; @@ -149,6 +150,7 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel) SEPG_DB_SCHEMA__RELABELFROM, audit_name, true); + /* * check db_schema:{relabelto} permission */ diff --git a/contrib/sepgsql/sepgsql.h b/contrib/sepgsql/sepgsql.h index 708d4ee656..479b136909 100644 --- a/contrib/sepgsql/sepgsql.h +++ b/contrib/sepgsql/sepgsql.h @@ -248,20 +248,21 @@ extern bool sepgsql_check_perms(const char *scontext, uint32 required, const char *audit_name, bool abort); + /* * uavc.c */ #define SEPGSQL_AVC_NOAUDIT ((void *)(-1)) extern bool sepgsql_avc_check_perms_label(const char *tcontext, - uint16 tclass, - uint32 required, - const char *audit_name, - bool abort); + uint16 tclass, + uint32 required, + const char *audit_name, + bool abort); extern bool sepgsql_avc_check_perms(const ObjectAddress *tobject, - uint16 tclass, - uint32 required, - const char *audit_name, - bool abort); + uint16 tclass, + uint32 required, + const char *audit_name, + bool abort); extern char *sepgsql_avc_trusted_proc(Oid functionId); extern void sepgsql_avc_init(void); @@ -269,7 +270,7 @@ extern void sepgsql_avc_init(void); * label.c */ extern char *sepgsql_get_client_label(void); -extern void sepgsql_init_client_label(void); +extern void sepgsql_init_client_label(void); extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId); extern void sepgsql_object_relabel(const ObjectAddress *object, @@ -290,7 +291,7 @@ extern bool sepgsql_dml_privileges(List *rangeTabls, bool abort); * database.c */ extern void sepgsql_database_post_create(Oid databaseId, - const char *dtemplate); + const char *dtemplate); extern void sepgsql_database_drop(Oid databaseId); extern void sepgsql_database_relabel(Oid databaseId, const char *seclabel); diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c index 905f87dfc8..9641a17d79 100644 --- a/contrib/sepgsql/uavc.c +++ b/contrib/sepgsql/uavc.c @@ -30,22 +30,22 @@ */ typedef struct { - uint32 hash; /* hash value of this cache entry */ - char *scontext; /* security context of the subject */ - char *tcontext; /* security context of the target */ - uint16 tclass; /* object class of the target */ + uint32 hash; /* hash value of this cache entry */ + char *scontext; /* security context of the subject */ + char *tcontext; /* security context of the target */ + uint16 tclass; /* object class of the target */ - uint32 allowed; /* permissions to be allowed */ - uint32 auditallow; /* permissions to be audited on allowed */ - uint32 auditdeny; /* permissions to be audited on denied */ + uint32 allowed; /* 
permissions to be allowed */ + uint32 auditallow; /* permissions to be audited on allowed */ + uint32 auditdeny; /* permissions to be audited on denied */ - bool permissive; /* true, if permissive rule */ - bool hot_cache; /* true, if recently referenced */ + bool permissive; /* true, if permissive rule */ + bool hot_cache; /* true, if recently referenced */ bool tcontext_is_valid; - /* true, if tcontext is valid */ - char *ncontext; /* temporary scontext on execution of trusted - * procedure, or NULL elsewhere */ -} avc_cache; + /* true, if tcontext is valid */ + char *ncontext; /* temporary scontext on execution of trusted + * procedure, or NULL elsewhere */ +} avc_cache; /* * Declaration of static variables @@ -54,12 +54,12 @@ typedef struct #define AVC_NUM_RECLAIM 16 #define AVC_DEF_THRESHOLD 384 -static MemoryContext avc_mem_cxt; -static List *avc_slots[AVC_NUM_SLOTS]; /* avc's hash buckets */ -static int avc_num_caches; /* number of caches currently used */ -static int avc_lru_hint; /* index of the buckets to be reclaimed next */ -static int avc_threshold; /* threshold to launch cache-reclaiming */ -static char *avc_unlabeled; /* system 'unlabeled' label */ +static MemoryContext avc_mem_cxt; +static List *avc_slots[AVC_NUM_SLOTS]; /* avc's hash buckets */ +static int avc_num_caches; /* number of caches currently used */ +static int avc_lru_hint; /* index of the buckets to be reclaimed next */ +static int avc_threshold; /* threshold to launch cache-reclaiming */ +static char *avc_unlabeled; /* system 'unlabeled' label */ /* * Hash function @@ -67,8 +67,8 @@ static char *avc_unlabeled; /* system 'unlabeled' label */ static uint32 sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass) { - return hash_any((const unsigned char *)scontext, strlen(scontext)) - ^ hash_any((const unsigned char *)tcontext, strlen(tcontext)) + return hash_any((const unsigned char *) scontext, strlen(scontext)) + ^ hash_any((const unsigned char *) tcontext, strlen(tcontext)) ^ tclass; } @@ -88,7 +88,7 @@ sepgsql_avc_reset(void) /* * Reclaim caches recently unreferenced - */ + */ static void sepgsql_avc_reclaim(void) { @@ -142,15 +142,15 @@ sepgsql_avc_reclaim(void) * Access control decisions must be atomic, but multiple system calls may * be required to make a decision; thus, when referencing the access vector * cache, we must loop until we complete without an intervening cache flush - * event. In practice, looping even once should be very rare. Callers should + * event. In practice, looping even once should be very rare. Callers should * do something like this: * - * sepgsql_avc_check_valid(); - * do { - * : - * <reference to uavc> - * : - * } while (!sepgsql_avc_check_valid()) + * sepgsql_avc_check_valid(); + * do { + * : + * <reference to uavc> + * : + * } while (!sepgsql_avc_check_valid()) * * ------------------------------------------------------------------------- */ @@ -169,7 +169,7 @@ sepgsql_avc_check_valid(void) /* * sepgsql_avc_unlabeled * - * Returns an alternative label to be applied when no label or an invalid + * Returns an alternative label to be applied when no label or an invalid * label would otherwise be assigned. 
*/ static char * @@ -177,12 +177,12 @@ sepgsql_avc_unlabeled(void) { if (!avc_unlabeled) { - security_context_t unlabeled; + security_context_t unlabeled; if (security_get_initial_context_raw("unlabeled", &unlabeled) < 0) ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("SELinux: failed to get initial security label: %m"))); + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("SELinux: failed to get initial security label: %m"))); PG_TRY(); { avc_unlabeled = MemoryContextStrdup(avc_mem_cxt, unlabeled); @@ -200,7 +200,7 @@ sepgsql_avc_unlabeled(void) } /* - * sepgsql_avc_compute + * sepgsql_avc_compute * * A fallback path, when cache mishit. It asks SELinux its access control * decision for the supplied pair of security context and object class. @@ -208,24 +208,24 @@ sepgsql_avc_unlabeled(void) static avc_cache * sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass) { - char *ucontext = NULL; - char *ncontext = NULL; - MemoryContext oldctx; - avc_cache *cache; - uint32 hash; - int index; - struct av_decision avd; + char *ucontext = NULL; + char *ncontext = NULL; + MemoryContext oldctx; + avc_cache *cache; + uint32 hash; + int index; + struct av_decision avd; hash = sepgsql_avc_hash(scontext, tcontext, tclass); index = hash % AVC_NUM_SLOTS; /* - * Validation check of the supplied security context. - * Because it always invoke system-call, frequent check should be avoided. - * Unless security policy is reloaded, validation status shall be kept, so - * we also cache whether the supplied security context was valid, or not. + * Validation check of the supplied security context. Because it always + * invoke system-call, frequent check should be avoided. Unless security + * policy is reloaded, validation status shall be kept, so we also cache + * whether the supplied security context was valid, or not. */ - if (security_check_context_raw((security_context_t)tcontext) != 0) + if (security_check_context_raw((security_context_t) tcontext) != 0) ucontext = sepgsql_avc_unlabeled(); /* @@ -237,15 +237,14 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass) sepgsql_compute_avd(scontext, ucontext, tclass, &avd); /* - * It also caches a security label to be switched when a client - * labeled as 'scontext' executes a procedure labeled as 'tcontext', - * not only access control decision on the procedure. - * The security label to be switched shall be computed uniquely on - * a pair of 'scontext' and 'tcontext', thus, it is reasonable to - * cache the new label on avc, and enables to reduce unnecessary - * system calls. - * It shall be referenced at sepgsql_needs_fmgr_hook to check whether - * the supplied function is a trusted procedure, or not. + * It also caches a security label to be switched when a client labeled as + * 'scontext' executes a procedure labeled as 'tcontext', not only access + * control decision on the procedure. The security label to be switched + * shall be computed uniquely on a pair of 'scontext' and 'tcontext', + * thus, it is reasonable to cache the new label on avc, and enables to + * reduce unnecessary system calls. It shall be referenced at + * sepgsql_needs_fmgr_hook to check whether the supplied function is a + * trusted procedure, or not. 
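 *
 * [Editor's aside, not part of this commit.] Concretely, the cached
 * ncontext is consumed as a yes/no answer, e.g. in the fmgr hook path:
 *
 *		if (sepgsql_avc_trusted_proc(functionId) != NULL)
 *			... functionId is a trusted procedure ...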
*/ if (tclass == SEPG_CLASS_DB_PROCEDURE) { @@ -269,7 +268,7 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass) cache = palloc0(sizeof(avc_cache)); - cache->hash = hash; + cache->hash = hash; cache->scontext = pstrdup(scontext); cache->tcontext = pstrdup(tcontext); cache->tclass = tclass; @@ -314,7 +313,7 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass) hash = sepgsql_avc_hash(scontext, tcontext, tclass); index = hash % AVC_NUM_SLOTS; - foreach (cell, avc_slots[index]) + foreach(cell, avc_slots[index]) { cache = lfirst(cell); @@ -348,14 +347,15 @@ sepgsql_avc_check_perms_label(const char *tcontext, uint16 tclass, uint32 required, const char *audit_name, bool abort) { - char *scontext = sepgsql_get_client_label(); + char *scontext = sepgsql_get_client_label(); avc_cache *cache; uint32 denied; uint32 audited; bool result; sepgsql_avc_check_valid(); - do { + do + { result = true; /* @@ -377,16 +377,16 @@ sepgsql_avc_check_perms_label(const char *tcontext, audited = (denied ? (denied & ~0) : (required & ~0)); else audited = denied ? (denied & cache->auditdeny) - : (required & cache->auditallow); + : (required & cache->auditallow); if (denied) { /* * In permissive mode or permissive domain, violated permissions * shall be audited to the log files at once, and then implicitly - * allowed to avoid a flood of access denied logs, because - * the purpose of permissive mode/domain is to collect a violation - * log that will make it possible to fix up the security policy. + * allowed to avoid a flood of access denied logs, because the + * purpose of permissive mode/domain is to collect a violation log + * that will make it possible to fix up the security policy. */ if (!sepgsql_getenforce() || cache->permissive) cache->allowed |= required; @@ -397,10 +397,10 @@ sepgsql_avc_check_perms_label(const char *tcontext, /* * In the case when we have something auditable actions here, - * sepgsql_audit_log shall be called with text representation of - * security labels for both of subject and object. - * It records this access violation, so DBA will be able to find - * out unexpected security problems later. + * sepgsql_audit_log shall be called with text representation of security + * labels for both of subject and object. It records this access + * violation, so DBA will be able to find out unexpected security problems + * later. 
*/ if (audited != 0 && audit_name != SEPGSQL_AVC_NOAUDIT && @@ -428,8 +428,8 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject, uint16 tclass, uint32 required, const char *audit_name, bool abort) { - char *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG); - bool rc; + char *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG); + bool rc; rc = sepgsql_avc_check_perms_label(tcontext, tclass, required, @@ -450,10 +450,10 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject, char * sepgsql_avc_trusted_proc(Oid functionId) { - char *scontext = sepgsql_get_client_label(); - char *tcontext; - ObjectAddress tobject; - avc_cache *cache; + char *scontext = sepgsql_get_client_label(); + char *tcontext; + ObjectAddress tobject; + avc_cache *cache; tobject.classId = ProcedureRelationId; tobject.objectId = functionId; @@ -461,7 +461,8 @@ sepgsql_avc_trusted_proc(Oid functionId) tcontext = GetSecurityLabel(&tobject, SEPGSQL_LABEL_TAG); sepgsql_avc_check_valid(); - do { + do + { if (tcontext) cache = sepgsql_avc_lookup(scontext, tcontext, SEPG_CLASS_DB_PROCEDURE); @@ -492,7 +493,7 @@ sepgsql_avc_exit(int code, Datum arg) void sepgsql_avc_init(void) { - int rc; + int rc; /* * All the avc stuff shall be allocated on avc_mem_cxt @@ -508,12 +509,11 @@ sepgsql_avc_init(void) avc_threshold = AVC_DEF_THRESHOLD; /* - * SELinux allows to mmap(2) its kernel status page in read-only mode - * to inform userspace applications its status updating (such as - * policy reloading) without system-call invocations. - * This feature is only supported in Linux-2.6.38 or later, however, - * libselinux provides a fallback mode to know its status using - * netlink sockets. + * SELinux allows to mmap(2) its kernel status page in read-only mode to + * inform userspace applications its status updating (such as policy + * reloading) without system-call invocations. This feature is only + * supported in Linux-2.6.38 or later, however, libselinux provides a + * fallback mode to know its status using netlink sockets. */ rc = selinux_status_open(1); if (rc < 0) diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c index 39a0160587..8dc565a190 100644 --- a/contrib/spi/refint.c +++ b/contrib/spi/refint.c @@ -536,8 +536,7 @@ check_foreign_key(PG_FUNCTION_ARGS) /* * Remember that SPI_prepare places plan in current memory context - * - so, we have to save plan in Top memory context for later - * use. + * - so, we have to save plan in Top memory context for later use. */ if (SPI_keepplan(pplan)) /* internal error */ diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c index 641a8c3425..958a496b24 100644 --- a/contrib/vacuumlo/vacuumlo.c +++ b/contrib/vacuumlo/vacuumlo.c @@ -69,7 +69,7 @@ vacuumlo(const char *database, const struct _param * param) int i; static char *password = NULL; bool new_pass; - bool success = true; + bool success = true; /* Note: password can be carried over from a previous call */ if (param->pg_prompt == TRI_YES && password == NULL) @@ -261,8 +261,8 @@ vacuumlo(const char *database, const struct _param * param) * We don't want to run each delete as an individual transaction, because * the commit overhead would be high. However, since 9.0 the backend will * acquire a lock per deleted LO, so deleting too many LOs per transaction - * risks running out of room in the shared-memory lock table. - * Accordingly, we delete up to transaction_limit LOs per transaction. + * risks running out of room in the shared-memory lock table. 
Accordingly, + * we delete up to transaction_limit LOs per transaction. */ res = PQexec(conn, "begin"); if (PQresultStatus(res) != PGRES_COMMAND_OK) @@ -459,8 +459,8 @@ main(int argc, char **argv) if (param.transaction_limit < 0) { fprintf(stderr, - "%s: transaction limit must not be negative (0 disables)\n", - progname); + "%s: transaction limit must not be negative (0 disables)\n", + progname); exit(1); } break; diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index 2ddee59fcb..660d25c349 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -702,126 +702,126 @@ xpath_table(PG_FUNCTION_ARGS) PG_TRY(); { - /* For each row i.e. document returned from SPI */ - for (i = 0; i < proc; i++) - { - char *pkey; - char *xmldoc; - xmlXPathContextPtr ctxt; - xmlXPathObjectPtr res; - xmlChar *resstr; - xmlXPathCompExprPtr comppath; - - /* Extract the row data as C Strings */ - spi_tuple = tuptable->vals[i]; - pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1); - xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2); - - /* - * Clear the values array, so that not-well-formed documents return - * NULL in all columns. Note that this also means that spare columns - * will be NULL. - */ - for (j = 0; j < ret_tupdesc->natts; j++) - values[j] = NULL; - - /* Insert primary key */ - values[0] = pkey; - - /* Parse the document */ - if (xmldoc) - doctree = xmlParseMemory(xmldoc, strlen(xmldoc)); - else /* treat NULL as not well-formed */ - doctree = NULL; - - if (doctree == NULL) + /* For each row i.e. document returned from SPI */ + for (i = 0; i < proc; i++) { - /* not well-formed, so output all-NULL tuple */ - ret_tuple = BuildTupleFromCStrings(attinmeta, values); - tuplestore_puttuple(tupstore, ret_tuple); - heap_freetuple(ret_tuple); - } - else - { - /* New loop here - we have to deal with nodeset results */ - rownr = 0; - - do + char *pkey; + char *xmldoc; + xmlXPathContextPtr ctxt; + xmlXPathObjectPtr res; + xmlChar *resstr; + xmlXPathCompExprPtr comppath; + + /* Extract the row data as C Strings */ + spi_tuple = tuptable->vals[i]; + pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1); + xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2); + + /* + * Clear the values array, so that not-well-formed documents + * return NULL in all columns. Note that this also means that + * spare columns will be NULL. + */ + for (j = 0; j < ret_tupdesc->natts; j++) + values[j] = NULL; + + /* Insert primary key */ + values[0] = pkey; + + /* Parse the document */ + if (xmldoc) + doctree = xmlParseMemory(xmldoc, strlen(xmldoc)); + else /* treat NULL as not well-formed */ + doctree = NULL; + + if (doctree == NULL) { - /* Now evaluate the set of xpaths. */ - had_values = false; - for (j = 0; j < numpaths; j++) + /* not well-formed, so output all-NULL tuple */ + ret_tuple = BuildTupleFromCStrings(attinmeta, values); + tuplestore_puttuple(tupstore, ret_tuple); + heap_freetuple(ret_tuple); + } + else + { + /* New loop here - we have to deal with nodeset results */ + rownr = 0; + + do { - ctxt = xmlXPathNewContext(doctree); - ctxt->node = xmlDocGetRootElement(doctree); + /* Now evaluate the set of xpaths. 
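 *
 * [Editor's aside, not part of this commit.] Per path, the libxml2 calls
 * below boil down to this sequence:
 *
 *		ctxt = xmlXPathNewContext(doctree);
 *		comppath = xmlXPathCompile(xpaths[j]);
 *		res = xmlXPathCompiledEval(comppath, ctxt);
 *		xmlXPathFreeCompExpr(comppath);
 *		... read res->nodesetval or res->stringval ...
 *		xmlXPathFreeContext(ctxt);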
*/ + had_values = false; + for (j = 0; j < numpaths; j++) + { + ctxt = xmlXPathNewContext(doctree); + ctxt->node = xmlDocGetRootElement(doctree); - /* compile the path */ - comppath = xmlXPathCompile(xpaths[j]); - if (comppath == NULL) - xml_ereport(xmlerrcxt, ERROR, - ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, - "XPath Syntax Error"); + /* compile the path */ + comppath = xmlXPathCompile(xpaths[j]); + if (comppath == NULL) + xml_ereport(xmlerrcxt, ERROR, + ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, + "XPath Syntax Error"); - /* Now evaluate the path expression. */ - res = xmlXPathCompiledEval(comppath, ctxt); - xmlXPathFreeCompExpr(comppath); + /* Now evaluate the path expression. */ + res = xmlXPathCompiledEval(comppath, ctxt); + xmlXPathFreeCompExpr(comppath); - if (res != NULL) - { - switch (res->type) + if (res != NULL) { - case XPATH_NODESET: - /* We see if this nodeset has enough nodes */ - if (res->nodesetval != NULL && - rownr < res->nodesetval->nodeNr) - { - resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]); - had_values = true; - } - else - resstr = NULL; - - break; - - case XPATH_STRING: - resstr = xmlStrdup(res->stringval); - break; - - default: - elog(NOTICE, "unsupported XQuery result: %d", res->type); - resstr = xmlStrdup((const xmlChar *) "<unsupported/>"); + switch (res->type) + { + case XPATH_NODESET: + /* We see if this nodeset has enough nodes */ + if (res->nodesetval != NULL && + rownr < res->nodesetval->nodeNr) + { + resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]); + had_values = true; + } + else + resstr = NULL; + + break; + + case XPATH_STRING: + resstr = xmlStrdup(res->stringval); + break; + + default: + elog(NOTICE, "unsupported XQuery result: %d", res->type); + resstr = xmlStrdup((const xmlChar *) "<unsupported/>"); + } + + /* + * Insert this into the appropriate column in the + * result tuple. + */ + values[j + 1] = (char *) resstr; } - - /* - * Insert this into the appropriate column in the - * result tuple. - */ - values[j + 1] = (char *) resstr; + xmlXPathFreeContext(ctxt); } - xmlXPathFreeContext(ctxt); - } - /* Now add the tuple to the output, if there is one. */ - if (had_values) - { - ret_tuple = BuildTupleFromCStrings(attinmeta, values); - tuplestore_puttuple(tupstore, ret_tuple); - heap_freetuple(ret_tuple); - } + /* Now add the tuple to the output, if there is one. 
*/ + if (had_values) + { + ret_tuple = BuildTupleFromCStrings(attinmeta, values); + tuplestore_puttuple(tupstore, ret_tuple); + heap_freetuple(ret_tuple); + } - rownr++; - } while (had_values); - } + rownr++; + } while (had_values); + } - if (doctree != NULL) - xmlFreeDoc(doctree); - doctree = NULL; + if (doctree != NULL) + xmlFreeDoc(doctree); + doctree = NULL; - if (pkey) - pfree(pkey); - if (xmldoc) - pfree(xmldoc); - } + if (pkey) + pfree(pkey); + if (xmldoc) + pfree(xmldoc); + } } PG_CATCH(); { diff --git a/contrib/xml2/xslt_proc.c b/contrib/xml2/xslt_proc.c index ba1171a041..a93931d261 100644 --- a/contrib/xml2/xslt_proc.c +++ b/contrib/xml2/xslt_proc.c @@ -85,40 +85,40 @@ xslt_process(PG_FUNCTION_ARGS) { /* Check to see if document is a file or a literal */ - if (VARDATA(doct)[0] == '<') - doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ); - else - doctree = xmlParseFile(text_to_cstring(doct)); - - if (doctree == NULL) - xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, - "error parsing XML document"); + if (VARDATA(doct)[0] == '<') + doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ); + else + doctree = xmlParseFile(text_to_cstring(doct)); - /* Same for stylesheet */ - if (VARDATA(ssheet)[0] == '<') - { - ssdoc = xmlParseMemory((char *) VARDATA(ssheet), - VARSIZE(ssheet) - VARHDRSZ); - if (ssdoc == NULL) + if (doctree == NULL) xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, - "error parsing stylesheet as XML document"); + "error parsing XML document"); - stylesheet = xsltParseStylesheetDoc(ssdoc); - } - else - stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet)); + /* Same for stylesheet */ + if (VARDATA(ssheet)[0] == '<') + { + ssdoc = xmlParseMemory((char *) VARDATA(ssheet), + VARSIZE(ssheet) - VARHDRSZ); + if (ssdoc == NULL) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, + "error parsing stylesheet as XML document"); + + stylesheet = xsltParseStylesheetDoc(ssdoc); + } + else + stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet)); - if (stylesheet == NULL) - xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, - "failed to parse stylesheet"); + if (stylesheet == NULL) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, + "failed to parse stylesheet"); - restree = xsltApplyStylesheet(stylesheet, doctree, params); + restree = xsltApplyStylesheet(stylesheet, doctree, params); - if (restree == NULL) - xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, - "failed to apply stylesheet"); + if (restree == NULL) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION, + "failed to apply stylesheet"); - resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet); + resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet); } PG_CATCH(); { diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 1efaaee1a8..783590ea55 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -27,7 +27,7 @@ /* non-export function prototypes */ static void gistfixsplit(GISTInsertState *state, GISTSTATE *giststate); static bool gistinserttuple(GISTInsertState *state, GISTInsertStack *stack, - GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum); + GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum); static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack, GISTSTATE *giststate, IndexTuple 
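The xslt_process() hunk just above re-wraps a straightforward pipeline: parse the document, parse the stylesheet, apply it, serialize the result, and error out at each stage. A minimal standalone libxslt sketch of that same flow, with ordinary return codes in place of xml_ereport() and hypothetical file names:

#include <stdio.h>
#include <libxml/parser.h>
#include <libxml/xmlmemory.h>
#include <libxslt/xslt.h>
#include <libxslt/transform.h>
#include <libxslt/xsltutils.h>

int
main(void)
{
	xmlDocPtr	doctree;
	xmlDocPtr	restree;
	xsltStylesheetPtr stylesheet;
	xmlChar    *resstr = NULL;
	int			reslen = 0;

	/* Parse document and stylesheet (file names are placeholders) */
	doctree = xmlParseFile("input.xml");
	if (doctree == NULL)
		return 1;				/* error parsing XML document */
	stylesheet = xsltParseStylesheetFile((const xmlChar *) "sheet.xsl");
	if (stylesheet == NULL)
		return 1;				/* failed to parse stylesheet */

	/* Apply the stylesheet, then serialize the result to a string */
	restree = xsltApplyStylesheet(stylesheet, doctree, NULL);
	if (restree == NULL)
		return 1;				/* failed to apply stylesheet */
	if (xsltSaveResultToString(&resstr, &reslen, restree, stylesheet) == 0)
		printf("%.*s", reslen, (char *) resstr);

	/* Release everything; the stylesheet owns its parsed document */
	xmlFree(resstr);
	xmlFreeDoc(restree);
	xsltFreeStylesheet(stylesheet);
	xmlFreeDoc(doctree);
	return 0;
}

Compile with the flags reported by xml2-config and xslt-config; the point is only the order of operations the patch re-indents.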
*tuples, int ntup, OffsetNumber oldoffnum, @@ -781,8 +781,8 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum) { /* * Page was split while we looked elsewhere. We didn't see the - * downlink to the right page when we scanned the parent, so - * add it to the queue now. + * downlink to the right page when we scanned the parent, so add + * it to the queue now. * * Put the right page ahead of the queue, so that we visit it * next. That's important, because if this is the lowest internal @@ -829,7 +829,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum) elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u", RelationGetRelationName(r), child); - return NULL; /* keep compiler quiet */ + return NULL; /* keep compiler quiet */ } /* @@ -1046,7 +1046,7 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate) */ static bool gistinserttuple(GISTInsertState *state, GISTInsertStack *stack, - GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum) + GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum) { return gistinserttuples(state, stack, giststate, &tuple, 1, oldoffnum, InvalidBuffer, InvalidBuffer, false, false); @@ -1308,7 +1308,7 @@ initGISTstate(Relation index) giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE)); giststate->scanCxt = scanCxt; - giststate->tempCxt = scanCxt; /* caller must change this if needed */ + giststate->tempCxt = scanCxt; /* caller must change this if needed */ giststate->tupdesc = index->rd_att; for (i = 0; i < index->rd_att->natts; i++) diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index 712e59ac90..8caf485676 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -48,7 +48,7 @@ typedef enum * before switching to the buffering build * mode */ GIST_BUFFERING_ACTIVE /* in buffering build mode */ -} GistBufferingMode; +} GistBufferingMode; /* Working state for gistbuild and its callback */ typedef struct @@ -263,7 +263,7 @@ gistValidateBufferingOption(char *value) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid value for \"buffering\" option"), - errdetail("Valid values are \"on\", \"off\", and \"auto\"."))); + errdetail("Valid values are \"on\", \"off\", and \"auto\"."))); } } @@ -567,7 +567,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup, BlockNumber childblkno; Buffer buffer; bool result = false; - BlockNumber blkno; + BlockNumber blkno; int level; OffsetNumber downlinkoffnum = InvalidOffsetNumber; BlockNumber parentblkno = InvalidBlockNumber; @@ -623,7 +623,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup, { gistbufferinginserttuples(buildstate, buffer, level, &newtup, 1, childoffnum, - InvalidBlockNumber, InvalidOffsetNumber); + InvalidBlockNumber, InvalidOffsetNumber); /* gistbufferinginserttuples() released the buffer */ } else @@ -716,26 +716,26 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level, /* * All the downlinks on the old root page are now on one of the child - * pages. Visit all the new child pages to memorize the parents of - * the grandchildren. + * pages. Visit all the new child pages to memorize the parents of the + * grandchildren. 
*/ if (gfbb->rootlevel > 1) { maxoff = PageGetMaxOffsetNumber(page); for (off = FirstOffsetNumber; off <= maxoff; off++) { - ItemId iid = PageGetItemId(page, off); - IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); + ItemId iid = PageGetItemId(page, off); + IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid)); - Buffer childbuf = ReadBuffer(buildstate->indexrel, childblkno); + Buffer childbuf = ReadBuffer(buildstate->indexrel, childblkno); LockBuffer(childbuf, GIST_SHARE); gistMemorizeAllDownlinks(buildstate, childbuf); UnlockReleaseBuffer(childbuf); /* - * Also remember that the parent of the new child page is - * the root block. + * Also remember that the parent of the new child page is the + * root block. */ gistMemorizeParent(buildstate, childblkno, GIST_ROOT_BLKNO); } @@ -789,8 +789,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level, * Remember the parent of each new child page in our parent map. * This assumes that the downlinks fit on the parent page. If the * parent page is split, too, when we recurse up to insert the - * downlinks, the recursive gistbufferinginserttuples() call - * will update the map again. + * downlinks, the recursive gistbufferinginserttuples() call will + * update the map again. */ if (level > 0) gistMemorizeParent(buildstate, @@ -879,8 +879,9 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate, if (parent == *parentblkno && *parentblkno != InvalidBlockNumber && *downlinkoffnum != InvalidOffsetNumber && *downlinkoffnum <= maxoff) { - ItemId iid = PageGetItemId(page, *downlinkoffnum); - IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); + ItemId iid = PageGetItemId(page, *downlinkoffnum); + IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); + if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno) { /* Still there */ @@ -889,16 +890,17 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate, } /* - * Downlink was not at the offset where it used to be. Scan the page - * to find it. During normal gist insertions, it might've moved to another - * page, to the right, but during a buffering build, we keep track of - * the parent of each page in the lookup table so we should always know - * what page it's on. + * Downlink was not at the offset where it used to be. Scan the page to + * find it. During normal gist insertions, it might've moved to another + * page, to the right, but during a buffering build, we keep track of the + * parent of each page in the lookup table so we should always know what + * page it's on. 
*/ for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off)) { - ItemId iid = PageGetItemId(page, off); - IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); + ItemId iid = PageGetItemId(page, off); + IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); + if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno) { /* yes!!, found it */ @@ -908,7 +910,7 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate, } elog(ERROR, "failed to re-find parent for block %u", childblkno); - return InvalidBuffer; /* keep compiler quiet */ + return InvalidBuffer; /* keep compiler quiet */ } /* @@ -1129,7 +1131,7 @@ gistGetMaxLevel(Relation index) typedef struct { - BlockNumber childblkno; /* hash key */ + BlockNumber childblkno; /* hash key */ BlockNumber parentblkno; } ParentMapEntry; @@ -1156,9 +1158,9 @@ gistMemorizeParent(GISTBuildState *buildstate, BlockNumber child, BlockNumber pa bool found; entry = (ParentMapEntry *) hash_search(buildstate->parentMap, - (const void *) &child, - HASH_ENTER, - &found); + (const void *) &child, + HASH_ENTER, + &found); entry->parentblkno = parent; } @@ -1171,16 +1173,17 @@ gistMemorizeAllDownlinks(GISTBuildState *buildstate, Buffer parentbuf) OffsetNumber maxoff; OffsetNumber off; BlockNumber parentblkno = BufferGetBlockNumber(parentbuf); - Page page = BufferGetPage(parentbuf); + Page page = BufferGetPage(parentbuf); Assert(!GistPageIsLeaf(page)); maxoff = PageGetMaxOffsetNumber(page); for (off = FirstOffsetNumber; off <= maxoff; off++) { - ItemId iid = PageGetItemId(page, off); - IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); + ItemId iid = PageGetItemId(page, off); + IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid)); + gistMemorizeParent(buildstate, childblkno, parentblkno); } } @@ -1193,9 +1196,9 @@ gistGetParent(GISTBuildState *buildstate, BlockNumber child) /* Find node buffer in hash table */ entry = (ParentMapEntry *) hash_search(buildstate->parentMap, - (const void *) &child, - HASH_FIND, - &found); + (const void *) &child, + HASH_FIND, + &found); if (!found) elog(ERROR, "could not find parent of block %d in lookup table", child); diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c index 3feca263a7..39aec856f9 100644 --- a/src/backend/access/gist/gistbuildbuffers.c +++ b/src/backend/access/gist/gistbuildbuffers.c @@ -528,7 +528,7 @@ typedef struct bool isnull[INDEX_MAX_KEYS]; GISTPageSplitInfo *splitinfo; GISTNodeBuffer *nodeBuffer; -} RelocationBufferInfo; +} RelocationBufferInfo; /* * At page split, distribute tuples from the buffer of the split page to diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c index d97c64ede3..09e911d098 100644 --- a/src/backend/access/gist/gistproc.c +++ b/src/backend/access/gist/gistproc.c @@ -244,7 +244,7 @@ typedef struct int index; /* Delta between penalties of entry insertion into different groups */ double delta; -} CommonEntry; +} CommonEntry; /* * Context for g_box_consider_split. Contains information about currently @@ -267,7 +267,7 @@ typedef struct int dim; /* axis of this split */ double range; /* width of general MBR projection to the * selected axis */ -} ConsiderSplitContext; +} ConsiderSplitContext; /* * Interval represents projection of box to axis. 
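gistMemorizeParent() and gistGetParent() above maintain a child-block-to-parent-block lookup table for the buffering build, keyed by BlockNumber through hash_search() with HASH_ENTER and HASH_FIND. A self-contained sketch of the same idea, with a tiny linear-probing table standing in for the backend's dynahash (the table size and hash multiplier are arbitrary):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNumber;
#define MAP_SIZE 1024			/* must be a power of two */

typedef struct
{
	BlockNumber child;			/* hash key */
	BlockNumber parent;
	int			used;
} ParentMapEntry;

static ParentMapEntry parent_map[MAP_SIZE];

/* Equivalent of hash_search(..., HASH_ENTER, ...): find or create a slot */
static ParentMapEntry *
map_enter(BlockNumber child)
{
	uint32_t	i = (child * 2654435761u) & (MAP_SIZE - 1);

	while (parent_map[i].used && parent_map[i].child != child)
		i = (i + 1) & (MAP_SIZE - 1);	/* linear probing */
	parent_map[i].used = 1;
	parent_map[i].child = child;
	return &parent_map[i];
}

/* Equivalent of HASH_FIND: returns NULL if the child was never memorized */
static ParentMapEntry *
map_find(BlockNumber child)
{
	uint32_t	i = (child * 2654435761u) & (MAP_SIZE - 1);

	while (parent_map[i].used)
	{
		if (parent_map[i].child == child)
			return &parent_map[i];
		i = (i + 1) & (MAP_SIZE - 1);
	}
	return NULL;
}

int
main(void)
{
	ParentMapEntry *e;

	map_enter(42)->parent = 7;	/* cf. gistMemorizeParent(..., 42, 7) */
	e = map_find(42);			/* cf. gistGetParent(..., 42) */
	if (e == NULL)
	{
		fprintf(stderr, "could not find parent of block 42\n");
		return 1;
	}
	printf("parent of 42 is %u\n", e->parent);
	return 0;
}

In the real code a missed lookup is an error: every page's parent is memorized at the moment its downlink is written, so the map is always supposed to know the answer.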
@@ -276,7 +276,7 @@ typedef struct { double lower, upper; -} SplitInterval; +} SplitInterval; /* * Interval comparison function by lower bound of the interval; diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index bf139de824..c9fc9ba97f 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -124,7 +124,7 @@ gistbeginscan(PG_FUNCTION_ARGS) so->giststate = giststate; giststate->tempCxt = createTempGistContext(); so->queue = NULL; - so->queueCxt = giststate->scanCxt; /* see gistrescan */ + so->queueCxt = giststate->scanCxt; /* see gistrescan */ /* workspaces with size dependent on numberOfOrderBys: */ so->tmpTreeItem = palloc(GSTIHDRSZ + sizeof(double) * scan->numberOfOrderBys); diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c index 2ec69a60d4..739fc597ce 100644 --- a/src/backend/access/gist/gistsplit.c +++ b/src/backend/access/gist/gistsplit.c @@ -581,8 +581,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *gist if (v->spl_equiv == NULL) { /* - * simple case: left and right keys for attno column are - * equal + * simple case: left and right keys for attno column are equal */ gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1); } diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 96dabdb48a..bbea5e4eac 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -391,7 +391,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, uint32 ovflbitno; int32 bitmappage, bitmapbit; - Bucket bucket PG_USED_FOR_ASSERTS_ONLY; + Bucket bucket PG_USED_FOR_ASSERTS_ONLY; /* Get information from the doomed page */ _hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 2d81383ae8..9519e73e54 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -223,9 +223,9 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) } /* - * Be sure to check for interrupts at least once per page. Checks at - * higher code levels won't be able to stop a seqscan that encounters - * many pages' worth of consecutive dead tuples. + * Be sure to check for interrupts at least once per page. Checks at + * higher code levels won't be able to stop a seqscan that encounters many + * pages' worth of consecutive dead tuples. */ CHECK_FOR_INTERRUPTS(); @@ -997,8 +997,8 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode) * * Same as relation_openrv, but with an additional missing_ok argument * allowing a NULL return rather than an error if the relation is not - * found. (Note that some other causes, such as permissions problems, - * will still result in an ereport.) + * found. (Note that some other causes, such as permissions problems, + * will still result in an ereport.) * ---------------- */ Relation @@ -1105,7 +1105,7 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode) * by a RangeVar node * * As above, but optionally return NULL instead of failing for - * relation-not-found. + * relation-not-found. * ---------------- */ Relation @@ -1588,10 +1588,10 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, /* * When first_call is true (and thus, skip is initially false) we'll - * return the first tuple we find. But on later passes, heapTuple + * return the first tuple we find. 
But on later passes, heapTuple * will initially be pointing to the tuple we returned last time. - * Returning it again would be incorrect (and would loop forever), - * so we skip it and return the next match we find. + * Returning it again would be incorrect (and would loop forever), so + * we skip it and return the next match we find. */ if (!skip) { @@ -1651,7 +1651,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot, { bool result; Buffer buffer; - HeapTupleData heapTuple; + HeapTupleData heapTuple; buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid)); LockBuffer(buffer, BUFFER_LOCK_SHARE); @@ -1885,14 +1885,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, heaptup = heap_prepare_insert(relation, tup, xid, cid, options); /* - * We're about to do the actual insert -- but check for conflict first, - * to avoid possibly having to roll back work we've just done. + * We're about to do the actual insert -- but check for conflict first, to + * avoid possibly having to roll back work we've just done. * - * For a heap insert, we only need to check for table-level SSI locks. - * Our new tuple can't possibly conflict with existing tuple locks, and - * heap page locks are only consolidated versions of tuple locks; they do - * not lock "gaps" as index page locks do. So we don't need to identify - * a buffer before making the call. + * For a heap insert, we only need to check for table-level SSI locks. Our + * new tuple can't possibly conflict with existing tuple locks, and heap + * page locks are only consolidated versions of tuple locks; they do not + * lock "gaps" as index page locks do. So we don't need to identify a + * buffer before making the call. */ CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); @@ -2123,11 +2123,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, * We're about to do the actual inserts -- but check for conflict first, * to avoid possibly having to roll back work we've just done. * - * For a heap insert, we only need to check for table-level SSI locks. - * Our new tuple can't possibly conflict with existing tuple locks, and - * heap page locks are only consolidated versions of tuple locks; they do - * not lock "gaps" as index page locks do. So we don't need to identify - * a buffer before making the call. + * For a heap insert, we only need to check for table-level SSI locks. Our + * new tuple can't possibly conflict with existing tuple locks, and heap + * page locks are only consolidated versions of tuple locks; they do not + * lock "gaps" as index page locks do. So we don't need to identify a + * buffer before making the call. */ CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); @@ -2137,12 +2137,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, Buffer buffer; Buffer vmbuffer = InvalidBuffer; bool all_visible_cleared = false; - int nthispage; + int nthispage; /* - * Find buffer where at least the next tuple will fit. If the page - * is all-visible, this will also pin the requisite visibility map - * page. + * Find buffer where at least the next tuple will fit. If the page is + * all-visible, this will also pin the requisite visibility map page. 
*/ buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len, InvalidBuffer, options, bistate, @@ -2358,7 +2357,7 @@ heap_delete(Relation relation, ItemPointer tid, ItemId lp; HeapTupleData tp; Page page; - BlockNumber block; + BlockNumber block; Buffer buffer; Buffer vmbuffer = InvalidBuffer; bool have_tuple_lock = false; @@ -2372,10 +2371,10 @@ heap_delete(Relation relation, ItemPointer tid, page = BufferGetPage(buffer); /* - * Before locking the buffer, pin the visibility map page if it appears - * to be necessary. Since we haven't got the lock yet, someone else might - * be in the middle of changing this, so we'll need to recheck after - * we have the lock. + * Before locking the buffer, pin the visibility map page if it appears to + * be necessary. Since we haven't got the lock yet, someone else might be + * in the middle of changing this, so we'll need to recheck after we have + * the lock. */ if (PageIsAllVisible(page)) visibilitymap_pin(relation, block, &vmbuffer); @@ -2717,7 +2716,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, HeapTupleData oldtup; HeapTuple heaptup; Page page; - BlockNumber block; + BlockNumber block; Buffer buffer, newbuf, vmbuffer = InvalidBuffer, @@ -2753,10 +2752,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, page = BufferGetPage(buffer); /* - * Before locking the buffer, pin the visibility map page if it appears - * to be necessary. Since we haven't got the lock yet, someone else might - * be in the middle of changing this, so we'll need to recheck after - * we have the lock. + * Before locking the buffer, pin the visibility map page if it appears to + * be necessary. Since we haven't got the lock yet, someone else might be + * in the middle of changing this, so we'll need to recheck after we have + * the lock. */ if (PageIsAllVisible(page)) visibilitymap_pin(relation, block, &vmbuffer); @@ -2900,11 +2899,11 @@ l2: /* * If we didn't pin the visibility map page and the page has become all - * visible while we were busy locking the buffer, or during some subsequent - * window during which we had it unlocked, we'll have to unlock and - * re-lock, to avoid holding the buffer lock across an I/O. That's a bit - * unfortunate, esepecially since we'll now have to recheck whether the - * tuple has been locked or updated under us, but hopefully it won't + * visible while we were busy locking the buffer, or during some + * subsequent window during which we had it unlocked, we'll have to unlock + * and re-lock, to avoid holding the buffer lock across an I/O. That's a + * bit unfortunate, esepecially since we'll now have to recheck whether + * the tuple has been locked or updated under us, but hopefully it won't * happen very often. */ if (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) @@ -3196,11 +3195,11 @@ l2: /* * Mark old tuple for invalidation from system caches at next command - * boundary, and mark the new tuple for invalidation in case we abort. - * We have to do this before releasing the buffer because oldtup is in - * the buffer. (heaptup is all in local memory, but it's necessary to - * process both tuple versions in one call to inval.c so we can avoid - * redundant sinval messages.) + * boundary, and mark the new tuple for invalidation in case we abort. We + * have to do this before releasing the buffer because oldtup is in the + * buffer. 
(heaptup is all in local memory, but it's necessary to process + * both tuple versions in one call to inval.c so we can avoid redundant + * sinval messages.) */ CacheInvalidateHeapTuple(relation, &oldtup, heaptup); @@ -4069,7 +4068,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid) */ bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, - Buffer buf) + Buffer buf) { TransactionId xid; @@ -4368,9 +4367,9 @@ log_heap_freeze(Relation reln, Buffer buffer, } /* - * Perform XLogInsert for a heap-visible operation. 'block' is the block + * Perform XLogInsert for a heap-visible operation. 'block' is the block * being marked all-visible, and vm_buffer is the buffer containing the - * corresponding visibility map block. Both should have already been modified + * corresponding visibility map block. Both should have already been modified * and dirtied. */ XLogRecPtr @@ -4705,7 +4704,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record) Page page; /* - * Read the heap page, if it still exists. If the heap file has been + * Read the heap page, if it still exists. If the heap file has been * dropped or truncated later in recovery, this might fail. In that case, * there's no point in doing anything further, since the visibility map * will have to be cleared out at the same time. @@ -4731,17 +4730,16 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record) LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); /* - * We don't bump the LSN of the heap page when setting the visibility - * map bit, because that would generate an unworkable volume of - * full-page writes. This exposes us to torn page hazards, but since - * we're not inspecting the existing page contents in any way, we - * don't care. + * We don't bump the LSN of the heap page when setting the visibility map + * bit, because that would generate an unworkable volume of full-page + * writes. This exposes us to torn page hazards, but since we're not + * inspecting the existing page contents in any way, we don't care. * - * However, all operations that clear the visibility map bit *do* bump - * the LSN, and those operations will only be replayed if the XLOG LSN - * follows the page LSN. Thus, if the page LSN has advanced past our - * XLOG record's LSN, we mustn't mark the page all-visible, because - * the subsequent update won't be replayed to clear the flag. + * However, all operations that clear the visibility map bit *do* bump the + * LSN, and those operations will only be replayed if the XLOG LSN follows + * the page LSN. Thus, if the page LSN has advanced past our XLOG + * record's LSN, we mustn't mark the page all-visible, because the + * subsequent update won't be replayed to clear the flag. */ if (!XLByteLE(lsn, PageGetLSN(page))) { @@ -4772,10 +4770,10 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record) * Don't set the bit if replay has already passed this point. * * It might be safe to do this unconditionally; if replay has past - * this point, we'll replay at least as far this time as we did before, - * and if this bit needs to be cleared, the record responsible for - * doing so should be again replayed, and clear it. For right now, - * out of an abundance of conservatism, we use the same test here + * this point, we'll replay at least as far this time as we did + * before, and if this bit needs to be cleared, the record responsible + * for doing so should be again replayed, and clear it. 
For right + * now, out of an abundance of conservatism, we use the same test here * we did for the heap page; if this results in a dropped bit, no real * harm is done; and the next VACUUM will fix it. */ @@ -5183,7 +5181,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update) if (xlrec->all_visible_cleared) { Relation reln = CreateFakeRelcacheEntry(xlrec->target.node); - BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid); + BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid); Buffer vmbuffer = InvalidBuffer; visibilitymap_pin(reln, block, &vmbuffer); @@ -5267,7 +5265,7 @@ newt:; if (xlrec->new_all_visible_cleared) { Relation reln = CreateFakeRelcacheEntry(xlrec->target.node); - BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid); + BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid); Buffer vmbuffer = InvalidBuffer; visibilitymap_pin(reln, block, &vmbuffer); @@ -5690,7 +5688,7 @@ heap2_desc(StringInfo buf, uint8 xl_info, char *rec) else appendStringInfo(buf, "multi-insert: "); appendStringInfo(buf, "rel %u/%u/%u; blk %u; %d tuples", - xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode, + xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode, xlrec->blkno, xlrec->ntuples); } else diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 30ef1bf7e0..19a34923c7 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -109,8 +109,8 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, BlockNumber block1, BlockNumber block2, Buffer *vmbuffer1, Buffer *vmbuffer2) { - bool need_to_pin_buffer1; - bool need_to_pin_buffer2; + bool need_to_pin_buffer1; + bool need_to_pin_buffer2; Assert(BufferIsValid(buffer1)); Assert(buffer2 == InvalidBuffer || buffer1 <= buffer2); @@ -145,7 +145,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, /* * If there are two buffers involved and we pinned just one of them, * it's possible that the second one became all-visible while we were - * busy pinning the first one. If it looks like that's a possible + * busy pinning the first one. If it looks like that's a possible * scenario, we'll need to make a second pass through this loop. */ if (buffer2 == InvalidBuffer || buffer1 == buffer2 @@ -302,11 +302,11 @@ RelationGetBufferForTuple(Relation relation, Size len, * block if one was given, taking suitable care with lock ordering and * the possibility they are the same block. * - * If the page-level all-visible flag is set, caller will need to clear - * both that and the corresponding visibility map bit. However, by the - * time we return, we'll have x-locked the buffer, and we don't want to - * do any I/O while in that state. So we check the bit here before - * taking the lock, and pin the page if it appears necessary. + * If the page-level all-visible flag is set, caller will need to + * clear both that and the corresponding visibility map bit. However, + * by the time we return, we'll have x-locked the buffer, and we don't + * want to do any I/O while in that state. So we check the bit here + * before taking the lock, and pin the page if it appears necessary. * Checking without the lock creates a risk of getting the wrong * answer, so we'll have to recheck after acquiring the lock. */ @@ -347,23 +347,24 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * We now have the target page (and the other buffer, if any) pinned - * and locked. 
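One pattern repeats through the heap_delete(), heap_update(), and RelationGetBufferForTuple() hunks in this stretch: pin the visibility map page before taking the buffer lock (pinning may require I/O), recheck once the lock is held, and if the page became all-visible in the unlocked window, unlock, pin, and relock. A schematic sketch of that retry loop, with a pthread mutex standing in for the buffer lock and every name hypothetical:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
static bool page_all_visible;	/* protected by buffer_lock */
static bool vm_page_pinned;

/*
 * Stand-in for visibilitymap_pin(): may do I/O, so it must never be
 * called while buffer_lock is held.
 */
static void
pin_vm_page(void)
{
	vm_page_pinned = true;
}

static void
lock_buffer_with_vm_pin(void)
{
	/* Check before locking; someone else may change the flag meanwhile */
	if (page_all_visible)
		pin_vm_page();

	pthread_mutex_lock(&buffer_lock);

	/*
	 * Recheck under the lock.  If the page became all-visible while we
	 * weren't looking, we must not do the pin's I/O while holding the
	 * lock, so drop it, pin, and retake it.
	 */
	while (page_all_visible && !vm_page_pinned)
	{
		pthread_mutex_unlock(&buffer_lock);
		pin_vm_page();
		pthread_mutex_lock(&buffer_lock);
	}
}

int
main(void)
{
	lock_buffer_with_vm_pin();
	pthread_mutex_unlock(&buffer_lock);
	return 0;
}

The loop converges because a pin, once obtained, is kept; the comments being re-wrapped above make the same bet that the extra unlock/relock pass is rare.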
However, since our initial PageIsAllVisible checks - * were performed before acquiring the lock, the results might now - * be out of date, either for the selected victim buffer, or for the - * other buffer passed by the caller. In that case, we'll need to give - * up our locks, go get the pin(s) we failed to get earlier, and + * and locked. However, since our initial PageIsAllVisible checks + * were performed before acquiring the lock, the results might now be + * out of date, either for the selected victim buffer, or for the + * other buffer passed by the caller. In that case, we'll need to + * give up our locks, go get the pin(s) we failed to get earlier, and * re-lock. That's pretty painful, but hopefully shouldn't happen * often. * - * Note that there's a small possibility that we didn't pin the - * page above but still have the correct page pinned anyway, either - * because we've already made a previous pass through this loop, or - * because caller passed us the right page anyway. + * Note that there's a small possibility that we didn't pin the page + * above but still have the correct page pinned anyway, either because + * we've already made a previous pass through this loop, or because + * caller passed us the right page anyway. * * Note also that it's possible that by the time we get the pin and * retake the buffer locks, the visibility map bit will have been - * cleared by some other backend anyway. In that case, we'll have done - * a bit of extra work for no gain, but there's no real harm done. + * cleared by some other backend anyway. In that case, we'll have + * done a bit of extra work for no gain, but there's no real harm + * done. */ if (otherBuffer == InvalidBuffer || buffer <= otherBuffer) GetVisibilityMapPins(relation, buffer, otherBuffer, diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index 28b5a20ae7..050f048a9b 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -75,7 +75,7 @@ do { \ static void toast_delete_datum(Relation rel, Datum value); static Datum toast_save_datum(Relation rel, Datum value, - struct varlena *oldexternal, int options); + struct varlena * oldexternal, int options); static bool toastrel_valueid_exists(Relation toastrel, Oid valueid); static bool toastid_valueid_exists(Oid toastrelid, Oid valueid); static struct varlena *toast_fetch_datum(struct varlena * attr); @@ -1233,7 +1233,7 @@ toast_compress_datum(Datum value) */ static Datum toast_save_datum(Relation rel, Datum value, - struct varlena *oldexternal, int options) + struct varlena * oldexternal, int options) { Relation toastrel; Relation toastidx; @@ -1353,7 +1353,7 @@ toast_save_datum(Relation rel, Datum value, * those versions could easily reference the same toast value. * When we copy the second or later version of such a row, * reusing the OID will mean we select an OID that's already - * in the new toast table. Check for that, and if so, just + * in the new toast table. Check for that, and if so, just * fall through without writing the data again. 
* * While annoying and ugly-looking, this is a good thing diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 9152c7d151..eb5625906f 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -16,7 +16,7 @@ * visibilitymap_pin_ok - check whether correct map page is already pinned * visibilitymap_set - set a bit in a previously pinned page * visibilitymap_test - test if a bit is set - * visibilitymap_count - count number of bits set in visibility map + * visibilitymap_count - count number of bits set in visibility map * visibilitymap_truncate - truncate the visibility map * * NOTES @@ -27,7 +27,7 @@ * the sense that we make sure that whenever a bit is set, we know the * condition is true, but if a bit is not set, it might or might not be true. * - * Clearing a visibility map bit is not separately WAL-logged. The callers + * Clearing a visibility map bit is not separately WAL-logged. The callers * must make sure that whenever a bit is cleared, the bit is cleared on WAL * replay of the updating operation as well. * @@ -36,9 +36,9 @@ * it may still be the case that every tuple on the page is visible to all * transactions; we just don't know that for certain. The difficulty is that * there are two bits which are typically set together: the PD_ALL_VISIBLE bit - * on the page itself, and the visibility map bit. If a crash occurs after the + * on the page itself, and the visibility map bit. If a crash occurs after the * visibility map page makes it to disk and before the updated heap page makes - * it to disk, redo must set the bit on the heap page. Otherwise, the next + * it to disk, redo must set the bit on the heap page. Otherwise, the next * insert, update, or delete on the heap page will fail to realize that the * visibility map bit must be cleared, possibly causing index-only scans to * return wrong answers. @@ -59,10 +59,10 @@ * the buffer lock over any I/O that may be required to read in the visibility * map page. To avoid this, we examine the heap page before locking it; * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map - * bit. Then, we lock the buffer. But this creates a race condition: there + * bit. Then, we lock the buffer. But this creates a race condition: there * is a possibility that in the time it takes to lock the buffer, the * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the - * buffer, pin the visibility map page, and relock the buffer. This shouldn't + * buffer, pin the visibility map page, and relock the buffer. This shouldn't * happen often, because only VACUUM currently sets visibility map bits, * and the race will only occur if VACUUM processes a given page at almost * exactly the same time that someone tries to further modify it. @@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf) * visibilitymap_set - set a bit on a previously pinned page * * recptr is the LSN of the XLOG record we're replaying, if we're in recovery, - * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the + * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the * one provided; in normal running, we generate a new XLOG record and set the - * page LSN to that value. cutoff_xid is the largest xmin on the page being + * page LSN to that value. cutoff_xid is the largest xmin on the page being * marked all-visible; it is needed for Hot Standby, and can be * InvalidTransactionId if the page contains no tuples. 
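The visibilitymap.c notes being re-flowed here rest on a one-sided guarantee: a set bit promises that every tuple on the heap block is visible to all transactions, while a clear bit only means "not known". That asymmetry is why setting a bit can use the relaxed WAL treatment described above, while clearing must be tied to replay of the modifying operation. In miniature:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

#define MAP_BYTES 128			/* covers 1024 heap blocks, one bit each */

static uint8_t vm[MAP_BYTES];

/* Set the bit: only legal once every tuple on the block is known visible */
static void
vm_set(unsigned blk)
{
	vm[blk / 8] |= (uint8_t) (1 << (blk % 8));
}

/* Clear the bit: required whenever the block is modified */
static void
vm_clear(unsigned blk)
{
	vm[blk / 8] &= (uint8_t) ~(1 << (blk % 8));
}

/*
 * Test the bit.  A true result is a guarantee; a false result only means
 * "don't know".  Losing a set is therefore harmless, while losing a clear
 * is not, which is exactly the crash-ordering concern the notes describe.
 */
static bool
vm_test(unsigned blk)
{
	return (vm[blk / 8] & (1 << (blk % 8))) != 0;
}

int
main(void)
{
	memset(vm, 0, sizeof(vm));
	vm_set(3);
	printf("blk 3: %d, blk 4: %d\n", vm_test(3), vm_test(4));
	vm_clear(3);
	printf("blk 3 after clear: %d\n", vm_test(3));
	return 0;
}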
* @@ -295,10 +295,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr, * releasing *buf after it's done testing and setting bits. * * NOTE: This function is typically called without a lock on the heap page, - * so somebody else could change the bit just after we look at it. In fact, + * so somebody else could change the bit just after we look at it. In fact, * since we don't lock the visibility map page either, it's even possible that * someone else could have changed the bit just before we look at it, but yet - * we might see the old value. It is the caller's responsibility to deal with + * we might see the old value. It is the caller's responsibility to deal with * all concurrency issues! */ bool @@ -344,7 +344,7 @@ visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *buf) } /* - * visibilitymap_count - count number of bits set in visibility map + * visibilitymap_count - count number of bits set in visibility map * * Note: we ignore the possibility of race conditions when the table is being * extended concurrently with the call. New pages added to the table aren't @@ -356,16 +356,16 @@ visibilitymap_count(Relation rel) BlockNumber result = 0; BlockNumber mapBlock; - for (mapBlock = 0; ; mapBlock++) + for (mapBlock = 0;; mapBlock++) { Buffer mapBuffer; unsigned char *map; int i; /* - * Read till we fall off the end of the map. We assume that any - * extra bytes in the last page are zeroed, so we don't bother - * excluding them from the count. + * Read till we fall off the end of the map. We assume that any extra + * bytes in the last page are zeroed, so we don't bother excluding + * them from the count. */ mapBuffer = vm_readbuf(rel, mapBlock, false); if (!BufferIsValid(mapBuffer)) @@ -496,11 +496,11 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend) Buffer buf; /* - * We might not have opened the relation at the smgr level yet, or we might - * have been forced to close it by a sinval message. The code below won't - * necessarily notice relation extension immediately when extend = false, - * so we rely on sinval messages to ensure that our ideas about the size of - * the map aren't too far out of date. + * We might not have opened the relation at the smgr level yet, or we + * might have been forced to close it by a sinval message. The code below + * won't necessarily notice relation extension immediately when extend = + * false, so we rely on sinval messages to ensure that our ideas about the + * size of the map aren't too far out of date. 
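visibilitymap_count() above simply walks map buffers until vm_readbuf() reports there are no more, totaling the set bits; since any padding bytes in the last page are zeroed, they can be counted without being excluded. One way to count the bits in each buffer (a sketch, not the backend's actual implementation):

#include <stdio.h>
#include <stdint.h>

/*
 * Count set bits in a buffer the way a visibility-map count would:
 * trailing zero padding contributes nothing, so it needn't be trimmed.
 */
static unsigned
count_set_bits(const uint8_t *buf, unsigned len)
{
	unsigned	total = 0;
	unsigned	i;

	for (i = 0; i < len; i++)
	{
		uint8_t		b = buf[i];

		while (b)
		{
			b &= (uint8_t) (b - 1);		/* clear lowest set bit */
			total++;
		}
	}
	return total;
}

int
main(void)
{
	uint8_t		page[8] = {0xFF, 0x01, 0, 0, 0, 0, 0, 0x80};

	printf("%u bits set\n", count_set_bits(page, sizeof(page)));
	return 0;
}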
*/ RelationOpenSmgr(rel); diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index d54b669bf3..26fd9b6e11 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -93,7 +93,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys) else scan->orderByData = NULL; - scan->xs_want_itup = false; /* may be set later */ + scan->xs_want_itup = false; /* may be set later */ /* * During recovery we ignore killed tuples and don't bother to kill them diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 16ac4e1b9f..d64df319c5 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -435,7 +435,7 @@ index_restrpos(IndexScanDesc scan) ItemPointer index_getnext_tid(IndexScanDesc scan, ScanDirection direction) { - FmgrInfo *procedure; + FmgrInfo *procedure; bool found; SCAN_CHECKS; @@ -495,7 +495,7 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction) HeapTuple index_fetch_heap(IndexScanDesc scan) { - ItemPointer tid = &scan->xs_ctup.t_self; + ItemPointer tid = &scan->xs_ctup.t_self; bool all_dead = false; bool got_heap_tuple; @@ -530,8 +530,8 @@ index_fetch_heap(IndexScanDesc scan) if (got_heap_tuple) { /* - * Only in a non-MVCC snapshot can more than one member of the - * HOT chain be visible. + * Only in a non-MVCC snapshot can more than one member of the HOT + * chain be visible. */ scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot); pgstat_count_heap_fetch(scan->indexRelation); @@ -544,7 +544,7 @@ index_fetch_heap(IndexScanDesc scan) /* * If we scanned a whole HOT chain and found only dead tuples, tell index * AM to kill its entry for that TID (this will take effect in the next - * amgettuple call, in index_getnext_tid). We do not do this when in + * amgettuple call, in index_getnext_tid). We do not do this when in * recovery because it may violate MVCC to do so. See comments in * RelationGetIndexScan(). 
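The index_fetch_heap() hunk above encodes two facts about HOT chains: under an MVCC snapshot at most one chain member can be visible, and if an entire chain turns out to be dead the index AM can be told to kill its entry for that TID. A toy model of the chain walk, with booleans standing in for the real visibility checks:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* A toy tuple version in a HOT-style update chain (illustrative only) */
typedef struct TupleVersion
{
	int			value;
	bool		visible;		/* "visible to our snapshot" stand-in */
	bool		dead;			/* "dead to everyone" stand-in */
	struct TupleVersion *next;	/* next version in the chain */
} TupleVersion;

/*
 * Walk the chain and return the first visible version.  If every member
 * turned out to be dead, report that through *all_dead so the caller can
 * ask the index AM to kill its entry for this TID.
 */
static TupleVersion *
fetch_from_chain(TupleVersion *chain, bool *all_dead)
{
	TupleVersion *v;

	*all_dead = true;
	for (v = chain; v != NULL; v = v->next)
	{
		if (v->visible)
		{
			*all_dead = false;
			return v;
		}
		if (!v->dead)
			*all_dead = false;	/* somebody might still see it */
	}
	return NULL;
}

int
main(void)
{
	TupleVersion v2 = {2, true, false, NULL};
	TupleVersion v1 = {1, false, true, &v2};
	bool		all_dead;
	TupleVersion *hit = fetch_from_chain(&v1, &all_dead);

	printf("found %d, all_dead=%d\n", hit ? hit->value : -1, all_dead);
	return 0;
}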
*/ diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c index fedde934a3..d610bef798 100644 --- a/src/backend/access/nbtree/nbtcompare.c +++ b/src/backend/access/nbtree/nbtcompare.c @@ -82,7 +82,7 @@ btint2fastcmp(Datum x, Datum y, SortSupport ssup) Datum btint2sortsupport(PG_FUNCTION_ARGS) { - SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); + SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); ssup->comparator = btint2fastcmp; PG_RETURN_VOID(); @@ -119,7 +119,7 @@ btint4fastcmp(Datum x, Datum y, SortSupport ssup) Datum btint4sortsupport(PG_FUNCTION_ARGS) { - SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); + SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); ssup->comparator = btint4fastcmp; PG_RETURN_VOID(); @@ -156,7 +156,7 @@ btint8fastcmp(Datum x, Datum y, SortSupport ssup) Datum btint8sortsupport(PG_FUNCTION_ARGS) { - SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); + SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); ssup->comparator = btint8fastcmp; PG_RETURN_VOID(); @@ -277,7 +277,7 @@ btoidfastcmp(Datum x, Datum y, SortSupport ssup) Datum btoidsortsupport(PG_FUNCTION_ARGS) { - SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); + SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); ssup->comparator = btoidfastcmp; PG_RETURN_VOID(); @@ -338,7 +338,7 @@ btnamefastcmp(Datum x, Datum y, SortSupport ssup) Datum btnamesortsupport(PG_FUNCTION_ARGS) { - SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); + SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); ssup->comparator = btnamefastcmp; PG_RETURN_VOID(); diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index e6dec618c7..016ce2283c 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -1362,7 +1362,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack) * we're in VACUUM and would not otherwise have an XID. Having already * updated links to the target, ReadNewTransactionId() suffices as an * upper bound. Any scan having retained a now-stale link is advertising - * in its PGXACT an xmin less than or equal to the value we read here. It + * in its PGXACT an xmin less than or equal to the value we read here. It * will continue to do so, holding back RecentGlobalXmin, for the duration * of that scan. */ diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 184fc3bb79..41d06edb15 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -433,7 +433,7 @@ btbeginscan(PG_FUNCTION_ARGS) /* * We don't know yet whether the scan will be index-only, so we do not - * allocate the tuple workspace arrays until btrescan. However, we set up + * allocate the tuple workspace arrays until btrescan. However, we set up * scan->xs_itupdesc whether we'll need it or not, since that's so cheap. */ so->currTuples = so->markTuples = NULL; @@ -478,7 +478,7 @@ btrescan(PG_FUNCTION_ARGS) /* * Allocate tuple workspace arrays, if needed for an index-only scan and - * not already done in a previous rescan call. To save on palloc + * not already done in a previous rescan call. To save on palloc * overhead, both workspaces are allocated as one palloc block; only this * function and btendscan know that. 
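The btrescan() comment just above notes that the two tuple workspaces share a single palloc block, an arrangement only btrescan() and btendscan() are in on. The trick itself is plain pointer arithmetic; a sketch with malloc standing in for palloc and an illustrative size:

#include <stdio.h>
#include <stdlib.h>

#define BLCKSZ 8192				/* illustrative workspace size */

int
main(void)
{
	char	   *currTuples;
	char	   *markTuples;

	/*
	 * One allocation serves both workspaces; only the code that allocates
	 * and the code that frees need to know that.
	 */
	currTuples = malloc(2 * BLCKSZ);
	if (currTuples == NULL)
		return 1;
	markTuples = currTuples + BLCKSZ;

	snprintf(currTuples, BLCKSZ, "current-scan workspace");
	snprintf(markTuples, BLCKSZ, "mark/restore workspace");
	printf("%s / %s\n", currTuples, markTuples);

	free(currTuples);			/* releases both halves at once */
	return 0;
}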
* diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index b701c3f819..e0c952368b 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -564,11 +564,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) ScanKeyEntryInitialize(chosen, (SK_SEARCHNOTNULL | SK_ISNULL | (impliesNN->sk_flags & - (SK_BT_DESC | SK_BT_NULLS_FIRST))), + (SK_BT_DESC | SK_BT_NULLS_FIRST))), curattr, - ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ? - BTGreaterStrategyNumber : - BTLessStrategyNumber), + ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ? + BTGreaterStrategyNumber : + BTLessStrategyNumber), InvalidOid, InvalidOid, InvalidOid, diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index f79ce552b6..33ad8915f5 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -37,10 +37,10 @@ typedef struct BTSortArrayContext static Datum _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey, StrategyNumber strat, Datum *elems, int nelems); -static int _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey, +static int _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey, bool reverse, Datum *elems, int nelems); -static int _bt_compare_array_elements(const void *a, const void *b, void *arg); +static int _bt_compare_array_elements(const void *a, const void *b, void *arg); static bool _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, ScanKey leftarg, ScanKey rightarg, bool *result); @@ -227,8 +227,8 @@ _bt_preprocess_array_keys(IndexScanDesc scan) } /* - * Make a scan-lifespan context to hold array-associated data, or reset - * it if we already have one from a previous rescan cycle. + * Make a scan-lifespan context to hold array-associated data, or reset it + * if we already have one from a previous rescan cycle. */ if (so->arrayContext == NULL) so->arrayContext = AllocSetContextCreate(CurrentMemoryContext, @@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan) continue; /* - * First, deconstruct the array into elements. Anything allocated + * First, deconstruct the array into elements. Anything allocated * here (including a possibly detoasted array value) is in the * workspace context. */ @@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan) &elem_values, &elem_nulls, &num_elems); /* - * Compress out any null elements. We can ignore them since we assume + * Compress out any null elements. We can ignore them since we assume * all btree operators are strict. */ num_nonnulls = 0; @@ -338,7 +338,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan) * successive primitive indexscans produce data in index order. */ num_elems = _bt_sort_array_elements(scan, cur, - (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0, + (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0, elem_values, num_nonnulls); /* @@ -387,9 +387,10 @@ _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey, /* * Look up the appropriate comparison operator in the opfamily. * - * Note: it's possible that this would fail, if the opfamily is incomplete, - * but it seems quite unlikely that an opfamily would omit non-cross-type - * comparison operators for any datatype that it supports at all. + * Note: it's possible that this would fail, if the opfamily is + * incomplete, but it seems quite unlikely that an opfamily would omit + * non-cross-type comparison operators for any datatype that it supports + * at all. 
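_bt_preprocess_array_keys() above deconstructs each array, drops NULL elements (all btree operators are assumed strict), then sorts and de-duplicates what remains so that successive primitive index scans produce data in index order. A self-contained sketch over ints; the real code threads a comparison function and a direction flag through qsort_arg(), which standard qsort() lacks, hence the file-static flag here:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/*
 * Comparator in the spirit of _bt_compare_array_elements; the "reverse"
 * flag plays the role of a DESC index column.
 */
static bool sort_reverse = false;

static int
cmp_elem(const void *a, const void *b)
{
	int			av = *(const int *) a;
	int			bv = *(const int *) b;
	int			r = (av > bv) - (av < bv);

	return sort_reverse ? -r : r;
}

/*
 * Sort the elements and compress out duplicates, returning the new count,
 * so that successive "= elem" primitive scans visit the index in order.
 */
static int
sort_array_elements(int *elems, int nelems, bool reverse)
{
	int			i,
				last;

	if (nelems <= 1)
		return nelems;
	sort_reverse = reverse;
	qsort(elems, nelems, sizeof(int), cmp_elem);
	last = 0;
	for (i = 1; i < nelems; i++)
	{
		if (elems[i] != elems[last])
			elems[++last] = elems[i];
	}
	return last + 1;
}

int
main(void)
{
	int			keys[] = {5, 1, 5, 3, 1};
	int			n = sort_array_elements(keys, 5, false);
	int			i;

	for (i = 0; i < n; i++)
		printf("%d ", keys[i]);	/* prints: 1 3 5 */
	printf("\n");
	return 0;
}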
*/ cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1], elemtype, @@ -455,9 +456,10 @@ _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey, /* * Look up the appropriate comparison function in the opfamily. * - * Note: it's possible that this would fail, if the opfamily is incomplete, - * but it seems quite unlikely that an opfamily would omit non-cross-type - * support functions for any datatype that it supports at all. + * Note: it's possible that this would fail, if the opfamily is + * incomplete, but it seems quite unlikely that an opfamily would omit + * non-cross-type support functions for any datatype that it supports at + * all. */ cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1], elemtype, @@ -515,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg) * _bt_start_array_keys() -- Initialize array keys at start of a scan * * Set up the cur_elem counters and fill in the first sk_argument value for - * each array scankey. We can't do this until we know the scan direction. + * each array scankey. We can't do this until we know the scan direction. */ void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir) @@ -609,8 +611,8 @@ _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir) * so that the index sorts in the desired direction. * * One key purpose of this routine is to discover which scan keys must be - * satisfied to continue the scan. It also attempts to eliminate redundant - * keys and detect contradictory keys. (If the index opfamily provides + * satisfied to continue the scan. It also attempts to eliminate redundant + * keys and detect contradictory keys. (If the index opfamily provides * incomplete sets of cross-type operators, we may fail to detect redundant * or contradictory keys, but we can survive that.) * @@ -676,7 +678,7 @@ _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir) * Note: the reason we have to copy the preprocessed scan keys into private * storage is that we are modifying the array based on comparisons of the * key argument values, which could change on a rescan or after moving to - * new elements of array keys. Therefore we can't overwrite the source data. + * new elements of array keys. Therefore we can't overwrite the source data. */ void _bt_preprocess_keys(IndexScanDesc scan) @@ -781,8 +783,8 @@ _bt_preprocess_keys(IndexScanDesc scan) * set qual_ok to false and abandon further processing. * * We also have to deal with the case of "key IS NULL", which is - * unsatisfiable in combination with any other index condition. - * By the time we get here, that's been classified as an equality + * unsatisfiable in combination with any other index condition. By + * the time we get here, that's been classified as an equality * check, and we've rejected any combination of it with a regular * equality condition; but not with other types of conditions. */ @@ -1421,12 +1423,12 @@ _bt_checkkeys(IndexScanDesc scan, /* * Since NULLs are sorted before non-NULLs, we know we have * reached the lower limit of the range of values for this - * index attr. On a backward scan, we can stop if this qual + * index attr. On a backward scan, we can stop if this qual * is one of the "must match" subset. We can stop regardless * of whether the qual is > or <, so long as it's required, - * because it's not possible for any future tuples to pass. - * On a forward scan, however, we must keep going, because we - * may have initially positioned to the start of the index. 
+ * because it's not possible for any future tuples to pass. On + * a forward scan, however, we must keep going, because we may + * have initially positioned to the start of the index. */ if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) && ScanDirectionIsBackward(dir)) @@ -1437,11 +1439,11 @@ _bt_checkkeys(IndexScanDesc scan, /* * Since NULLs are sorted after non-NULLs, we know we have * reached the upper limit of the range of values for this - * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. We can stop regardless of + * index attr. On a forward scan, we can stop if this qual is + * one of the "must match" subset. We can stop regardless of * whether the qual is > or <, so long as it's required, - * because it's not possible for any future tuples to pass. - * On a backward scan, however, we must keep going, because we + * because it's not possible for any future tuples to pass. On + * a backward scan, however, we must keep going, because we * may have initially positioned to the end of the index. */ if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) && @@ -1532,12 +1534,12 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Since NULLs are sorted before non-NULLs, we know we have * reached the lower limit of the range of values for this - * index attr. On a backward scan, we can stop if this qual + * index attr. On a backward scan, we can stop if this qual * is one of the "must match" subset. We can stop regardless * of whether the qual is > or <, so long as it's required, - * because it's not possible for any future tuples to pass. - * On a forward scan, however, we must keep going, because we - * may have initially positioned to the start of the index. + * because it's not possible for any future tuples to pass. On + * a forward scan, however, we must keep going, because we may + * have initially positioned to the start of the index. */ if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) && ScanDirectionIsBackward(dir)) @@ -1548,11 +1550,11 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Since NULLs are sorted after non-NULLs, we know we have * reached the upper limit of the range of values for this - * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. We can stop regardless of + * index attr. On a forward scan, we can stop if this qual is + * one of the "must match" subset. We can stop regardless of * whether the qual is > or <, so long as it's required, - * because it's not possible for any future tuples to pass. - * On a backward scan, however, we must keep going, because we + * because it's not possible for any future tuples to pass. On + * a backward scan, however, we must keep going, because we * may have initially positioned to the end of the index. */ if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) && diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 98a7bea742..b3f8f6a231 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -24,7 +24,7 @@ /* * SPPageDesc tracks all info about a page we are inserting into. In some * situations it actually identifies a tuple, or even a specific node within - * an inner tuple. But any of the fields can be invalid. If the buffer + * an inner tuple. But any of the fields can be invalid. If the buffer * field is valid, it implies we hold pin and exclusive lock on that buffer. 
* page pointer should be valid exactly when buffer is. */ @@ -129,8 +129,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page, int firststate, int reststate, BlockNumber blkno, OffsetNumber offnum) { - OffsetNumber firstItem; - OffsetNumber *sortednos; + OffsetNumber firstItem; + OffsetNumber *sortednos; SpGistDeadTuple tuple = NULL; int i; @@ -155,8 +155,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page, for (i = 0; i < nitems; i++) { - OffsetNumber itemno = sortednos[i]; - int tupstate; + OffsetNumber itemno = sortednos[i]; + int tupstate; tupstate = (itemno == firstItem) ? firststate : reststate; if (tuple == NULL || tuple->tupstate != tupstate) @@ -200,7 +200,7 @@ saveNodeLink(Relation index, SPPageDesc *parent, */ static void addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, - SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew) + SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew) { XLogRecData rdata[4]; spgxlogAddLeaf xlrec; @@ -230,7 +230,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, /* Tuple is not part of a chain */ leafTuple->nextOffset = InvalidOffsetNumber; current->offnum = SpGistPageAddNewItem(state, current->page, - (Item) leafTuple, leafTuple->size, + (Item) leafTuple, leafTuple->size, NULL, false); xlrec.offnumLeaf = current->offnum; @@ -250,9 +250,9 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, else { /* - * Tuple must be inserted into existing chain. We mustn't change - * the chain's head address, but we don't need to chase the entire - * chain to put the tuple at the end; we can insert it second. + * Tuple must be inserted into existing chain. We mustn't change the + * chain's head address, but we don't need to chase the entire chain + * to put the tuple at the end; we can insert it second. * * Also, it's possible that the "chain" consists only of a DEAD tuple, * in which case we should replace the DEAD tuple in-place. @@ -261,7 +261,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, OffsetNumber offnum; head = (SpGistLeafTuple) PageGetItem(current->page, - PageGetItemId(current->page, current->offnum)); + PageGetItemId(current->page, current->offnum)); if (head->tupstate == SPGIST_LIVE) { leafTuple->nextOffset = head->nextOffset; @@ -274,7 +274,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, * and set new second element */ head = (SpGistLeafTuple) PageGetItem(current->page, - PageGetItemId(current->page, current->offnum)); + PageGetItemId(current->page, current->offnum)); head->nextOffset = offnum; xlrec.offnumLeaf = offnum; @@ -483,7 +483,7 @@ moveLeafs(Relation index, SpGistState *state, for (i = 0; i < nDelete; i++) { it = (SpGistLeafTuple) PageGetItem(current->page, - PageGetItemId(current->page, toDelete[i])); + PageGetItemId(current->page, toDelete[i])); Assert(it->tupstate == SPGIST_LIVE); /* @@ -516,12 +516,12 @@ moveLeafs(Relation index, SpGistState *state, leafptr += newLeafTuple->size; /* - * Now delete the old tuples, leaving a redirection pointer behind for - * the first one, unless we're doing an index build; in which case there - * can't be any concurrent scan so we need not provide a redirect. + * Now delete the old tuples, leaving a redirection pointer behind for the + * first one, unless we're doing an index build; in which case there can't + * be any concurrent scan so we need not provide a redirect. 
*/ spgPageIndexMultiDelete(state, current->page, toDelete, nDelete, - state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT, + state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT, SPGIST_PLACEHOLDER, nblkno, r); @@ -575,7 +575,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position, SpGistDeadTuple dt; dt = (SpGistDeadTuple) PageGetItem(current->page, - PageGetItemId(current->page, position)); + PageGetItemId(current->page, position)); Assert(dt->tupstate == SPGIST_REDIRECT); Assert(ItemPointerGetBlockNumber(&dt->pointer) == SPGIST_METAPAGE_BLKNO); ItemPointerSet(&dt->pointer, blkno, offnum); @@ -640,7 +640,7 @@ checkAllTheSame(spgPickSplitIn *in, spgPickSplitOut *out, bool tooBig, /* The opclass may not use node labels, but if it does, duplicate 'em */ if (out->nodeLabels) { - Datum theLabel = out->nodeLabels[theNode]; + Datum theLabel = out->nodeLabels[theNode]; out->nodeLabels = (Datum *) palloc(sizeof(Datum) * out->nNodes); for (i = 0; i < out->nNodes; i++) @@ -754,8 +754,8 @@ doPickSplit(Relation index, SpGistState *state, { /* * We are splitting the root (which up to now is also a leaf page). - * Its tuples are not linked, so scan sequentially to get them all. - * We ignore the original value of current->offnum. + * Its tuples are not linked, so scan sequentially to get them all. We + * ignore the original value of current->offnum. */ for (i = FirstOffsetNumber; i <= max; i++) { @@ -773,7 +773,7 @@ doPickSplit(Relation index, SpGistState *state, /* we will delete the tuple altogether, so count full space */ spaceToDelete += it->size + sizeof(ItemIdData); } - else /* tuples on root should be live */ + else /* tuples on root should be live */ elog(ERROR, "unexpected SPGiST tuple state: %d", it->tupstate); } } @@ -820,7 +820,7 @@ doPickSplit(Relation index, SpGistState *state, * We may not actually insert new tuple because another picksplit may be * necessary due to too large value, but we will try to allocate enough * space to include it; and in any case it has to be included in the input - * for the picksplit function. So don't increment nToInsert yet. + * for the picksplit function. So don't increment nToInsert yet. */ in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state); heapPtrs[in.nTuples] = newLeafTuple->heapPtr; @@ -878,7 +878,7 @@ doPickSplit(Relation index, SpGistState *state, /* * Check to see if the picksplit function failed to separate the values, * ie, it put them all into the same child node. If so, select allTheSame - * mode and create a random split instead. See comments for + * mode and create a random split instead. See comments for * checkAllTheSame as to why we need to know if the new leaf tuples could * fit on one page. */ @@ -924,8 +924,8 @@ doPickSplit(Relation index, SpGistState *state, innerTuple->allTheSame = allTheSame; /* - * Update nodes[] array to point into the newly formed innerTuple, so - * that we can adjust their downlinks below. + * Update nodes[] array to point into the newly formed innerTuple, so that + * we can adjust their downlinks below. */ SGITITERATE(innerTuple, i, node) { @@ -944,13 +944,13 @@ doPickSplit(Relation index, SpGistState *state, } /* - * To perform the split, we must insert a new inner tuple, which can't - * go on a leaf page; and unless we are splitting the root page, we - * must then update the parent tuple's downlink to point to the inner - * tuple. If there is room, we'll put the new inner tuple on the same - * page as the parent tuple, otherwise we need another non-leaf buffer. 
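Aside: checkAllTheSame, referenced in the hunks above, first has to detect that the opclass picksplit failed to separate its inputs. The detection is just a scan of the tuple-to-node map; a toy version of that check:

#include <stdbool.h>

/* Return true if all nTuples inputs were mapped to the same node. */
static bool
all_the_same(const int *mapTuplesToNodes, int nTuples)
{
    int         first = mapTuplesToNodes[0];
    int         i;

    for (i = 1; i < nTuples; i++)
    {
        if (mapTuplesToNodes[i] != first)
            return false;
    }
    return true;
}

When it trips, doPickSplit falls back to allTheSame mode and forces an even split, as the comment above says, so the tree keeps making progress.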
- * But if the parent page is the root, we can't add the new inner tuple - * there, because the root page must have only one inner tuple. + * To perform the split, we must insert a new inner tuple, which can't go + * on a leaf page; and unless we are splitting the root page, we must then + * update the parent tuple's downlink to point to the inner tuple. If + * there is room, we'll put the new inner tuple on the same page as the + * parent tuple, otherwise we need another non-leaf buffer. But if the + * parent page is the root, we can't add the new inner tuple there, + * because the root page must have only one inner tuple. */ xlrec.initInner = false; if (parent->buffer != InvalidBuffer && @@ -965,9 +965,9 @@ doPickSplit(Relation index, SpGistState *state, { /* Send tuple to page with next triple parity (see README) */ newInnerBuffer = SpGistGetBuffer(index, - GBUF_INNER_PARITY(parent->blkno + 1) | + GBUF_INNER_PARITY(parent->blkno + 1) | (isNulls ? GBUF_NULLS : 0), - innerTuple->size + sizeof(ItemIdData), + innerTuple->size + sizeof(ItemIdData), &xlrec.initInner); } else @@ -977,22 +977,22 @@ doPickSplit(Relation index, SpGistState *state, } /* - * Because a WAL record can't involve more than four buffers, we can - * only afford to deal with two leaf pages in each picksplit action, - * ie the current page and at most one other. + * Because a WAL record can't involve more than four buffers, we can only + * afford to deal with two leaf pages in each picksplit action, ie the + * current page and at most one other. * - * The new leaf tuples converted from the existing ones should require - * the same or less space, and therefore should all fit onto one page + * The new leaf tuples converted from the existing ones should require the + * same or less space, and therefore should all fit onto one page * (although that's not necessarily the current page, since we can't * delete the old tuples but only replace them with placeholders). - * However, the incoming new tuple might not also fit, in which case - * we might need another picksplit cycle to reduce it some more. + * However, the incoming new tuple might not also fit, in which case we + * might need another picksplit cycle to reduce it some more. * - * If there's not room to put everything back onto the current page, - * then we decide on a per-node basis which tuples go to the new page. - * (We do it like that because leaf tuple chains can't cross pages, - * so we must place all leaf tuples belonging to the same parent node - * on the same page.) + * If there's not room to put everything back onto the current page, then + * we decide on a per-node basis which tuples go to the new page. (We do + * it like that because leaf tuple chains can't cross pages, so we must + * place all leaf tuples belonging to the same parent node on the same + * page.) * * If we are splitting the root page (turning it from a leaf page into an * inner page), then no leaf tuples can go back to the current page; they @@ -1037,12 +1037,13 @@ doPickSplit(Relation index, SpGistState *state, int newspace; newLeafBuffer = SpGistGetBuffer(index, - GBUF_LEAF | (isNulls ? GBUF_NULLS : 0), + GBUF_LEAF | (isNulls ? GBUF_NULLS : 0), Min(totalLeafSizes, SPGIST_PAGE_CAPACITY), &xlrec.initDest); + /* - * Attempt to assign node groups to the two pages. We might fail to + * Attempt to assign node groups to the two pages. We might fail to * do so, even if totalLeafSizes is less than the available space, * because we can't split a group across pages. 
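Aside: the SpGistGetBuffer call above requests a page of the "next triple parity". Per the SP-GiST README, inner pages are grouped by block number modulo 3, and a child inner tuple goes in the group after its parent's, so a parent and its child never share a page. A sketch of the arithmetic (GBUF_INNER_PARITY is assumed here to reduce to a plain modulo, which matches the call site above):

typedef unsigned int BlockNumber;

/* A page's "parity" is just its block number modulo 3. */
static int
inner_parity(BlockNumber blkno)
{
    return (int) (blkno % 3);
}

/*
 * Parity requested for a child of a tuple living on parentBlkno: the
 * next one in the cycle 0 -> 1 -> 2 -> 0, mirroring the
 * GBUF_INNER_PARITY(parent->blkno + 1) request above.
 */
static int
child_parity(BlockNumber parentBlkno)
{
    return (int) ((parentBlkno + 1) % 3);
}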
*/ @@ -1054,12 +1055,12 @@ doPickSplit(Relation index, SpGistState *state, { if (leafSizes[i] <= curspace) { - nodePageSelect[i] = 0; /* signifies current page */ + nodePageSelect[i] = 0; /* signifies current page */ curspace -= leafSizes[i]; } else { - nodePageSelect[i] = 1; /* signifies new leaf page */ + nodePageSelect[i] = 1; /* signifies new leaf page */ newspace -= leafSizes[i]; } } @@ -1075,7 +1076,7 @@ doPickSplit(Relation index, SpGistState *state, else if (includeNew) { /* We must exclude the new leaf tuple from the split */ - int nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1]; + int nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1]; leafSizes[nodeOfNewTuple] -= newLeafs[in.nTuples - 1]->size + sizeof(ItemIdData); @@ -1087,12 +1088,12 @@ doPickSplit(Relation index, SpGistState *state, { if (leafSizes[i] <= curspace) { - nodePageSelect[i] = 0; /* signifies current page */ + nodePageSelect[i] = 0; /* signifies current page */ curspace -= leafSizes[i]; } else { - nodePageSelect[i] = 1; /* signifies new leaf page */ + nodePageSelect[i] = 1; /* signifies new leaf page */ newspace -= leafSizes[i]; } } @@ -1204,7 +1205,7 @@ doPickSplit(Relation index, SpGistState *state, for (i = 0; i < nToInsert; i++) { SpGistLeafTuple it = newLeafs[i]; - Buffer leafBuffer; + Buffer leafBuffer; BlockNumber leafBlock; OffsetNumber newoffset; @@ -1584,12 +1585,12 @@ spgAddNodeAction(Relation index, SpGistState *state, xlrec.nodeI = parent->node; /* - * obtain new buffer with the same parity as current, since it will - * be a child of same parent tuple + * obtain new buffer with the same parity as current, since it will be + * a child of same parent tuple */ current->buffer = SpGistGetBuffer(index, GBUF_INNER_PARITY(current->blkno), - newInnerTuple->size + sizeof(ItemIdData), + newInnerTuple->size + sizeof(ItemIdData), &xlrec.newPage); current->blkno = BufferGetBlockNumber(current->buffer); current->page = BufferGetPage(current->buffer); @@ -1597,15 +1598,15 @@ spgAddNodeAction(Relation index, SpGistState *state, xlrec.blknoNew = current->blkno; /* - * Let's just make real sure new current isn't same as old. Right - * now that's impossible, but if SpGistGetBuffer ever got smart enough - * to delete placeholder tuples before checking space, maybe it - * wouldn't be impossible. The case would appear to work except that - * WAL replay would be subtly wrong, so I think a mere assert isn't - * enough here. + * Let's just make real sure new current isn't same as old. Right now + * that's impossible, but if SpGistGetBuffer ever got smart enough to + * delete placeholder tuples before checking space, maybe it wouldn't + * be impossible. The case would appear to work except that WAL + * replay would be subtly wrong, so I think a mere assert isn't enough + * here. */ - if (xlrec.blknoNew == xlrec.blkno) - elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer"); + if (xlrec.blknoNew == xlrec.blkno) + elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer"); /* * New current and parent buffer will both be modified; but note that @@ -1707,9 +1708,9 @@ spgSplitNodeAction(Relation index, SpGistState *state, Assert(!SpGistPageStoresNulls(current->page)); /* - * Construct new prefix tuple, containing a single node with the - * specified label. (We'll update the node's downlink to point to the - * new postfix tuple, below.) + * Construct new prefix tuple, containing a single node with the specified + * label. (We'll update the node's downlink to point to the new postfix + * tuple, below.) 
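Aside: the nodePageSelect loops above are a greedy first-fit assignment of indivisible node groups to the two leaf pages, and, as the earlier comment warns, it can fail even when the total size fits, because a group can't straddle pages. A standalone version of the same loop:

#include <stdbool.h>

/*
 * Assign each node's group of leaf tuples (leafSizes[i] bytes, which
 * must stay together) to page 0 (curspace bytes free) or page 1
 * (newspace bytes free).  Returns false if the greedy split failed.
 */
static bool
assign_node_groups(const int *leafSizes, int nNodes,
                   int curspace, int newspace, int *pageSelect)
{
    int         i;

    for (i = 0; i < nNodes; i++)
    {
        if (leafSizes[i] <= curspace)
        {
            pageSelect[i] = 0;      /* signifies current page */
            curspace -= leafSizes[i];
        }
        else
        {
            pageSelect[i] = 1;      /* signifies new leaf page */
            newspace -= leafSizes[i];
        }
    }
    /* a negative remainder means some group fit nowhere */
    return newspace >= 0;
}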
*/ node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false); @@ -1888,9 +1889,9 @@ spgdoinsert(Relation index, SpGistState *state, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %lu exceeds maximum %lu for index \"%s\"", (unsigned long) (leafSize - sizeof(ItemIdData)), - (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)), + (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)), RelationGetRelationName(index)), - errhint("Values larger than a buffer page cannot be indexed."))); + errhint("Values larger than a buffer page cannot be indexed."))); /* Initialize "current" to the appropriate root page */ current.blkno = isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO; @@ -1920,7 +1921,7 @@ spgdoinsert(Relation index, SpGistState *state, if (current.blkno == InvalidBlockNumber) { /* - * Create a leaf page. If leafSize is too large to fit on a page, + * Create a leaf page. If leafSize is too large to fit on a page, * we won't actually use the page yet, but it simplifies the API * for doPickSplit to always have a leaf page at hand; so just * quietly limit our request to a page size. @@ -1968,7 +1969,7 @@ spgdoinsert(Relation index, SpGistState *state, } else if ((sizeToSplit = checkSplitConditions(index, state, ¤t, - &nToSplit)) < SPGIST_PAGE_CAPACITY / 2 && + &nToSplit)) < SPGIST_PAGE_CAPACITY / 2 && nToSplit < 64 && leafTuple->size + sizeof(ItemIdData) + sizeToSplit <= SPGIST_PAGE_CAPACITY) { @@ -2077,8 +2078,8 @@ spgdoinsert(Relation index, SpGistState *state, } /* - * Loop around and attempt to insert the new leafDatum - * at "current" (which might reference an existing child + * Loop around and attempt to insert the new leafDatum at + * "current" (which might reference an existing child * tuple, or might be invalid to force us to find a new * page for the tuple). * @@ -2102,8 +2103,8 @@ spgdoinsert(Relation index, SpGistState *state, out.result.addNode.nodeLabel); /* - * Retry insertion into the enlarged node. We assume - * that we'll get a MatchNode result this time. + * Retry insertion into the enlarged node. We assume that + * we'll get a MatchNode result this time. */ goto process_inner_tuple; break; diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c index 8ff9245e17..456a71fbba 100644 --- a/src/backend/access/spgist/spginsert.c +++ b/src/backend/access/spgist/spginsert.c @@ -123,7 +123,7 @@ spgbuild(PG_FUNCTION_ARGS) buildstate.spgstate.isBuild = true; buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext, - "SP-GiST build temporary context", + "SP-GiST build temporary context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); diff --git a/src/backend/access/spgist/spgkdtreeproc.c b/src/backend/access/spgist/spgkdtreeproc.c index adfe287581..db472db9d6 100644 --- a/src/backend/access/spgist/spgkdtreeproc.c +++ b/src/backend/access/spgist/spgkdtreeproc.c @@ -135,12 +135,12 @@ spg_kd_picksplit(PG_FUNCTION_ARGS) /* * Note: points that have coordinates exactly equal to coord may get - * classified into either node, depending on where they happen to fall - * in the sorted list. This is okay as long as the inner_consistent - * function descends into both sides for such cases. This is better - * than the alternative of trying to have an exact boundary, because - * it keeps the tree balanced even when we have many instances of the - * same point value. So we should never trigger the allTheSame logic. 
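Aside: spg_kd_picksplit's rewrapped comment above depends on splitting the sorted coordinate list at the middle and tolerating ties at the boundary value. A toy median split — the real code derives the boundary from the sorted datum array, so the details here are deliberately simplified:

#include <stdlib.h>

static int
cmp_double(const void *a, const void *b)
{
    double      da = *(const double *) a;
    double      db = *(const double *) b;

    return (da > db) - (da < db);
}

/*
 * Sort the coordinates and split at the middle position.  Points whose
 * coordinate equals the returned boundary may land in either half,
 * which is fine as long as searches descend into both halves on ties.
 */
static double
median_split(double *coords, int n, int *nleft)
{
    qsort(coords, n, sizeof(double), cmp_double);
    *nleft = n / 2;
    return coords[n / 2];
}

Tolerating ties is what keeps the tree balanced under many duplicates, which is why the comment can promise the allTheSame logic is never triggered from here.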
+ * classified into either node, depending on where they happen to fall in + * the sorted list. This is okay as long as the inner_consistent function + * descends into both sides for such cases. This is better than the + * alternative of trying to have an exact boundary, because it keeps the + * tree balanced even when we have many instances of the same point value. + * So we should never trigger the allTheSame logic. */ for (i = 0; i < in->nTuples; i++) { diff --git a/src/backend/access/spgist/spgquadtreeproc.c b/src/backend/access/spgist/spgquadtreeproc.c index 10fafe5864..5da265025e 100644 --- a/src/backend/access/spgist/spgquadtreeproc.c +++ b/src/backend/access/spgist/spgquadtreeproc.c @@ -253,8 +253,8 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS) boxQuery = DatumGetBoxP(in->scankeys[i].sk_argument); if (DatumGetBool(DirectFunctionCall2(box_contain_pt, - PointerGetDatum(boxQuery), - PointerGetDatum(centroid)))) + PointerGetDatum(boxQuery), + PointerGetDatum(centroid)))) { /* centroid is in box, so all quadrants are OK */ } diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index 7a3a96230d..2a083b7c38 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -24,7 +24,7 @@ typedef void (*storeRes_func) (SpGistScanOpaque so, ItemPointer heapPtr, - Datum leafValue, bool isnull, bool recheck); + Datum leafValue, bool isnull, bool recheck); typedef struct ScanStackEntry { @@ -88,7 +88,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so) if (so->want_itup) { /* Must pfree IndexTuples to avoid memory leak */ - int i; + int i; for (i = 0; i < so->nPtrs; i++) pfree(so->indexTups[i]); @@ -102,7 +102,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so) * Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so. * * The point here is to eliminate null-related considerations from what the - * opclass consistent functions need to deal with. We assume all SPGiST- + * opclass consistent functions need to deal with. We assume all SPGiST- * indexable operators are strict, so any null RHS value makes the scan * condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL * conditions; their effect is reflected into searchNulls/searchNonNulls. @@ -177,6 +177,7 @@ spgbeginscan(PG_FUNCTION_ARGS) { Relation rel = (Relation) PG_GETARG_POINTER(0); int keysz = PG_GETARG_INT32(1); + /* ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); */ IndexScanDesc scan; SpGistScanOpaque so; @@ -457,7 +458,7 @@ redirect: MemoryContext oldCtx; innerTuple = (SpGistInnerTuple) PageGetItem(page, - PageGetItemId(page, offset)); + PageGetItemId(page, offset)); if (innerTuple->tupstate != SPGIST_LIVE) { @@ -522,7 +523,7 @@ redirect: for (i = 0; i < out.nNodes; i++) { - int nodeN = out.nodeNumbers[i]; + int nodeN = out.nodeNumbers[i]; Assert(nodeN >= 0 && nodeN < in.nNodes); if (ItemPointerIsValid(&nodes[nodeN]->t_tid)) @@ -598,7 +599,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr, if (so->want_itup) { /* - * Reconstruct desired IndexTuple. We have to copy the datum out of + * Reconstruct desired IndexTuple. We have to copy the datum out of * the temp context anyway, so we may as well create the tuple here. 
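Aside: the scan-key preprocessing comment above takes null handling out of the opclass's hands — an ordinary key with a NULL comparison value makes the scan unsatisfiable (all indexable operators are assumed strict), while IS NULL / IS NOT NULL keys merely select which side of the index to visit. A simplified model of that bookkeeping, with field names invented:

#include <stdbool.h>

typedef struct SimpleKey
{
    bool        isnull;         /* comparison value is NULL */
    bool        is_is_null;     /* key is "col IS NULL" */
    bool        is_is_not_null; /* key is "col IS NOT NULL" */
} SimpleKey;

typedef struct NullStrategy
{
    bool        searchNulls;    /* visit NULL index entries */
    bool        searchNonNulls; /* visit non-NULL index entries */
    bool        satisfiable;
} NullStrategy;

static NullStrategy
preprocess_keys(const SimpleKey *keys, int nkeys)
{
    NullStrategy s = {true, true, true};
    int         i;

    for (i = 0; i < nkeys; i++)
    {
        if (keys[i].is_is_null)
            s.searchNonNulls = false;   /* only NULL entries can match */
        else if (keys[i].is_is_not_null)
            s.searchNulls = false;      /* only non-NULLs can match */
        else if (keys[i].isnull)
            s.satisfiable = false;      /* strict operator, NULL arg */
        else
            s.searchNulls = false;      /* ordinary key implies NOT NULL */
    }
    if (!s.searchNulls && !s.searchNonNulls)
        s.satisfiable = false;
    return s;
}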
*/ so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc, @@ -636,7 +637,7 @@ spggettuple(PG_FUNCTION_ARGS) if (so->want_itup) { /* Must pfree IndexTuples to avoid memory leak */ - int i; + int i; for (i = 0; i < so->nPtrs; i++) pfree(so->indexTups[i]); diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c index 656015ea7e..520d7b24c5 100644 --- a/src/backend/access/spgist/spgtextproc.c +++ b/src/backend/access/spgist/spgtextproc.c @@ -26,7 +26,7 @@ * In the worst case, a inner tuple in a text suffix tree could have as many * as 256 nodes (one for each possible byte value). Each node can take 16 * bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page - * of size BLCKSZ. Rather than assuming we know the exact amount of overhead + * of size BLCKSZ. Rather than assuming we know the exact amount of overhead * imposed by page headers, tuple headers, etc, we leave 100 bytes for that * (the actual overhead should be no more than 56 bytes at this writing, so * there is slop in this number). The upshot is that the maximum safe prefix @@ -209,9 +209,9 @@ spg_text_choose(PG_FUNCTION_ARGS) { /* * Descend to existing node. (If in->allTheSame, the core code will - * ignore our nodeN specification here, but that's OK. We still - * have to provide the correct levelAdd and restDatum values, and - * those are the same regardless of which node gets chosen by core.) + * ignore our nodeN specification here, but that's OK. We still have + * to provide the correct levelAdd and restDatum values, and those are + * the same regardless of which node gets chosen by core.) */ out->resultType = spgMatchNode; out->result.matchNode.nodeN = i; @@ -227,10 +227,10 @@ spg_text_choose(PG_FUNCTION_ARGS) else if (in->allTheSame) { /* - * Can't use AddNode action, so split the tuple. The upper tuple - * has the same prefix as before and uses an empty node label for - * the lower tuple. The lower tuple has no prefix and the same - * node labels as the original tuple. + * Can't use AddNode action, so split the tuple. The upper tuple has + * the same prefix as before and uses an empty node label for the + * lower tuple. The lower tuple has no prefix and the same node + * labels as the original tuple. */ out->resultType = spgSplitTuple; out->result.splitTuple.prefixHasPrefix = in->hasPrefix; @@ -315,13 +315,13 @@ spg_text_picksplit(PG_FUNCTION_ARGS) if (commonLen < VARSIZE_ANY_EXHDR(texti)) nodes[i].c = *(uint8 *) (VARDATA_ANY(texti) + commonLen); else - nodes[i].c = '\0'; /* use \0 if string is all common */ + nodes[i].c = '\0'; /* use \0 if string is all common */ nodes[i].i = i; nodes[i].d = in->datums[i]; } /* - * Sort by label bytes so that we can group the values into nodes. This + * Sort by label bytes so that we can group the values into nodes. This * also ensures that the nodes are ordered by label value, allowing the * use of binary search in searchChar. */ @@ -371,7 +371,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS) /* * Reconstruct values represented at this tuple, including parent data, - * prefix of this tuple if any, and the node label if any. in->level + * prefix of this tuple if any, and the node label if any. in->level * should be the length of the previously reconstructed value, and the * number of bytes added here is prefixSize or prefixSize + 1. * @@ -381,7 +381,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS) * long-format reconstructed values. */ Assert(in->level == 0 ? 
DatumGetPointer(in->reconstructedValue) == NULL : - VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level); + VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level); maxReconstrLen = in->level + 1; if (in->hasPrefix) @@ -530,7 +530,7 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS) } else { - text *fullText = palloc(VARHDRSZ + fullLen); + text *fullText = palloc(VARHDRSZ + fullLen); SET_VARSIZE(fullText, VARHDRSZ + fullLen); fullValue = VARDATA(fullText); diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 46a10f6a20..d56c2325fe 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index) * * When requesting an inner page, if we get one with the wrong parity, * we just release the buffer and try again. We will get a different page - * because GetFreeIndexPage will have marked the page used in FSM. The page + * because GetFreeIndexPage will have marked the page used in FSM. The page * is entered in our local lastUsedPages cache, so there's some hope of * making use of it later in this session, but otherwise we rely on VACUUM * to eventually re-enter the page in FSM, making it available for recycling. @@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index) * * When we return a buffer to the caller, the page is *not* entered into * the lastUsedPages cache; we expect the caller will do so after it's taken - * whatever space it will use. This is because after the caller has used up + * whatever space it will use. This is because after the caller has used up * some space, the page might have less space than whatever was cached already * so we'd rather not trash the old cache entry. */ @@ -275,7 +275,7 @@ allocNewBuffer(Relation index, int flags) else { BlockNumber blkno = BufferGetBlockNumber(buffer); - int blkFlags = GBUF_INNER_PARITY(blkno); + int blkFlags = GBUF_INNER_PARITY(blkno); if ((flags & GBUF_PARITY_MASK) == blkFlags) { @@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew) /* * If possible, increase the space request to include relation's - * fillfactor. This ensures that when we add unrelated tuples to a page, + * fillfactor. This ensures that when we add unrelated tuples to a page, * we try to keep 100-fillfactor% available for adding tuples that are * related to the ones already on it. But fillfactor mustn't cause an * error for requests that would otherwise be legal. 
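Aside: SpGistGetBuffer's fillfactor comment above can be read as "pad the request by the fillfactor reserve, but clamp it so an otherwise-legal request stays legal". A sketch under an assumed page capacity — the constant and the exact formula here are illustrative, not the actual macros:

#define PAGE_CAPACITY 8120      /* assumed usable bytes per page */

static int
min_int(int a, int b)
{
    return (a < b) ? a : b;
}

/*
 * Pad the request so that, after this tuple is placed, roughly
 * (100 - fillfactor)% of the page stays free for related tuples --
 * but never pad beyond what any page could satisfy, or requests that
 * would otherwise be legal would start to fail.
 */
static int
adjust_space_request(int needSpace, int fillfactor)
{
    int         reserve = PAGE_CAPACITY * (100 - fillfactor) / 100;

    needSpace += reserve;
    return min_int(needSpace, PAGE_CAPACITY);
}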
@@ -664,7 +664,7 @@ spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix, errmsg("SPGiST inner tuple size %lu exceeds maximum %lu", (unsigned long) size, (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData))), - errhint("Values larger than a buffer page cannot be indexed."))); + errhint("Values larger than a buffer page cannot be indexed."))); /* * Check for overflow of header fields --- probably can't fail if the @@ -801,7 +801,7 @@ SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size, for (; i <= maxoff; i++) { SpGistDeadTuple it = (SpGistDeadTuple) PageGetItem(page, - PageGetItemId(page, i)); + PageGetItemId(page, i)); if (it->tupstate == SPGIST_PLACEHOLDER) { diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index 856790ee2a..27b55170cb 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -31,8 +31,8 @@ /* Entry in pending-list of TIDs we need to revisit */ typedef struct spgVacPendingItem { - ItemPointerData tid; /* redirection target to visit */ - bool done; /* have we dealt with this? */ + ItemPointerData tid; /* redirection target to visit */ + bool done; /* have we dealt with this? */ struct spgVacPendingItem *next; /* list link */ } spgVacPendingItem; @@ -46,10 +46,10 @@ typedef struct spgBulkDeleteState void *callback_state; /* Additional working state */ - SpGistState spgstate; /* for SPGiST operations that need one */ - spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */ - TransactionId myXmin; /* for detecting newly-added redirects */ - TransactionId OldestXmin; /* for deciding a redirect is obsolete */ + SpGistState spgstate; /* for SPGiST operations that need one */ + spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */ + TransactionId myXmin; /* for detecting newly-added redirects */ + TransactionId OldestXmin; /* for deciding a redirect is obsolete */ BlockNumber lastFilledBlock; /* last non-deletable block */ } spgBulkDeleteState; @@ -213,7 +213,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, * Figure out exactly what we have to do. We do this separately from * actually modifying the page, mainly so that we have a representation * that can be dumped into WAL and then the replay code can do exactly - * the same thing. The output of this step consists of six arrays + * the same thing. The output of this step consists of six arrays * describing four kinds of operations, to be performed in this order: * * toDead[]: tuple numbers to be replaced with DEAD tuples @@ -276,8 +276,8 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, else if (prevLive == InvalidOffsetNumber) { /* - * This is the first live tuple in the chain. It has - * to move to the head position. + * This is the first live tuple in the chain. It has to move + * to the head position. */ moveSrc[xlrec.nMove] = j; moveDest[xlrec.nMove] = i; @@ -289,7 +289,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, else { /* - * Second or later live tuple. Arrange to re-chain it to the + * Second or later live tuple. Arrange to re-chain it to the * previous live one, if there was a gap. 
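Aside: vacuumLeafPage's chain-repair rules above (the first surviving tuple moves into the head slot; later survivors are re-chained across any deletable gap) can be planned in one pass over the chain. A simplified model that records the same kinds of moveSrc/moveDest and chainSrc/chainDest decisions; the chain layout and limits are invented:

#include <stdbool.h>

#define MAX_OPS 64

typedef struct ChainOps
{
    int         moveSrc[MAX_OPS];   /* tuple to move ...            */
    int         moveDest[MAX_OPS];  /* ... into this (head) slot    */
    int         nMove;
    int         chainSrc[MAX_OPS];  /* tuple whose next-link changes */
    int         chainDest[MAX_OPS]; /* ... to point here            */
    int         nChain;
} ChainOps;

/*
 * Walk a leaf chain (offsets in chain[], live[] saying which members
 * survive vacuum) and plan the repairs.
 */
static void
plan_chain_repair(const int *chain, const bool *live, int len,
                  ChainOps *ops)
{
    int         prevLive = -1;  /* offset of last survivor seen */
    bool        gap = false;    /* deletables since prevLive? */
    int         i;

    ops->nMove = 0;
    ops->nChain = 0;
    for (i = 0; i < len; i++)
    {
        if (!live[i])
        {
            gap = true;         /* a deletable member sits here */
            continue;
        }
        if (prevLive < 0)
        {
            if (i == 0)
                prevLive = chain[0];    /* head survives: no move */
            else
            {
                /* first survivor: it must move into the head slot */
                ops->moveSrc[ops->nMove] = chain[i];
                ops->moveDest[ops->nMove] = chain[0];
                ops->nMove++;
                prevLive = chain[0];
            }
        }
        else if (gap)
        {
            /* link the previous survivor directly to this one */
            ops->chainSrc[ops->nChain] = prevLive;
            ops->chainDest[ops->nChain] = chain[i];
            ops->nChain++;
            prevLive = chain[i];
        }
        else
            prevLive = chain[i];
        gap = false;
    }
}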
*/ if (interveningDeletable) @@ -353,11 +353,11 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, InvalidBlockNumber, InvalidOffsetNumber); /* - * We implement the move step by swapping the item pointers of the - * source and target tuples, then replacing the newly-source tuples - * with placeholders. This is perhaps unduly friendly with the page - * data representation, but it's fast and doesn't risk page overflow - * when a tuple to be relocated is large. + * We implement the move step by swapping the item pointers of the source + * and target tuples, then replacing the newly-source tuples with + * placeholders. This is perhaps unduly friendly with the page data + * representation, but it's fast and doesn't risk page overflow when a + * tuple to be relocated is large. */ for (i = 0; i < xlrec.nMove; i++) { @@ -518,7 +518,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer, */ for (i = max; i >= FirstOffsetNumber && - (opaque->nRedirection > 0 || !hasNonPlaceholder); + (opaque->nRedirection > 0 || !hasNonPlaceholder); i--) { SpGistDeadTuple dt; @@ -651,9 +651,9 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno) /* * The root pages must never be deleted, nor marked as available in FSM, - * because we don't want them ever returned by a search for a place to - * put a new tuple. Otherwise, check for empty/deletable page, and - * make sure FSM knows about it. + * because we don't want them ever returned by a search for a place to put + * a new tuple. Otherwise, check for empty/deletable page, and make sure + * FSM knows about it. */ if (!SpGistBlockIsRoot(blkno)) { @@ -688,7 +688,7 @@ spgprocesspending(spgBulkDeleteState *bds) Relation index = bds->info->index; spgVacPendingItem *pitem; spgVacPendingItem *nitem; - BlockNumber blkno; + BlockNumber blkno; Buffer buffer; Page page; @@ -741,11 +741,11 @@ spgprocesspending(spgBulkDeleteState *bds) else { /* - * On an inner page, visit the referenced inner tuple and add - * all its downlinks to the pending list. We might have pending - * items for more than one inner tuple on the same page (in fact - * this is pretty likely given the way space allocation works), - * so get them all while we are here. + * On an inner page, visit the referenced inner tuple and add all + * its downlinks to the pending list. We might have pending items + * for more than one inner tuple on the same page (in fact this is + * pretty likely given the way space allocation works), so get + * them all while we are here. */ for (nitem = pitem; nitem != NULL; nitem = nitem->next) { @@ -774,7 +774,7 @@ spgprocesspending(spgBulkDeleteState *bds) { /* transfer attention to redirect point */ spgAddPendingTID(bds, - &((SpGistDeadTuple) innerTuple)->pointer); + &((SpGistDeadTuple) innerTuple)->pointer); } else elog(ERROR, "unexpected SPGiST tuple state: %d", @@ -825,8 +825,8 @@ spgvacuumscan(spgBulkDeleteState *bds) * physical order (we hope the kernel will cooperate in providing * read-ahead for speed). It is critical that we visit all leaf pages, * including ones added after we start the scan, else we might fail to - * delete some deletable tuples. See more extensive comments about - * this in btvacuumscan(). + * delete some deletable tuples. See more extensive comments about this + * in btvacuumscan(). 
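Aside: the move-by-pointer-swap comment above works because a page's line pointer array indirects from item numbers to byte offsets, so swapping two line pointers "moves" a tuple without copying its bytes — the move can never fail for lack of space, however large the tuple. A sketch of the swap, with a simplified line pointer:

typedef struct LinePointer
{
    unsigned    offset;         /* byte offset of the item on the page */
    unsigned    length;         /* length of the item */
} LinePointer;

static void
swap_items(LinePointer *lp, int src, int dst)
{
    LinePointer tmp = lp[src];

    lp[src] = lp[dst];
    lp[dst] = tmp;
    /* slot src now holds the old dst item, ready to be overwritten
     * with a placeholder */
}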
*/ blkno = SPGIST_METAPAGE_BLKNO + 1; for (;;) diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index 8e87e2adc9..82f8c8b978 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -40,7 +40,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc) } /* - * Add a leaf tuple, or replace an existing placeholder tuple. This is used + * Add a leaf tuple, or replace an existing placeholder tuple. This is used * to replay SpGistPageAddNewItem() operations. If the offset points at an * existing tuple, it had better be a placeholder tuple. */ @@ -50,7 +50,7 @@ addOrReplaceTuple(Page page, Item tuple, int size, OffsetNumber offset) if (offset <= PageGetMaxOffsetNumber(page)) { SpGistDeadTuple dt = (SpGistDeadTuple) PageGetItem(page, - PageGetItemId(page, offset)); + PageGetItemId(page, offset)); if (dt->tupstate != SPGIST_PLACEHOLDER) elog(ERROR, "SPGiST tuple to be replaced is not a placeholder"); @@ -126,7 +126,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record) if (xldata->newPage) SpGistInitBuffer(buffer, - SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); + SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); if (!XLByteLE(lsn, PageGetLSN(page))) { @@ -143,7 +143,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record) SpGistLeafTuple head; head = (SpGistLeafTuple) PageGetItem(page, - PageGetItemId(page, xldata->offnumHeadLeaf)); + PageGetItemId(page, xldata->offnumHeadLeaf)); Assert(head->nextOffset == leafTuple->nextOffset); head->nextOffset = xldata->offnumLeaf; } @@ -154,7 +154,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record) PageIndexTupleDelete(page, xldata->offnumLeaf); if (PageAddItem(page, (Item) leafTuple, leafTuple->size, - xldata->offnumLeaf, false, false) != xldata->offnumLeaf) + xldata->offnumLeaf, false, false) != xldata->offnumLeaf) elog(ERROR, "failed to add item of size %u to SPGiST index page", leafTuple->size); } @@ -180,7 +180,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record) SpGistInnerTuple tuple; tuple = (SpGistInnerTuple) PageGetItem(page, - PageGetItemId(page, xldata->offnumParent)); + PageGetItemId(page, xldata->offnumParent)); spgUpdateNodeLink(tuple, xldata->nodeI, xldata->blknoLeaf, xldata->offnumLeaf); @@ -229,7 +229,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record) if (xldata->newPage) SpGistInitBuffer(buffer, - SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); + SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); if (!XLByteLE(lsn, PageGetLSN(page))) { @@ -261,7 +261,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record) if (!XLByteLE(lsn, PageGetLSN(page))) { spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves, - state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT, + state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT, SPGIST_PLACEHOLDER, xldata->blknoDst, toInsert[nInsert - 1]); @@ -286,7 +286,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record) SpGistInnerTuple tuple; tuple = (SpGistInnerTuple) PageGetItem(page, - PageGetItemId(page, xldata->offnumParent)); + PageGetItemId(page, xldata->offnumParent)); spgUpdateNodeLink(tuple, xldata->nodeI, xldata->blknoDst, toInsert[nInsert - 1]); @@ -413,7 +413,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record) } /* - * Update parent downlink. Since parent could be in either of the + * Update parent downlink. Since parent could be in either of the * previous two buffers, it's a bit tricky to determine which BKP bit * applies. 
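Aside: the redo functions in the hunks that follow all share the `if (!XLByteLE(lsn, PageGetLSN(page)))` guard, which is what makes replay idempotent: a page already stamped with the record's LSN (or a later one) has the change applied and must be skipped. A flattened model — XLogRecPtr in this release is a two-field struct, so a 64-bit integer stands in for it here:

#include <stdint.h>

typedef uint64_t Lsn;           /* flat stand-in for XLogRecPtr */

/*
 * Standard replay guard: skip pages whose LSN already covers the
 * record, else apply the change and stamp the record's LSN on the
 * page so a second replay of the same record is a no-op.
 */
static void
replay_on_page(Lsn record_lsn, Lsn *page_lsn,
               void (*apply) (void *arg), void *arg)
{
    if (record_lsn <= *page_lsn)
        return;                 /* change already present */
    apply(arg);
    *page_lsn = record_lsn;
}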
*/ @@ -435,7 +435,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record) SpGistInnerTuple innerTuple; innerTuple = (SpGistInnerTuple) PageGetItem(page, - PageGetItemId(page, xldata->offnumParent)); + PageGetItemId(page, xldata->offnumParent)); spgUpdateNodeLink(innerTuple, xldata->nodeI, xldata->blknoNew, xldata->offnumNew); @@ -504,7 +504,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record) { PageIndexTupleDelete(page, xldata->offnumPrefix); if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size, - xldata->offnumPrefix, false, false) != xldata->offnumPrefix) + xldata->offnumPrefix, false, false) != xldata->offnumPrefix) elog(ERROR, "failed to add item of size %u to SPGiST index page", prefixTuple->size); @@ -571,7 +571,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) page = (Page) BufferGetPage(srcBuffer); SpGistInitBuffer(srcBuffer, - SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); + SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); /* don't update LSN etc till we're done with it */ } else @@ -587,8 +587,8 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) { /* * We have it a bit easier here than in doPickSplit(), - * because we know the inner tuple's location already, - * so we can inject the correct redirection tuple now. + * because we know the inner tuple's location already, so + * we can inject the correct redirection tuple now. */ if (!state.isBuild) spgPageIndexMultiDelete(&state, page, @@ -627,7 +627,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) page = (Page) BufferGetPage(destBuffer); SpGistInitBuffer(destBuffer, - SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); + SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); /* don't update LSN etc till we're done with it */ } else @@ -707,9 +707,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) SpGistInnerTuple parent; parent = (SpGistInnerTuple) PageGetItem(page, - PageGetItemId(page, xldata->offnumParent)); + PageGetItemId(page, xldata->offnumParent)); spgUpdateNodeLink(parent, xldata->nodeI, - xldata->blknoInner, xldata->offnumInner); + xldata->blknoInner, xldata->offnumInner); } PageSetLSN(page, lsn); @@ -742,9 +742,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) SpGistInnerTuple parent; parent = (SpGistInnerTuple) PageGetItem(page, - PageGetItemId(page, xldata->offnumParent)); + PageGetItemId(page, xldata->offnumParent)); spgUpdateNodeLink(parent, xldata->nodeI, - xldata->blknoInner, xldata->offnumInner); + xldata->blknoInner, xldata->offnumInner); PageSetLSN(page, lsn); PageSetTLI(page, ThisTimeLineID); @@ -803,7 +803,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record) spgPageIndexMultiDelete(&state, page, toPlaceholder, xldata->nPlaceholder, - SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER, + SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER, InvalidBlockNumber, InvalidOffsetNumber); @@ -821,7 +821,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record) spgPageIndexMultiDelete(&state, page, moveSrc, xldata->nMove, - SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER, + SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER, InvalidBlockNumber, InvalidOffsetNumber); @@ -906,7 +906,7 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record) SpGistDeadTuple dt; dt = (SpGistDeadTuple) PageGetItem(page, - PageGetItemId(page, itemToPlaceholder[i])); + PageGetItemId(page, itemToPlaceholder[i])); Assert(dt->tupstate == SPGIST_REDIRECT); dt->tupstate = SPGIST_PLACEHOLDER; ItemPointerSetInvalid(&dt->pointer); diff --git a/src/backend/access/transam/clog.c 
b/src/backend/access/transam/clog.c index 33b5ca2d36..7f2f6921d5 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn) * Testing during the PostgreSQL 9.2 development cycle revealed that on a * large multi-processor system, it was possible to have more CLOG page * requests in flight at one time than the numebr of CLOG buffers which existed - * at that time, which was hardcoded to 8. Further testing revealed that + * at that time, which was hardcoded to 8. Further testing revealed that * performance dropped off with more than 32 CLOG buffers, possibly because * the linear buffer search algorithm doesn't scale well. * diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index a8e3f19119..dd69c232eb 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -903,12 +903,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) { int slotno; int cur_count; - int bestvalidslot = 0; /* keep compiler quiet */ + int bestvalidslot = 0; /* keep compiler quiet */ int best_valid_delta = -1; - int best_valid_page_number = 0; /* keep compiler quiet */ - int bestinvalidslot = 0; /* keep compiler quiet */ + int best_valid_page_number = 0; /* keep compiler quiet */ + int bestinvalidslot = 0; /* keep compiler quiet */ int best_invalid_delta = -1; - int best_invalid_page_number = 0; /* keep compiler quiet */ + int best_invalid_page_number = 0; /* keep compiler quiet */ /* See if page already has a buffer assigned */ for (slotno = 0; slotno < shared->num_slots; slotno++) @@ -920,15 +920,15 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) /* * If we find any EMPTY slot, just select that one. Else choose a - * victim page to replace. We normally take the least recently used + * victim page to replace. We normally take the least recently used * valid page, but we will never take the slot containing - * latest_page_number, even if it appears least recently used. We + * latest_page_number, even if it appears least recently used. We * will select a slot that is already I/O busy only if there is no * other choice: a read-busy slot will not be least recently used once * the read finishes, and waiting for an I/O on a write-busy slot is * inferior to just picking some other slot. Testing shows the slot - * we pick instead will often be clean, allowing us to begin a read - * at once. + * we pick instead will often be clean, allowing us to begin a read at + * once. * * Normally the page_lru_count values will all be different and so * there will be a well-defined LRU page. But since we allow @@ -997,10 +997,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) /* * If all pages (except possibly the latest one) are I/O busy, we'll - * have to wait for an I/O to complete and then retry. In that unhappy - * case, we choose to wait for the I/O on the least recently used slot, - * on the assumption that it was likely initiated first of all the I/Os - * in progress and may therefore finish first. + * have to wait for an I/O to complete and then retry. In that + * unhappy case, we choose to wait for the I/O on the least recently + * used slot, on the assumption that it was likely initiated first of + * all the I/Os in progress and may therefore finish first. */ if (best_valid_delta < 0) { @@ -1168,20 +1168,20 @@ restart:; /* * SlruScanDirectory callback - * This callback reports true if there's any segment prior to the one - * containing the page passed as "data". 
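Aside: SlruSelectLRUPage, reindented above, picks an eviction victim under three rules — take any EMPTY slot outright, never evict the slot holding latest_page_number, and otherwise prefer the least recently used slot that is not I/O-busy, falling back to the least recently used busy one (on which the caller must then wait). A standalone model with slot states simplified to three:

typedef enum { SLOT_EMPTY, SLOT_VALID, SLOT_IO_BUSY } SlotStatus;

typedef struct Slot
{
    SlotStatus  status;
    int         pageno;
    int         lru_count;      /* bumped on each access */
} Slot;

/* Returns the victim slot index, or -1 if no slot is eligible. */
static int
select_victim(const Slot *slots, int nslots, int cur_count,
              int latest_pageno)
{
    int         best_idle = -1, best_idle_delta = -1;
    int         best_busy = -1, best_busy_delta = -1;
    int         i;

    for (i = 0; i < nslots; i++)
    {
        int         delta = cur_count - slots[i].lru_count;

        if (slots[i].status == SLOT_EMPTY)
            return i;           /* free slot: done */
        if (slots[i].pageno == latest_pageno)
            continue;           /* never evict the latest page */
        if (slots[i].status == SLOT_VALID && delta > best_idle_delta)
        {
            best_idle = i;
            best_idle_delta = delta;
        }
        else if (slots[i].status == SLOT_IO_BUSY && delta > best_busy_delta)
        {
            best_busy = i;
            best_busy_delta = delta;
        }
    }
    return (best_idle >= 0) ? best_idle : best_busy;
}

As the comment above explains, a read-busy slot will not be least recently used once its read finishes, which is why idle slots are preferred even when a busy slot is older.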
+ * This callback reports true if there's any segment prior to the one + * containing the page passed as "data". */ bool SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data) { - int cutoffPage = *(int *) data; + int cutoffPage = *(int *) data; cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT; if (ctl->PagePrecedes(segpage, cutoffPage)) - return true; /* found one; don't iterate any more */ + return true; /* found one; don't iterate any more */ - return false; /* keep going */ + return false; /* keep going */ } /* @@ -1191,8 +1191,8 @@ SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data) { - char path[MAXPGPATH]; - int cutoffPage = *(int *) data; + char path[MAXPGPATH]; + int cutoffPage = *(int *) data; if (ctl->PagePrecedes(segpage, cutoffPage)) { @@ -1202,7 +1202,7 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data) unlink(path); } - return false; /* keep going */ + return false; /* keep going */ } /* @@ -1212,14 +1212,14 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data) bool SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data) { - char path[MAXPGPATH]; + char path[MAXPGPATH]; snprintf(path, MAXPGPATH, "%s/%s", ctl->Dir, filename); ereport(DEBUG2, (errmsg("removing file \"%s\"", path))); unlink(path); - return false; /* keep going */ + return false; /* keep going */ } /* diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 0b41a76a32..b94fae3740 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -360,8 +360,9 @@ static void GXactLoadSubxactData(GlobalTransaction gxact, int nsubxacts, TransactionId *children) { - PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; - PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; + PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; + PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; + /* We need no extra lock since the GXACT isn't valid yet */ if (nsubxacts > PGPROC_MAX_CACHED_SUBXIDS) { @@ -410,7 +411,7 @@ LockGXact(const char *gid, Oid user) for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; - PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; + PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; /* Ignore not-yet-valid GIDs */ if (!gxact->valid) @@ -523,7 +524,7 @@ TransactionIdIsPrepared(TransactionId xid) for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; - PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; + PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; if (gxact->valid && pgxact->xid == xid) { @@ -648,8 +649,8 @@ pg_prepared_xact(PG_FUNCTION_ARGS) while (status->array != NULL && status->currIdx < status->ngxacts) { GlobalTransaction gxact = &status->array[status->currIdx++]; - PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; - PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; + PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; + PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; Datum values[5]; bool nulls[5]; HeapTuple tuple; @@ -719,7 +720,7 @@ TwoPhaseGetDummyProc(TransactionId xid) for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; - PGXACT *pgxact = 
&ProcGlobal->allPgXact[gxact->pgprocno]; + PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; if (pgxact->xid == xid) { @@ -850,8 +851,8 @@ save_state_data(const void *data, uint32 len) void StartPrepare(GlobalTransaction gxact) { - PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; - PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; + PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; + PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; TransactionId xid = pgxact->xid; TwoPhaseFileHeader hdr; TransactionId *children; @@ -1063,9 +1064,9 @@ EndPrepare(GlobalTransaction gxact) errmsg("could not close two-phase state file: %m"))); /* - * Mark the prepared transaction as valid. As soon as xact.c marks MyPgXact - * as not running our XID (which it will do immediately after this - * function returns), others can commit/rollback the xact. + * Mark the prepared transaction as valid. As soon as xact.c marks + * MyPgXact as not running our XID (which it will do immediately after + * this function returns), others can commit/rollback the xact. * * NB: a side effect of this is to make a dummy ProcArray entry for the * prepared XID. This must happen before we clear the XID from MyPgXact, @@ -1551,7 +1552,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; - PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; + PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; if (gxact->valid && XLByteLE(gxact->prepare_lsn, redo_horizon)) @@ -1707,7 +1708,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p) * XID, and they may force us to advance nextXid. * * We don't expect anyone else to modify nextXid, hence we don't - * need to hold a lock while examining it. We still acquire the + * need to hold a lock while examining it. We still acquire the * lock to modify it, though. */ subxids = (TransactionId *) diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index 892a46abc3..7abf9343be 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -174,8 +174,8 @@ GetNewTransactionId(bool isSubXact) * latestCompletedXid is present in the ProcArray, which is essential for * correct OldestXmin tracking; see src/backend/access/transam/README. * - * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we are - * relying on fetch/store of an xid to be atomic, else other backends + * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we + * are relying on fetch/store of an xid to be atomic, else other backends * might see a partially-set xid here. But holding both locks at once * would be a nasty concurrency hit. So for now, assume atomicity. * diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 659b53524c..8f00186dd7 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -1019,6 +1019,7 @@ RecordTransactionCommit(void) XLogRecData rdata[4]; int lastrdata = 0; xl_xact_commit xlrec; + /* * Set flags required for recovery processing of commits. 
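Aside: PrescanPreparedTransactions' comment above notes that prepared XIDs found on disk may force nextXid forward. The comparison involved must survive XID wraparound; a sketch in the style of TransactionIdFollowsOrEquals (the real function also special-cases reserved XIDs below FirstNormalTransactionId, ignored here):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t TransactionId;

/*
 * Modulo-2^32 ("circular") comparison: a is considered >= b when the
 * signed difference is non-negative, so the test keeps working across
 * wraparound.
 */
static bool
xid_follows_or_equals(TransactionId a, TransactionId b)
{
    return (int32_t) (a - b) >= 0;
}

/*
 * Any XID at or beyond nextXid pushes nextXid forward, keeping future
 * XID assignment ahead of everything already on disk.
 */
static void
maybe_advance_next_xid(TransactionId seen, TransactionId *nextXid)
{
    if (xid_follows_or_equals(seen, *nextXid))
        *nextXid = seen + 1;
}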
*/ @@ -1073,7 +1074,8 @@ RecordTransactionCommit(void) { XLogRecData rdata[2]; int lastrdata = 0; - xl_xact_commit_compact xlrec; + xl_xact_commit_compact xlrec; + xlrec.xact_time = xactStopTimestamp; xlrec.nsubxacts = nchildren; rdata[0].data = (char *) (&xlrec); @@ -2102,7 +2104,7 @@ PrepareTransaction(void) if (XactHasExportedSnapshots()) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot PREPARE a transaction that has exported snapshots"))); + errmsg("cannot PREPARE a transaction that has exported snapshots"))); /* Prevent cancel/die interrupt while cleaning up */ HOLD_INTERRUPTS(); @@ -2602,10 +2604,10 @@ CommitTransactionCommand(void) break; /* - * We were issued a RELEASE command, so we end the - * current subtransaction and return to the parent transaction. - * The parent might be ended too, so repeat till we find an - * INPROGRESS transaction or subtransaction. + * We were issued a RELEASE command, so we end the current + * subtransaction and return to the parent transaction. The parent + * might be ended too, so repeat till we find an INPROGRESS + * transaction or subtransaction. */ case TBLOCK_SUBRELEASE: do @@ -2623,9 +2625,9 @@ CommitTransactionCommand(void) * hierarchy and perform final commit. We do this by rolling up * any subtransactions into their parent, which leads to O(N^2) * operations with respect to resource owners - this isn't that - * bad until we approach a thousands of savepoints but is necessary - * for correctness should after triggers create new resource - * owners. + * bad until we approach a thousands of savepoints but is + * necessary for correctness should after triggers create new + * resource owners. */ case TBLOCK_SUBCOMMIT: do @@ -4551,11 +4553,11 @@ xactGetCommittedChildren(TransactionId **ptr) */ static void xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn, - TransactionId *sub_xids, int nsubxacts, - SharedInvalidationMessage *inval_msgs, int nmsgs, - RelFileNode *xnodes, int nrels, - Oid dbId, Oid tsId, - uint32 xinfo) + TransactionId *sub_xids, int nsubxacts, + SharedInvalidationMessage *inval_msgs, int nmsgs, + RelFileNode *xnodes, int nrels, + Oid dbId, Oid tsId, + uint32 xinfo) { TransactionId max_xid; int i; @@ -4659,12 +4661,13 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn, XLogFlush(lsn); } + /* * Utility function to call xact_redo_commit_internal after breaking down xlrec */ static void xact_redo_commit(xl_xact_commit *xlrec, - TransactionId xid, XLogRecPtr lsn) + TransactionId xid, XLogRecPtr lsn) { TransactionId *subxacts; SharedInvalidationMessage *inval_msgs; @@ -4675,11 +4678,11 @@ xact_redo_commit(xl_xact_commit *xlrec, inval_msgs = (SharedInvalidationMessage *) &(subxacts[xlrec->nsubxacts]); xact_redo_commit_internal(xid, lsn, subxacts, xlrec->nsubxacts, - inval_msgs, xlrec->nmsgs, - xlrec->xnodes, xlrec->nrels, - xlrec->dbId, - xlrec->tsId, - xlrec->xinfo); + inval_msgs, xlrec->nmsgs, + xlrec->xnodes, xlrec->nrels, + xlrec->dbId, + xlrec->tsId, + xlrec->xinfo); } /* @@ -4687,14 +4690,14 @@ xact_redo_commit(xl_xact_commit *xlrec, */ static void xact_redo_commit_compact(xl_xact_commit_compact *xlrec, - TransactionId xid, XLogRecPtr lsn) + TransactionId xid, XLogRecPtr lsn) { xact_redo_commit_internal(xid, lsn, xlrec->subxacts, xlrec->nsubxacts, - NULL, 0, /* inval msgs */ - NULL, 0, /* relfilenodes */ - InvalidOid, /* dbId */ - InvalidOid, /* tsId */ - 0); /* xinfo */ + NULL, 0, /* inval msgs */ + NULL, 0, /* relfilenodes */ + InvalidOid, /* dbId */ + InvalidOid, /* tsId */ + 0); /* 
xinfo */ } /* diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 0f2678cfda..bcb71c45b2 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -344,10 +344,10 @@ typedef struct XLogCtlInsert /* * fullPageWrites is the master copy used by all backends to determine - * whether to write full-page to WAL, instead of using process-local - * one. This is required because, when full_page_writes is changed - * by SIGHUP, we must WAL-log it before it actually affects - * WAL-logging by backends. Checkpointer sets at startup or after SIGHUP. + * whether to write full-page to WAL, instead of using process-local one. + * This is required because, when full_page_writes is changed by SIGHUP, + * we must WAL-log it before it actually affects WAL-logging by backends. + * Checkpointer sets at startup or after SIGHUP. */ bool fullPageWrites; @@ -455,8 +455,11 @@ typedef struct XLogCtlData XLogRecPtr recoveryLastRecPtr; /* timestamp of last COMMIT/ABORT record replayed (or being replayed) */ TimestampTz recoveryLastXTime; - /* timestamp of when we started replaying the current chunk of WAL data, - * only relevant for replication or archive recovery */ + + /* + * timestamp of when we started replaying the current chunk of WAL data, + * only relevant for replication or archive recovery + */ TimestampTz currentChunkStartTime; /* end of the last record restored from the archive */ XLogRecPtr restoreLastRecPtr; @@ -580,7 +583,7 @@ static bool updateMinRecoveryPoint = true; * to replay all the WAL, so reachedConsistency is never set. During archive * recovery, the database is consistent once minRecoveryPoint is reached. */ -bool reachedConsistency = false; +bool reachedConsistency = false; static bool InRedo = false; @@ -750,8 +753,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata) * insert lock, but it seems better to avoid doing CRC calculations while * holding the lock. * - * We add entries for backup blocks to the chain, so that they don't - * need any special treatment in the critical section where the chunks are + * We add entries for backup blocks to the chain, so that they don't need + * any special treatment in the critical section where the chunks are * copied into the WAL buffers. Those entries have to be unlinked from the * chain if we have to loop back here. */ @@ -896,10 +899,10 @@ begin:; /* * Calculate CRC of the data, including all the backup blocks * - * Note that the record header isn't added into the CRC initially since - * we don't know the prev-link yet. Thus, the CRC will represent the CRC - * of the whole record in the order: rdata, then backup blocks, then - * record header. + * Note that the record header isn't added into the CRC initially since we + * don't know the prev-link yet. Thus, the CRC will represent the CRC of + * the whole record in the order: rdata, then backup blocks, then record + * header. */ INIT_CRC32(rdata_crc); for (rdt = rdata; rdt != NULL; rdt = rdt->next) @@ -948,10 +951,10 @@ begin:; } /* - * Also check to see if fullPageWrites or forcePageWrites was just turned on; - * if we weren't already doing full-page writes then go back and recompute. - * (If it was just turned off, we could recompute the record without full pages, - * but we choose not to bother.) + * Also check to see if fullPageWrites or forcePageWrites was just turned + * on; if we weren't already doing full-page writes then go back and + * recompute. 
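Aside: the XLogInsert CRC comment rewrapped below fixes the accumulation order — payload rdata chunks, then backup blocks, then the record header last, because the header's prev-link is unknown until insert time. A standalone illustration, using a plain bitwise CRC-32 as a stand-in for the real INIT_CRC32/COMP_CRC32/FIN_CRC32 machinery:

#include <stddef.h>
#include <stdint.h>

/* Bitwise reflected CRC-32 (poly 0xEDB88320) over one buffer,
 * continuing from a running value. */
static uint32_t
crc32_update(uint32_t crc, const void *data, size_t len)
{
    const uint8_t *p = data;
    size_t      i;
    int         bit;

    for (i = 0; i < len; i++)
    {
        crc ^= p[i];
        for (bit = 0; bit < 8; bit++)
            crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1)));
    }
    return crc;
}

/*
 * Accumulate in the documented order: the payload chunks (rdata, then
 * backup blocks) first, and the record header last of all.
 */
static uint32_t
record_crc(const void **chunks, const size_t *chunklens, int nchunks,
           const void *hdr, size_t hdrlen)
{
    uint32_t    crc = 0xFFFFFFFFu;
    int         i;

    for (i = 0; i < nchunks; i++)
        crc = crc32_update(crc, chunks[i], chunklens[i]);
    crc = crc32_update(crc, hdr, hdrlen);   /* header goes last */
    return ~crc;
}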
(If it was just turned off, we could recompute the record + * without full pages, but we choose not to bother.) */ if ((Insert->fullPageWrites || Insert->forcePageWrites) && !doPageWrites) { @@ -1575,15 +1578,15 @@ AdvanceXLInsertBuffer(bool new_segment) * WAL records beginning in this page have removable backup blocks. This * allows the WAL archiver to know whether it is safe to compress archived * WAL data by transforming full-block records into the non-full-block - * format. It is sufficient to record this at the page level because we + * format. It is sufficient to record this at the page level because we * force a page switch (in fact a segment switch) when starting a backup, * so the flag will be off before any records can be written during the - * backup. At the end of a backup, the last page will be marked as all + * backup. At the end of a backup, the last page will be marked as all * unsafe when perhaps only part is unsafe, but at worst the archiver * would miss the opportunity to compress a few records. */ if (!Insert->forcePageWrites) - NewPage->xlp_info |= XLP_BKP_REMOVABLE; + NewPage ->xlp_info |= XLP_BKP_REMOVABLE; /* * If first page of an XLOG segment file, make it a long header. @@ -1827,11 +1830,11 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch) Write->lastSegSwitchTime = (pg_time_t) time(NULL); /* - * Request a checkpoint if we've consumed too - * much xlog since the last one. For speed, we first check - * using the local copy of RedoRecPtr, which might be out of - * date; if it looks like a checkpoint is needed, forcibly - * update RedoRecPtr and recheck. + * Request a checkpoint if we've consumed too much xlog since + * the last one. For speed, we first check using the local + * copy of RedoRecPtr, which might be out of date; if it looks + * like a checkpoint is needed, forcibly update RedoRecPtr and + * recheck. */ if (IsUnderPostmaster && XLogCheckpointNeeded(openLogId, openLogSeg)) @@ -1931,7 +1934,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN) /* * If the WALWriter is sleeping, we should kick it to make it come out of - * low-power mode. Otherwise, determine whether there's a full page of + * low-power mode. Otherwise, determine whether there's a full page of * WAL available to write. */ if (!sleeping) @@ -1945,9 +1948,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN) } /* - * Nudge the WALWriter: it has a full page of WAL to write, or we want - * it to come out of low-power mode so that this async commit will reach - * disk within the expected amount of time. + * Nudge the WALWriter: it has a full page of WAL to write, or we want it + * to come out of low-power mode so that this async commit will reach disk + * within the expected amount of time. */ if (ProcGlobal->walwriterLatch) SetLatch(ProcGlobal->walwriterLatch); @@ -2076,8 +2079,8 @@ XLogFlush(XLogRecPtr record) WriteRqstPtr = record; /* - * Now wait until we get the write lock, or someone else does the - * flush for us. + * Now wait until we get the write lock, or someone else does the flush + * for us. */ for (;;) { @@ -2182,7 +2185,7 @@ XLogFlush(XLogRecPtr record) * block, and flush through the latest one of those. Thus, if async commits * are not being used, we will flush complete blocks only. We can guarantee * that async commits reach disk after at most three cycles; normally only - * one or two. (When flushing complete blocks, we allow XLogWrite to write + * one or two. 
(When flushing complete blocks, we allow XLogWrite to write * "flexibly", meaning it can stop at the end of the buffer ring; this makes a * difference only with very high load or long wal_writer_delay, but imposes * one extra cycle for the worst case for async commits.) @@ -2273,7 +2276,8 @@ XLogBackgroundFlush(void) /* * If we wrote something then we have something to send to standbys also, - * otherwise the replication delay become around 7s with just async commit. + * otherwise the replication delay become around 7s with just async + * commit. */ if (wrote_something) WalSndWakeup(); @@ -2776,17 +2780,17 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli, } /* - * If the segment was fetched from archival storage, replace - * the existing xlog segment (if any) with the archival version. + * If the segment was fetched from archival storage, replace the existing + * xlog segment (if any) with the archival version. */ if (source == XLOG_FROM_ARCHIVE) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; - XLogRecPtr endptr; - char xlogfpath[MAXPGPATH]; - bool reload = false; - struct stat statbuf; + XLogRecPtr endptr; + char xlogfpath[MAXPGPATH]; + bool reload = false; + struct stat statbuf; XLogFilePath(xlogfpath, tli, log, seg); if (stat(xlogfpath, &statbuf) == 0) @@ -2801,9 +2805,9 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli, if (rename(path, xlogfpath) < 0) ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not rename file \"%s\" to \"%s\": %m", - path, xlogfpath))); + (errcode_for_file_access(), + errmsg("could not rename file \"%s\" to \"%s\": %m", + path, xlogfpath))); /* * If the existing segment was replaced, since walsenders might have @@ -3812,7 +3816,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt) RecPtr = &tmpRecPtr; /* - * RecPtr is pointing to end+1 of the previous WAL record. We must + * RecPtr is pointing to end+1 of the previous WAL record. We must * advance it if necessary to where the next record starts. First, * align to next page if no more records can fit on the current page. */ @@ -5389,10 +5393,10 @@ readRecoveryCommandFile(void) } if (rtli) ereport(DEBUG2, - (errmsg_internal("recovery_target_timeline = %u", rtli))); + (errmsg_internal("recovery_target_timeline = %u", rtli))); else ereport(DEBUG2, - (errmsg_internal("recovery_target_timeline = latest"))); + (errmsg_internal("recovery_target_timeline = latest"))); } else if (strcmp(item->name, "recovery_target_xid") == 0) { @@ -5404,7 +5408,7 @@ readRecoveryCommandFile(void) item->value))); ereport(DEBUG2, (errmsg_internal("recovery_target_xid = %u", - recoveryTargetXid))); + recoveryTargetXid))); recoveryTarget = RECOVERY_TARGET_XID; } else if (strcmp(item->name, "recovery_target_time") == 0) @@ -5428,7 +5432,7 @@ readRecoveryCommandFile(void) Int32GetDatum(-1))); ereport(DEBUG2, (errmsg_internal("recovery_target_time = '%s'", - timestamptz_to_str(recoveryTargetTime)))); + timestamptz_to_str(recoveryTargetTime)))); } else if (strcmp(item->name, "recovery_target_name") == 0) { @@ -5576,13 +5580,13 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) } /* - * If we are establishing a new timeline, we have to copy data from - * the last WAL segment of the old timeline to create a starting WAL - * segment for the new timeline. 
+ * If we are establishing a new timeline, we have to copy data from the + * last WAL segment of the old timeline to create a starting WAL segment + * for the new timeline. * - * Notify the archiver that the last WAL segment of the old timeline - * is ready to copy to archival storage. Otherwise, it is not archived - * for a while. + * Notify the archiver that the last WAL segment of the old timeline is + * ready to copy to archival storage. Otherwise, it is not archived for a + * while. */ if (endTLI != ThisTimeLineID) { @@ -5604,8 +5608,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) XLogArchiveCleanup(xlogpath); /* - * Since there might be a partial WAL segment named RECOVERYXLOG, - * get rid of it. + * Since there might be a partial WAL segment named RECOVERYXLOG, get rid + * of it. */ snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG"); unlink(recoveryPath); /* ignore any error */ @@ -6323,11 +6327,11 @@ StartupXLOG(void) /* * Set backupStartPoint if we're starting recovery from a base backup. * - * Set backupEndPoint and use minRecoveryPoint as the backup end location - * if we're starting recovery from a base backup which was taken from - * the standby. In this case, the database system status in pg_control must - * indicate DB_IN_ARCHIVE_RECOVERY. If not, which means that backup - * is corrupted, so we cancel recovery. + * Set backupEndPoint and use minRecoveryPoint as the backup end + * location if we're starting recovery from a base backup which was + * taken from the standby. In this case, the database system status in + * pg_control must indicate DB_IN_ARCHIVE_RECOVERY. If not, which + * means that backup is corrupted, so we cancel recovery. */ if (haveBackupLabel) { @@ -6340,7 +6344,7 @@ StartupXLOG(void) ereport(FATAL, (errmsg("backup_label contains inconsistent data with control file"), errhint("This means that the backup is corrupted and you will " - "have to use another backup for recovery."))); + "have to use another backup for recovery."))); ControlFile->backupEndPoint = ControlFile->minRecoveryPoint; } } @@ -6383,15 +6387,15 @@ StartupXLOG(void) /* * We're in recovery, so unlogged relations may be trashed and must be - * reset. This should be done BEFORE allowing Hot Standby connections, - * so that read-only backends don't try to read whatever garbage is - * left over from before. + * reset. This should be done BEFORE allowing Hot Standby + * connections, so that read-only backends don't try to read whatever + * garbage is left over from before. */ ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP); /* - * Likewise, delete any saved transaction snapshot files that got - * left behind by crashed backends. + * Likewise, delete any saved transaction snapshot files that got left + * behind by crashed backends. */ DeleteAllExportedSnapshotFiles(); @@ -6489,10 +6493,11 @@ StartupXLOG(void) /* * Let postmaster know we've started redo now, so that it can launch - * checkpointer to perform restartpoints. We don't bother during crash - * recovery as restartpoints can only be performed during archive - * recovery. And we'd like to keep crash recovery simple, to avoid - * introducing bugs that could affect you when recovering after crash. + * checkpointer to perform restartpoints. We don't bother during + * crash recovery as restartpoints can only be performed during + * archive recovery. And we'd like to keep crash recovery simple, to + * avoid introducing bugs that could affect you when recovering after + * crash. 
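/*
 * Rough sketch, not from this patch, of what "let postmaster know" means
 * here, going by the surrounding 9.2 StartupXLOG() code: the startup
 * process signals the postmaster, which launches the checkpointer while
 * replay is still in progress. The exact neighbouring calls may differ.
 */
if (InArchiveRecovery && IsUnderPostmaster)
{
	SetForwardFsyncRequests();          /* route fsync requests via shmem */
	SendPostmasterSignal(PMSIGNAL_RECOVERY_STARTED);
	bgwriterLaunched = true;
}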
* * After this point, we can no longer assume that we're the only * process in addition to postmaster! Also, fsync requests are @@ -6649,8 +6654,8 @@ StartupXLOG(void) { /* * We have reached the end of base backup, the point where - * the minimum recovery point in pg_control indicates. - * The data on disk is now consistent. Reset backupStartPoint + * the minimum recovery point in pg_control indicates. The + * data on disk is now consistent. Reset backupStartPoint * and backupEndPoint. */ elog(DEBUG1, "end of backup reached"); @@ -6863,9 +6868,9 @@ StartupXLOG(void) oldestActiveXID = PrescanPreparedTransactions(NULL, NULL); /* - * Update full_page_writes in shared memory and write an - * XLOG_FPW_CHANGE record before resource manager writes cleanup - * WAL records or checkpoint record is written. + * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE + * record before resource manager writes cleanup WAL records or checkpoint + * record is written. */ Insert->fullPageWrites = lastFullPageWrites; LocalSetXLogInsertAllowed(); @@ -6954,8 +6959,8 @@ StartupXLOG(void) LWLockRelease(ProcArrayLock); /* - * Start up the commit log and subtrans, if not already done for - * hot standby. + * Start up the commit log and subtrans, if not already done for hot + * standby. */ if (standbyState == STANDBY_DISABLED) { @@ -7705,9 +7710,9 @@ CreateCheckPoint(int flags) checkPoint.time = (pg_time_t) time(NULL); /* - * For Hot Standby, derive the oldestActiveXid before we fix the redo pointer. - * This allows us to begin accumulating changes to assemble our starting - * snapshot of locks and transactions. + * For Hot Standby, derive the oldestActiveXid before we fix the redo + * pointer. This allows us to begin accumulating changes to assemble our + * starting snapshot of locks and transactions. */ if (!shutdown && XLogStandbyInfoActive()) checkPoint.oldestActiveXid = GetOldestActiveTransactionId(); @@ -8062,7 +8067,7 @@ RecoveryRestartPoint(const CheckPoint *checkPoint) volatile XLogCtlData *xlogctl = XLogCtl; /* - * Is it safe to restartpoint? We must ask each of the resource managers + * Is it safe to restartpoint? We must ask each of the resource managers * whether they have any partial state information that might prevent a * correct restart from this point. If so, we skip this opportunity, but * return at the next checkpoint record for another try. @@ -8082,10 +8087,11 @@ RecoveryRestartPoint(const CheckPoint *checkPoint) } /* - * Also refrain from creating a restartpoint if we have seen any references - * to non-existent pages. Restarting recovery from the restartpoint would - * not see the references, so we would lose the cross-check that the pages - * belonged to a relation that was dropped later. + * Also refrain from creating a restartpoint if we have seen any + * references to non-existent pages. Restarting recovery from the + * restartpoint would not see the references, so we would lose the + * cross-check that the pages belonged to a relation that was dropped + * later. */ if (XLogHaveInvalidPages()) { @@ -8098,8 +8104,8 @@ RecoveryRestartPoint(const CheckPoint *checkPoint) } /* - * Copy the checkpoint record to shared memory, so that checkpointer - * can work out the next time it wants to perform a restartpoint. + * Copy the checkpoint record to shared memory, so that checkpointer can + * work out the next time it wants to perform a restartpoint. 
*/ SpinLockAcquire(&xlogctl->info_lck); XLogCtl->lastCheckPointRecPtr = ReadRecPtr; @@ -8493,8 +8499,8 @@ UpdateFullPageWrites(void) * Do nothing if full_page_writes has not been changed. * * It's safe to check the shared full_page_writes without the lock, - * because we assume that there is no concurrently running process - * which can update it. + * because we assume that there is no concurrently running process which + * can update it. */ if (fullPageWrites == Insert->fullPageWrites) return; @@ -8505,8 +8511,8 @@ UpdateFullPageWrites(void) * It's always safe to take full page images, even when not strictly * required, but not the other round. So if we're setting full_page_writes * to true, first set it true and then write the WAL record. If we're - * setting it to false, first write the WAL record and then set the - * global flag. + * setting it to false, first write the WAL record and then set the global + * flag. */ if (fullPageWrites) { @@ -8516,12 +8522,12 @@ UpdateFullPageWrites(void) } /* - * Write an XLOG_FPW_CHANGE record. This allows us to keep - * track of full_page_writes during archive recovery, if required. + * Write an XLOG_FPW_CHANGE record. This allows us to keep track of + * full_page_writes during archive recovery, if required. */ if (XLogStandbyInfoActive() && !RecoveryInProgress()) { - XLogRecData rdata; + XLogRecData rdata; rdata.data = (char *) (&fullPageWrites); rdata.len = sizeof(bool); @@ -8561,7 +8567,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) /* * We used to try to take the maximum of ShmemVariableCache->nextOid * and the recorded nextOid, but that fails if the OID counter wraps - * around. Since no OID allocation should be happening during replay + * around. Since no OID allocation should be happening during replay * anyway, better to just believe the record exactly. We still take * OidGenLock while setting the variable, just in case. */ @@ -8597,7 +8603,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) !XLogRecPtrIsInvalid(ControlFile->backupStartPoint) && XLogRecPtrIsInvalid(ControlFile->backupEndPoint)) ereport(PANIC, - (errmsg("online backup was canceled, recovery cannot continue"))); + (errmsg("online backup was canceled, recovery cannot continue"))); /* * If we see a shutdown checkpoint, we know that nothing was running @@ -8797,9 +8803,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) memcpy(&fpw, XLogRecGetData(record), sizeof(bool)); /* - * Update the LSN of the last replayed XLOG_FPW_CHANGE record - * so that do_pg_start_backup() and do_pg_stop_backup() can check - * whether full_page_writes has been disabled during online backup. + * Update the LSN of the last replayed XLOG_FPW_CHANGE record so that + * do_pg_start_backup() and do_pg_stop_backup() can check whether + * full_page_writes has been disabled during online backup. */ if (!fpw) { @@ -8825,7 +8831,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec) CheckPoint *checkpoint = (CheckPoint *) rec; appendStringInfo(buf, "checkpoint: redo %X/%X; " - "tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; " + "tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; " "oldest xid %u in DB %u; oldest running xid %u; %s", checkpoint->redo.xlogid, checkpoint->redo.xrecoff, checkpoint->ThisTimeLineID, @@ -9115,8 +9121,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) errhint("WAL control functions cannot be executed during recovery."))); /* - * During recovery, we don't need to check WAL level. 
Because, if WAL level - * is not sufficient, it's impossible to get here during recovery. + * During recovery, we don't need to check WAL level. Because, if WAL + * level is not sufficient, it's impossible to get here during recovery. */ if (!backup_started_in_recovery && !XLogIsNeeded()) ereport(ERROR, @@ -9179,7 +9185,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) * old timeline IDs. That would otherwise happen if you called * pg_start_backup() right after restoring from a PITR archive: the * first WAL segment containing the startup checkpoint has pages in - * the beginning with the old timeline ID. That can cause trouble at + * the beginning with the old timeline ID. That can cause trouble at * recovery: we won't have a history file covering the old timeline if * pg_xlog directory was not included in the base backup and the WAL * archive was cleared too before starting the backup. @@ -9202,17 +9208,18 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) bool checkpointfpw; /* - * Force a CHECKPOINT. Aside from being necessary to prevent torn + * Force a CHECKPOINT. Aside from being necessary to prevent torn * page problems, this guarantees that two successive backup runs * will have different checkpoint positions and hence different * history file names, even if nothing happened in between. * - * During recovery, establish a restartpoint if possible. We use the last - * restartpoint as the backup starting checkpoint. This means that two - * successive backup runs can have same checkpoint positions. + * During recovery, establish a restartpoint if possible. We use + * the last restartpoint as the backup starting checkpoint. This + * means that two successive backup runs can have same checkpoint + * positions. * - * Since the fact that we are executing do_pg_start_backup() during - * recovery means that checkpointer is running, we can use + * Since the fact that we are executing do_pg_start_backup() + * during recovery means that checkpointer is running, we can use * RequestCheckpoint() to establish a restartpoint. * * We use CHECKPOINT_IMMEDIATE only if requested by user (via @@ -9237,12 +9244,12 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; - XLogRecPtr recptr; + XLogRecPtr recptr; /* - * Check to see if all WAL replayed during online backup (i.e., - * since last restartpoint used as backup starting checkpoint) - * contain full-page writes. + * Check to see if all WAL replayed during online backup + * (i.e., since last restartpoint used as backup starting + * checkpoint) contain full-page writes. */ SpinLockAcquire(&xlogctl->info_lck); recptr = xlogctl->lastFpwDisableRecPtr; @@ -9250,20 +9257,20 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) if (!checkpointfpw || XLByteLE(startpoint, recptr)) ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("WAL generated with full_page_writes=off was replayed " - "since last restartpoint"), - errhint("This means that the backup being taken on standby " - "is corrupt and should not be used. 
" - "Enable full_page_writes and run CHECKPOINT on the master, " - "and then try an online backup again."))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("WAL generated with full_page_writes=off was replayed " + "since last restartpoint"), + errhint("This means that the backup being taken on standby " + "is corrupt and should not be used. " + "Enable full_page_writes and run CHECKPOINT on the master, " + "and then try an online backup again."))); /* * During recovery, since we don't use the end-of-backup WAL - * record and don't write the backup history file, the starting WAL - * location doesn't need to be unique. This means that two base - * backups started at the same time might use the same checkpoint - * as starting locations. + * record and don't write the backup history file, the + * starting WAL location doesn't need to be unique. This means + * that two base backups started at the same time might use + * the same checkpoint as starting locations. */ gotUniqueStartpoint = true; } @@ -9443,8 +9450,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) errhint("WAL control functions cannot be executed during recovery."))); /* - * During recovery, we don't need to check WAL level. Because, if WAL level - * is not sufficient, it's impossible to get here during recovery. + * During recovery, we don't need to check WAL level. Because, if WAL + * level is not sufficient, it's impossible to get here during recovery. */ if (!backup_started_in_recovery && !XLogIsNeeded()) ereport(ERROR, @@ -9537,9 +9544,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */ /* - * Parse the BACKUP FROM line. If we are taking an online backup from - * the standby, we confirm that the standby has not been promoted - * during the backup. + * Parse the BACKUP FROM line. If we are taking an online backup from the + * standby, we confirm that the standby has not been promoted during the + * backup. */ ptr = strstr(remaining, "BACKUP FROM:"); if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1) @@ -9555,30 +9562,30 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) "Try taking another online backup."))); /* - * During recovery, we don't write an end-of-backup record. We assume - * that pg_control was backed up last and its minimum recovery - * point can be available as the backup end location. Since we don't - * have an end-of-backup record, we use the pg_control value to check - * whether we've reached the end of backup when starting recovery from - * this backup. We have no way of checking if pg_control wasn't backed - * up last however. + * During recovery, we don't write an end-of-backup record. We assume that + * pg_control was backed up last and its minimum recovery point can be + * available as the backup end location. Since we don't have an + * end-of-backup record, we use the pg_control value to check whether + * we've reached the end of backup when starting recovery from this + * backup. We have no way of checking if pg_control wasn't backed up last + * however. * * We don't force a switch to new WAL file and wait for all the required - * files to be archived. This is okay if we use the backup to start - * the standby. But, if it's for an archive recovery, to ensure all the - * required files are available, a user should wait for them to be archived, - * or include them into the backup. + * files to be archived. This is okay if we use the backup to start the + * standby. 
But, if it's for an archive recovery, to ensure all the + * required files are available, a user should wait for them to be + * archived, or include them into the backup. * * We return the current minimum recovery point as the backup end * location. Note that it's would be bigger than the exact backup end - * location if the minimum recovery point is updated since the backup - * of pg_control. This is harmless for current uses. + * location if the minimum recovery point is updated since the backup of + * pg_control. This is harmless for current uses. * * XXX currently a backup history file is for informational and debug * purposes only. It's not essential for an online backup. Furthermore, * even if it's created, it will not be archived during recovery because - * an archiver is not invoked. So it doesn't seem worthwhile to write - * a backup history file during recovery. + * an archiver is not invoked. So it doesn't seem worthwhile to write a + * backup history file during recovery. */ if (backup_started_in_recovery) { @@ -9597,12 +9604,12 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) if (XLByteLE(startpoint, recptr)) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("WAL generated with full_page_writes=off was replayed " - "during online backup"), - errhint("This means that the backup being taken on standby " - "is corrupt and should not be used. " - "Enable full_page_writes and run CHECKPOINT on the master, " - "and then try an online backup again."))); + errmsg("WAL generated with full_page_writes=off was replayed " + "during online backup"), + errhint("This means that the backup being taken on standby " + "is corrupt and should not be used. " + "Enable full_page_writes and run CHECKPOINT on the master, " + "and then try an online backup again."))); LWLockAcquire(ControlFileLock, LW_SHARED); @@ -9905,10 +9912,11 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE))); + /* - * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't - * restore from an older backup anyway, but since the information on it - * is not strictly required, don't error out if it's missing for some reason. + * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't restore + * from an older backup anyway, but since the information on it is not + * strictly required, don't error out if it's missing for some reason. */ if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1) { @@ -10050,8 +10058,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt, if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg)) { /* - * Request a restartpoint if we've replayed too much - * xlog since the last one. + * Request a restartpoint if we've replayed too much xlog since the + * last one. */ if (StandbyMode && bgwriterLaunched) { diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index f286cdfc07..6ddcc59b37 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -80,10 +80,10 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno, /* * Once recovery has reached a consistent state, the invalid-page table * should be empty and remain so. If a reference to an invalid page is - * found after consistency is reached, PANIC immediately. 
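/*
 * Sketch, recalled from xlogutils.c and not part of this commit, of the
 * check the comment above describes: once consistency has been reached,
 * an invalid page reference is treated as fatal corruption on the spot.
 */
if (reachedConsistency)
{
	report_invalid_page(WARNING, node, forkno, blkno, present);
	elog(PANIC, "WAL contains references to invalid pages");
}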
This might - * seem aggressive, but it's better than letting the invalid reference - * linger in the hash table until the end of recovery and PANIC there, - * which might come only much later if this is a standby server. + * found after consistency is reached, PANIC immediately. This might seem + * aggressive, but it's better than letting the invalid reference linger + * in the hash table until the end of recovery and PANIC there, which + * might come only much later if this is a standby server. */ if (reachedConsistency) { diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 9315e79c99..45cd0808ce 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -186,10 +186,10 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, foreach(j, grantees) { - AclItem aclitem; + AclItem aclitem; Acl *newer_acl; - aclitem. ai_grantee = lfirst_oid(j); + aclitem.ai_grantee = lfirst_oid(j); /* * Grant options can only be granted to individual roles, not PUBLIC. @@ -202,7 +202,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("grant options can only be granted to roles"))); - aclitem. ai_grantor = grantorId; + aclitem.ai_grantor = grantorId; /* * The asymmetry in the conditions here comes from the spec. In @@ -3073,7 +3073,7 @@ ExecGrant_Type(InternalGrant *istmt) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("cannot set privileges of array types"), - errhint("Set the privileges of the element type instead."))); + errhint("Set the privileges of the element type instead."))); /* Used GRANT DOMAIN on a non-domain? */ if (istmt->objtype == ACL_OBJECT_DOMAIN && @@ -4184,7 +4184,7 @@ pg_type_aclmask(Oid type_oid, Oid roleid, AclMode mask, AclMaskHow how) /* "True" array types don't manage permissions of their own */ if (typeForm->typelem != 0 && typeForm->typlen == -1) { - Oid elttype_oid = typeForm->typelem; + Oid elttype_oid = typeForm->typelem; ReleaseSysCache(tuple); diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index db6769cb90..d4e1f76f31 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -173,7 +173,7 @@ static void reportDependentObjects(const ObjectAddresses *targetObjects, int msglevel, const ObjectAddress *origObject); static void deleteOneObject(const ObjectAddress *object, - Relation depRel, int32 flags); + Relation depRel, int32 flags); static void doDeletion(const ObjectAddress *object, int flags); static void AcquireDeletionLock(const ObjectAddress *object, int flags); static void ReleaseDeletionLock(const ObjectAddress *object); @@ -352,7 +352,8 @@ performMultipleDeletions(const ObjectAddresses *objects, free_object_addresses(targetObjects); /* - * We closed depRel earlier in deleteOneObject if doing a drop concurrently + * We closed depRel earlier in deleteOneObject if doing a drop + * concurrently */ if ((flags & PERFORM_DELETION_CONCURRENTLY) != PERFORM_DELETION_CONCURRENTLY) heap_close(depRel, RowExclusiveLock); @@ -424,7 +425,7 @@ deleteWhatDependsOn(const ObjectAddress *object, * Since this function is currently only used to clean out temporary * schemas, we pass PERFORM_DELETION_INTERNAL here, indicating that * the operation is an automatic system operation rather than a user - * action. If, in the future, this function is used for other + * action. If, in the future, this function is used for other * purposes, we might need to revisit this. 
*/ deleteOneObject(thisobj, depRel, PERFORM_DELETION_INTERNAL); @@ -514,12 +515,12 @@ findDependentObjects(const ObjectAddress *object, /* * The target object might be internally dependent on some other object * (its "owner"), and/or be a member of an extension (also considered its - * owner). If so, and if we aren't recursing from the owning object, we + * owner). If so, and if we aren't recursing from the owning object, we * have to transform this deletion request into a deletion request of the * owning object. (We'll eventually recurse back to this object, but the - * owning object has to be visited first so it will be deleted after.) - * The way to find out about this is to scan the pg_depend entries that - * show what this object depends on. + * owning object has to be visited first so it will be deleted after.) The + * way to find out about this is to scan the pg_depend entries that show + * what this object depends on. */ ScanKeyInit(&key[0], Anum_pg_depend_classid, @@ -577,7 +578,7 @@ findDependentObjects(const ObjectAddress *object, /* * Exception 1a: if the owning object is listed in * pendingObjects, just release the caller's lock and - * return. We'll eventually complete the DROP when we + * return. We'll eventually complete the DROP when we * reach that entry in the pending list. */ if (pendingObjects && @@ -593,8 +594,8 @@ findDependentObjects(const ObjectAddress *object, * Exception 1b: if the owning object is the extension * currently being created/altered, it's okay to continue * with the deletion. This allows dropping of an - * extension's objects within the extension's scripts, - * as well as corner cases such as dropping a transient + * extension's objects within the extension's scripts, as + * well as corner cases such as dropping a transient * object created within such a script. */ if (creating_extension && @@ -618,8 +619,8 @@ findDependentObjects(const ObjectAddress *object, * it's okay to continue with the deletion. This holds when * recursing from a whole object that includes the nominal * other end as a component, too. Since there can be more - * than one "owning" object, we have to allow matches that - * are more than one level down in the stack. + * than one "owning" object, we have to allow matches that are + * more than one level down in the stack. */ if (stack_address_present_add_flags(&otherObject, 0, stack)) break; @@ -630,7 +631,7 @@ findDependentObjects(const ObjectAddress *object, * owning object. * * First, release caller's lock on this object and get - * deletion lock on the owning object. (We must release + * deletion lock on the owning object. (We must release * caller's lock to avoid deadlock against a concurrent * deletion of the owning object.) */ @@ -999,7 +1000,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags) /* DROP hook of the objects being removed */ if (object_access_hook) { - ObjectAccessDrop drop_arg; + ObjectAccessDrop drop_arg; + drop_arg.dropflags = flags; InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId, object->objectSubId, &drop_arg); @@ -1049,8 +1051,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags) object->objectSubId); /* - * Close depRel if we are doing a drop concurrently because it - * commits the transaction, so we don't want dangling references. + * Close depRel if we are doing a drop concurrently because it commits the + * transaction, so we don't want dangling references. 
*/ if ((flags & PERFORM_DELETION_CONCURRENTLY) == PERFORM_DELETION_CONCURRENTLY) heap_close(depRel, RowExclusiveLock); @@ -1093,8 +1095,8 @@ doDeletion(const ObjectAddress *object, int flags) if (relKind == RELKIND_INDEX) { - bool concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY) - == PERFORM_DELETION_CONCURRENTLY); + bool concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY) + == PERFORM_DELETION_CONCURRENTLY); Assert(object->objectSubId == 0); index_drop(object->objectId, concurrent); diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 8fc69ae720..49e7644699 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -1957,7 +1957,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr, ccsrc, /* Source form of check constraint */ is_local, /* conislocal */ inhcount, /* coninhcount */ - is_no_inherit); /* connoinherit */ + is_no_inherit); /* connoinherit */ pfree(ccbin); pfree(ccsrc); @@ -1998,7 +1998,7 @@ StoreConstraints(Relation rel, List *cooked_constraints) break; case CONSTR_CHECK: StoreRelCheck(rel, con->name, con->expr, !con->skip_validation, - con->is_local, con->inhcount, con->is_no_inherit); + con->is_local, con->inhcount, con->is_no_inherit); numchecks++; break; default: @@ -2345,8 +2345,8 @@ MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr, } /* OK to update the tuple */ ereport(NOTICE, - (errmsg("merging constraint \"%s\" with inherited definition", - ccname))); + (errmsg("merging constraint \"%s\" with inherited definition", + ccname))); simple_heap_update(conDesc, &tup->t_self, tup); CatalogUpdateIndexes(conDesc, tup); break; diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 998379c8af..9e8b1cc49b 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -1155,7 +1155,7 @@ index_constraint_create(Relation heapRelation, NULL, NULL, true, /* islocal */ - 0, /* inhcount */ + 0, /* inhcount */ false); /* noinherit */ /* @@ -1324,8 +1324,8 @@ index_drop(Oid indexId, bool concurrent) CheckTableNotInUse(userIndexRelation, "DROP INDEX"); /* - * Drop Index concurrently is similar in many ways to creating an - * index concurrently, so some actions are similar to DefineIndex() + * Drop Index concurrently is similar in many ways to creating an index + * concurrently, so some actions are similar to DefineIndex() */ if (concurrent) { @@ -1339,7 +1339,7 @@ index_drop(Oid indexId, bool concurrent) indexRelation = heap_open(IndexRelationId, RowExclusiveLock); tuple = SearchSysCacheCopy1(INDEXRELID, - ObjectIdGetDatum(indexId)); + ObjectIdGetDatum(indexId)); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for index %u", indexId); indexForm = (Form_pg_index) GETSTRUCT(tuple); @@ -1373,15 +1373,15 @@ index_drop(Oid indexId, bool concurrent) * will be marked not indisvalid, so that no one else tries to either * insert into it or use it for queries. * - * We must commit our current transaction so that the index update becomes - * visible; then start another. Note that all the data structures we just - * built are lost in the commit. The only data we keep past here are the - * relation IDs. + * We must commit our current transaction so that the index update + * becomes visible; then start another. Note that all the data + * structures we just built are lost in the commit. The only data we + * keep past here are the relation IDs. * * Before committing, get a session-level lock on the table, to ensure * that neither it nor the index can be dropped before we finish. 
This - * cannot block, even if someone else is waiting for access, because we - * already have the same lock within our transaction. + * cannot block, even if someone else is waiting for access, because + * we already have the same lock within our transaction. */ LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock); LockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock); @@ -1391,23 +1391,23 @@ index_drop(Oid indexId, bool concurrent) StartTransactionCommand(); /* - * Now we must wait until no running transaction could have the table open - * with the old list of indexes. To do this, inquire which xacts - * currently would conflict with AccessExclusiveLock on the table -- ie, - * which ones have a lock of any kind on the table. Then wait for each of - * these xacts to commit or abort. Note we do not need to worry about - * xacts that open the table for writing after this point; they will see - * the index as invalid when they open the relation. + * Now we must wait until no running transaction could have the table + * open with the old list of indexes. To do this, inquire which xacts + * currently would conflict with AccessExclusiveLock on the table -- + * ie, which ones have a lock of any kind on the table. Then wait for + * each of these xacts to commit or abort. Note we do not need to + * worry about xacts that open the table for writing after this point; + * they will see the index as invalid when they open the relation. * - * Note: the reason we use actual lock acquisition here, rather than just - * checking the ProcArray and sleeping, is that deadlock is possible if - * one of the transactions in question is blocked trying to acquire an - * exclusive lock on our table. The lock code will detect deadlock and - * error out properly. + * Note: the reason we use actual lock acquisition here, rather than + * just checking the ProcArray and sleeping, is that deadlock is + * possible if one of the transactions in question is blocked trying + * to acquire an exclusive lock on our table. The lock code will + * detect deadlock and error out properly. * - * Note: GetLockConflicts() never reports our own xid, hence we need not - * check for that. Also, prepared xacts are not reported, which is fine - * since they certainly aren't going to do anything more. + * Note: GetLockConflicts() never reports our own xid, hence we need + * not check for that. Also, prepared xacts are not reported, which + * is fine since they certainly aren't going to do anything more. */ old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock); @@ -1786,7 +1786,7 @@ index_update_stats(Relation rel, if (rd_rel->relkind != RELKIND_INDEX) relallvisible = visibilitymap_count(rel); - else /* don't bother for indexes */ + else /* don't bother for indexes */ relallvisible = 0; if (rd_rel->relpages != (int32) relpages) diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index e92efd863e..1b6bb3bb6d 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -226,7 +226,7 @@ Datum pg_is_other_temp_schema(PG_FUNCTION_ARGS); Oid RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, bool missing_ok, bool nowait, - RangeVarGetRelidCallback callback, void *callback_arg) + RangeVarGetRelidCallback callback, void *callback_arg) { uint64 inval_count; Oid relId; @@ -247,20 +247,20 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, } /* - * DDL operations can change the results of a name lookup. 
Since all - * such operations will generate invalidation messages, we keep track - * of whether any such messages show up while we're performing the - * operation, and retry until either (1) no more invalidation messages - * show up or (2) the answer doesn't change. + * DDL operations can change the results of a name lookup. Since all such + * operations will generate invalidation messages, we keep track of + * whether any such messages show up while we're performing the operation, + * and retry until either (1) no more invalidation messages show up or (2) + * the answer doesn't change. * * But if lockmode = NoLock, then we assume that either the caller is OK * with the answer changing under them, or that they already hold some * appropriate lock, and therefore return the first answer we get without - * checking for invalidation messages. Also, if the requested lock is + * checking for invalidation messages. Also, if the requested lock is * already held, no LockRelationOid will not AcceptInvalidationMessages, * so we may fail to notice a change. We could protect against that case - * by calling AcceptInvalidationMessages() before beginning this loop, - * but that would add a significant amount overhead, so for now we don't. + * by calling AcceptInvalidationMessages() before beginning this loop, but + * that would add a significant amount overhead, so for now we don't. */ for (;;) { @@ -282,17 +282,18 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, if (relation->relpersistence == RELPERSISTENCE_TEMP) { if (!OidIsValid(myTempNamespace)) - relId = InvalidOid; /* this probably can't happen? */ + relId = InvalidOid; /* this probably can't happen? */ else { if (relation->schemaname) { - Oid namespaceId; + Oid namespaceId; + namespaceId = LookupExplicitNamespace(relation->schemaname); if (namespaceId != myTempNamespace) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("temporary tables cannot specify a schema name"))); + errmsg("temporary tables cannot specify a schema name"))); } relId = get_relname_relid(relation->relname, myTempNamespace); @@ -315,12 +316,12 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, /* * Invoke caller-supplied callback, if any. * - * This callback is a good place to check permissions: we haven't taken - * the table lock yet (and it's really best to check permissions before - * locking anything!), but we've gotten far enough to know what OID we - * think we should lock. Of course, concurrent DDL might change things - * while we're waiting for the lock, but in that case the callback will - * be invoked again for the new OID. + * This callback is a good place to check permissions: we haven't + * taken the table lock yet (and it's really best to check permissions + * before locking anything!), but we've gotten far enough to know what + * OID we think we should lock. Of course, concurrent DDL might + * change things while we're waiting for the lock, but in that case + * the callback will be invoked again for the new OID. */ if (callback) callback(relation, relId, oldRelId, callback_arg); @@ -328,21 +329,21 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, /* * If no lock requested, we assume the caller knows what they're * doing. They should have already acquired a heavyweight lock on - * this relation earlier in the processing of this same statement, - * so it wouldn't be appropriate to AcceptInvalidationMessages() - * here, as that might pull the rug out from under them. 
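/*
 * Condensed sketch, not from this patch, of the retry loop described
 * above; resolve_relation_name() is a stand-in invented for the sketch,
 * as the real lookup is inline in RangeVarGetRelidExtended().
 */
for (;;)
{
	inval_count = SharedInvalidMessageCounter;
	relId = resolve_relation_name(relation);    /* hypothetical helper */
	if (lockmode == NoLock)
		break;              /* caller accepts a possibly stale answer */
	LockRelationOid(relId, lockmode);
	if (inval_count == SharedInvalidMessageCounter)
		break;              /* no invalidation arrived; answer is fresh */
	UnlockRelationOid(relId, lockmode); /* may have locked the wrong rel */
}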
+ * this relation earlier in the processing of this same statement, so + * it wouldn't be appropriate to AcceptInvalidationMessages() here, as + * that might pull the rug out from under them. */ if (lockmode == NoLock) break; /* - * If, upon retry, we get back the same OID we did last time, then - * the invalidation messages we processed did not change the final - * answer. So we're done. + * If, upon retry, we get back the same OID we did last time, then the + * invalidation messages we processed did not change the final answer. + * So we're done. * * If we got a different OID, we've locked the relation that used to - * have this name rather than the one that does now. So release - * the lock. + * have this name rather than the one that does now. So release the + * lock. */ if (retry) { @@ -384,8 +385,8 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, break; /* - * Something may have changed. Let's repeat the name lookup, to - * make sure this name still references the same relation it did + * Something may have changed. Let's repeat the name lookup, to make + * sure this name still references the same relation it did * previously. */ retry = true; @@ -550,8 +551,8 @@ RangeVarGetAndCheckCreationNamespace(RangeVar *relation, relid = InvalidOid; /* - * In bootstrap processing mode, we don't bother with permissions - * or locking. Permissions might not be working yet, and locking is + * In bootstrap processing mode, we don't bother with permissions or + * locking. Permissions might not be working yet, and locking is * unnecessary. */ if (IsBootstrapProcessingMode()) diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index d133f64776..5a06fcbf41 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -75,10 +75,10 @@ */ typedef struct { - Oid class_oid; /* oid of catalog */ - Oid oid_index_oid; /* oid of index on system oid column */ - int oid_catcache_id; /* id of catcache on system oid column */ - AttrNumber attnum_namespace; /* attnum of namespace field */ + Oid class_oid; /* oid of catalog */ + Oid oid_index_oid; /* oid of index on system oid column */ + int oid_catcache_id; /* id of catcache on system oid column */ + AttrNumber attnum_namespace; /* attnum of namespace field */ } ObjectPropertyType; static ObjectPropertyType ObjectProperty[] = @@ -286,13 +286,13 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, for (;;) { /* - * Remember this value, so that, after looking up the object name - * and locking it, we can check whether any invalidation messages - * have been processed that might require a do-over. + * Remember this value, so that, after looking up the object name and + * locking it, we can check whether any invalidation messages have + * been processed that might require a do-over. */ inval_count = SharedInvalidMessageCounter; - /* Look up object address. */ + /* Look up object address. 
*/ switch (objtype) { case OBJECT_INDEX: @@ -367,7 +367,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, case OBJECT_OPCLASS: case OBJECT_OPFAMILY: address = get_object_address_opcf(objtype, - objname, objargs, missing_ok); + objname, objargs, missing_ok); break; case OBJECT_LARGEOBJECT: Assert(list_length(objname) == 1); @@ -377,10 +377,10 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, if (!LargeObjectExists(address.objectId)) { if (!missing_ok) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("large object %u does not exist", - address.objectId))); + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("large object %u does not exist", + address.objectId))); } break; case OBJECT_CAST: @@ -475,8 +475,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, * At this point, we've resolved the name to an OID and locked the * corresponding database object. However, it's possible that by the * time we acquire the lock on the object, concurrent DDL has modified - * the database in such a way that the name we originally looked up - * no longer resolves to that OID. + * the database in such a way that the name we originally looked up no + * longer resolves to that OID. * * We can be certain that this isn't an issue if (a) no shared * invalidation messages have been processed or (b) we've locked a @@ -488,12 +488,12 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, * the relation, which is enough to freeze out any concurrent DDL. * * In all other cases, however, it's possible that the name we looked - * up no longer refers to the object we locked, so we retry the - * lookup and see whether we get the same answer. + * up no longer refers to the object we locked, so we retry the lookup + * and see whether we get the same answer. */ - if (inval_count == SharedInvalidMessageCounter || relation != NULL) - break; - old_address = address; + if (inval_count == SharedInvalidMessageCounter || relation != NULL) + break; + old_address = address; } /* Return the object address and the relation. */ @@ -621,7 +621,7 @@ get_relation_by_qualified_name(ObjectType objtype, List *objname, bool missing_ok) { Relation relation; - ObjectAddress address; + ObjectAddress address; address.classId = RelationRelationId; address.objectId = InvalidOid; @@ -721,8 +721,8 @@ get_object_address_relobject(ObjectType objtype, List *objname, address.objectSubId = 0; /* - * Caller is expecting to get back the relation, even though we - * didn't end up using it to find the rule. + * Caller is expecting to get back the relation, even though we didn't + * end up using it to find the rule. 
*/ if (OidIsValid(address.objectId)) relation = heap_open(reloid, AccessShareLock); @@ -768,7 +768,7 @@ get_object_address_relobject(ObjectType objtype, List *objname, if (!OidIsValid(address.objectId)) { heap_close(relation, AccessShareLock); - relation = NULL; /* department of accident prevention */ + relation = NULL; /* department of accident prevention */ return address; } } @@ -834,9 +834,10 @@ static ObjectAddress get_object_address_type(ObjectType objtype, List *objname, bool missing_ok) { - ObjectAddress address; + ObjectAddress address; TypeName *typename; - Type tup; + Type tup; + typename = makeTypeNameFromNameList(objname); address.classId = TypeRelationId; @@ -1083,7 +1084,7 @@ get_object_namespace(const ObjectAddress *address) HeapTuple tuple; bool isnull; Oid oid; - ObjectPropertyType *property; + ObjectPropertyType *property; /* If not owned by a namespace, just return InvalidOid. */ property = get_object_property_data(address->classId); @@ -1122,5 +1123,5 @@ get_object_property_data(Oid class_id) return &ObjectProperty[index]; elog(ERROR, "unrecognized class id: %u", class_id); - return NULL; /* not reached */ + return NULL; /* not reached */ } diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index dca5d09ee6..224859d76e 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -831,8 +831,8 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok) if (OidIsValid(conOid)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("domain \"%s\" has multiple constraints named \"%s\"", - format_type_be(typid), conname))); + errmsg("domain \"%s\" has multiple constraints named \"%s\"", + format_type_be(typid), conname))); conOid = HeapTupleGetOid(tuple); } } diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index 843f03d2c3..8e58435606 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -150,7 +150,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object, /* Only need to check for existing membership if isReplace */ if (isReplace) { - Oid oldext; + Oid oldext; oldext = getExtensionOfObject(object->classId, object->objectId); if (OidIsValid(oldext)) diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index ae71b93917..599f04242f 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -228,7 +228,7 @@ ProcedureCreate(const char *procedureName, /* * Do not allow polymorphic return type unless at least one input argument - * is polymorphic. ANYRANGE return type is even stricter: must have an + * is polymorphic. ANYRANGE return type is even stricter: must have an * ANYRANGE input (since we can't deduce the specific range type from * ANYELEMENT). Also, do not allow return type INTERNAL unless at least * one input argument is INTERNAL. 
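/*
 * Sketch, not part of this commit, of the ProcedureCreate() rule quoted
 * just above, approximately as 9.2 enforces it: a polymorphic result type
 * must be deducible from at least one polymorphic input argument.
 */
if (IsPolymorphicType(returnType) && !genericInParam)
	ereport(ERROR,
			(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
			 errmsg("cannot determine result data type"),
			 errdetail("A function returning a polymorphic type must have at least one polymorphic argument.")));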
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index a67aebbdb6..1edf950c56 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -1287,7 +1287,7 @@ shdepReassignOwned(List *roleids, Oid newrole) ereport(ERROR, (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), errmsg("cannot reassign ownership of objects owned by %s because they are required by the database system", - getObjectDescription(&obj)))); + getObjectDescription(&obj)))); /* * There's no need to tell the whole truth, which is that we diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index 97ca95b6c8..993bc49c2a 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -500,8 +500,8 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record) /* * Forcibly create relation if it doesn't exist (which suggests that * it was dropped somewhere later in the WAL sequence). As in - * XLogReadBuffer, we prefer to recreate the rel and replay the log - * as best we can until the drop is seen. + * XLogReadBuffer, we prefer to recreate the rel and replay the log as + * best we can until the drop is seen. */ smgrcreate(reln, MAIN_FORKNUM, true); diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 225ea866bf..9612a276f3 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -96,11 +96,11 @@ static void compute_index_stats(Relation onerel, double totalrows, MemoryContext col_context); static VacAttrStats *examine_attribute(Relation onerel, int attnum, Node *index_expr); -static int acquire_sample_rows(Relation onerel, int elevel, +static int acquire_sample_rows(Relation onerel, int elevel, HeapTuple *rows, int targrows, double *totalrows, double *totaldeadrows); static int compare_rows(const void *a, const void *b); -static int acquire_inherited_sample_rows(Relation onerel, int elevel, +static int acquire_inherited_sample_rows(Relation onerel, int elevel, HeapTuple *rows, int targrows, double *totalrows, double *totaldeadrows); static void update_attstats(Oid relid, bool inh, @@ -118,7 +118,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy) Relation onerel; int elevel; AcquireSampleRowsFunc acquirefunc = NULL; - BlockNumber relpages = 0; + BlockNumber relpages = 0; /* Select logging level */ if (vacstmt->options & VACOPT_VERBOSE) @@ -205,8 +205,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy) } /* - * Check that it's a plain table or foreign table; we used to do this - * in get_rel_oids() but seems safer to check after we've locked the + * Check that it's a plain table or foreign table; we used to do this in + * get_rel_oids() but seems safer to check after we've locked the * relation. */ if (onerel->rd_rel->relkind == RELKIND_RELATION) @@ -235,8 +235,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy) if (!ok) { ereport(WARNING, - (errmsg("skipping \"%s\" --- cannot analyze this foreign table", - RelationGetRelationName(onerel)))); + (errmsg("skipping \"%s\" --- cannot analyze this foreign table", + RelationGetRelationName(onerel)))); relation_close(onerel, ShareUpdateExclusiveLock); return; } @@ -464,8 +464,8 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, /* * Determine how many rows we need to sample, using the worst case from * all analyzable columns. We use a lower bound of 100 rows to avoid - * possible overflow in Vitter's algorithm. 
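/*
 * Sketch, not from this patch, of the reservoir-sampling idea behind
 * acquire_sample_rows(): keep the first targrows tuples, then let the
 * t-th tuple displace a uniformly chosen slot with probability targrows/t.
 * The real code draws far fewer random numbers by using Vitter's
 * algorithm Z (anl_get_next_S) to compute how many tuples to skip instead.
 */
t += 1;                                     /* tuples seen so far */
if (numrows < targrows)
	rows[numrows++] = heap_copytuple(tup);  /* still filling the reservoir */
else if (anl_random_fract() * t < (double) targrows)
{
	int     k = (int) (targrows * anl_random_fract()); /* 0..targrows-1 */

	heap_freetuple(rows[k]);
	rows[k] = heap_copytuple(tup);
}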
(Note: that will also be - * the target in the corner case where there are no analyzable columns.) + * possible overflow in Vitter's algorithm. (Note: that will also be the + * target in the corner case where there are no analyzable columns.) */ targrows = 100; for (i = 0; i < attr_cnt; i++) @@ -1337,7 +1337,7 @@ anl_get_next_S(double t, int n, double *stateptr) double V, quot; - V = anl_random_fract(); /* Generate V */ + V = anl_random_fract(); /* Generate V */ S = 0; t += 1; /* Note: "num" in Vitter's code is always equal to t - n */ @@ -1398,7 +1398,7 @@ anl_get_next_S(double t, int n, double *stateptr) y *= numer / denom; denom -= 1; } - W = exp(-log(anl_random_fract()) / n); /* Generate W in advance */ + W = exp(-log(anl_random_fract()) / n); /* Generate W in advance */ if (exp(log(y) / n) <= (t + X) / t) break; } diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 349d13034e..a72b0ad5ff 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -594,10 +594,10 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace) OldHeapDesc = RelationGetDescr(OldHeap); /* - * Note that the NewHeap will not - * receive any of the defaults or constraints associated with the OldHeap; - * we don't need 'em, and there's no reason to spend cycles inserting them - * into the catalogs only to delete them. + * Note that the NewHeap will not receive any of the defaults or + * constraints associated with the OldHeap; we don't need 'em, and there's + * no reason to spend cycles inserting them into the catalogs only to + * delete them. */ /* diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 1d1eacd3fb..98bcb2fcf3 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -150,7 +150,7 @@ typedef struct CopyStateData Oid *typioparams; /* array of element types for in_functions */ int *defmap; /* array of default att numbers */ ExprState **defexprs; /* array of default att expressions */ - bool volatile_defexprs; /* is any of defexprs volatile? */ + bool volatile_defexprs; /* is any of defexprs volatile? */ /* * These variables are used to reduce overhead in textual COPY FROM. @@ -566,11 +566,11 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread) if (mtype == EOF) ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("unexpected EOF on client connection with an open transaction"))); + errmsg("unexpected EOF on client connection with an open transaction"))); if (pq_getmessage(cstate->fe_msgbuf, 0)) ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("unexpected EOF on client connection with an open transaction"))); + errmsg("unexpected EOF on client connection with an open transaction"))); switch (mtype) { case 'd': /* CopyData */ @@ -1861,6 +1861,7 @@ CopyFrom(CopyState cstate) uint64 processed = 0; bool useHeapMultiInsert; int nBufferedTuples = 0; + #define MAX_BUFFERED_TUPLES 1000 HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */ Size bufferedTuplesSize = 0; @@ -1968,8 +1969,8 @@ CopyFrom(CopyState cstate) * processed and prepared for insertion are not there. 
*/ if ((resultRelInfo->ri_TrigDesc != NULL && - (resultRelInfo->ri_TrigDesc->trig_insert_before_row || - resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) || + (resultRelInfo->ri_TrigDesc->trig_insert_before_row || + resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) || cstate->volatile_defexprs) { useHeapMultiInsert = false; @@ -2162,8 +2163,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, int i; /* - * heap_multi_insert leaks memory, so switch to short-lived memory - * context before calling it. + * heap_multi_insert leaks memory, so switch to short-lived memory context + * before calling it. */ oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); heap_multi_insert(cstate->rel, @@ -2175,14 +2176,14 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, MemoryContextSwitchTo(oldcontext); /* - * If there are any indexes, update them for all the inserted tuples, - * and run AFTER ROW INSERT triggers. + * If there are any indexes, update them for all the inserted tuples, and + * run AFTER ROW INSERT triggers. */ if (resultRelInfo->ri_NumIndices > 0) { for (i = 0; i < nBufferedTuples; i++) { - List *recheckIndexes; + List *recheckIndexes; ExecStoreTuple(bufferedTuples[i], myslot, InvalidBuffer, false); recheckIndexes = @@ -2194,6 +2195,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, list_free(recheckIndexes); } } + /* * There's no indexes, but see if we need to run AFTER ROW INSERT triggers * anyway. diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index 5173f5a308..dc0665e2a4 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -62,12 +62,12 @@ void ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, ParamListInfo params, char *completionTag) { - Query *query = (Query *) stmt->query; + Query *query = (Query *) stmt->query; IntoClause *into = stmt->into; DestReceiver *dest; - List *rewritten; + List *rewritten; PlannedStmt *plan; - QueryDesc *queryDesc; + QueryDesc *queryDesc; ScanDirection dir; /* @@ -98,9 +98,9 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, * plancache.c. * * Because the rewriter and planner tend to scribble on the input, we make - * a preliminary copy of the source querytree. This prevents problems in + * a preliminary copy of the source querytree. This prevents problems in * the case that CTAS is in a portal or plpgsql function and is executed - * repeatedly. (See also the same hack in EXPLAIN and PREPARE.) + * repeatedly. (See also the same hack in EXPLAIN and PREPARE.) */ rewritten = QueryRewrite((Query *) copyObject(stmt->query)); @@ -115,10 +115,10 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, /* * Use a snapshot with an updated command ID to ensure this query sees - * results of any previously executed queries. (This could only matter - * if the planner executed an allegedly-stable function that changed - * the database contents, but let's do it anyway to be parallel to the - * EXPLAIN code path.) + * results of any previously executed queries. (This could only matter if + * the planner executed an allegedly-stable function that changed the + * database contents, but let's do it anyway to be parallel to the EXPLAIN + * code path.) 
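Reviewer note on the copy.c hunks: the reindented code is COPY FROM's batching path, where rows are buffered up to MAX_BUFFERED_TUPLES and pushed down in one heap_multi_insert() call, falling back to row-at-a-time insertion when BEFORE/INSTEAD OF row triggers or volatile default expressions might observe the not-yet-inserted batch. A minimal standalone sketch of that buffer-and-flush shape, with invented names (not the backend's API):

#include <stdio.h>

#define MAX_BUFFERED 1000		/* mirrors MAX_BUFFERED_TUPLES in spirit */

typedef struct
{
	int			id;
} Tuple;

static Tuple buffer[MAX_BUFFERED];
static int	nbuffered = 0;

/* Stand-in for heap_multi_insert(): insert a whole batch in one call. */
static void
flush_batch(void)
{
	if (nbuffered == 0)
		return;
	printf("multi-insert of %d tuples\n", nbuffered);
	nbuffered = 0;
}

static void
copy_one_row(Tuple t, int use_multi_insert)
{
	if (!use_multi_insert)
	{
		/* trigger-safe path: each row must be visible as it arrives */
		printf("single insert of tuple %d\n", t.id);
		return;
	}
	buffer[nbuffered++] = t;
	if (nbuffered >= MAX_BUFFERED)
		flush_batch();			/* batch full, push it down in one call */
}

int
main(void)
{
	for (int i = 0; i < 2500; i++)
		copy_one_row((Tuple) {i}, 1);
	flush_batch();				/* final partial batch */
	return 0;
}

As in CopyFromInsertBatch() above, a real flush would also switch to a short-lived memory context (the comment notes heap_multi_insert leaks memory), update indexes, and fire AFTER ROW INSERT triggers for each buffered tuple.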
*/ PushCopiedSnapshot(GetActiveSnapshot()); UpdateActiveSnapshotCommandId(); @@ -211,12 +211,12 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) DR_intorel *myState = (DR_intorel *) self; IntoClause *into = myState->into; CreateStmt *create; - Oid intoRelationId; - Relation intoRelationDesc; + Oid intoRelationId; + Relation intoRelationDesc; RangeTblEntry *rte; Datum toast_options; - ListCell *lc; - int attnum; + ListCell *lc; + int attnum; static char *validnsps[] = HEAP_RELOPT_NAMESPACES; Assert(into != NULL); /* else somebody forgot to set it */ @@ -237,8 +237,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) create->if_not_exists = false; /* - * Build column definitions using "pre-cooked" type and collation info. - * If a column name list was specified in CREATE TABLE AS, override the + * Build column definitions using "pre-cooked" type and collation info. If + * a column name list was specified in CREATE TABLE AS, override the * column names derived from the query. (Too few column names are OK, too * many are not.) */ @@ -246,8 +246,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) for (attnum = 0; attnum < typeinfo->natts; attnum++) { Form_pg_attribute attribute = typeinfo->attrs[attnum]; - ColumnDef *col = makeNode(ColumnDef); - TypeName *coltype = makeNode(TypeName); + ColumnDef *col = makeNode(ColumnDef); + TypeName *coltype = makeNode(TypeName); if (lc) { @@ -280,9 +280,9 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) /* * It's possible that the column is of a collatable type but the - * collation could not be resolved, so double-check. (We must - * check this here because DefineRelation would adopt the type's - * default collation rather than complaining.) + * collation could not be resolved, so double-check. (We must check + * this here because DefineRelation would adopt the type's default + * collation rather than complaining.) 
*/ if (!OidIsValid(col->collOid) && type_is_collatable(coltype->typeOid)) @@ -297,8 +297,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) if (lc != NULL) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("CREATE TABLE AS specifies too many column names"))); + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("CREATE TABLE AS specifies too many column names"))); /* * Actually create the target table @@ -342,7 +342,7 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) for (attnum = 1; attnum <= intoRelationDesc->rd_att->natts; attnum++) rte->modifiedCols = bms_add_member(rte->modifiedCols, - attnum - FirstLowInvalidHeapAttributeNumber); + attnum - FirstLowInvalidHeapAttributeNumber); ExecCheckRTPerms(list_make1(rte), true); diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 90155b9c14..b7224bde87 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -695,8 +695,8 @@ check_encoding_locale_matches(int encoding, const char *collate, const char *cty errmsg("encoding \"%s\" does not match locale \"%s\"", pg_encoding_to_char(encoding), ctype), - errdetail("The chosen LC_CTYPE setting requires encoding \"%s\".", - pg_encoding_to_char(ctype_encoding)))); + errdetail("The chosen LC_CTYPE setting requires encoding \"%s\".", + pg_encoding_to_char(ctype_encoding)))); if (!(collate_encoding == encoding || collate_encoding == PG_SQL_ASCII || @@ -710,8 +710,8 @@ check_encoding_locale_matches(int encoding, const char *collate, const char *cty errmsg("encoding \"%s\" does not match locale \"%s\"", pg_encoding_to_char(encoding), collate), - errdetail("The chosen LC_COLLATE setting requires encoding \"%s\".", - pg_encoding_to_char(collate_encoding)))); + errdetail("The chosen LC_COLLATE setting requires encoding \"%s\".", + pg_encoding_to_char(collate_encoding)))); } /* Error cleanup callback for createdb */ @@ -784,7 +784,8 @@ dropdb(const char *dbname, bool missing_ok) /* DROP hook for the database being removed */ if (object_access_hook) { - ObjectAccessDrop drop_arg; + ObjectAccessDrop drop_arg; + memset(&drop_arg, 0, sizeof(ObjectAccessDrop)); InvokeObjectAccessHook(OAT_DROP, DatabaseRelationId, db_id, 0, &drop_arg); @@ -831,8 +832,7 @@ dropdb(const char *dbname, bool missing_ok) ReleaseSysCache(tup); /* - * Delete any comments or security labels associated with - * the database. + * Delete any comments or security labels associated with the database. */ DeleteSharedComments(db_id, DatabaseRelationId); DeleteSharedSecurityLabel(db_id, DatabaseRelationId); @@ -860,18 +860,18 @@ dropdb(const char *dbname, bool missing_ok) pgstat_drop_database(db_id); /* - * Tell checkpointer to forget any pending fsync and unlink requests for files - * in the database; else the fsyncs will fail at next checkpoint, or + * Tell checkpointer to forget any pending fsync and unlink requests for + * files in the database; else the fsyncs will fail at next checkpoint, or * worse, it will delete files that belong to a newly created database * with the same OID. */ ForgetDatabaseFsyncRequests(db_id); /* - * Force a checkpoint to make sure the checkpointer has received the message - * sent by ForgetDatabaseFsyncRequests. On Windows, this also ensures that - * background procs don't hold any open files, which would cause rmdir() to - * fail. + * Force a checkpoint to make sure the checkpointer has received the + * message sent by ForgetDatabaseFsyncRequests. 
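Reviewer note on the dbcommands.c hunks: the check_encoding_locale_matches() changes are only errdetail re-wraps, but the rule being enforced is worth spelling out: a database encoding is accepted for a given LC_CTYPE/LC_COLLATE only if the locale implies that same encoding, or the locale is encoding-agnostic (SQL_ASCII), or, on Windows, the database is UTF-8. A hedged standalone restatement of that predicate follows; the ENC_* constants are illustrative stand-ins, and the sketch omits the superuser escape hatch the real function has for SQL_ASCII databases.

#include <stdbool.h>
#include <stdio.h>

enum
{
	ENC_SQL_ASCII,
	ENC_UTF8,
	ENC_LATIN1
};

/*
 * Does database encoding "encoding" satisfy a locale whose implied
 * encoding is "locale_encoding"?  is_windows models the WIN32 branch,
 * where UTF-8 databases are allowed with any server locale.
 */
static bool
encoding_matches_locale(int encoding, int locale_encoding, bool is_windows)
{
	if (locale_encoding == encoding)
		return true;
	if (locale_encoding == ENC_SQL_ASCII)
		return true;			/* C/POSIX-style locale, no constraint */
	if (is_windows && encoding == ENC_UTF8)
		return true;
	return false;
}

int
main(void)
{
	/* LATIN1 database with a UTF-8 locale: rejected */
	printf("%d\n", encoding_matches_locale(ENC_LATIN1, ENC_UTF8, false));
	/* UTF-8 database on Windows: accepted regardless of locale */
	printf("%d\n", encoding_matches_locale(ENC_UTF8, ENC_LATIN1, true));
	return 0;
}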
On Windows, this also + * ensures that background procs don't hold any open files, which would + * cause rmdir() to fail. */ RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT); diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c index 298940c7c4..1b8529ed84 100644 --- a/src/backend/commands/dropcmds.c +++ b/src/backend/commands/dropcmds.c @@ -30,7 +30,7 @@ #include "utils/syscache.h" static void does_not_exist_skipping(ObjectType objtype, - List *objname, List *objargs); + List *objname, List *objargs); /* * Drop one or more objects. @@ -54,7 +54,7 @@ RemoveObjects(DropStmt *stmt) foreach(cell1, stmt->objects) { - ObjectAddress address; + ObjectAddress address; List *objname = lfirst(cell1); List *objargs = NIL; Relation relation = NULL; @@ -97,8 +97,8 @@ RemoveObjects(DropStmt *stmt) if (((Form_pg_proc) GETSTRUCT(tup))->proisagg) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is an aggregate function", - NameListToString(objname)), + errmsg("\"%s\" is an aggregate function", + NameListToString(objname)), errhint("Use DROP AGGREGATE to drop aggregate functions."))); ReleaseSysCache(tup); @@ -149,7 +149,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs) break; case OBJECT_CONVERSION: msg = gettext_noop("conversion \"%s\" does not exist, skipping"); - name = NameListToString(objname); + name = NameListToString(objname); break; case OBJECT_SCHEMA: msg = gettext_noop("schema \"%s\" does not exist, skipping"); @@ -196,9 +196,9 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs) case OBJECT_CAST: msg = gettext_noop("cast from type %s to type %s does not exist, skipping"); name = format_type_be(typenameTypeId(NULL, - (TypeName *) linitial(objname))); + (TypeName *) linitial(objname))); args = format_type_be(typenameTypeId(NULL, - (TypeName *) linitial(objargs))); + (TypeName *) linitial(objargs))); break; case OBJECT_TRIGGER: msg = gettext_noop("trigger \"%s\" for table \"%s\" does not exist, skipping"); @@ -231,7 +231,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs) args = strVal(linitial(objargs)); break; default: - elog(ERROR, "unexpected object type (%d)", (int)objtype); + elog(ERROR, "unexpected object type (%d)", (int) objtype); break; } diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index e2b4b994b4..1e8f618a34 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -117,7 +117,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString, TupOutputState *tstate; List *rewritten; ListCell *lc; - bool timing_set = false; + bool timing_set = false; /* Initialize ExplainState. */ ExplainInitState(&es); @@ -169,7 +169,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString, ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("EXPLAIN option BUFFERS requires ANALYZE"))); - + /* if the timing was not set explicitly, set default value */ es.timing = (timing_set) ? es.timing : es.analyze; @@ -340,9 +340,9 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es, if (IsA(utilityStmt, CreateTableAsStmt)) { /* - * We have to rewrite the contained SELECT and then pass it back - * to ExplainOneQuery. It's probably not really necessary to copy - * the contained parsetree another time, but let's be safe. + * We have to rewrite the contained SELECT and then pass it back to + * ExplainOneQuery. 
It's probably not really necessary to copy the + * contained parsetree another time, but let's be safe. */ CreateTableAsStmt *ctas = (CreateTableAsStmt *) utilityStmt; List *rewritten; @@ -1021,7 +1021,7 @@ ExplainNode(PlanState *planstate, List *ancestors, { if (planstate->instrument->need_timer) appendStringInfo(es->str, - " (actual time=%.3f..%.3f rows=%.0f loops=%.0f)", + " (actual time=%.3f..%.3f rows=%.0f loops=%.0f)", startup_sec, total_sec, rows, nloops); else appendStringInfo(es->str, @@ -1095,7 +1095,7 @@ ExplainNode(PlanState *planstate, List *ancestors, planstate, es); if (es->analyze) ExplainPropertyLong("Heap Fetches", - ((IndexOnlyScanState *) planstate)->ioss_HeapFetches, es); + ((IndexOnlyScanState *) planstate)->ioss_HeapFetches, es); break; case T_BitmapIndexScan: show_scan_qual(((BitmapIndexScan *) plan)->indexqualorig, @@ -1237,7 +1237,7 @@ ExplainNode(PlanState *planstate, List *ancestors, bool has_temp = (usage->temp_blks_read > 0 || usage->temp_blks_written > 0); bool has_timing = (!INSTR_TIME_IS_ZERO(usage->blk_read_time) || - !INSTR_TIME_IS_ZERO(usage->blk_write_time)); + !INSTR_TIME_IS_ZERO(usage->blk_write_time)); /* Show only positive counter values. */ if (has_shared || has_local || has_temp) @@ -1301,10 +1301,10 @@ ExplainNode(PlanState *planstate, List *ancestors, appendStringInfoString(es->str, "I/O Timings:"); if (!INSTR_TIME_IS_ZERO(usage->blk_read_time)) appendStringInfo(es->str, " read=%0.3f", - INSTR_TIME_GET_MILLISEC(usage->blk_read_time)); + INSTR_TIME_GET_MILLISEC(usage->blk_read_time)); if (!INSTR_TIME_IS_ZERO(usage->blk_write_time)) appendStringInfo(es->str, " write=%0.3f", - INSTR_TIME_GET_MILLISEC(usage->blk_write_time)); + INSTR_TIME_GET_MILLISEC(usage->blk_write_time)); appendStringInfoChar(es->str, '\n'); } } diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 732791cc41..cde3d60ee8 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -899,8 +899,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control, { t_sql = DirectFunctionCall3(replace_text, t_sql, - CStringGetTextDatum("MODULE_PATHNAME"), - CStringGetTextDatum(control->module_pathname)); + CStringGetTextDatum("MODULE_PATHNAME"), + CStringGetTextDatum(control->module_pathname)); } /* And now back to C string */ @@ -1585,14 +1585,14 @@ RemoveExtensionById(Oid extId) * might write "DROP EXTENSION foo" in foo's own script files, as because * errors in dependency management in extension script files could give * rise to cases where an extension is dropped as a result of recursing - * from some contained object. Because of that, we must test for the case + * from some contained object. Because of that, we must test for the case * here, not at some higher level of the DROP EXTENSION command. 
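Reviewer note on the explain.c hunks: besides whitespace, they touch the TIMING option handling, where es.timing defaults to es.analyze unless the user set TIMING explicitly, and BUFFERS without ANALYZE is rejected outright. A small sketch of that option-resolution order, with an invented option struct (not the backend's ExplainState):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	bool		analyze;
	bool		buffers;
	bool		timing;
	bool		timing_set;		/* did the user mention TIMING at all? */
} ExplainOpts;

static void
resolve_explain_options(ExplainOpts *opt)
{
	if (opt->buffers && !opt->analyze)
	{
		fprintf(stderr, "EXPLAIN option BUFFERS requires ANALYZE\n");
		exit(1);
	}

	/* if the timing was not set explicitly, default it from ANALYZE */
	if (!opt->timing_set)
		opt->timing = opt->analyze;
}

int
main(void)
{
	ExplainOpts opt = {.analyze = true};

	resolve_explain_options(&opt);
	printf("timing=%d\n", opt.timing);	/* 1: inherited from ANALYZE */
	return 0;
}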
*/ if (extId == CurrentExtensionObject) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot drop extension \"%s\" because it is being modified", - get_extension_name(extId)))); + errmsg("cannot drop extension \"%s\" because it is being modified", + get_extension_name(extId)))); rel = heap_open(ExtensionRelationId, RowExclusiveLock); diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index 30135e6de8..342ecc2931 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -166,7 +166,7 @@ transformGenericOptions(Oid catalogId, if (OidIsValid(fdwvalidator)) { - Datum valarg = result; + Datum valarg = result; /* * Pass a null options list as an empty array, so that validators @@ -215,13 +215,13 @@ RenameForeignDataWrapper(const char *oldname, const char *newname) if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("foreign-data wrapper \"%s\" does not exist", oldname))); + errmsg("foreign-data wrapper \"%s\" does not exist", oldname))); /* make sure the new name doesn't exist */ if (SearchSysCacheExists1(FOREIGNDATAWRAPPERNAME, CStringGetDatum(newname))) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("foreign-data wrapper \"%s\" already exists", newname))); + errmsg("foreign-data wrapper \"%s\" already exists", newname))); /* must be owner of FDW */ if (!pg_foreign_data_wrapper_ownercheck(HeapTupleGetOid(tup), GetUserId())) @@ -364,7 +364,7 @@ AlterForeignDataWrapperOwner_oid(Oid fwdId, Oid newOwnerId) if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("foreign-data wrapper with OID %u does not exist", fwdId))); + errmsg("foreign-data wrapper with OID %u does not exist", fwdId))); AlterForeignDataWrapperOwner_internal(rel, tup, newOwnerId); diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index ff0836c141..13e30f4a55 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -890,9 +890,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString) ReleaseSysCache(languageTuple); /* - * Only superuser is allowed to create leakproof functions because - * it possibly allows unprivileged users to reference invisible tuples - * to be filtered out using views for row-level security. + * Only superuser is allowed to create leakproof functions because it + * possibly allows unprivileged users to reference invisible tuples to be + * filtered out using views for row-level security. */ if (isLeakProof && !superuser()) ereport(ERROR, @@ -1320,7 +1320,7 @@ AlterFunction(AlterFunctionStmt *stmt) if (intVal(leakproof_item->arg) && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("only superuser can define a leakproof function"))); + errmsg("only superuser can define a leakproof function"))); procForm->proleakproof = intVal(leakproof_item->arg); } if (cost_item) diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 6c909298b7..a68d500e5b 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -96,7 +96,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation, * concrete benefit for core types. * When a comparison or exclusion operator has a polymorphic input type, the - * actual input types must also match. This defends against the possibility + * actual input types must also match. 
This defends against the possibility * that operators could vary behavior in response to get_fn_expr_argtype(). * At present, this hazard is theoretical: check_exclusion_constraint() and * all core index access methods decline to set fn_expr for such calls. @@ -134,6 +134,7 @@ CheckIndexCompatible(Oid oldId, /* Caller should already have the relation locked in some way. */ relationId = RangeVarGetRelid(heapRelation, NoLock, false); + /* * We can pretend isconstraint = false unconditionally. It only serves to * decide the text of an error message that should never happen for us. @@ -157,10 +158,10 @@ CheckIndexCompatible(Oid oldId, ReleaseSysCache(tuple); /* - * Compute the operator classes, collations, and exclusion operators - * for the new index, so we can test whether it's compatible with the - * existing one. Note that ComputeIndexAttrs might fail here, but that's - * OK: DefineIndex would have called this function with the same arguments + * Compute the operator classes, collations, and exclusion operators for + * the new index, so we can test whether it's compatible with the existing + * one. Note that ComputeIndexAttrs might fail here, but that's OK: + * DefineIndex would have called this function with the same arguments * later on, and it would have failed then anyway. */ indexInfo = makeNode(IndexInfo); @@ -218,11 +219,11 @@ CheckIndexCompatible(Oid oldId, return false; /* For polymorphic opcintype, column type changes break compatibility. */ - irel = index_open(oldId, AccessShareLock); /* caller probably has a lock */ + irel = index_open(oldId, AccessShareLock); /* caller probably has a lock */ for (i = 0; i < old_natts; i++) { if (IsPolymorphicType(get_opclass_input_type(classObjectId[i])) && - irel->rd_att->attrs[i]->atttypid != typeObjectId[i]) + irel->rd_att->attrs[i]->atttypid != typeObjectId[i]) { ret = false; break; @@ -232,7 +233,8 @@ CheckIndexCompatible(Oid oldId, /* Any change in exclusion operator selections breaks compatibility. */ if (ret && indexInfo->ii_ExclusionOps != NULL) { - Oid *old_operators, *old_procs; + Oid *old_operators, + *old_procs; uint16 *old_strats; RelationGetExclusionInfo(irel, &old_operators, &old_procs, &old_strats); @@ -249,7 +251,7 @@ CheckIndexCompatible(Oid oldId, op_input_types(indexInfo->ii_ExclusionOps[i], &left, &right); if ((IsPolymorphicType(left) || IsPolymorphicType(right)) && - irel->rd_att->attrs[i]->atttypid != typeObjectId[i]) + irel->rd_att->attrs[i]->atttypid != typeObjectId[i]) { ret = false; break; @@ -1778,9 +1780,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, return; /* - * If the relation does exist, check whether it's an index. But note - * that the relation might have been dropped between the time we did the - * name lookup and now. In that case, there's nothing to do. + * If the relation does exist, check whether it's an index. But note that + * the relation might have been dropped between the time we did the name + * lookup and now. In that case, there's nothing to do. */ relkind = get_rel_relkind(relId); if (!relkind) @@ -1798,9 +1800,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, if (relId != oldRelId) { /* - * Lock level here should match reindex_index() heap lock. - * If the OID isn't valid, it means the index as concurrently dropped, - * which is not a problem for us; just return normally. + * Lock level here should match reindex_index() heap lock. If the OID + * isn't valid, it means the index as concurrently dropped, which is + * not a problem for us; just return normally. 
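Reviewer note on the CheckIndexCompatible() hunks above: the rule in the reflowed comment is that an index can be reused across ALTER TABLE ... TYPE only if no key column whose opclass takes a polymorphic input changed its attribute type, since such operators may consult get_fn_expr_argtype() and behave differently. A heavily abstracted sketch of that compatibility test; the flag array stands in for IsPolymorphicType(get_opclass_input_type(...)), and the OIDs are illustrative:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

/*
 * For a polymorphic opcintype, a column type change breaks
 * compatibility; for concrete opcintypes it does not matter here.
 */
static bool
index_still_compatible(int natts,
					   const bool *opclass_is_polymorphic,
					   const Oid *old_atttype,
					   const Oid *new_atttype)
{
	for (int i = 0; i < natts; i++)
	{
		if (opclass_is_polymorphic[i] &&
			old_atttype[i] != new_atttype[i])
			return false;
	}
	return true;
}

int
main(void)
{
	bool		poly[2] = {false, true};
	Oid			oldt[2] = {23, 25};	/* illustrative type OIDs */
	Oid			newt[2] = {23, 1043};	/* second column's type changed */

	printf("reusable: %d\n",
		   index_still_compatible(2, poly, oldt, newt));	/* 0 */
	return 0;
}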
*/ *heapOid = IndexGetRelation(relId, true); if (OidIsValid(*heapOid)) diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c index fd3dcc3643..ab13a45900 100644 --- a/src/backend/commands/lockcmds.c +++ b/src/backend/commands/lockcmds.c @@ -40,9 +40,9 @@ LockTableCommand(LockStmt *lockstmt) /* * During recovery we only accept these variations: LOCK TABLE foo IN - * ACCESS SHARE MODE LOCK TABLE foo IN ROW SHARE MODE LOCK TABLE foo - * IN ROW EXCLUSIVE MODE This test must match the restrictions defined - * in LockAcquire() + * ACCESS SHARE MODE LOCK TABLE foo IN ROW SHARE MODE LOCK TABLE foo IN + * ROW EXCLUSIVE MODE This test must match the restrictions defined in + * LockAcquire() */ if (lockstmt->mode > RowExclusiveLock) PreventCommandDuringRecovery("LOCK TABLE"); @@ -74,15 +74,16 @@ static void RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid, void *arg) { - LOCKMODE lockmode = * (LOCKMODE *) arg; + LOCKMODE lockmode = *(LOCKMODE *) arg; char relkind; AclResult aclresult; if (!OidIsValid(relid)) - return; /* doesn't exist, so no permissions check */ + return; /* doesn't exist, so no permissions check */ relkind = get_rel_relkind(relid); if (!relkind) - return; /* woops, concurrently dropped; no permissions check */ + return; /* woops, concurrently dropped; no permissions + * check */ /* Currently, we only allow plain tables to be locked */ if (relkind != RELKIND_RELATION) @@ -122,9 +123,10 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait) if (aclresult != ACLCHECK_OK) { char *relname = get_rel_name(childreloid); + if (!relname) - continue; /* child concurrently dropped, just skip it */ - aclcheck_error(aclresult, ACL_KIND_CLASS, relname); + continue; /* child concurrently dropped, just skip it */ + aclcheck_error(aclresult, ACL_KIND_CLASS, relname); } /* We have enough rights to lock the relation; do so. */ @@ -134,17 +136,18 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait) { /* try to throw error by name; relation could be deleted... */ char *relname = get_rel_name(childreloid); + if (!relname) - continue; /* child concurrently dropped, just skip it */ + continue; /* child concurrently dropped, just skip it */ ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("could not obtain lock on relation \"%s\"", - relname))); + relname))); } /* - * Even if we got the lock, child might have been concurrently dropped. - * If so, we can skip it. + * Even if we got the lock, child might have been concurrently + * dropped. If so, we can skip it. 
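Reviewer note on the lockcmds.c hunks: the rewrapped comment lists the only LOCK TABLE modes accepted during recovery (ACCESS SHARE, ROW SHARE, ROW EXCLUSIVE), and the operative test is a single comparison on the lock-mode ordering, which must match the restrictions in LockAcquire(). A sketch of that gate; the enum mirrors the backend's weakest-to-strongest mode ordering, but everything else here is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Ordered weakest to strongest, as in the backend's lock modes. */
typedef enum
{
	AccessShareLock = 1,
	RowShareLock,
	RowExclusiveLock,
	ShareUpdateExclusiveLock,
	ShareLock,
	ShareRowExclusiveLock,
	ExclusiveLock,
	AccessExclusiveLock
} LockMode;

static void
lock_table(LockMode mode, bool in_recovery)
{
	/*
	 * During recovery only ACCESS SHARE, ROW SHARE and ROW EXCLUSIVE are
	 * allowed; stronger modes would conflict with WAL replay.
	 */
	if (in_recovery && mode > RowExclusiveLock)
	{
		fprintf(stderr, "cannot execute LOCK TABLE during recovery\n");
		exit(1);
	}
	printf("lock acquired at mode %d\n", mode);
}

int
main(void)
{
	lock_table(RowExclusiveLock, true);	/* ok on a standby */
	lock_table(ShareLock, false);	/* ok on a primary */
	return 0;
}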
*/ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(childreloid))) { diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index 87c889604e..460b1d9ae2 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -1167,7 +1167,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) if (procform->prorettype != INT4OID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree comparison procedures must return integer"))); + errmsg("btree comparison procedures must return integer"))); /* * If lefttype/righttype isn't specified, use the proc's input @@ -1188,7 +1188,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) if (procform->prorettype != VOIDOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree sort support procedures must return void"))); + errmsg("btree sort support procedures must return void"))); /* * Can't infer lefttype/righttype from proc, so use default rule @@ -1217,7 +1217,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) /* * The default in CREATE OPERATOR CLASS is to use the class' opcintype as - * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype + * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype * isn't available, so make the user specify the types. */ if (!OidIsValid(member->lefttype)) diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index edd646e7c3..2d87b1c690 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -174,7 +174,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString) * ExecuteQuery --- implement the 'EXECUTE' utility statement. * * This code also supports CREATE TABLE ... AS EXECUTE. That case is - * indicated by passing a non-null intoClause. The DestReceiver is already + * indicated by passing a non-null intoClause. The DestReceiver is already * set up correctly for CREATE TABLE AS, but we still have to make a few * other adjustments here. * @@ -211,7 +211,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause, { /* * Need an EState to evaluate parameters; must not delete it till end - * of query, in case parameters are pass-by-reference. Note that the + * of query, in case parameters are pass-by-reference. Note that the * passed-in "params" could possibly be referenced in the parameter * expressions. */ @@ -237,15 +237,15 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause, /* * For CREATE TABLE ... AS EXECUTE, we must verify that the prepared * statement is one that produces tuples. Currently we insist that it be - * a plain old SELECT. In future we might consider supporting other + * a plain old SELECT. In future we might consider supporting other * things such as INSERT ... RETURNING, but there are a couple of issues * to be settled first, notably how WITH NO DATA should be handled in such * a case (do we really want to suppress execution?) and how to pass down * the OID-determining eflags (PortalStart won't handle them in such a * case, and for that matter it's not clear the executor will either). * - * For CREATE TABLE ... AS EXECUTE, we also have to ensure that the - * proper eflags and fetch count are passed to PortalStart/PortalRun. + * For CREATE TABLE ... AS EXECUTE, we also have to ensure that the proper + * eflags and fetch count are passed to PortalStart/PortalRun. 
*/ if (intoClause) { @@ -658,7 +658,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, { /* * Need an EState to evaluate parameters; must not delete it till end - * of query, in case parameters are pass-by-reference. Note that the + * of query, in case parameters are pass-by-reference. Note that the * passed-in "params" could possibly be referenced in the parameter * expressions. */ diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 5d2e7dc195..354389c617 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -133,7 +133,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) false, /* isAgg */ false, /* isWindowFunc */ false, /* security_definer */ - false, /* isLeakProof */ + false, /* isLeakProof */ false, /* isStrict */ PROVOLATILE_VOLATILE, buildoidvector(funcargtypes, 0), @@ -210,7 +210,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) false, /* isAgg */ false, /* isWindowFunc */ false, /* security_definer */ - false, /* isLeakProof */ + false, /* isLeakProof */ true, /* isStrict */ PROVOLATILE_VOLATILE, buildoidvector(funcargtypes, 1), diff --git a/src/backend/commands/seclabel.c b/src/backend/commands/seclabel.c index 2129f62e51..c09a96e9f6 100644 --- a/src/backend/commands/seclabel.c +++ b/src/backend/commands/seclabel.c @@ -237,7 +237,7 @@ GetSecurityLabel(const ObjectAddress *object, const char *provider) return seclabel; } -/* +/* * SetSharedSecurityLabel is a helper function of SetSecurityLabel to * handle shared database objects. */ @@ -246,8 +246,8 @@ SetSharedSecurityLabel(const ObjectAddress *object, const char *provider, const char *label) { Relation pg_shseclabel; - ScanKeyData keys[4]; - SysScanDesc scan; + ScanKeyData keys[4]; + SysScanDesc scan; HeapTuple oldtup; HeapTuple newtup = NULL; Datum values[Natts_pg_shseclabel]; @@ -414,8 +414,8 @@ void DeleteSharedSecurityLabel(Oid objectId, Oid classId) { Relation pg_shseclabel; - ScanKeyData skey[2]; - SysScanDesc scan; + ScanKeyData skey[2]; + SysScanDesc scan; HeapTuple oldtup; ScanKeyInit(&skey[0], diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 718658995e..34b74f6c38 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -430,7 +430,7 @@ AlterSequence(AlterSeqStmt *stmt) { ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", - stmt->sequence->relname))); + stmt->sequence->relname))); return; } @@ -514,12 +514,12 @@ nextval(PG_FUNCTION_ARGS) sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin)); /* - * XXX: This is not safe in the presence of concurrent DDL, but - * acquiring a lock here is more expensive than letting nextval_internal - * do it, since the latter maintains a cache that keeps us from hitting - * the lock manager more than once per transaction. It's not clear - * whether the performance penalty is material in practice, but for now, - * we do it this way. + * XXX: This is not safe in the presence of concurrent DDL, but acquiring + * a lock here is more expensive than letting nextval_internal do it, + * since the latter maintains a cache that keeps us from hitting the lock + * manager more than once per transaction. It's not clear whether the + * performance penalty is material in practice, but for now, we do it this + * way. 
*/ relid = RangeVarGetRelid(sequence, NoLock, false); @@ -1543,9 +1543,9 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record) * is also used for updating sequences, it's possible that a hot-standby * backend is examining the page concurrently; so we mustn't transiently * trash the buffer. The solution is to build the correct new page - * contents in local workspace and then memcpy into the buffer. Then - * only bytes that are supposed to change will change, even transiently. - * We must palloc the local page for alignment reasons. + * contents in local workspace and then memcpy into the buffer. Then only + * bytes that are supposed to change will change, even transiently. We + * must palloc the local page for alignment reasons. */ localpage = (Page) palloc(BufferGetPageSize(buffer)); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 6148bd62da..5c69cfb85a 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -237,9 +237,9 @@ static const struct dropmsgstrings dropmsgstringarray[] = { struct DropRelationCallbackState { - char relkind; - Oid heapOid; - bool concurrent; + char relkind; + Oid heapOid; + bool concurrent; }; /* Alter table target-type flags for ATSimplePermissions */ @@ -372,8 +372,8 @@ static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename, LOCKMODE lockmode); static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode); static void ATExecSetRelOptions(Relation rel, List *defList, - AlterTableType operation, - LOCKMODE lockmode); + AlterTableType operation, + LOCKMODE lockmode); static void ATExecEnableDisableTrigger(Relation rel, char *trigname, char fires_when, bool skip_system, LOCKMODE lockmode); static void ATExecEnableDisableRule(Relation rel, char *rulename, @@ -752,7 +752,7 @@ RemoveRelations(DropStmt *drop) if (drop->behavior == DROP_CASCADE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DROP INDEX CONCURRENTLY does not support CASCADE"))); + errmsg("DROP INDEX CONCURRENTLY does not support CASCADE"))); } /* @@ -799,7 +799,7 @@ RemoveRelations(DropStmt *drop) RangeVar *rel = makeRangeVarFromNameList((List *) lfirst(cell)); Oid relOid; ObjectAddress obj; - struct DropRelationCallbackState state; + struct DropRelationCallbackState state; /* * These next few steps are a great deal like relation_openrv, but we @@ -914,9 +914,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid, /* * In DROP INDEX, attempt to acquire lock on the parent table before * locking the index. index_drop() will need this anyway, and since - * regular queries lock tables before their indexes, we risk deadlock - * if we do it the other way around. No error if we don't find a - * pg_index entry, though --- the relation may have been droppd. + * regular queries lock tables before their indexes, we risk deadlock if + * we do it the other way around. No error if we don't find a pg_index + * entry, though --- the relation may have been droppd. 
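Reviewer note on the seq_redo() hunk above: the reflowed comment documents a useful WAL-replay idiom. Because a hot-standby backend may be examining the same buffer, the new page image is assembled in private (palloc'd, hence aligned) memory and then installed with a single memcpy, so that only bytes that are supposed to change ever change, even transiently. A standalone sketch of the pattern, with shared_page standing in for a shared buffer and none of the real locking machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 8192

static char shared_page[PAGE_SIZE];	/* stands in for a shared buffer */

static void
replay_sequence_update(const char *new_contents, size_t len)
{
	/* Build the complete new page in local workspace first... */
	char	   *localpage = malloc(PAGE_SIZE);

	memset(localpage, 0, PAGE_SIZE);
	memcpy(localpage, new_contents, len);

	/*
	 * ...then install it with one copy, so bytes that are not supposed
	 * to change are never touched, even transiently.
	 */
	memcpy(shared_page, localpage, PAGE_SIZE);
	free(localpage);
}

int
main(void)
{
	replay_sequence_update("seqval=42", 10);	/* include the NUL */
	printf("%s\n", shared_page);
	return 0;
}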
*/ if (relkind == RELKIND_INDEX && relOid != oldRelOid) { @@ -2322,12 +2322,12 @@ static void RangeVarCallbackForRenameAttribute(const RangeVar *rv, Oid relid, Oid oldrelid, void *arg) { - HeapTuple tuple; - Form_pg_class form; + HeapTuple tuple; + Form_pg_class form; tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tuple)) - return; /* concurrently dropped */ + return; /* concurrently dropped */ form = (Form_pg_class) GETSTRUCT(tuple); renameatt_check(relid, form, false); ReleaseSysCache(tuple); @@ -2351,7 +2351,7 @@ renameatt(RenameStmt *stmt) { ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", - stmt->relation->relname))); + stmt->relation->relname))); return; } @@ -2379,7 +2379,7 @@ rename_constraint_internal(Oid myrelid, { Relation targetrelation = NULL; Oid constraintOid; - HeapTuple tuple; + HeapTuple tuple; Form_pg_constraint con; AssertArg(!myrelid || !mytypid); @@ -2391,7 +2391,11 @@ rename_constraint_internal(Oid myrelid, else { targetrelation = relation_open(myrelid, AccessExclusiveLock); - /* don't tell it whether we're recursing; we allow changing typed tables here */ + + /* + * don't tell it whether we're recursing; we allow changing typed + * tables here + */ renameatt_check(myrelid, RelationGetForm(targetrelation), false); constraintOid = get_relation_constraint_oid(myrelid, oldconname, false); @@ -2408,9 +2412,9 @@ rename_constraint_internal(Oid myrelid, if (recurse) { List *child_oids, - *child_numparents; + *child_numparents; ListCell *lo, - *li; + *li; child_oids = find_all_inheritors(myrelid, AccessExclusiveLock, &child_numparents); @@ -2455,7 +2459,7 @@ rename_constraint_internal(Oid myrelid, ReleaseSysCache(tuple); if (targetrelation) - relation_close(targetrelation, NoLock); /* close rel but keep lock */ + relation_close(targetrelation, NoLock); /* close rel but keep lock */ } void @@ -2469,7 +2473,7 @@ RenameConstraint(RenameStmt *stmt) Relation rel; HeapTuple tup; - typid = typenameTypeId(NULL, makeTypeNameFromNameList(stmt->object)); + typid = typenameTypeId(NULL, makeTypeNameFromNameList(stmt->object)); rel = heap_open(TypeRelationId, RowExclusiveLock); tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); if (!HeapTupleIsValid(tup)) @@ -2490,9 +2494,9 @@ RenameConstraint(RenameStmt *stmt) rename_constraint_internal(relid, typid, stmt->subname, stmt->newname, - stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */ + stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */ false, /* recursing? */ - 0 /* expected inhcount */); + 0 /* expected inhcount */ ); } /* @@ -2507,8 +2511,8 @@ RenameRelation(RenameStmt *stmt) * Grab an exclusive lock on the target table, index, sequence or view, * which we will NOT release until end of transaction. * - * Lock level used here should match RenameRelationInternal, to avoid - * lock escalation. + * Lock level used here should match RenameRelationInternal, to avoid lock + * escalation. */ relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, stmt->missing_ok, false, @@ -2519,7 +2523,7 @@ RenameRelation(RenameStmt *stmt) { ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", - stmt->relation->relname))); + stmt->relation->relname))); return; } @@ -2702,11 +2706,11 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode) * Thanks to the magic of MVCC, an error anywhere along the way rolls back * the whole operation; we don't have to do anything special to clean up. 
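Reviewer note on the RangeVarCallbackForDropRelation() hunk: the rewrapped comment states the classic lock-ordering rule. DROP INDEX locks the parent table before the index because regular queries lock tables before their indexes, and acquiring the pair in opposite orders across sessions is the textbook deadlock. A minimal pthreads illustration of "all code paths acquire in the same order" (names invented for the example):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Both the query path and the DROP INDEX path take table_lock first; if
 * DROP INDEX locked the index first, two sessions could each hold one
 * lock and wait forever for the other.
 */
static void *
worker(void *arg)
{
	pthread_mutex_lock(&table_lock);	/* parent table first */
	pthread_mutex_lock(&index_lock);	/* then its index */
	printf("%s: got both locks\n", (const char *) arg);
	pthread_mutex_unlock(&index_lock);
	pthread_mutex_unlock(&table_lock);
	return NULL;
}

int
main(void)
{
	pthread_t	a,
				b;

	pthread_create(&a, NULL, worker, "query");
	pthread_create(&b, NULL, worker, "drop index");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}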
* - * The caller must lock the relation, with an appropriate lock level + * The caller must lock the relation, with an appropriate lock level * for the subcommands requested. Any subcommand that needs to rewrite * tuples in the table forces the whole command to be executed with * AccessExclusiveLock (actually, that is currently required always, but - * we hope to relax it at some point). We pass the lock level down + * we hope to relax it at some point). We pass the lock level down * so that we can apply it recursively to inherited tables. Note that the * lock level we want as we recurse might well be higher than required for * that specific subcommand. So we pass down the overall lock requirement, @@ -2773,22 +2777,22 @@ LOCKMODE AlterTableGetLockLevel(List *cmds) { /* - * Late in 9.1 dev cycle a number of issues were uncovered with access - * to catalog relations, leading to the decision to re-enforce all DDL - * at AccessExclusiveLock level by default. + * Late in 9.1 dev cycle a number of issues were uncovered with access to + * catalog relations, leading to the decision to re-enforce all DDL at + * AccessExclusiveLock level by default. * * The issues are that there is a pervasive assumption in the code that - * the catalogs will not be read unless an AccessExclusiveLock is held. - * If that rule is relaxed, we must protect against a number of potential + * the catalogs will not be read unless an AccessExclusiveLock is held. If + * that rule is relaxed, we must protect against a number of potential * effects - infrequent, but proven possible with test cases where * multiple DDL operations occur in a stream against frequently accessed * tables. * - * 1. Catalog tables are read using SnapshotNow, which has a race bug - * that allows a scan to return no valid rows even when one is present - * in the case of a commit of a concurrent update of the catalog table. - * SnapshotNow also ignores transactions in progress, so takes the - * latest committed version without waiting for the latest changes. + * 1. Catalog tables are read using SnapshotNow, which has a race bug that + * allows a scan to return no valid rows even when one is present in the + * case of a commit of a concurrent update of the catalog table. + * SnapshotNow also ignores transactions in progress, so takes the latest + * committed version without waiting for the latest changes. * * 2. Relcache needs to be internally consistent, so unless we lock the * definition during reads we have no way to guarantee that. @@ -3156,8 +3160,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, pass = AT_PASS_MISC; /* doesn't actually matter */ break; case AT_SetRelOptions: /* SET (...) */ - case AT_ResetRelOptions: /* RESET (...) */ - case AT_ReplaceRelOptions: /* reset them all, then set just these */ + case AT_ResetRelOptions: /* RESET (...) 
*/ + case AT_ReplaceRelOptions: /* reset them all, then set just these */ ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX | ATT_VIEW); /* This command never recurses */ /* No command-specific prep needed */ @@ -3344,8 +3348,8 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, case AT_ValidateConstraint: /* VALIDATE CONSTRAINT */ ATExecValidateConstraint(rel, cmd->name, false, false, lockmode); break; - case AT_ValidateConstraintRecurse: /* VALIDATE CONSTRAINT with - * recursion */ + case AT_ValidateConstraintRecurse: /* VALIDATE CONSTRAINT with + * recursion */ ATExecValidateConstraint(rel, cmd->name, true, false, lockmode); break; case AT_DropConstraint: /* DROP CONSTRAINT */ @@ -3361,7 +3365,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, case AT_AlterColumnType: /* ALTER COLUMN TYPE */ ATExecAlterColumnType(tab, rel, cmd, lockmode); break; - case AT_AlterColumnGenericOptions: /* ALTER COLUMN OPTIONS */ + case AT_AlterColumnGenericOptions: /* ALTER COLUMN OPTIONS */ ATExecAlterColumnGenericOptions(rel, cmd->name, (List *) cmd->def, lockmode); break; case AT_ChangeOwner: /* ALTER OWNER */ @@ -4725,7 +4729,7 @@ static void check_for_column_name_collision(Relation rel, const char *colname) { HeapTuple attTuple; - int attnum; + int attnum; /* * this test is deliberately not attisdropped-aware, since if one tries to @@ -4737,7 +4741,7 @@ check_for_column_name_collision(Relation rel, const char *colname) if (!HeapTupleIsValid(attTuple)) return; - attnum = ((Form_pg_attribute) GETSTRUCT(attTuple))->attnum; + attnum = ((Form_pg_attribute) GETSTRUCT(attTuple))->attnum; ReleaseSysCache(attTuple); /* @@ -4745,16 +4749,16 @@ check_for_column_name_collision(Relation rel, const char *colname) * names, since they are normally not shown and the user might otherwise * be confused about the reason for the conflict. */ - if (attnum <= 0) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column name \"%s\" conflicts with a system column name", - colname))); - else - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" of relation \"%s\" already exists", - colname, RelationGetRelationName(rel)))); + if (attnum <= 0) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_COLUMN), + errmsg("column name \"%s\" conflicts with a system column name", + colname))); + else + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_COLUMN), + errmsg("column \"%s\" of relation \"%s\" already exists", + colname, RelationGetRelationName(rel)))); } /* @@ -4999,8 +5003,8 @@ ATExecColumnDefault(Relation rel, const char *colName, * safety, but at present we do not expect anything to depend on the * default. * - * We treat removing the existing default as an internal operation when - * it is preparatory to adding a new default, but as a user-initiated + * We treat removing the existing default as an internal operation when it + * is preparatory to adding a new default, but as a user-initiated * operation when the user asked for a drop. */ RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, false, @@ -5507,13 +5511,14 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel, /* * If TryReuseIndex() stashed a relfilenode for us, we used it for the new - * index instead of building from scratch. The DROP of the old edition of + * index instead of building from scratch. The DROP of the old edition of * this index will have scheduled the storage for deletion at commit, so * cancel that pending deletion. 
*/ if (OidIsValid(stmt->oldNode)) { Relation irel = index_open(new_index, NoLock); + RelationPreserveStorage(irel->rd_node, true); index_close(irel, NoLock); } @@ -5687,8 +5692,8 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, */ newcons = AddRelationNewConstraints(rel, NIL, list_make1(copyObject(constr)), - recursing, /* allow_merge */ - !recursing); /* is_local */ + recursing, /* allow_merge */ + !recursing); /* is_local */ /* Add each to-be-validated constraint to Phase 3's queue */ foreach(lcon, newcons) @@ -5743,7 +5748,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, /* * Check if ONLY was specified with ALTER TABLE. If so, allow the - * contraint creation only if there are no children currently. Error out + * contraint creation only if there are no children currently. Error out * otherwise. */ if (!recurse && children != NIL) @@ -6064,11 +6069,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, /* * Upon a change to the cast from the FK column to its pfeqop - * operand, revalidate the constraint. For this evaluation, a + * operand, revalidate the constraint. For this evaluation, a * binary coercion cast is equivalent to no cast at all. While * type implementors should design implicit casts with an eye - * toward consistency of operations like equality, we cannot assume - * here that they have done so. + * toward consistency of operations like equality, we cannot + * assume here that they have done so. * * A function with a polymorphic argument could change behavior * arbitrarily in response to get_fn_expr_argtype(). Therefore, @@ -6082,7 +6087,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * Necessarily, the primary key column must then be of the domain * type. Since the constraint was previously valid, all values on * the foreign side necessarily exist on the primary side and in - * turn conform to the domain. Consequently, we need not treat + * turn conform to the domain. Consequently, we need not treat * domains specially here. * * Since we require that all collations share the same notion of @@ -6091,8 +6096,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * * We need not directly consider the PK type. It's necessarily * binary coercible to the opcintype of the unique index column, - * and ri_triggers.c will only deal with PK datums in terms of that - * opcintype. Changing the opcintype also changes pfeqop. + * and ri_triggers.c will only deal with PK datums in terms of + * that opcintype. Changing the opcintype also changes pfeqop. */ old_check_ok = (new_pathtype == old_pathtype && new_castfunc == old_castfunc && @@ -6144,11 +6149,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, createForeignKeyTriggers(rel, fkconstraint, constrOid, indexOid); /* - * Tell Phase 3 to check that the constraint is satisfied by existing rows. - * We can skip this during table creation, when requested explicitly by - * specifying NOT VALID in an ADD FOREIGN KEY command, and when we're - * recreating a constraint following a SET DATA TYPE operation that did not - * impugn its validity. + * Tell Phase 3 to check that the constraint is satisfied by existing + * rows. We can skip this during table creation, when requested explicitly + * by specifying NOT VALID in an ADD FOREIGN KEY command, and when we're + * recreating a constraint following a SET DATA TYPE operation that did + * not impugn its validity. 
*/ if (!old_check_ok && !fkconstraint->skip_validation) { @@ -6236,12 +6241,12 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse, Relation refrel; /* - * Triggers are already in place on both tables, so a concurrent write - * that alters the result here is not possible. Normally we can run a - * query here to do the validation, which would only require - * AccessShareLock. In some cases, it is possible that we might need - * to fire triggers to perform the check, so we take a lock at - * RowShareLock level just in case. + * Triggers are already in place on both tables, so a concurrent + * write that alters the result here is not possible. Normally we + * can run a query here to do the validation, which would only + * require AccessShareLock. In some cases, it is possible that we + * might need to fire triggers to perform the check, so we take a + * lock at RowShareLock level just in case. */ refrel = heap_open(con->confrelid, RowShareLock); @@ -6278,7 +6283,7 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse, */ foreach(child, children) { - Oid childoid = lfirst_oid(child); + Oid childoid = lfirst_oid(child); Relation childrel; if (childoid == RelationGetRelid(rel)) @@ -6662,27 +6667,28 @@ checkFkeyPermissions(Relation rel, int16 *attnums, int natts) static void validateCheckConstraint(Relation rel, HeapTuple constrtup) { - EState *estate; - Datum val; - char *conbin; - Expr *origexpr; - List *exprstate; - TupleDesc tupdesc; - HeapScanDesc scan; - HeapTuple tuple; - ExprContext *econtext; - MemoryContext oldcxt; + EState *estate; + Datum val; + char *conbin; + Expr *origexpr; + List *exprstate; + TupleDesc tupdesc; + HeapScanDesc scan; + HeapTuple tuple; + ExprContext *econtext; + MemoryContext oldcxt; TupleTableSlot *slot; Form_pg_constraint constrForm; - bool isnull; + bool isnull; constrForm = (Form_pg_constraint) GETSTRUCT(constrtup); estate = CreateExecutorState(); + /* * XXX this tuple doesn't really come from a syscache, but this doesn't - * matter to SysCacheGetAttr, because it only wants to be able to fetch the - * tupdesc + * matter to SysCacheGetAttr, because it only wants to be able to fetch + * the tupdesc */ val = SysCacheGetAttr(CONSTROID, constrtup, Anum_pg_constraint_conbin, &isnull); @@ -7132,7 +7138,7 @@ ATExecDropConstraint(Relation rel, const char *constrName, con = (Form_pg_constraint) GETSTRUCT(copy_tuple); - if (con->coninhcount <= 0) /* shouldn't happen */ + if (con->coninhcount <= 0) /* shouldn't happen */ elog(ERROR, "relation %u has non-inherited constraint \"%s\"", childrelid, constrName); @@ -7140,8 +7146,7 @@ ATExecDropConstraint(Relation rel, const char *constrName, { /* * If the child constraint has other definition sources, just - * decrement its inheritance count; if not, recurse to delete - * it. + * decrement its inheritance count; if not, recurse to delete it. */ if (con->coninhcount == 1 && !con->conislocal) { @@ -7164,9 +7169,9 @@ ATExecDropConstraint(Relation rel, const char *constrName, else { /* - * If we were told to drop ONLY in this table (no recursion), - * we need to mark the inheritors' constraints as locally - * defined rather than inherited. + * If we were told to drop ONLY in this table (no recursion), we + * need to mark the inheritors' constraints as locally defined + * rather than inherited. 
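Reviewer note on the validateCheckConstraint() hunks below the ATExecValidateConstraint() changes: the declarations (heap scan, expression state, per-tuple slot) give the shape of VALIDATE CONSTRAINT, which scans every existing row, evaluates the check expression, and fails on the first violator; splitting ADD CONSTRAINT NOT VALID from VALIDATE is what lets the expensive scan run under a weaker lock. A generic sketch of that full-scan validation loop, with the predicate and row types invented for the example:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	int			balance;
} Row;

typedef bool (*CheckExpr) (const Row *);

static bool
check_nonnegative(const Row *r)
{
	return r->balance >= 0;
}

/*
 * Scan all existing rows and error out on the first one that fails the
 * check expression, as VALIDATE CONSTRAINT does.
 */
static void
validate_check_constraint(const Row *rows, int nrows, CheckExpr expr,
						  const char *conname)
{
	for (int i = 0; i < nrows; i++)
	{
		if (!expr(&rows[i]))
		{
			fprintf(stderr,
					"check constraint \"%s\" is violated by some row\n",
					conname);
			exit(1);
		}
	}
}

int
main(void)
{
	Row			rows[] = {{10}, {0}, {3}};

	validate_check_constraint(rows, 3, check_nonnegative, "balance_nonneg");
	printf("constraint validated\n");
	return 0;
}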
*/ con->coninhcount--; con->conislocal = true; @@ -7315,8 +7320,8 @@ ATPrepAlterColumnType(List **wqueue, if (transform == NULL) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" cannot be cast automatically to type %s", - colName, format_type_be(targettype)), + errmsg("column \"%s\" cannot be cast automatically to type %s", + colName, format_type_be(targettype)), errhint("Specify a USING expression to perform the conversion."))); /* Fix collations after all else */ @@ -7483,8 +7488,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, if (defaultexpr == NULL) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("default for column \"%s\" cannot be cast automatically to type %s", - colName, format_type_be(targettype)))); + errmsg("default for column \"%s\" cannot be cast automatically to type %s", + colName, format_type_be(targettype)))); } else defaultexpr = NULL; @@ -8060,7 +8065,8 @@ TryReuseIndex(Oid oldId, IndexStmt *stmt) stmt->indexParams, stmt->excludeOpNames)) { - Relation irel = index_open(oldId, NoLock); + Relation irel = index_open(oldId, NoLock); + stmt->oldNode = irel->rd_node.relNode; index_close(irel, NoLock); } @@ -8085,7 +8091,7 @@ TryReuseForeignKey(Oid oldId, Constraint *con) int i; Assert(con->contype == CONSTR_FOREIGN); - Assert(con->old_conpfeqop == NIL); /* already prepared this node */ + Assert(con->old_conpfeqop == NIL); /* already prepared this node */ tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(oldId)); if (!HeapTupleIsValid(tup)) /* should not happen */ @@ -8587,8 +8593,8 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, /* Generate new proposed reloptions (text array) */ newOptions = transformRelOptions(isnull ? (Datum) 0 : datum, - defList, NULL, validnsps, false, - operation == AT_ResetRelOptions); + defList, NULL, validnsps, false, + operation == AT_ResetRelOptions); /* Validate */ switch (rel->rd_rel->relkind) @@ -8665,8 +8671,8 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, } newOptions = transformRelOptions(isnull ? (Datum) 0 : datum, - defList, "toast", validnsps, false, - operation == AT_ResetRelOptions); + defList, "toast", validnsps, false, + operation == AT_ResetRelOptions); (void) heap_reloptions(RELKIND_TOASTVALUE, newOptions, true); @@ -9831,7 +9837,7 @@ AlterTableNamespace(AlterObjectSchemaStmt *stmt) { ereport(NOTICE, (errmsg("relation \"%s\" does not exist, skipping", - stmt->relation->relname))); + stmt->relation->relname))); return; } @@ -9848,10 +9854,10 @@ AlterTableNamespace(AlterObjectSchemaStmt *stmt) if (sequenceIsOwned(relid, &tableId, &colId)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move an owned sequence into another schema"), - errdetail("Sequence \"%s\" is linked to table \"%s\".", - RelationGetRelationName(rel), - get_rel_name(tableId)))); + errmsg("cannot move an owned sequence into another schema"), + errdetail("Sequence \"%s\" is linked to table \"%s\".", + RelationGetRelationName(rel), + get_rel_name(tableId)))); } /* Get and lock schema OID and check its permissions. */ @@ -10267,9 +10273,9 @@ RangeVarCallbackOwnsTable(const RangeVar *relation, return; /* - * If the relation does exist, check whether it's an index. But note - * that the relation might have been dropped between the time we did the - * name lookup and now. In that case, there's nothing to do. + * If the relation does exist, check whether it's an index. 
But note that + * the relation might have been dropped between the time we did the name + * lookup and now. In that case, there's nothing to do. */ relkind = get_rel_relkind(relId); if (!relkind) @@ -10292,16 +10298,16 @@ static void RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, void *arg) { - Node *stmt = (Node *) arg; - ObjectType reltype; - HeapTuple tuple; - Form_pg_class classform; - AclResult aclresult; - char relkind; + Node *stmt = (Node *) arg; + ObjectType reltype; + HeapTuple tuple; + Form_pg_class classform; + AclResult aclresult; + char relkind; tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tuple)) - return; /* concurrently dropped */ + return; /* concurrently dropped */ classform = (Form_pg_class) GETSTRUCT(tuple); relkind = classform->relkind; @@ -10324,7 +10330,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, */ if (IsA(stmt, RenameStmt)) { - aclresult = pg_namespace_aclcheck(classform->relnamespace, + aclresult = pg_namespace_aclcheck(classform->relnamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_NAMESPACE, @@ -10333,20 +10339,21 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, } else if (IsA(stmt, AlterObjectSchemaStmt)) reltype = ((AlterObjectSchemaStmt *) stmt)->objectType; + else if (IsA(stmt, AlterTableStmt)) reltype = ((AlterTableStmt *) stmt)->relkind; else { - reltype = OBJECT_TABLE; /* placate compiler */ + reltype = OBJECT_TABLE; /* placate compiler */ elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt)); } /* - * For compatibility with prior releases, we allow ALTER TABLE to be - * used with most other types of relations (but not composite types). - * We allow similar flexibility for ALTER INDEX in the case of RENAME, - * but not otherwise. Otherwise, the user must select the correct form - * of the command for the relation at issue. + * For compatibility with prior releases, we allow ALTER TABLE to be used + * with most other types of relations (but not composite types). We allow + * similar flexibility for ALTER INDEX in the case of RENAME, but not + * otherwise. Otherwise, the user must select the correct form of the + * command for the relation at issue. */ if (reltype == OBJECT_SEQUENCE && relkind != RELKIND_SEQUENCE) ereport(ERROR, @@ -10391,10 +10398,10 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, errhint("Use ALTER FOREIGN TABLE instead."))); /* - * Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be - * moved to a different schema, such as indexes and TOAST tables. + * Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be moved + * to a different schema, such as indexes and TOAST tables. 
*/ - if (IsA(stmt, AlterObjectSchemaStmt) && relkind != RELKIND_RELATION + if (IsA(stmt, AlterObjectSchemaStmt) &&relkind != RELKIND_RELATION && relkind != RELKIND_VIEW && relkind != RELKIND_SEQUENCE && relkind != RELKIND_FOREIGN_TABLE) ereport(ERROR, diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 708bebb54d..da9cb2f30e 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -437,7 +437,8 @@ DropTableSpace(DropTableSpaceStmt *stmt) /* DROP hook for the tablespace being removed */ if (object_access_hook) { - ObjectAccessDrop drop_arg; + ObjectAccessDrop drop_arg; + memset(&drop_arg, 0, sizeof(ObjectAccessDrop)); InvokeObjectAccessHook(OAT_DROP, TableSpaceRelationId, tablespaceoid, 0, &drop_arg); @@ -638,7 +639,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) * Attempt to remove filesystem infrastructure for the tablespace. * * 'redo' indicates we are redoing a drop from XLOG; in that case we should - * not throw an ERROR for problems, just LOG them. The worst consequence of + * not throw an ERROR for problems, just LOG them. The worst consequence of * not removing files here would be failure to release some disk space, which * does not justify throwing an error that would require manual intervention * to get the database running again. @@ -678,7 +679,7 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo) * with a warning. This is because even though ProcessUtility disallows * DROP TABLESPACE in a transaction block, it's possible that a previous * DROP failed and rolled back after removing the tablespace directories - * and/or symlink. We want to allow a new DROP attempt to succeed at + * and/or symlink. We want to allow a new DROP attempt to succeed at * removing the catalog entries (and symlink if still present), so we * should not give a hard error here. */ @@ -1199,14 +1200,14 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source) } /* - * In an interactive SET command, we ereport for bad info. When + * In an interactive SET command, we ereport for bad info. When * source == PGC_S_TEST, we are checking the argument of an ALTER - * DATABASE SET or ALTER USER SET command. pg_dumpall dumps all + * DATABASE SET or ALTER USER SET command. pg_dumpall dumps all * roles before tablespaces, so if we're restoring a pg_dumpall * script the tablespace might not yet exist, but will be created - * later. Because of that, issue a NOTICE if source == PGC_S_TEST, - * but accept the value anyway. Otherwise, silently ignore any - * bad list elements. + * later. Because of that, issue a NOTICE if source == + * PGC_S_TEST, but accept the value anyway. Otherwise, silently + * ignore any bad list elements. */ curoid = get_tablespace_oid(curname, source <= PGC_S_TEST); if (curoid == InvalidOid) @@ -1493,10 +1494,10 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record) * files then do conflict processing and try again, if currently * enabled. * - * Other possible reasons for failure include bollixed file permissions - * on a standby server when they were okay on the primary, etc etc. - * There's not much we can do about that, so just remove what we can - * and press on. + * Other possible reasons for failure include bollixed file + * permissions on a standby server when they were okay on the primary, + * etc etc. There's not much we can do about that, so just remove what + * we can and press on. 
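/*
 * Illustrative sketch, not part of this commit: the redo-time shape the
 * tblspc_redo() code below follows -- attempt removal, do conflict
 * processing, retry once, and on a second failure merely LOG.  The helpers
 * here are hypothetical stand-ins for destroy_tablespace_directories() and
 * the recovery-conflict machinery.
 */
#include <stdbool.h>
#include <stdio.h>

static bool try_remove_dirs(unsigned ts_id) { (void) ts_id; return false; }
static void kick_out_conflicting_backends(unsigned ts_id) { (void) ts_id; }

static void
redo_drop_tablespace(unsigned ts_id)
{
    if (try_remove_dirs(ts_id))
        return;                             /* first attempt succeeded */
    kick_out_conflicting_backends(ts_id);   /* conflict processing */
    if (!try_remove_dirs(ts_id))
        fprintf(stderr, "LOG: directories for tablespace %u could not be removed\n",
                ts_id);                     /* LOG, never ERROR, while in redo */
}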
*/ if (!destroy_tablespace_directories(xlrec->ts_id, true)) { @@ -1513,8 +1514,8 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record) if (!destroy_tablespace_directories(xlrec->ts_id, true)) ereport(LOG, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("directories for tablespace %u could not be removed", - xlrec->ts_id), + errmsg("directories for tablespace %u could not be removed", + xlrec->ts_id), errhint("You can remove the directories manually if necessary."))); } } diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 1218d033d1..4399a27446 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -199,8 +199,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, /* * We must take a lock on the target relation to protect against * concurrent drop. It's not clear that AccessShareLock is strong - * enough, but we certainly need at least that much... otherwise, - * we might end up creating a pg_constraint entry referencing a + * enough, but we certainly need at least that much... otherwise, we + * might end up creating a pg_constraint entry referencing a * nonexistent table. */ constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock, false); @@ -494,8 +494,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * can skip this for internally generated triggers, since the name * modification above should be sufficient. * - * NOTE that this is cool only because we have AccessExclusiveLock on - * the relation, so the trigger set won't be changing underneath us. + * NOTE that this is cool only because we have AccessExclusiveLock on the + * relation, so the trigger set won't be changing underneath us. */ if (!isInternal) { @@ -1168,27 +1168,27 @@ static void RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid, void *arg) { - HeapTuple tuple; - Form_pg_class form; + HeapTuple tuple; + Form_pg_class form; tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tuple)) - return; /* concurrently dropped */ + return; /* concurrently dropped */ form = (Form_pg_class) GETSTRUCT(tuple); /* only tables and views can have triggers */ - if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table or view", rv->relname))); + if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("\"%s\" is not a table or view", rv->relname))); /* you must own the table to rename one of its triggers */ - if (!pg_class_ownercheck(relid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname); - if (!allowSystemTableMods && IsSystemClass(form)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied: \"%s\" is a system catalog", + if (!pg_class_ownercheck(relid, GetUserId())) + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname); + if (!allowSystemTableMods && IsSystemClass(form)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied: \"%s\" is a system catalog", rv->relname))); ReleaseSysCache(tuple); diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 77559842e5..fdb5bdbc11 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -609,7 +609,7 @@ DefineType(List *names, List *parameters) F_ARRAY_SEND, /* send procedure */ typmodinOid, /* 
typmodin procedure */ typmodoutOid, /* typmodout procedure */ - F_ARRAY_TYPANALYZE, /* analyze procedure */ + F_ARRAY_TYPANALYZE, /* analyze procedure */ typoid, /* element type ID */ true, /* yes this is an array type */ InvalidOid, /* no further array type */ @@ -1140,7 +1140,7 @@ DefineEnum(CreateEnumStmt *stmt) F_ARRAY_SEND, /* send procedure */ InvalidOid, /* typmodin procedure - none */ InvalidOid, /* typmodout procedure - none */ - F_ARRAY_TYPANALYZE, /* analyze procedure */ + F_ARRAY_TYPANALYZE, /* analyze procedure */ enumTypeOid, /* element type ID */ true, /* yes this is an array type */ InvalidOid, /* no further array type */ @@ -1450,7 +1450,7 @@ DefineRange(CreateRangeStmt *stmt) F_ARRAY_SEND, /* send procedure */ InvalidOid, /* typmodin procedure - none */ InvalidOid, /* typmodout procedure - none */ - F_ARRAY_TYPANALYZE, /* analyze procedure */ + F_ARRAY_TYPANALYZE, /* analyze procedure */ typoid, /* element type ID */ true, /* yes this is an array type */ InvalidOid, /* no further array type */ @@ -1477,15 +1477,15 @@ DefineRange(CreateRangeStmt *stmt) * impossible to define a polymorphic constructor; we have to generate new * constructor functions explicitly for each range type. * - * We actually define 4 functions, with 0 through 3 arguments. This is just + * We actually define 4 functions, with 0 through 3 arguments. This is just * to offer more convenience for the user. */ static void makeRangeConstructors(const char *name, Oid namespace, Oid rangeOid, Oid subtype) { - static const char * const prosrc[2] = {"range_constructor2", - "range_constructor3"}; + static const char *const prosrc[2] = {"range_constructor2", + "range_constructor3"}; static const int pronargs[2] = {2, 3}; Oid constructorArgTypes[3]; @@ -1509,7 +1509,7 @@ makeRangeConstructors(const char *name, Oid namespace, constructorArgTypesVector = buildoidvector(constructorArgTypes, pronargs[i]); - procOid = ProcedureCreate(name, /* name: same as range type */ + procOid = ProcedureCreate(name, /* name: same as range type */ namespace, /* namespace */ false, /* replace */ false, /* returns set */ @@ -1518,7 +1518,7 @@ makeRangeConstructors(const char *name, Oid namespace, INTERNALlanguageId, /* language */ F_FMGR_INTERNAL_VALIDATOR, /* language validator */ prosrc[i], /* prosrc */ - NULL, /* probin */ + NULL, /* probin */ false, /* isAgg */ false, /* isWindowFunc */ false, /* security_definer */ @@ -1834,9 +1834,9 @@ findRangeSubOpclass(List *opcname, Oid subtype) if (!IsBinaryCoercible(subtype, opInputType)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("operator class \"%s\" does not accept data type %s", - NameListToString(opcname), - format_type_be(subtype)))); + errmsg("operator class \"%s\" does not accept data type %s", + NameListToString(opcname), + format_type_be(subtype)))); } else { @@ -2335,8 +2335,8 @@ AlterDomainDropConstraint(List *names, const char *constrName, if (!missing_ok) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("constraint \"%s\" of domain \"%s\" does not exist", - constrName, TypeNameToString(typename)))); + errmsg("constraint \"%s\" of domain \"%s\" does not exist", + constrName, TypeNameToString(typename)))); else ereport(NOTICE, (errmsg("constraint \"%s\" of domain \"%s\" does not exist, skipping", @@ -2958,7 +2958,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, ccsrc, /* Source form of check constraint */ true, /* is local */ 0, /* inhcount */ - false); /* is only */ + false); /* is only */ /* * Return the compiled 
constraint expression so the calling routine can diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 2edbabe754..a22092c202 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -936,7 +936,8 @@ DropRole(DropRoleStmt *stmt) /* DROP hook for the role being removed */ if (object_access_hook) { - ObjectAccessDrop drop_arg; + ObjectAccessDrop drop_arg; + memset(&drop_arg, 0, sizeof(ObjectAccessDrop)); InvokeObjectAccessHook(OAT_DROP, AuthIdRelationId, roleid, 0, &drop_arg); diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index c43cd8e017..710c2afc9f 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -322,13 +322,13 @@ get_rel_oids(Oid relid, const RangeVar *vacrel) Oid relid; /* - * Since we don't take a lock here, the relation might be gone, - * or the RangeVar might no longer refer to the OID we look up - * here. In the former case, VACUUM will do nothing; in the - * latter case, it will process the OID we looked up here, rather - * than the new one. Neither is ideal, but there's little practical - * alternative, since we're going to commit this transaction and - * begin a new one between now and then. + * Since we don't take a lock here, the relation might be gone, or the + * RangeVar might no longer refer to the OID we look up here. In the + * former case, VACUUM will do nothing; in the latter case, it will + * process the OID we looked up here, rather than the new one. + * Neither is ideal, but there's little practical alternative, since + * we're going to commit this transaction and begin a new one between + * now and then. */ relid = RangeVarGetRelid(vacrel, NoLock, false); diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 3ff56a7366..5e90221164 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -155,9 +155,9 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt, BlockNumber possibly_freeable; PGRUsage ru0; TimestampTz starttime = 0; - long secs; - int usecs; - double read_rate, + long secs; + int usecs; + double read_rate, write_rate; bool scan_all; TransactionId freezeTableLimit; @@ -222,17 +222,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt, * * A corner case here is that if we scanned no pages at all because every * page is all-visible, we should not update relpages/reltuples, because - * we have no new information to contribute. In particular this keeps - * us from replacing relpages=reltuples=0 (which means "unknown tuple + * we have no new information to contribute. In particular this keeps us + * from replacing relpages=reltuples=0 (which means "unknown tuple * density") with nonzero relpages and reltuples=0 (which means "zero * tuple density") unless there's some actual evidence for the latter. * - * We do update relallvisible even in the corner case, since if the - * table is all-visible we'd definitely like to know that. But clamp - * the value to be not more than what we're setting relpages to. + * We do update relallvisible even in the corner case, since if the table + * is all-visible we'd definitely like to know that. But clamp the value + * to be not more than what we're setting relpages to. * - * Also, don't change relfrozenxid if we skipped any pages, since then - * we don't know for certain that all tuples have a newer xmin. + * Also, don't change relfrozenxid if we skipped any pages, since then we + * don't know for certain that all tuples have a newer xmin. 
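/*
 * Illustrative sketch, not part of this commit: why the code below keeps
 * relpages = reltuples = 0 ("unknown density") rather than writing
 * relpages > 0 with reltuples = 0 ("known-zero density") without actual
 * evidence.  The planner reads the pair as a tuple-density sample; the
 * math here is schematic, not the real estimator.
 */
static double
estimated_rows(double relpages, double reltuples, double curpages)
{
    if (relpages == 0)
        return curpages * 10;       /* density unknown: fall back to a guess */
    return reltuples / relpages * curpages;     /* sampled density, rescaled */
}
/* estimated_rows(0, 0, 100) still guesses; estimated_rows(100, 0, 100) is a hard zero. */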
*/ new_rel_pages = vacrelstats->rel_pages; new_rel_tuples = vacrelstats->new_rel_tuples; @@ -265,7 +265,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt, /* and log the action if appropriate */ if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0) { - TimestampTz endtime = GetCurrentTimestamp(); + TimestampTz endtime = GetCurrentTimestamp(); if (Log_autovacuum_min_duration == 0 || TimestampDifferenceExceeds(starttime, endtime, @@ -277,17 +277,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt, write_rate = 0; if ((secs > 0) || (usecs > 0)) { - read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) / - (secs + usecs / 1000000.0); - write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) / - (secs + usecs / 1000000.0); + read_rate = (double) BLCKSZ *VacuumPageMiss / (1024 * 1024) / + (secs + usecs / 1000000.0); + write_rate = (double) BLCKSZ *VacuumPageDirty / (1024 * 1024) / + (secs + usecs / 1000000.0); } ereport(LOG, (errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n" "pages: %d removed, %d remain\n" "tuples: %.0f removed, %.0f remain\n" "buffer usage: %d hits, %d misses, %d dirtied\n" - "avg read rate: %.3f MiB/s, avg write rate: %.3f MiB/s\n" + "avg read rate: %.3f MiB/s, avg write rate: %.3f MiB/s\n" "system usage: %s", get_database_name(MyDatabaseId), get_namespace_name(RelationGetNamespace(onerel)), @@ -300,7 +300,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt, VacuumPageHit, VacuumPageMiss, VacuumPageDirty, - read_rate,write_rate, + read_rate, write_rate, pg_rusage_show(&ru0)))); } } @@ -501,10 +501,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, vacrelstats->num_dead_tuples > 0) { /* - * Before beginning index vacuuming, we release any pin we may hold - * on the visibility map page. This isn't necessary for correctness, - * but we do it anyway to avoid holding the pin across a lengthy, - * unrelated operation. + * Before beginning index vacuuming, we release any pin we may + * hold on the visibility map page. This isn't necessary for + * correctness, but we do it anyway to avoid holding the pin + * across a lengthy, unrelated operation. */ if (BufferIsValid(vmbuffer)) { @@ -535,10 +535,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, /* * Pin the visibility map page in case we need to mark the page * all-visible. In most cases this will be very cheap, because we'll - * already have the correct page pinned anyway. However, it's possible - * that (a) next_not_all_visible_block is covered by a different VM page - * than the current block or (b) we released our pin and did a cycle of - * index vacuuming. + * already have the correct page pinned anyway. However, it's + * possible that (a) next_not_all_visible_block is covered by a + * different VM page than the current block or (b) we released our pin + * and did a cycle of index vacuuming. */ visibilitymap_pin(onerel, blkno, &vmbuffer); @@ -873,10 +873,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, else if (!all_visible_according_to_vm) { /* - * It should never be the case that the visibility map page - * is set while the page-level bit is clear, but the reverse - * is allowed. Set the visibility map bit as well so that - * we get back in sync. + * It should never be the case that the visibility map page is + * set while the page-level bit is clear, but the reverse is + * allowed. Set the visibility map bit as well so that we get + * back in sync. 
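/*
 * Illustrative sketch, not part of this commit: the invariant being
 * restored below.  The page-level all-visible flag may be set while the
 * visibility map bit is clear (merely conservative), but a set map bit
 * with a clear page flag would let index-only scans trust pages whose
 * tuples still need visibility checks.  Hence "get back in sync" means
 * setting the map bit, never clearing the page flag.
 */
#include <stdbool.h>

static bool
vm_state_is_sane(bool page_all_visible, bool vm_bit_set)
{
    return !vm_bit_set || page_all_visible;     /* map bit implies page flag */
}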
*/ visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer, visibility_cutoff_xid); @@ -1152,7 +1152,7 @@ lazy_check_needs_freeze(Buffer buf) if (heap_tuple_needs_freeze(tupleheader, FreezeLimit, buf)) return true; - } /* scan along page */ + } /* scan along page */ return false; } diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index c887961bc9..3e7e39d8ec 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -204,8 +204,8 @@ DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace, checkViewTupleDesc(descriptor, rel->rd_att); /* - * The new options list replaces the existing options list, even - * if it's empty. + * The new options list replaces the existing options list, even if + * it's empty. */ atcmd = makeNode(AlterTableCmd); atcmd->subtype = AT_ReplaceRelOptions; @@ -504,7 +504,7 @@ DefineView(ViewStmt *stmt, const char *queryString) * long as the CREATE command is consistent with that --- no explicit * schema name. */ - view = copyObject(stmt->view); /* don't corrupt original command */ + view = copyObject(stmt->view); /* don't corrupt original command */ if (view->relpersistence == RELPERSISTENCE_PERMANENT && isViewOnTempTable(viewParse)) { diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c index 03790bbe06..2c8929b588 100644 --- a/src/backend/executor/execCurrent.c +++ b/src/backend/executor/execCurrent.c @@ -151,7 +151,7 @@ execCurrentOf(CurrentOfExpr *cexpr, { ScanState *scanstate; bool lisnull; - Oid tuple_tableoid PG_USED_FOR_ASSERTS_ONLY; + Oid tuple_tableoid PG_USED_FOR_ASSERTS_ONLY; ItemPointer tuple_tid; /* diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index fbb36fa6dc..440438b180 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -80,7 +80,7 @@ static void ExecutePlan(EState *estate, PlanState *planstate, static bool ExecCheckRTEPerms(RangeTblEntry *rte); static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt); static char *ExecBuildSlotValueDescription(TupleTableSlot *slot, - int maxfieldlen); + int maxfieldlen); static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree); @@ -1520,7 +1520,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo, ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("null value in column \"%s\" violates not-null constraint", - NameStr(rel->rd_att->attrs[attrChk - 1]->attname)), + NameStr(rel->rd_att->attrs[attrChk - 1]->attname)), errdetail("Failing row contains %s.", ExecBuildSlotValueDescription(slot, 64)))); } diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index a1193a8dc3..0ea21ca5f9 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -578,15 +578,15 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, /* Get the input slot and attribute number we want */ switch (variable->varno) { - case INNER_VAR: /* get the tuple from the inner node */ + case INNER_VAR: /* get the tuple from the inner node */ slot = econtext->ecxt_innertuple; break; - case OUTER_VAR: /* get the tuple from the outer node */ + case OUTER_VAR: /* get the tuple from the outer node */ slot = econtext->ecxt_outertuple; break; - /* INDEX_VAR is handled by default case */ + /* INDEX_VAR is handled by default case */ default: /* get the tuple from the relation being * scanned */ @@ -763,15 +763,15 @@ ExecEvalScalarVar(ExprState *exprstate, ExprContext *econtext, /* Get the input slot and attribute number we 
want */ switch (variable->varno) { - case INNER_VAR: /* get the tuple from the inner node */ + case INNER_VAR: /* get the tuple from the inner node */ slot = econtext->ecxt_innertuple; break; - case OUTER_VAR: /* get the tuple from the outer node */ + case OUTER_VAR: /* get the tuple from the outer node */ slot = econtext->ecxt_outertuple; break; - /* INDEX_VAR is handled by default case */ + /* INDEX_VAR is handled by default case */ default: /* get the tuple from the relation being * scanned */ @@ -808,15 +808,15 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext, /* Get the input slot we want */ switch (variable->varno) { - case INNER_VAR: /* get the tuple from the inner node */ + case INNER_VAR: /* get the tuple from the inner node */ slot = econtext->ecxt_innertuple; break; - case OUTER_VAR: /* get the tuple from the outer node */ + case OUTER_VAR: /* get the tuple from the outer node */ slot = econtext->ecxt_outertuple; break; - /* INDEX_VAR is handled by default case */ + /* INDEX_VAR is handled by default case */ default: /* get the tuple from the relation being * scanned */ @@ -879,15 +879,15 @@ ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext, /* Get the input slot we want */ switch (variable->varno) { - case INNER_VAR: /* get the tuple from the inner node */ + case INNER_VAR: /* get the tuple from the inner node */ slot = econtext->ecxt_innertuple; break; - case OUTER_VAR: /* get the tuple from the outer node */ + case OUTER_VAR: /* get the tuple from the outer node */ slot = econtext->ecxt_outertuple; break; - /* INDEX_VAR is handled by default case */ + /* INDEX_VAR is handled by default case */ default: /* get the tuple from the relation being * scanned */ diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 40cd5ce5d1..2bd8b42835 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -578,7 +578,7 @@ ExecBuildProjectionInfo(List *targetList, projInfo->pi_lastOuterVar = attnum; break; - /* INDEX_VAR is handled by default case */ + /* INDEX_VAR is handled by default case */ default: varSlotOffsets[numSimpleVars] = offsetof(ExprContext, @@ -638,7 +638,7 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo) projInfo->pi_lastOuterVar = attnum; break; - /* INDEX_VAR is handled by default case */ + /* INDEX_VAR is handled by default case */ default: if (projInfo->pi_lastScanVar < attnum) diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index ae8d374db2..bf2f5c6882 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -128,11 +128,11 @@ typedef struct SQLFunctionParseInfo /* non-export function prototypes */ static Node *sql_fn_param_ref(ParseState *pstate, ParamRef *pref); static Node *sql_fn_post_column_ref(ParseState *pstate, - ColumnRef *cref, Node *var); + ColumnRef *cref, Node *var); static Node *sql_fn_make_param(SQLFunctionParseInfoPtr pinfo, - int paramno, int location); + int paramno, int location); static Node *sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo, - const char *paramname, int location); + const char *paramname, int location); static List *init_execution_state(List *queryTree_list, SQLFunctionCachePtr fcache, bool lazyEvalOK); @@ -227,13 +227,13 @@ prepare_sql_fn_parse_info(HeapTuple procedureTuple, Anum_pg_proc_proargnames, &isNull); if (isNull) - proargnames = PointerGetDatum(NULL); /* just to be sure */ + proargnames = PointerGetDatum(NULL); /* just to be sure */ proargmodes = 
SysCacheGetAttr(PROCNAMEARGSNSP, procedureTuple, Anum_pg_proc_proargmodes, &isNull); if (isNull) - proargmodes = PointerGetDatum(NULL); /* just to be sure */ + proargmodes = PointerGetDatum(NULL); /* just to be sure */ n_arg_names = get_func_input_arg_names(proargnames, proargmodes, &pinfo->argnames); @@ -422,7 +422,7 @@ static Node * sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo, const char *paramname, int location) { - int i; + int i; if (pinfo->argnames == NULL) return NULL; diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index 849665d4e2..702e704098 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -66,6 +66,7 @@ BitmapHeapNext(BitmapHeapScanState *node) TIDBitmap *tbm; TBMIterator *tbmiterator; TBMIterateResult *tbmres; + #ifdef USE_PREFETCH TBMIterator *prefetch_iterator; #endif @@ -355,7 +356,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) { OffsetNumber offnum = tbmres->offsets[curslot]; ItemPointerData tid; - HeapTupleData heapTuple; + HeapTupleData heapTuple; ItemPointerSet(&tid, page, offnum); if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot, diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c index af31671b3e..38078763f5 100644 --- a/src/backend/executor/nodeIndexonlyscan.c +++ b/src/backend/executor/nodeIndexonlyscan.c @@ -86,7 +86,7 @@ IndexOnlyNext(IndexOnlyScanState *node) * Note on Memory Ordering Effects: visibilitymap_test does not lock * the visibility map buffer, and therefore the result we read here * could be slightly stale. However, it can't be stale enough to - * matter. It suffices to show that (1) there is a read barrier + * matter. It suffices to show that (1) there is a read barrier * between the time we read the index TID and the time we test the * visibility map; and (2) there is a write barrier between the time * some other concurrent process clears the visibility map bit and the @@ -106,12 +106,12 @@ IndexOnlyNext(IndexOnlyScanState *node) node->ioss_HeapFetches++; tuple = index_fetch_heap(scandesc); if (tuple == NULL) - continue; /* no visible tuple, try next index entry */ + continue; /* no visible tuple, try next index entry */ /* * Only MVCC snapshots are supported here, so there should be no * need to keep following the HOT chain once a visible entry has - * been found. If we did want to allow that, we'd need to keep + * been found. If we did want to allow that, we'd need to keep * more state to remember not to call index_getnext_tid next time. */ if (scandesc->xs_continue_hot) @@ -120,7 +120,7 @@ IndexOnlyNext(IndexOnlyScanState *node) /* * Note: at this point we are holding a pin on the heap page, as * recorded in scandesc->xs_cbuf. We could release that pin now, - * but it's not clear whether it's a win to do so. The next index + * but it's not clear whether it's a win to do so. The next index * entry might require a visit to the same heap page. */ } @@ -176,8 +176,8 @@ StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup, TupleDesc itupdesc) * Note: we must use the tupdesc supplied by the AM in index_getattr, not * the slot's tupdesc, in case the latter has different datatypes (this * happens for btree name_ops in particular). They'd better have the same - * number of columns though, as well as being datatype-compatible which - * is something we can't so easily check. 
+ * number of columns though, as well as being datatype-compatible which is + * something we can't so easily check. */ Assert(slot->tts_tupleDescriptor->natts == nindexatts); @@ -494,10 +494,10 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) * Initialize scan descriptor. */ indexstate->ioss_ScanDesc = index_beginscan(currentRelation, - indexstate->ioss_RelationDesc, - estate->es_snapshot, - indexstate->ioss_NumScanKeys, - indexstate->ioss_NumOrderByKeys); + indexstate->ioss_RelationDesc, + estate->es_snapshot, + indexstate->ioss_NumScanKeys, + indexstate->ioss_NumOrderByKeys); /* Set it up for index-only scan */ indexstate->ioss_ScanDesc->xs_want_itup = true; diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c index 06137c6ba8..3a6bfec0db 100644 --- a/src/backend/executor/nodeMaterial.c +++ b/src/backend/executor/nodeMaterial.c @@ -66,7 +66,7 @@ ExecMaterial(MaterialState *node) * Allocate a second read pointer to serve as the mark. We know it * must have index 1, so needn't store that. */ - int ptrno PG_USED_FOR_ASSERTS_ONLY; + int ptrno PG_USED_FOR_ASSERTS_ONLY; ptrno = tuplestore_alloc_read_pointer(tuplestorestate, node->eflags); diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index d755109a33..d5141ba54e 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -130,7 +130,7 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) for (i = 0; i < node->numCols; i++) { - SortSupport sortKey = mergestate->ms_sortkeys + i; + SortSupport sortKey = mergestate->ms_sortkeys + i; sortKey->ssup_cxt = CurrentMemoryContext; sortKey->ssup_collation = node->collations[i]; @@ -276,7 +276,7 @@ heap_compare_slots(MergeAppendState *node, SlotNumber slot1, SlotNumber slot2) for (nkey = 0; nkey < node->ms_nkeys; nkey++) { - SortSupport sortKey = node->ms_sortkeys + nkey; + SortSupport sortKey = node->ms_sortkeys + nkey; AttrNumber attno = sortKey->ssup_attno; Datum datum1, datum2; diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index a1e55646c6..bc0b20bf82 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -247,7 +247,7 @@ MJExamineQuals(List *mergeclauses, op_lefttype, op_righttype, BTORDER_PROC); - if (!OidIsValid(sortfunc)) /* should not happen */ + if (!OidIsValid(sortfunc)) /* should not happen */ elog(ERROR, "missing support function %d(%u,%u) in opfamily %u", BTORDER_PROC, op_lefttype, op_righttype, opfamily); /* We'll use a shim to call the old-style btree comparator */ @@ -405,7 +405,7 @@ MJCompare(MergeJoinState *mergestate) */ if (clause->lisnull && clause->risnull) { - nulleqnull = true; /* NULL "=" NULL */ + nulleqnull = true; /* NULL "=" NULL */ continue; } @@ -419,8 +419,8 @@ MJCompare(MergeJoinState *mergestate) /* * If we had any NULL-vs-NULL inputs, we do not want to report that the - * tuples are equal. Instead, if result is still 0, change it to +1. - * This will result in advancing the inner side of the join. + * tuples are equal. Instead, if result is still 0, change it to +1. This + * will result in advancing the inner side of the join. * * Likewise, if there was a constant-false joinqual, do not report * equality. 
We have to check this as part of the mergequals, else the diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index dfdcb20b1d..a7bce75f0c 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -950,8 +950,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * If there are indices on the result relation, open them and save * descriptors in the result relation info, so that we can add new * index entries for the tuples we add/update. We need not do this - * for a DELETE, however, since deletion doesn't affect indexes. - * Also, inside an EvalPlanQual operation, the indexes might be open + * for a DELETE, however, since deletion doesn't affect indexes. Also, + * inside an EvalPlanQual operation, the indexes might be open * already, since we share the resultrel state with the original * query. */ diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 85590445cc..362f4466e4 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -344,7 +344,7 @@ setop_fill_hash_table(SetOpState *setopstate) SetOp *node = (SetOp *) setopstate->ps.plan; PlanState *outerPlan; int firstFlag; - bool in_first_rel PG_USED_FOR_ASSERTS_ONLY; + bool in_first_rel PG_USED_FOR_ASSERTS_ONLY; /* * get state info from node diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 5e4ae426b1..e222365d11 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -1674,8 +1674,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams) raw_parsetree_list = pg_parse_query(src); /* - * Do parse analysis and rule rewrite for each raw parsetree, storing - * the results into unsaved plancache entries. + * Do parse analysis and rule rewrite for each raw parsetree, storing the + * results into unsaved plancache entries. */ plancache_list = NIL; @@ -1686,8 +1686,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams) CachedPlanSource *plansource; /* - * Create the CachedPlanSource before we do parse analysis, since - * it needs to see the unmodified raw parse tree. + * Create the CachedPlanSource before we do parse analysis, since it + * needs to see the unmodified raw parse tree. */ plansource = CreateCachedPlan(parsetree, src, @@ -1722,7 +1722,7 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams) plan->parserSetup, plan->parserSetupArg, cursor_options, - false); /* not fixed result */ + false); /* not fixed result */ plancache_list = lappend(plancache_list, plansource); } @@ -1907,7 +1907,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, } else { - char completionTag[COMPLETION_TAG_BUFSIZE]; + char completionTag[COMPLETION_TAG_BUFSIZE]; ProcessUtility(stmt, plansource->query_string, @@ -2335,9 +2335,9 @@ _SPI_make_plan_non_temp(SPIPlanPtr plan) /* * Reparent all the CachedPlanSources into the procedure context. In - * theory this could fail partway through due to the pallocs, but we - * don't care too much since both the procedure context and the executor - * context would go away on error. + * theory this could fail partway through due to the pallocs, but we don't + * care too much since both the procedure context and the executor context + * would go away on error. 
*/ foreach(lc, plan->plancache_list) { diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 5853b068da..9cdee2bb3e 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -316,8 +316,8 @@ ClientAuthentication(Port *port) /* * Get the authentication method to use for this frontend/database * combination. Note: we do not parse the file at this point; this has - * already been done elsewhere. hba.c dropped an error message - * into the server logfile if parsing the hba config file failed. + * already been done elsewhere. hba.c dropped an error message into the + * server logfile if parsing the hba config file failed. */ hba_getauthmethod(port); @@ -1365,10 +1365,10 @@ pg_SSPI_recvauth(Port *port) } /* - * Overwrite the current context with the one we just received. - * If sspictx is NULL it was the first loop and we need to allocate - * a buffer for it. On subsequent runs, we can just overwrite the - * buffer contents since the size does not change. + * Overwrite the current context with the one we just received. If + * sspictx is NULL it was the first loop and we need to allocate a + * buffer for it. On subsequent runs, we can just overwrite the buffer + * contents since the size does not change. */ if (sspictx == NULL) { @@ -1437,8 +1437,8 @@ pg_SSPI_recvauth(Port *port) if (!GetTokenInformation(token, TokenUser, NULL, 0, &retlen) && GetLastError() != 122) ereport(ERROR, - (errmsg_internal("could not get token user size: error code %lu", - GetLastError()))); + (errmsg_internal("could not get token user size: error code %lu", + GetLastError()))); tokenuser = malloc(retlen); if (tokenuser == NULL) @@ -1453,8 +1453,8 @@ pg_SSPI_recvauth(Port *port) if (!LookupAccountSid(NULL, tokenuser->User.Sid, accountname, &accountnamesize, domainname, &domainnamesize, &accountnameuse)) ereport(ERROR, - (errmsg_internal("could not look up account SID: error code %lu", - GetLastError()))); + (errmsg_internal("could not look up account SID: error code %lu", + GetLastError()))); free(tokenuser); diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c index dce0eaa20e..e0ab5997fb 100644 --- a/src/backend/libpq/be-secure.c +++ b/src/backend/libpq/be-secure.c @@ -89,10 +89,10 @@ static void close_SSL(Port *); static const char *SSLerrmessage(void); #endif -char *ssl_cert_file; -char *ssl_key_file; -char *ssl_ca_file; -char *ssl_crl_file; +char *ssl_cert_file; +char *ssl_key_file; +char *ssl_ca_file; +char *ssl_crl_file; /* * How much data can be sent across a secure connection @@ -845,8 +845,8 @@ initialize_SSL(void) { /* * Always ask for SSL client cert, but don't fail if it's not - * presented. We might fail such connections later, depending on - * what we find in pg_hba.conf. + * presented. We might fail such connections later, depending on what + * we find in pg_hba.conf. 
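/*
 * Illustrative sketch, not part of this commit: in OpenSSL, "ask for a
 * client certificate but don't fail the handshake" is expressed by
 * requesting SSL_VERIFY_PEER without SSL_VERIFY_FAIL_IF_NO_PEER_CERT, so a
 * missing certificate reaches the authentication layer instead of aborting
 * the connection.  A verify callback may be installed in real code; NULL
 * keeps the sketch minimal.
 */
#include <openssl/ssl.h>

static void
request_optional_client_cert(SSL_CTX *ctx)
{
    SSL_CTX_set_verify(ctx,
                       SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE,
                       NULL);   /* no custom verify callback in this sketch */
}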
*/ SSL_CTX_set_verify(SSL_context, (SSL_VERIFY_PEER | @@ -953,7 +953,7 @@ aloop: port->peer_cn = NULL; if (port->peer != NULL) { - int len; + int len; len = X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer), NID_commonName, NULL, 0); diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index 56229cb4df..828f6dcc8e 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -61,8 +61,8 @@ typedef struct check_network_data */ typedef struct HbaToken { - char *string; - bool quoted; + char *string; + bool quoted; } HbaToken; /* @@ -76,9 +76,9 @@ static MemoryContext parsed_hba_context = NULL; * These variables hold the pre-parsed contents of the ident usermap * configuration file. ident_lines is a triple-nested list of lines, fields * and tokens, as returned by tokenize_file. There will be one line in - * ident_lines for each (non-empty, non-comment) line of the file. Note there + * ident_lines for each (non-empty, non-comment) line of the file. Note there * will always be at least one field, since blank lines are not entered in the - * data structure. ident_line_nums is an integer list containing the actual + * data structure. ident_line_nums is an integer list containing the actual * line number for each line represented in ident_lines. ident_context is * the memory context holding all this. */ @@ -246,7 +246,7 @@ make_hba_token(char *token, bool quoted) static HbaToken * copy_hba_token(HbaToken *in) { - HbaToken *out = make_hba_token(in->string, in->quoted); + HbaToken *out = make_hba_token(in->string, in->quoted); return out; } @@ -283,12 +283,12 @@ next_field_expand(const char *filename, FILE *file) /* * tokenize_inc_file - * Expand a file included from another file into an hba "field" + * Expand a file included from another file into an hba "field" * * Opens and tokenises a file included from another HBA config file with @, * and returns all values found therein as a flat list of HbaTokens. If a * @-token is found, recursively expand it. The given token list is used as - * initial contents of list (so foo,bar,@baz does what you expect). + * initial contents of list (so foo,bar,@baz does what you expect). */ static List * tokenize_inc_file(List *tokens, @@ -377,8 +377,8 @@ tokenize_file(const char *filename, FILE *file, List *current_line = NIL; List *current_field = NIL; int line_number = 1; - MemoryContext linecxt; - MemoryContext oldcxt; + MemoryContext linecxt; + MemoryContext oldcxt; linecxt = AllocSetContextCreate(TopMemoryContext, "tokenize file cxt", @@ -442,11 +442,10 @@ is_member(Oid userid, const char *role) if (!OidIsValid(roleid)) return false; /* if target role not exist, say "no" */ - /* - * See if user is directly or indirectly a member of role. - * For this purpose, a superuser is not considered to be automatically - * a member of the role, so group auth only applies to explicit - * membership. + /* + * See if user is directly or indirectly a member of role. For this + * purpose, a superuser is not considered to be automatically a member of + * the role, so group auth only applies to explicit membership. 
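/*
 * Illustrative sketch, not part of this commit: the pg_hba.conf convention
 * that check_role() below implements -- an unquoted token of the form
 * "+name" matches through group membership, anything else by literal role
 * name.  member_of() is a hypothetical stand-in for the catalog lookup
 * above.
 */
#include <stdbool.h>
#include <string.h>

static bool
member_of(const char *role, const char *group)
{
    (void) role;
    (void) group;
    return false;               /* stub: the real lookup consults pg_auth_members */
}

static bool
hba_role_token_matches(const char *tok, bool quoted, const char *role)
{
    if (!quoted && tok[0] == '+')
        return member_of(role, tok + 1);    /* group membership */
    return strcmp(tok, role) == 0;          /* exact role name */
}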
*/ return is_member_of_role_nosuper(userid, roleid); } @@ -457,8 +456,8 @@ is_member(Oid userid, const char *role) static bool check_role(const char *role, Oid roleid, List *tokens) { - ListCell *cell; - HbaToken *tok; + ListCell *cell; + HbaToken *tok; foreach(cell, tokens) { @@ -481,8 +480,8 @@ check_role(const char *role, Oid roleid, List *tokens) static bool check_db(const char *dbname, const char *role, Oid roleid, List *tokens) { - ListCell *cell; - HbaToken *tok; + ListCell *cell; + HbaToken *tok; foreach(cell, tokens) { @@ -825,7 +824,7 @@ parse_hba_line(List *line, int line_num) List *tokens; ListCell *tokencell; HbaToken *token; - HbaLine *parsedline; + HbaLine *parsedline; parsedline = palloc0(sizeof(HbaLine)); parsedline->linenumber = line_num; @@ -1042,8 +1041,8 @@ parse_hba_line(List *line, int line_num) (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("specifying both host name and CIDR mask is invalid: \"%s\"", token->string), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); return NULL; } @@ -1080,9 +1079,9 @@ parse_hba_line(List *line, int line_num) { ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("multiple values specified for netmask"), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errmsg("multiple values specified for netmask"), + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); return NULL; } token = linitial(tokens); @@ -1293,6 +1292,7 @@ parse_hba_line(List *line, int line_num) foreach(tokencell, tokens) { char *val; + token = lfirst(tokencell); str = pstrdup(token->string); @@ -1310,7 +1310,7 @@ parse_hba_line(List *line, int line_num) return NULL; } - *val++ = '\0'; /* str now holds "name", val holds "value" */ + *val++ = '\0'; /* str now holds "name", val holds "value" */ if (!parse_hba_auth_opt(str, val, parsedline, line_num)) /* parse_hba_auth_opt already logged the error message */ return NULL; @@ -1397,17 +1397,16 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num) else if (strcmp(name, "clientcert") == 0) { /* - * Since we require ctHostSSL, this really can never happen - * on non-SSL-enabled builds, so don't bother checking for - * USE_SSL. + * Since we require ctHostSSL, this really can never happen on + * non-SSL-enabled builds, so don't bother checking for USE_SSL. 
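/*
 * Illustrative pg_hba.conf lines, not part of this commit, showing the
 * rule enforced below -- clientcert is only accepted on "hostssl" rows,
 * and "cert" authentication forces it on:
 *
 *   hostssl  all  all  10.0.0.0/8  cert                 # ok, clientcert implied
 *   hostssl  all  all  10.0.0.0/8  md5  clientcert=1    # ok
 *   host     all  all  10.0.0.0/8  md5  clientcert=1    # rejected at load time
 */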
*/ if (hbaline->conntype != ctHostSSL) { ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("clientcert can only be configured for \"hostssl\" rows"), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errmsg("clientcert can only be configured for \"hostssl\" rows"), + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); return false; } if (strcmp(val, "1") == 0) @@ -1418,8 +1417,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num) (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("client certificates can only be checked if a root certificate store is available"), errhint("Make sure the configuration parameter \"ssl_ca_file\" is set."), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); return false; } hbaline->clientcert = true; @@ -1431,8 +1430,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num) ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("clientcert can not be set to 0 when using \"cert\" authentication"), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); return false; } hbaline->clientcert = false; @@ -1465,8 +1464,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num) ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("invalid LDAP port number: \"%s\"", val), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); return false; } } @@ -1528,7 +1527,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num) { struct addrinfo *gai_result; struct addrinfo hints; - int ret; + int ret; REQUIRE_AUTH_OPTION(uaRADIUS, "radiusserver", "radius"); @@ -1543,8 +1542,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num) (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not translate RADIUS server name \"%s\" to address: %s", val, gai_strerror(ret)), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); if (gai_result) pg_freeaddrinfo_all(hints.ai_family, gai_result); return false; @@ -1561,8 +1560,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num) ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("invalid RADIUS port number: \"%s\"", val), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); return false; } } @@ -1580,8 +1579,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num) { ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("unrecognized authentication option name: \"%s\"", - name), + errmsg("unrecognized authentication option name: \"%s\"", + name), errcontext("line %d of configuration file \"%s\"", line_num, HbaFileName))); return false; @@ -1693,7 +1692,7 @@ check_hba(hbaPort *port) * Read the config file and create a List of HbaLine records for the contents. * * The configuration is read into a temporary list, and if any parse error - * occurs the old list is kept in place and false is returned. Only if the + * occurs the old list is kept in place and false is returned. 
Only if the * whole file parses OK is the list replaced, and the function returns true. * * On a false result, caller will take care of reporting a FATAL error in case @@ -1710,9 +1709,9 @@ load_hba(void) *line_num; List *new_parsed_lines = NIL; bool ok = true; - MemoryContext linecxt; - MemoryContext oldcxt; - MemoryContext hbacxt; + MemoryContext linecxt; + MemoryContext oldcxt; + MemoryContext hbacxt; file = AllocateFile(HbaFileName, "r"); if (file == NULL) @@ -1742,8 +1741,8 @@ load_hba(void) { /* * Parse error in the file, so indicate there's a problem. NB: a - * problem in a line will free the memory for all previous lines as - * well! + * problem in a line will free the memory for all previous lines + * as well! */ MemoryContextReset(hbacxt); new_parsed_lines = NIL; @@ -1761,9 +1760,9 @@ load_hba(void) } /* - * A valid HBA file must have at least one entry; else there's no way - * to connect to the postmaster. But only complain about this if we - * didn't already have parsing errors. + * A valid HBA file must have at least one entry; else there's no way to + * connect to the postmaster. But only complain about this if we didn't + * already have parsing errors. */ if (ok && new_parsed_lines == NIL) { diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 2082e3d4f6..5272811cc0 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -1247,9 +1247,9 @@ internal_flush(void) /* * We drop the buffered data anyway so that processing can - * continue, even though we'll probably quit soon. We also - * set a flag that'll cause the next CHECK_FOR_INTERRUPTS - * to terminate the connection. + * continue, even though we'll probably quit soon. We also set a + * flag that'll cause the next CHECK_FOR_INTERRUPTS to terminate + * the connection. 
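/*
 * Illustrative sketch, not part of this commit: why the flush path below
 * sets a flag instead of raising an error on the spot.  Reporting an error
 * would try to write to the same dead connection, so the buffer is dropped
 * and the session is terminated at the next safe interrupt check.  The
 * names here are schematic stand-ins for the PqSend globals and
 * ClientConnectionLost.
 */
#include <stddef.h>

static int  client_connection_lost = 0;

static void
on_unrecoverable_write_error(size_t *send_start, size_t *send_pointer)
{
    *send_start = *send_pointer = 0;    /* drop buffered output */
    client_connection_lost = 1;         /* honored at the next CHECK_FOR_INTERRUPTS */
}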
*/ PqSendStart = PqSendPointer = 0; ClientConnectionLost = 1; @@ -1373,7 +1373,7 @@ fail: void pq_putmessage_noblock(char msgtype, const char *s, size_t len) { - int res PG_USED_FOR_ASSERTS_ONLY; + int res PG_USED_FOR_ASSERTS_ONLY; int required; /* diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index 4c904e0329..ba10840166 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -362,8 +362,8 @@ bms_subset_compare(const Bitmapset *a, const Bitmapset *b) shortlen = Min(a->nwords, b->nwords); for (i = 0; i < shortlen; i++) { - bitmapword aword = a->words[i]; - bitmapword bword = b->words[i]; + bitmapword aword = a->words[i]; + bitmapword bword = b->words[i]; if ((aword & ~bword) != 0) { diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 0db60b161b..1743b8fdc8 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -381,7 +381,7 @@ _copyIndexScan(const IndexScan *from) static IndexOnlyScan * _copyIndexOnlyScan(const IndexOnlyScan *from) { - IndexOnlyScan *newnode = makeNode(IndexOnlyScan); + IndexOnlyScan *newnode = makeNode(IndexOnlyScan); /* * copy node superclass fields @@ -4473,7 +4473,7 @@ copyObject(const void *from) default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(from)); - retval = 0; /* keep compiler quiet */ + retval = 0; /* keep compiler quiet */ break; } diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index 9d588feac2..f19ad77026 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -2360,8 +2360,8 @@ _equalXmlSerialize(const XmlSerialize *a, const XmlSerialize *b) static bool _equalList(const List *a, const List *b) { - const ListCell *item_a; - const ListCell *item_b; + const ListCell *item_a; + const ListCell *item_b; /* * Try to reject by simple scalar checks before grovelling through all the diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c index 209b72222e..4d19aed8f4 100644 --- a/src/backend/nodes/list.c +++ b/src/backend/nodes/list.c @@ -443,7 +443,7 @@ list_nth_oid(const List *list, int n) bool list_member(const List *list, const void *datum) { - const ListCell *cell; + const ListCell *cell; Assert(IsPointerList(list)); check_list_invariants(list); @@ -464,7 +464,7 @@ list_member(const List *list, const void *datum) bool list_member_ptr(const List *list, const void *datum) { - const ListCell *cell; + const ListCell *cell; Assert(IsPointerList(list)); check_list_invariants(list); @@ -484,7 +484,7 @@ list_member_ptr(const List *list, const void *datum) bool list_member_int(const List *list, int datum) { - const ListCell *cell; + const ListCell *cell; Assert(IsIntegerList(list)); check_list_invariants(list); @@ -504,7 +504,7 @@ list_member_int(const List *list, int datum) bool list_member_oid(const List *list, Oid datum) { - const ListCell *cell; + const ListCell *cell; Assert(IsOidList(list)); check_list_invariants(list); @@ -697,7 +697,7 @@ List * list_union(const List *list1, const List *list2) { List *result; - const ListCell *cell; + const ListCell *cell; Assert(IsPointerList(list1)); Assert(IsPointerList(list2)); @@ -721,7 +721,7 @@ List * list_union_ptr(const List *list1, const List *list2) { List *result; - const ListCell *cell; + const ListCell *cell; Assert(IsPointerList(list1)); Assert(IsPointerList(list2)); @@ -744,7 +744,7 @@ List * list_union_int(const List *list1, const List *list2) { List *result; - const ListCell *cell; + const ListCell *cell; 
Assert(IsIntegerList(list1)); Assert(IsIntegerList(list2)); @@ -767,7 +767,7 @@ List * list_union_oid(const List *list1, const List *list2) { List *result; - const ListCell *cell; + const ListCell *cell; Assert(IsOidList(list1)); Assert(IsOidList(list2)); @@ -800,7 +800,7 @@ List * list_intersection(const List *list1, const List *list2) { List *result; - const ListCell *cell; + const ListCell *cell; if (list1 == NIL || list2 == NIL) return NIL; @@ -831,7 +831,7 @@ list_intersection(const List *list1, const List *list2) List * list_difference(const List *list1, const List *list2) { - const ListCell *cell; + const ListCell *cell; List *result = NIL; Assert(IsPointerList(list1)); @@ -857,7 +857,7 @@ list_difference(const List *list1, const List *list2) List * list_difference_ptr(const List *list1, const List *list2) { - const ListCell *cell; + const ListCell *cell; List *result = NIL; Assert(IsPointerList(list1)); @@ -882,7 +882,7 @@ list_difference_ptr(const List *list1, const List *list2) List * list_difference_int(const List *list1, const List *list2) { - const ListCell *cell; + const ListCell *cell; List *result = NIL; Assert(IsIntegerList(list1)); @@ -907,7 +907,7 @@ list_difference_int(const List *list1, const List *list2) List * list_difference_oid(const List *list1, const List *list2) { - const ListCell *cell; + const ListCell *cell; List *result = NIL; Assert(IsOidList(list1)); diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index 6f9e053669..813d1da1a2 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -59,7 +59,7 @@ exprType(const Node *expr) break; case T_ArrayRef: { - const ArrayRef *arrayref = (const ArrayRef *) expr; + const ArrayRef *arrayref = (const ArrayRef *) expr; /* slice and/or store operations yield the array type */ if (arrayref->reflowerindexpr || arrayref->refassgnexpr) @@ -91,7 +91,7 @@ exprType(const Node *expr) break; case T_SubLink: { - const SubLink *sublink = (const SubLink *) expr; + const SubLink *sublink = (const SubLink *) expr; if (sublink->subLinkType == EXPR_SUBLINK || sublink->subLinkType == ARRAY_SUBLINK) @@ -125,7 +125,7 @@ exprType(const Node *expr) break; case T_SubPlan: { - const SubPlan *subplan = (const SubPlan *) expr; + const SubPlan *subplan = (const SubPlan *) expr; if (subplan->subLinkType == EXPR_SUBLINK || subplan->subLinkType == ARRAY_SUBLINK) @@ -282,7 +282,7 @@ exprTypmod(const Node *expr) break; case T_SubLink: { - const SubLink *sublink = (const SubLink *) expr; + const SubLink *sublink = (const SubLink *) expr; if (sublink->subLinkType == EXPR_SUBLINK || sublink->subLinkType == ARRAY_SUBLINK) @@ -303,7 +303,7 @@ exprTypmod(const Node *expr) break; case T_SubPlan: { - const SubPlan *subplan = (const SubPlan *) expr; + const SubPlan *subplan = (const SubPlan *) expr; if (subplan->subLinkType == EXPR_SUBLINK || subplan->subLinkType == ARRAY_SUBLINK) @@ -341,7 +341,7 @@ exprTypmod(const Node *expr) * If all the alternatives agree on type/typmod, return that * typmod, else use -1 */ - const CaseExpr *cexpr = (const CaseExpr *) expr; + const CaseExpr *cexpr = (const CaseExpr *) expr; Oid casetype = cexpr->casetype; int32 typmod; ListCell *arg; @@ -374,7 +374,7 @@ exprTypmod(const Node *expr) * If all the elements agree on type/typmod, return that * typmod, else use -1 */ - const ArrayExpr *arrayexpr = (const ArrayExpr *) expr; + const ArrayExpr *arrayexpr = (const ArrayExpr *) expr; Oid commontype; int32 typmod; ListCell *elem; @@ -493,7 +493,7 @@ exprIsLengthCoercion(const Node 
*expr, int32 *coercedTypmod) */ if (expr && IsA(expr, FuncExpr)) { - const FuncExpr *func = (const FuncExpr *) expr; + const FuncExpr *func = (const FuncExpr *) expr; int nargs; Const *second_arg; @@ -707,7 +707,7 @@ exprCollation(const Node *expr) break; case T_SubLink: { - const SubLink *sublink = (const SubLink *) expr; + const SubLink *sublink = (const SubLink *) expr; if (sublink->subLinkType == EXPR_SUBLINK || sublink->subLinkType == ARRAY_SUBLINK) @@ -733,7 +733,7 @@ exprCollation(const Node *expr) break; case T_SubPlan: { - const SubPlan *subplan = (const SubPlan *) expr; + const SubPlan *subplan = (const SubPlan *) expr; if (subplan->subLinkType == EXPR_SUBLINK || subplan->subLinkType == ARRAY_SUBLINK) @@ -1137,7 +1137,7 @@ exprLocation(const Node *expr) break; case T_FuncExpr: { - const FuncExpr *fexpr = (const FuncExpr *) expr; + const FuncExpr *fexpr = (const FuncExpr *) expr; /* consider both function name and leftmost arg */ loc = leftmostLoc(fexpr->location, @@ -1157,7 +1157,7 @@ exprLocation(const Node *expr) case T_DistinctExpr: /* struct-equivalent to OpExpr */ case T_NullIfExpr: /* struct-equivalent to OpExpr */ { - const OpExpr *opexpr = (const OpExpr *) expr; + const OpExpr *opexpr = (const OpExpr *) expr; /* consider both operator name and leftmost arg */ loc = leftmostLoc(opexpr->location, @@ -1175,7 +1175,7 @@ exprLocation(const Node *expr) break; case T_BoolExpr: { - const BoolExpr *bexpr = (const BoolExpr *) expr; + const BoolExpr *bexpr = (const BoolExpr *) expr; /* * Same as above, to handle either NOT or AND/OR. We can't @@ -1188,7 +1188,7 @@ exprLocation(const Node *expr) break; case T_SubLink: { - const SubLink *sublink = (const SubLink *) expr; + const SubLink *sublink = (const SubLink *) expr; /* check the testexpr, if any, and the operator/keyword */ loc = leftmostLoc(exprLocation(sublink->testexpr), @@ -1273,7 +1273,7 @@ exprLocation(const Node *expr) break; case T_XmlExpr: { - const XmlExpr *xexpr = (const XmlExpr *) expr; + const XmlExpr *xexpr = (const XmlExpr *) expr; /* consider both function name and leftmost arg */ loc = leftmostLoc(xexpr->location, @@ -1327,7 +1327,7 @@ exprLocation(const Node *expr) break; case T_A_Expr: { - const A_Expr *aexpr = (const A_Expr *) expr; + const A_Expr *aexpr = (const A_Expr *) expr; /* use leftmost of operator or left operand (if any) */ /* we assume right operand can't be to left of operator */ @@ -1346,7 +1346,7 @@ exprLocation(const Node *expr) break; case T_FuncCall: { - const FuncCall *fc = (const FuncCall *) expr; + const FuncCall *fc = (const FuncCall *) expr; /* consider both function name and leftmost arg */ /* (we assume any ORDER BY nodes must be to right of name) */ @@ -1364,7 +1364,7 @@ exprLocation(const Node *expr) break; case T_TypeCast: { - const TypeCast *tc = (const TypeCast *) expr; + const TypeCast *tc = (const TypeCast *) expr; /* * This could represent CAST(), ::, or TypeName 'literal', so diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index e690194b74..d6dff9de47 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -141,7 +141,7 @@ _outToken(StringInfo str, const char *s) static void _outList(StringInfo str, const List *node) { - const ListCell *lc; + const ListCell *lc; appendStringInfoChar(str, '('); diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c index 20aeb085d8..8a5e59526d 100644 --- a/src/backend/nodes/print.c +++ b/src/backend/nodes/print.c @@ -251,7 +251,7 @@ pretty_format_node_dump(const char *dump) void 
diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c
index 20aeb085d8..8a5e59526d 100644
--- a/src/backend/nodes/print.c
+++ b/src/backend/nodes/print.c
@@ -251,7 +251,7 @@ pretty_format_node_dump(const char *dump)
 void
 print_rt(const List *rtable)
 {
-	const ListCell *l;
+	const ListCell *l;
 	int			i = 1;
 
 	printf("resno\trefname \trelid\tinFromCl\n");
@@ -314,7 +314,7 @@ print_expr(const Node *expr, const List *rtable)
 
 	if (IsA(expr, Var))
 	{
-		const Var  *var = (const Var *) expr;
+		const Var  *var = (const Var *) expr;
 		char	   *relname,
 				   *attname;
 
@@ -348,7 +348,7 @@ print_expr(const Node *expr, const List *rtable)
 	}
 	else if (IsA(expr, Const))
 	{
-		const Const *c = (const Const *) expr;
+		const Const *c = (const Const *) expr;
 		Oid			typoutput;
 		bool		typIsVarlena;
 		char	   *outputstr;
@@ -368,7 +368,7 @@ print_expr(const Node *expr, const List *rtable)
 	}
 	else if (IsA(expr, OpExpr))
 	{
-		const OpExpr *e = (const OpExpr *) expr;
+		const OpExpr *e = (const OpExpr *) expr;
 		char	   *opname;
 
 		opname = get_opname(e->opno);
@@ -387,7 +387,7 @@ print_expr(const Node *expr, const List *rtable)
 	}
 	else if (IsA(expr, FuncExpr))
 	{
-		const FuncExpr *e = (const FuncExpr *) expr;
+		const FuncExpr *e = (const FuncExpr *) expr;
 		char	   *funcname;
 		ListCell   *l;
 
@@ -412,7 +412,7 @@ print_expr(const Node *expr, const List *rtable)
 void
 print_pathkeys(const List *pathkeys, const List *rtable)
 {
-	const ListCell *i;
+	const ListCell *i;
 
 	printf("(");
 	foreach(i, pathkeys)
@@ -452,7 +452,7 @@ print_pathkeys(const List *pathkeys, const List *rtable)
 void
 print_tl(const List *tlist, const List *rtable)
 {
-	const ListCell *tl;
+	const ListCell *tl;
 
 	printf("(\n");
 	foreach(tl, tlist)
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 7960793641..89ddf62d4d 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -49,7 +49,7 @@
 #define READ_TEMP_LOCALS()	\
 	char	   *token;		\
 	int			length;		\
-	(void) token				/* possibly unused */
+	(void) token				/* possibly unused */
 
 /* ... but most need both */
 #define READ_LOCALS(nodeTypeName) \
@@ -195,7 +195,7 @@ _readQuery(void)
 
 	READ_ENUM_FIELD(commandType, CmdType);
 	READ_ENUM_FIELD(querySource, QuerySource);
-	local_node->queryId = 0;	/* not saved in output format */
+	local_node->queryId = 0;	/* not saved in output format */
 	READ_BOOL_FIELD(canSetTag);
 	READ_NODE_FIELD(utilityStmt);
 	READ_INT_FIELD(resultRelation);
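[Annotation] The "(void) token" line that pgindent re-spaces here is worth a note: casting a variable to void is the classic C idiom for suppressing unused-variable warnings when a macro declares locals that not every call site uses. A minimal sketch of the same trick, with a hypothetical macro rather than the readfuncs.c original:

    #include <stdio.h>

    /*
     * Declare scratch locals for a reader function.  The (void) cast
     * "uses" buf so callers that never touch it draw no -Wunused warning.
     */
    #define DECLARE_SCRATCH()	\
        char buf[64];			\
        int  len = 0;			\
        (void) buf				/* possibly unused */

    static int read_length_only(void)
    {
        DECLARE_SCRATCH();      /* buf is unused here, yet compiles cleanly */

        len = 42;
        return len;
    }

    int main(void)
    {
        printf("%d\n", read_length_only());     /* prints: 42 */
        return 0;
    }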
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index 17dae0d1b9..728619e75d 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -956,7 +956,7 @@ tbm_lossify(TIDBitmap *tbm)
 	 *
 	 * Since we are called as soon as nentries exceeds maxentries, we should
 	 * push nentries down to significantly less than maxentries, or else we'll
-	 * just end up doing this again very soon.  We shoot for maxentries/2.
+	 * just end up doing this again very soon.	We shoot for maxentries/2.
 	 */
 	Assert(!tbm->iterating);
 	Assert(tbm->status == TBM_HASH);
@@ -992,14 +992,14 @@ tbm_lossify(TIDBitmap *tbm)
 	}
 
 	/*
-	 * With a big bitmap and small work_mem, it's possible that we cannot
-	 * get under maxentries.  Again, if that happens, we'd end up uselessly
+	 * With a big bitmap and small work_mem, it's possible that we cannot get
+	 * under maxentries.  Again, if that happens, we'd end up uselessly
 	 * calling tbm_lossify over and over.  To prevent this from becoming a
 	 * performance sink, force maxentries up to at least double the current
 	 * number of entries.  (In essence, we're admitting inability to fit
-	 * within work_mem when we do this.)  Note that this test will not fire
-	 * if we broke out of the loop early; and if we didn't, the current
-	 * number of entries is simply not reducible any further.
+	 * within work_mem when we do this.)  Note that this test will not fire if
+	 * we broke out of the loop early; and if we didn't, the current number of
+	 * entries is simply not reducible any further.
 	 */
 	if (tbm->nentries > tbm->maxentries / 2)
 		tbm->maxentries = Min(tbm->nentries, (INT_MAX - 1) / 2) * 2;
@@ -1011,8 +1011,8 @@
 static int
 tbm_comparator(const void *left, const void *right)
 {
-	BlockNumber l = (*((PagetableEntry * const *) left))->blockno;
-	BlockNumber r = (*((PagetableEntry * const *) right))->blockno;
+	BlockNumber l = (*((PagetableEntry *const *) left))->blockno;
+	BlockNumber r = (*((PagetableEntry *const *) right))->blockno;
 
 	if (l < r)
 		return -1;
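[Annotation] tbm_comparator, whose pointer-declarator spacing changes just above, is the textbook way to write a qsort comparator over unsigned keys: explicit comparisons rather than returning l - r, since subtraction can wrap for large unsigned block numbers. A standalone version of the same pattern, using plain uint32 keys instead of PagetableEntry pointers:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Compare two uint32 keys for qsort().  Comparisons instead of
     * subtraction avoid wraparound: 1u - 4000000000u is not negative.
     */
    static int u32_comparator(const void *left, const void *right)
    {
        uint32_t l = *(const uint32_t *) left;
        uint32_t r = *(const uint32_t *) right;

        if (l < r)
            return -1;
        if (l > r)
            return 1;
        return 0;
    }

    int main(void)
    {
        uint32_t blocks[] = {4000000000u, 7, 42};

        qsort(blocks, 3, sizeof(uint32_t), u32_comparator);
        for (int i = 0; i < 3; i++)
            printf("%u\n", (unsigned) blocks[i]);   /* 7, 42, 4000000000 */
        return 0;
    }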
diff --git a/src/backend/optimizer/geqo/geqo_selection.c b/src/backend/optimizer/geqo/geqo_selection.c
index be64576c2f..fbdcc5ff0c 100644
--- a/src/backend/optimizer/geqo/geqo_selection.c
+++ b/src/backend/optimizer/geqo/geqo_selection.c
@@ -65,8 +65,8 @@ geqo_selection(PlannerInfo *root, Chromosome *momma, Chromosome *daddy,
 	 * one, when we can't.
 	 *
 	 * This code was observed to hang up in an infinite loop when the
-	 * platform's implementation of erand48() was broken.  We now always
-	 * use our own version.
+	 * platform's implementation of erand48() was broken.  We now always use
+	 * our own version.
 	 */
 	if (pool->size > 1)
 	{
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 0563cae1d7..f02954982a 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -50,19 +50,19 @@ join_search_hook_type join_search_hook = NULL;
 static void set_base_rel_sizes(PlannerInfo *root);
 static void set_base_rel_pathlists(PlannerInfo *root);
 static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
-			 Index rti, RangeTblEntry *rte);
+			 Index rti, RangeTblEntry *rte);
 static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 				 Index rti, RangeTblEntry *rte);
 static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
-				   RangeTblEntry *rte);
+				   RangeTblEntry *rte);
 static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 					   RangeTblEntry *rte);
 static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
-				 RangeTblEntry *rte);
+				 RangeTblEntry *rte);
 static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
 					 RangeTblEntry *rte);
 static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
-					Index rti, RangeTblEntry *rte);
+					Index rti, RangeTblEntry *rte);
 static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 						Index rti, RangeTblEntry *rte);
 static void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
@@ -118,7 +118,7 @@ make_one_rel(PlannerInfo *root, List *joinlist)
 		if (brel == NULL)
 			continue;
 
-		Assert(brel->relid == rti);		/* sanity check on array */
+		Assert(brel->relid == rti);		/* sanity check on array */
 
 		/* ignore RTEs that are "other rels" */
 		if (brel->reloptkind != RELOPT_BASEREL)
@@ -211,7 +211,7 @@ set_base_rel_pathlists(PlannerInfo *root)
  */
 static void
 set_rel_size(PlannerInfo *root, RelOptInfo *rel,
-			 Index rti, RangeTblEntry *rte)
+			 Index rti, RangeTblEntry *rte)
 {
 	if (rel->reloptkind == RELOPT_BASEREL &&
 		relation_excluded_by_constraints(root, rel, rte))
@@ -251,6 +251,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
 			}
 			break;
 		case RTE_SUBQUERY:
+
 			/*
 			 * Subqueries don't support parameterized paths, so just go
 			 * ahead and build their paths immediately.
@@ -264,6 +265,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
 			set_values_size_estimates(root, rel);
 			break;
 		case RTE_CTE:
+
 			/*
 			 * CTEs don't support parameterized paths, so just go ahead
 			 * and build their paths immediately.
@@ -574,8 +576,8 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
 
 		/*
 		 * It is possible that constraint exclusion detected a contradiction
-		 * within a child subquery, even though we didn't prove one above.
-		 * If so, we can skip this child.
+		 * within a child subquery, even though we didn't prove one above.  If
+		 * so, we can skip this child.
 		 */
 		if (IS_DUMMY_REL(childrel))
 			continue;
@@ -590,7 +592,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
 
 			/*
 			 * Accumulate per-column estimates too.  We need not do anything
-			 * for PlaceHolderVars in the parent list.	If child expression
+			 * for PlaceHolderVars in the parent list.  If child expression
 			 * isn't a Var, or we didn't record a width estimate for it, we
 			 * have to fall back on a datatype-based estimate.
 			 *
@@ -609,7 +611,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
 
 				if (IsA(childvar, Var))
 				{
-					int			cndx = ((Var *) childvar)->varattno - childrel->min_attr;
+					int			cndx = ((Var *) childvar)->varattno - childrel->min_attr;
 
 					child_width = childrel->attr_widths[cndx];
 				}
@@ -664,7 +666,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 
 	/*
 	 * Generate access paths for each member relation, and remember the
-	 * cheapest path for each one.	Also, identify all pathkeys (orderings)
+	 * cheapest path for each one.  Also, identify all pathkeys (orderings)
 	 * and parameterizations (required_outer sets) available for the member
 	 * relations.
 	 */
@@ -708,7 +710,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 
 		/*
 		 * Collect lists of all the available path orderings and
-		 * parameterizations for all the children.  We use these as a
+		 * parameterizations for all the children.	We use these as a
 		 * heuristic to indicate which sort orderings and parameterizations we
 		 * should build Append and MergeAppend paths for.
 		 */
@@ -753,7 +755,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 				/* Have we already seen this param set? */
 				foreach(lco, all_child_outers)
 				{
-					Relids		existing_outers = (Relids) lfirst(lco);
+					Relids		existing_outers = (Relids) lfirst(lco);
 
 					if (bms_equal(existing_outers, childouter))
 					{
@@ -791,7 +793,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	 * so that not that many cases actually get considered here.)
 	 *
 	 * The Append node itself cannot enforce quals, so all qual checking must
-	 * be done in the child paths.	This means that to have a parameterized
+	 * be done in the child paths.  This means that to have a parameterized
 	 * Append path, we must have the exact same parameterization for each
 	 * child path; otherwise some children might be failing to check the
 	 * moved-down quals.  To make them match up, we can try to increase the
@@ -799,7 +801,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	 */
 	foreach(l, all_child_outers)
 	{
-		Relids		required_outer = (Relids) lfirst(l);
+		Relids		required_outer = (Relids) lfirst(l);
 		bool		ok = true;
 		ListCell   *lcr;
 
@@ -1115,9 +1117,9 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	rel->subroot = subroot;
 
 	/*
-	 * It's possible that constraint exclusion proved the subquery empty.
-	 * If so, it's convenient to turn it back into a dummy path so that we
-	 * will recognize appropriate optimizations at this level.
+	 * It's possible that constraint exclusion proved the subquery empty.  If
+	 * so, it's convenient to turn it back into a dummy path so that we will
+	 * recognize appropriate optimizations at this level.
 	 */
 	if (is_dummy_plan(rel->subplan))
 	{
@@ -1639,7 +1641,7 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
 
 	/*
 	 * It would be unsafe to push down window function calls, but at least for
-	 * the moment we could never see any in a qual anyhow.	(The same applies
+	 * the moment we could never see any in a qual anyhow.  (The same applies
 	 * to aggregates, which we check for in pull_var_clause below.)
 	 */
 	Assert(!contain_window_function(qual));
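[Annotation] The parameterized-Append logic reindented above hinges on one invariant: because the Append node itself cannot check quals, a parameterized Append path is only legal when every child path carries exactly the same required_outer set. A toy standalone sketch of that "all children share one parameterization" test, with a uint64 bitmask standing in for PostgreSQL's Relids (a deliberate simplification):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One candidate path per child; the mask is its required outer rels. */
    typedef struct ChildPath
    {
        const char *relname;
        uint64_t    required_outer;     /* bit i set => depends on rel i */
    } ChildPath;

    /*
     * An Append over these children may use parameterization 'outer'
     * only if every child has exactly that required_outer set.
     */
    static bool append_param_ok(const ChildPath *children, int n,
                                uint64_t outer)
    {
        for (int i = 0; i < n; i++)
            if (children[i].required_outer != outer)
                return false;
        return true;
    }

    int main(void)
    {
        ChildPath kids[] = {{"child1", 0x2}, {"child2", 0x2}, {"child3", 0x6}};

        printf("%d\n", append_param_ok(kids, 2, 0x2));  /* 1: first two match */
        printf("%d\n", append_param_ok(kids, 3, 0x2));  /* 0: child3 differs */
        return 0;
    }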
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index e45bc121e4..480c1b7425 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -432,7 +432,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
 	 * qual clauses that we have to evaluate as qpquals.  We approximate that
 	 * list as allclauses minus any clauses appearing in indexquals.  (We
 	 * assume that pointer equality is enough to recognize duplicate
-	 * RestrictInfos.)	This method neglects some considerations such as
+	 * RestrictInfos.)  This method neglects some considerations such as
 	 * clauses that needn't be checked because they are implied by a partial
 	 * index's predicate.  It does not seem worth the cycles to try to factor
 	 * those things in at this stage, even though createplan.c will take pains
@@ -3135,7 +3135,7 @@ get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
 * innerrel: inner relation under consideration
 * jointype: must be JOIN_SEMI or JOIN_ANTI
 * sjinfo: SpecialJoinInfo relevant to this join
- * restrictlist: join quals
+ * restrictlist: join quals
 * Output parameters:
 *	*semifactors is filled in (see relation.h for field definitions)
 */
@@ -3221,8 +3221,8 @@ compute_semi_anti_join_factors(PlannerInfo *root,
 	 *
 	 * Note: it is correct to use the inner rel's "rows" count here, even
 	 * though we might later be considering a parameterized inner path with
-	 * fewer rows.	This is because we have included all the join clauses
-	 * in the selectivity estimate.
+	 * fewer rows.  This is because we have included all the join clauses in
+	 * the selectivity estimate.
 	 */
 	if (jselec > 0)				/* protect against zero divide */
 	{
@@ -3271,17 +3271,18 @@ has_indexed_join_quals(NestPath *joinpath)
 			indexclauses = ((IndexPath *) innerpath)->indexclauses;
 			break;
 		case T_BitmapHeapScan:
-		{
-			/* Accept only a simple bitmap scan, not AND/OR cases */
-			Path	   *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
-
-			if (IsA(bmqual, IndexPath))
-				indexclauses = ((IndexPath *) bmqual)->indexclauses;
-			else
-				return false;
-			break;
-		}
+			{
+				/* Accept only a simple bitmap scan, not AND/OR cases */
+				Path	   *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
+
+				if (IsA(bmqual, IndexPath))
+					indexclauses = ((IndexPath *) bmqual)->indexclauses;
+				else
+					return false;
+				break;
+			}
 		default:
+
 			/*
 			 * If it's not a simple indexscan, it probably doesn't run quickly
 			 * for zero rows out, even if it's a parameterized path using all
@@ -3293,8 +3294,8 @@ has_indexed_join_quals(NestPath *joinpath)
 	/*
 	 * Examine the inner path's param clauses.  Any that are from the outer
 	 * path must be found in the indexclauses list, either exactly or in an
-	 * equivalent form generated by equivclass.c.  Also, we must find at
-	 * least one such clause, else it's a clauseless join which isn't fast.
+	 * equivalent form generated by equivclass.c.  Also, we must find at least
+	 * one such clause, else it's a clauseless join which isn't fast.
 	 */
 	found_one = false;
 	foreach(lc, innerpath->param_info->ppi_clauses)
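[Annotation] has_indexed_join_quals ends with a classic two-condition scan: every outer-supplied clause must be enforced by the index, and at least one such clause must exist. A standalone sketch of that found_one pattern over string arrays (hypothetical stand-ins for the RestrictInfo lists):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool in_list(const char *needle, const char *const *xs, int n)
    {
        for (int i = 0; i < n; i++)
            if (strcmp(needle, xs[i]) == 0)
                return true;
        return false;
    }

    /*
     * True only if every param clause appears among the index clauses
     * AND there is at least one param clause (else: clauseless join).
     */
    static bool all_params_indexed(const char *const *params, int nparams,
                                   const char *const *indexed, int nindexed)
    {
        bool found_one = false;

        for (int i = 0; i < nparams; i++)
        {
            if (!in_list(params[i], indexed, nindexed))
                return false;   /* an unenforced clause disqualifies */
            found_one = true;
        }
        return found_one;
    }

    int main(void)
    {
        const char *const idx[] = {"a = b", "c = d"};
        const char *const p1[] = {"a = b"};
        const char *const p2[] = {"a = b", "x = y"};

        printf("%d\n", all_params_indexed(p1, 1, idx, 2));  /* 1 */
        printf("%d\n", all_params_indexed(p2, 2, idx, 2));  /* 0 */
        printf("%d\n", all_params_indexed(p1, 0, idx, 2));  /* 0: none found */
        return 0;
    }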
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index bb196b8f2a..e34b9553bd 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -494,11 +494,11 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
 *
 * If rel is not NULL, it identifies a specific relation we're considering
 * a path for, and indicates that child EC members for that relation can be
- * considered.	Otherwise child members are ignored.  (Note: since child EC
+ * considered.  Otherwise child members are ignored.  (Note: since child EC
 * members aren't guaranteed unique, a non-NULL value means that there could
 * be more than one EC that matches the expression; if so it's order-dependent
 * which one you get.  This is annoying but it only happens in corner cases,
- * so for now we live with just reporting the first match.	See also
+ * so for now we live with just reporting the first match.  See also
 * generate_implied_equalities_for_indexcol and match_pathkeys_to_index.)
 *
 * If create_it is TRUE, we'll build a new EquivalenceClass when there is no
@@ -922,8 +922,8 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
 * built any join RelOptInfos.
 *
 * An annoying special case for parameterized scans is that the inner rel can
- * be an appendrel child (an "other rel").	In this case we must generate
- * appropriate clauses using child EC members.	add_child_rel_equivalences
+ * be an appendrel child (an "other rel").  In this case we must generate
+ * appropriate clauses using child EC members.  add_child_rel_equivalences
 * must already have been done for the child rel.
 *
 * The results are sufficient for use in merge, hash, and plain nestloop join
@@ -1002,9 +1002,9 @@ generate_join_implied_equalities(PlannerInfo *root,
 		if (ec->ec_broken)
 			sublist = generate_join_implied_equalities_broken(root,
 															  ec,
-													 nominal_join_relids,
+														  nominal_join_relids,
 															  outer_relids,
-													nominal_inner_relids,
+														  nominal_inner_relids,
 															  inner_appinfo);
 
 		result = list_concat(result, sublist);
@@ -1217,9 +1217,9 @@ generate_join_implied_equalities_broken(PlannerInfo *root,
 	/*
 	 * If we have to translate, just brute-force apply adjust_appendrel_attrs
 	 * to all the RestrictInfos at once.  This will result in returning
-	 * RestrictInfos that are not listed in ec_derives, but there shouldn't
-	 * be any duplication, and it's a sufficiently narrow corner case that
-	 * we shouldn't sweat too much over it anyway.
+	 * RestrictInfos that are not listed in ec_derives, but there shouldn't be
+	 * any duplication, and it's a sufficiently narrow corner case that we
+	 * shouldn't sweat too much over it anyway.
 	 */
 	if (inner_appinfo)
 		result = (List *) adjust_appendrel_attrs(root, (Node *) result,
@@ -1966,7 +1966,7 @@ mutate_eclass_expressions(PlannerInfo *root,
 * is a redundant list of clauses equating the index column to each of
 * the other-relation values it is known to be equal to.  Any one of
 * these clauses can be used to create a parameterized indexscan, and there
- * is no value in using more than one.	(But it *is* worthwhile to create
+ * is no value in using more than one.  (But it *is* worthwhile to create
 * a separate parameterized path for each one, since that leads to different
 * join orders.)
 */
@@ -2014,7 +2014,7 @@ generate_implied_equalities_for_indexcol(PlannerInfo *root,
 			 * the target relation.  (Unlike regular members, the same expression
 			 * could be a child member of more than one EC.  Therefore, it's
 			 * potentially order-dependent which EC a child relation's index
-			 * column gets matched to.	This is annoying but it only happens in
+			 * column gets matched to.  This is annoying but it only happens in
 			 * corner cases, so for now we live with just reporting the first
 			 * match.  See also get_eclass_for_sort_expr.)
 			 */
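[Annotation] Equivalence classes are what let the planner turn a = b and b = c into the derived clause a = c, which is the machinery these equivclass.c comments describe. A compact standalone illustration of that inference using union-find, a generic technique rather than PostgreSQL's more elaborate EquivalenceClass representation:

    #include <stdio.h>

    #define NVARS 4     /* variables 0..3 stand in for a, b, c, d */

    static int parent[NVARS];

    static int find(int x)
    {
        while (parent[x] != x)
            x = parent[x] = parent[parent[x]];  /* path halving */
        return x;
    }

    static void merge_equal(int a, int b)
    {
        parent[find(a)] = find(b);
    }

    int main(void)
    {
        for (int i = 0; i < NVARS; i++)
            parent[i] = i;

        merge_equal(0, 1);      /* a = b */
        merge_equal(1, 2);      /* b = c */

        /* a = c is now implied: both sit in the same class */
        printf("a == c implied: %d\n", find(0) == find(2));     /* 1 */
        printf("a == d implied: %d\n", find(0) == find(3));     /* 0 */
        return 0;
    }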
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 05530054e1..2e8ccd0578 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -103,12 +103,12 @@ static List *build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
 				   List *clauses, List *other_clauses);
 static List *drop_indexable_join_clauses(RelOptInfo *rel, List *clauses);
 static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
-				  List *paths);
+				  List *paths);
 static int	path_usage_comparator(const void *a, const void *b);
 static Cost bitmap_scan_cost_est(PlannerInfo *root, RelOptInfo *rel,
-					 Path *ipath);
+					 Path *ipath);
 static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel,
-					List *paths);
+					List *paths);
 static PathClauseUsage *classify_index_clause_usage(Path *path,
 							List **clauselist);
 static Relids get_bitmap_tree_required_outer(Path *bitmapqual);
@@ -117,15 +117,15 @@ static int	find_list_position(Node *node, List **nodelist);
 static bool check_index_only(RelOptInfo *rel, IndexOptInfo *index);
 static double get_loop_count(PlannerInfo *root, Relids outer_relids);
 static void match_restriction_clauses_to_index(RelOptInfo *rel,
-								   IndexOptInfo *index,
-								   IndexClauseSet *clauseset);
+									IndexOptInfo *index,
+									IndexClauseSet *clauseset);
 static void match_join_clauses_to_index(PlannerInfo *root,
 							RelOptInfo *rel, IndexOptInfo *index,
 							IndexClauseSet *clauseset,
 							List **joinorclauses);
 static void match_eclass_clauses_to_index(PlannerInfo *root,
-							  IndexOptInfo *index,
-							  IndexClauseSet *clauseset);
+							   IndexOptInfo *index,
+							   IndexClauseSet *clauseset);
 static void match_clauses_to_index(IndexOptInfo *index,
 					   List *clauses,
 					   IndexClauseSet *clauseset);
@@ -237,7 +237,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
 		match_restriction_clauses_to_index(rel, index, &rclauseset);
 
 		/*
-		 * Build index paths from the restriction clauses.	These will be
+		 * Build index paths from the restriction clauses.  These will be
 		 * non-parameterized paths.  Plain paths go directly to add_path(),
 		 * bitmap paths are added to bitindexpaths to be handled below.
 		 */
@@ -245,25 +245,25 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
 						&bitindexpaths);
 
 		/*
-		 * Identify the join clauses that can match the index.	For the moment
-		 * we keep them separate from the restriction clauses.	Note that
-		 * this finds only "loose" join clauses that have not been merged
-		 * into EquivalenceClasses.  Also, collect join OR clauses for later.
+		 * Identify the join clauses that can match the index.  For the moment
+		 * we keep them separate from the restriction clauses.  Note that this
+		 * finds only "loose" join clauses that have not been merged into
+		 * EquivalenceClasses.  Also, collect join OR clauses for later.
 		 */
 		MemSet(&jclauseset, 0, sizeof(jclauseset));
 		match_join_clauses_to_index(root, rel, index,
 									&jclauseset, &joinorclauses);
 
 		/*
-		 * Look for EquivalenceClasses that can generate joinclauses
-		 * matching the index.
+		 * Look for EquivalenceClasses that can generate joinclauses matching
+		 * the index.
 		 */
 		MemSet(&eclauseset, 0, sizeof(eclauseset));
 		match_eclass_clauses_to_index(root, index, &eclauseset);
 
 		/*
-		 * If we found any plain or eclass join clauses, decide what to
-		 * do with 'em.
+		 * If we found any plain or eclass join clauses, decide what to do
+		 * with 'em.
 		 */
 		if (jclauseset.nonempty || eclauseset.nonempty)
 			consider_index_join_clauses(root, rel, index,
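[Annotation] The create_index_paths loop above is essentially clause routing: each clause that constrains an index column is filed under that column, and the per-column sets then drive path generation. A stripped-down standalone sketch of filing clauses by index column, with a hypothetical fixed-size stand-in for IndexClauseSet:

    #include <stdio.h>

    #define MAX_COLS 3

    /* A toy IndexClauseSet: clause ids filed per index column. */
    typedef struct ClauseSet
    {
        int nclauses[MAX_COLS];
        int clauses[MAX_COLS][8];
        int nonempty;
    } ClauseSet;

    typedef struct Clause
    {
        int id;
        int indexcol;   /* which index column it constrains, or -1 */
    } Clause;

    static void match_clauses(const Clause *cl, int n, ClauseSet *set)
    {
        for (int i = 0; i < n; i++)
        {
            int col = cl[i].indexcol;

            if (col < 0 || col >= MAX_COLS)
                continue;       /* clause doesn't match this index */
            set->clauses[col][set->nclauses[col]++] = cl[i].id;
            set->nonempty = 1;
        }
    }

    int main(void)
    {
        Clause quals[] = {{1, 0}, {2, -1}, {3, 0}, {4, 2}};
        ClauseSet set = {{0}, {{0}}, 0};

        match_clauses(quals, 4, &set);
        for (int c = 0; c < MAX_COLS; c++)
            printf("col %d: %d clause(s)\n", c, set.nclauses[c]);
        return 0;
    }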
@@ -287,7 +287,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
 	 * the joinclause list.  Add these to bitjoinpaths.
 	 */
 	indexpaths = generate_bitmap_or_paths(root, rel,
-										joinorclauses, rel->baserestrictinfo,
+									  joinorclauses, rel->baserestrictinfo,
 										  false);
 	bitjoinpaths = list_concat(bitjoinpaths, indexpaths);
 
@@ -313,7 +313,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
 	 * the most promising combination of join bitmap index paths.  Note there
 	 * will be only one such path no matter how many join clauses are
 	 * available.  (XXX is that good enough, or do we need to consider even
-	 * more paths for different subsets of possible join partners?	Also,
+	 * more paths for different subsets of possible join partners?  Also,
 	 * should we add in restriction bitmap paths as well?)
 	 */
 	if (bitjoinpaths != NIL)
@@ -366,19 +366,19 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
 	 * We can always include any restriction clauses in the index clauses.
 	 * However, it's not obvious which subsets of the join clauses are worth
 	 * generating paths from, and it's unlikely that considering every
-	 * possible subset is worth the cycles.  Our current heuristic is based
-	 * on the index columns, with the idea that later index columns are less
+	 * possible subset is worth the cycles.  Our current heuristic is based on
+	 * the index columns, with the idea that later index columns are less
 	 * useful than earlier ones; therefore it's unlikely to be worth trying
 	 * combinations that would remove a clause from an earlier index column
-	 * while adding one to a later column.	Also, we know that all the
-	 * eclass clauses for a particular column are redundant, so we should
-	 * use only one of them.  However, eclass clauses will always represent
-	 * equality which is the strongest type of index constraint, so those
-	 * are high-value and we should try every available combination when we
-	 * have eclass clauses for more than one column.  Furthermore, it's
-	 * unlikely to be useful to combine an eclass clause with non-eclass
-	 * clauses for the same index column.  These considerations lead to the
-	 * following heuristics:
+	 * while adding one to a later column.  Also, we know that all the eclass
+	 * clauses for a particular column are redundant, so we should use only
+	 * one of them.  However, eclass clauses will always represent equality
+	 * which is the strongest type of index constraint, so those are
+	 * high-value and we should try every available combination when we have
+	 * eclass clauses for more than one column.  Furthermore, it's unlikely to
+	 * be useful to combine an eclass clause with non-eclass clauses for the
+	 * same index column.  These considerations lead to the following
+	 * heuristics:
 	 *
 	 * First, start with the restriction clauses, and add on all simple join
 	 * clauses for column 1.  If there are any such join clauses, generate
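[Annotation] The heuristic spelled out in the comment above boils down to: within one index column, eclass clauses are interchangeable, so pick exactly one per column, but do try every cross-column combination. That is a small cartesian-product enumeration; a standalone sketch with toy clause labels rather than the planner's data structures:

    #include <stdio.h>

    /* Two index columns, each with its set of redundant eclass clauses. */
    static const char *const col1_ec[] = {"a = x", "a = y"};
    static const char *const col2_ec[] = {"b = u", "b = v", "b = w"};

    int main(void)
    {
        /*
         * One clause per column, every cross-column combination:
         * 2 * 3 = 6 candidate clause sets, each worth costing once.
         */
        for (int i = 0; i < 2; i++)
            for (int j = 0; j < 3; j++)
                printf("try {%s, %s}\n", col1_ec[i], col2_ec[j]);
        return 0;
    }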
@@ -387,7 +387,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
 	 * any other clauses we have for column 1.
 	 *
 	 * Next, add on all simple join clauses for column 2.  If there are any
-	 * such join clauses, generate paths with this collection.  If there are
+	 * such join clauses, generate paths with this collection.	If there are
 	 * eclass clauses for columns 1 or 2, generate paths with each such clause
 	 * replacing other clauses for its index column, including cases where we
 	 * use restriction or simple join clauses for one column and an eclass
@@ -519,7 +519,7 @@ expand_eclass_clause_combinations(PlannerInfo *root,