diff options
author | Bruce Momjian | 2011-04-10 15:42:00 +0000 |
---|---|---|
committer | Bruce Momjian | 2011-04-10 15:42:00 +0000 |
commit | bf50caf105a901c4f83ac1df3cdaf910c26694a4 (patch) | |
tree | dac42d7795070f107eefb085c500f86a4d35f92f | |
parent | 9a8b73147c07e02e10e0d0a34aa99d72e3336fb2 (diff) |
pgindent run before PG 9.1 beta 1.
446 files changed, 5742 insertions, 5263 deletions
diff --git a/contrib/adminpack/adminpack.c b/contrib/adminpack/adminpack.c index c149dd6c63..99fa02e813 100644 --- a/contrib/adminpack/adminpack.c +++ b/contrib/adminpack/adminpack.c @@ -78,18 +78,19 @@ convert_and_check_filename(text *arg, bool logAllowed) /* Disallow '/a/b/data/..' */ if (path_contains_parent_reference(filename)) ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("reference to parent directory (\"..\") not allowed")))); + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("reference to parent directory (\"..\") not allowed")))); + /* - * Allow absolute paths if within DataDir or Log_directory, even - * though Log_directory might be outside DataDir. + * Allow absolute paths if within DataDir or Log_directory, even + * though Log_directory might be outside DataDir. */ if (!path_is_prefix_of_path(DataDir, filename) && (!logAllowed || !is_absolute_path(Log_directory) || !path_is_prefix_of_path(Log_directory, filename))) ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("absolute path not allowed")))); + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("absolute path not allowed")))); } else if (!path_is_relative_and_below_cwd(filename)) ereport(ERROR, diff --git a/contrib/auth_delay/auth_delay.c b/contrib/auth_delay/auth_delay.c index ca388c4498..4e0d5959d1 100644 --- a/contrib/auth_delay/auth_delay.c +++ b/contrib/auth_delay/auth_delay.c @@ -18,13 +18,13 @@ PG_MODULE_MAGIC; -void _PG_init(void); +void _PG_init(void); /* GUC Variables */ static int auth_delay_milliseconds; /* Original Hook */ -static ClientAuthentication_hook_type original_client_auth_hook = NULL; +static ClientAuthentication_hook_type original_client_auth_hook = NULL; /* * Check authentication @@ -55,7 +55,7 @@ _PG_init(void) { /* Define custom GUC variables */ DefineCustomIntVariable("auth_delay.milliseconds", - "Milliseconds to delay before reporting authentication failure", + "Milliseconds to delay before reporting authentication failure", NULL, &auth_delay_milliseconds, 0, diff --git a/contrib/btree_gist/btree_cash.c b/contrib/btree_gist/btree_cash.c index 7938a70f17..2664a26870 100644 --- a/contrib/btree_gist/btree_cash.c +++ b/contrib/btree_gist/btree_cash.c @@ -169,7 +169,7 @@ gbt_cash_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_date.c b/contrib/btree_gist/btree_date.c index ccd7e2ad3f..8a675e2f1d 100644 --- a/contrib/btree_gist/btree_date.c +++ b/contrib/btree_gist/btree_date.c @@ -90,9 +90,9 @@ static float8 gdb_date_dist(const void *a, const void *b) { /* we assume the difference can't overflow */ - Datum diff = DirectFunctionCall2(date_mi, + Datum diff = DirectFunctionCall2(date_mi, DateADTGetDatum(*((const DateADT *) a)), - DateADTGetDatum(*((const DateADT *) b))); + DateADTGetDatum(*((const DateADT *) b))); return (float8) Abs(DatumGetInt32(diff)); } @@ -113,14 +113,14 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(date_dist); -Datum date_dist(PG_FUNCTION_ARGS); +Datum date_dist(PG_FUNCTION_ARGS); Datum date_dist(PG_FUNCTION_ARGS) { /* we assume the difference can't overflow */ - Datum diff = DirectFunctionCall2(date_mi, - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1)); + Datum diff = DirectFunctionCall2(date_mi, + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1)); PG_RETURN_INT32(Abs(DatumGetInt32(diff))); } @@ -181,7 +181,7 @@ 
gbt_date_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_float4.c b/contrib/btree_gist/btree_float4.c index 932a941f88..266256b23c 100644 --- a/contrib/btree_gist/btree_float4.c +++ b/contrib/btree_gist/btree_float4.c @@ -94,18 +94,18 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(float4_dist); -Datum float4_dist(PG_FUNCTION_ARGS); +Datum float4_dist(PG_FUNCTION_ARGS); Datum float4_dist(PG_FUNCTION_ARGS) { - float4 a = PG_GETARG_FLOAT4(0); + float4 a = PG_GETARG_FLOAT4(0); float4 b = PG_GETARG_FLOAT4(1); float4 r; r = a - b; CHECKFLOATVAL(r, isinf(a) || isinf(b), true); - PG_RETURN_FLOAT4( Abs(r) ); + PG_RETURN_FLOAT4(Abs(r)); } @@ -162,7 +162,7 @@ gbt_float4_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_float8.c b/contrib/btree_gist/btree_float8.c index 0c39980ba1..efbee0f3e4 100644 --- a/contrib/btree_gist/btree_float8.c +++ b/contrib/btree_gist/btree_float8.c @@ -76,8 +76,8 @@ gbt_float8key_cmp(const void *a, const void *b) static float8 gbt_float8_dist(const void *a, const void *b) { - float8 arg1 = *(const float8 *)a; - float8 arg2 = *(const float8 *)b; + float8 arg1 = *(const float8 *) a; + float8 arg2 = *(const float8 *) b; float8 r; r = arg1 - arg2; @@ -102,7 +102,7 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(float8_dist); -Datum float8_dist(PG_FUNCTION_ARGS); +Datum float8_dist(PG_FUNCTION_ARGS); Datum float8_dist(PG_FUNCTION_ARGS) { @@ -113,7 +113,7 @@ float8_dist(PG_FUNCTION_ARGS) r = a - b; CHECKFLOATVAL(r, isinf(a) || isinf(b), true); - PG_RETURN_FLOAT8( Abs(r) ); + PG_RETURN_FLOAT8(Abs(r)); } /************************************************** @@ -169,7 +169,7 @@ gbt_float8_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_int2.c b/contrib/btree_gist/btree_int2.c index c06d170a5e..7841145b53 100644 --- a/contrib/btree_gist/btree_int2.c +++ b/contrib/btree_gist/btree_int2.c @@ -94,12 +94,12 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(int2_dist); -Datum int2_dist(PG_FUNCTION_ARGS); +Datum int2_dist(PG_FUNCTION_ARGS); Datum int2_dist(PG_FUNCTION_ARGS) { - int2 a = PG_GETARG_INT16(0); - int2 b = PG_GETARG_INT16(1); + int2 a = PG_GETARG_INT16(0); + int2 b = PG_GETARG_INT16(1); int2 r; int2 ra; @@ -169,7 +169,7 @@ gbt_int2_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_int4.c b/contrib/btree_gist/btree_int4.c index ef7af524e7..0e4b4f85b0 100644 --- a/contrib/btree_gist/btree_int4.c +++ b/contrib/btree_gist/btree_int4.c @@ -95,14 +95,14 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(int4_dist); -Datum int4_dist(PG_FUNCTION_ARGS); +Datum int4_dist(PG_FUNCTION_ARGS); Datum int4_dist(PG_FUNCTION_ARGS) { - int4 a = PG_GETARG_INT32(0); - int4 b = PG_GETARG_INT32(1); - int4 r; - int4 ra; + int4 
a = PG_GETARG_INT32(0); + int4 b = PG_GETARG_INT32(1); + int4 r; + int4 ra; r = a - b; ra = Abs(r); @@ -111,7 +111,7 @@ int4_dist(PG_FUNCTION_ARGS) if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a))) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("integer out of range"))); + errmsg("integer out of range"))); PG_RETURN_INT32(ra); } @@ -170,7 +170,7 @@ gbt_int4_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_int8.c b/contrib/btree_gist/btree_int8.c index 1f14d82891..d54113d393 100644 --- a/contrib/btree_gist/btree_int8.c +++ b/contrib/btree_gist/btree_int8.c @@ -95,14 +95,14 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(int8_dist); -Datum int8_dist(PG_FUNCTION_ARGS); +Datum int8_dist(PG_FUNCTION_ARGS); Datum int8_dist(PG_FUNCTION_ARGS) { - int64 a = PG_GETARG_INT64(0); - int64 b = PG_GETARG_INT64(1); - int64 r; - int64 ra; + int64 a = PG_GETARG_INT64(0); + int64 b = PG_GETARG_INT64(1); + int64 r; + int64 ra; r = a - b; ra = Abs(r); @@ -111,7 +111,7 @@ int8_dist(PG_FUNCTION_ARGS) if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a))) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); + errmsg("bigint out of range"))); PG_RETURN_INT64(ra); } @@ -170,7 +170,7 @@ gbt_int8_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c index 5195284afa..137a5fcd7f 100644 --- a/contrib/btree_gist/btree_interval.c +++ b/contrib/btree_gist/btree_interval.c @@ -88,7 +88,7 @@ intr2num(const Interval *i) static float8 gbt_intv_dist(const void *a, const void *b) { - return (float8)Abs(intr2num((Interval*)a) - intr2num((Interval*)b)); + return (float8) Abs(intr2num((Interval *) a) - intr2num((Interval *) b)); } /* @@ -127,7 +127,7 @@ abs_interval(Interval *a) } PG_FUNCTION_INFO_V1(interval_dist); -Datum interval_dist(PG_FUNCTION_ARGS); +Datum interval_dist(PG_FUNCTION_ARGS); Datum interval_dist(PG_FUNCTION_ARGS) { @@ -240,7 +240,7 @@ gbt_intv_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_oid.c b/contrib/btree_gist/btree_oid.c index c81dd31799..3b0929b42b 100644 --- a/contrib/btree_gist/btree_oid.c +++ b/contrib/btree_gist/btree_oid.c @@ -101,13 +101,13 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(oid_dist); -Datum oid_dist(PG_FUNCTION_ARGS); +Datum oid_dist(PG_FUNCTION_ARGS); Datum oid_dist(PG_FUNCTION_ARGS) { - Oid a = PG_GETARG_OID(0); - Oid b = PG_GETARG_OID(1); - Oid res; + Oid a = PG_GETARG_OID(0); + Oid b = PG_GETARG_OID(1); + Oid res; if (a < b) res = b - a; @@ -170,7 +170,7 @@ gbt_oid_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_time.c b/contrib/btree_gist/btree_time.c index 44f6923409..e9cfe33f45 100644 --- 
a/contrib/btree_gist/btree_time.c +++ b/contrib/btree_gist/btree_time.c @@ -119,7 +119,7 @@ gbt_time_dist(const void *a, const void *b) { const TimeADT *aa = (const TimeADT *) a; const TimeADT *bb = (const TimeADT *) b; - Interval *i; + Interval *i; i = DatumGetIntervalP(DirectFunctionCall2(time_mi_time, TimeADTGetDatumFast(*aa), @@ -143,7 +143,7 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(time_dist); -Datum time_dist(PG_FUNCTION_ARGS); +Datum time_dist(PG_FUNCTION_ARGS); Datum time_dist(PG_FUNCTION_ARGS) { @@ -239,7 +239,7 @@ gbt_time_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c index 9a0ec07a1e..9d3a5919a0 100644 --- a/contrib/btree_gist/btree_ts.c +++ b/contrib/btree_gist/btree_ts.c @@ -120,7 +120,7 @@ gbt_ts_dist(const void *a, const void *b) { const Timestamp *aa = (const Timestamp *) a; const Timestamp *bb = (const Timestamp *) b; - Interval *i; + Interval *i; if (TIMESTAMP_NOT_FINITE(*aa) || TIMESTAMP_NOT_FINITE(*bb)) return get_float8_infinity(); @@ -147,17 +147,17 @@ static const gbtree_ninfo tinfo = PG_FUNCTION_INFO_V1(ts_dist); -Datum ts_dist(PG_FUNCTION_ARGS); +Datum ts_dist(PG_FUNCTION_ARGS); Datum ts_dist(PG_FUNCTION_ARGS) { Timestamp a = PG_GETARG_TIMESTAMP(0); Timestamp b = PG_GETARG_TIMESTAMP(1); - Interval *r; + Interval *r; if (TIMESTAMP_NOT_FINITE(a) || TIMESTAMP_NOT_FINITE(b)) { - Interval *p = palloc(sizeof(Interval)); + Interval *p = palloc(sizeof(Interval)); p->day = INT_MAX; p->month = INT_MAX; @@ -169,25 +169,24 @@ ts_dist(PG_FUNCTION_ARGS) PG_RETURN_INTERVAL_P(p); } else - - r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi, - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1))); - PG_RETURN_INTERVAL_P( abs_interval(r) ); + r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi, + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1))); + PG_RETURN_INTERVAL_P(abs_interval(r)); } PG_FUNCTION_INFO_V1(tstz_dist); -Datum tstz_dist(PG_FUNCTION_ARGS); +Datum tstz_dist(PG_FUNCTION_ARGS); Datum tstz_dist(PG_FUNCTION_ARGS) { - TimestampTz a = PG_GETARG_TIMESTAMPTZ(0); - TimestampTz b = PG_GETARG_TIMESTAMPTZ(1); - Interval *r; + TimestampTz a = PG_GETARG_TIMESTAMPTZ(0); + TimestampTz b = PG_GETARG_TIMESTAMPTZ(1); + Interval *r; if (TIMESTAMP_NOT_FINITE(a) || TIMESTAMP_NOT_FINITE(b)) { - Interval *p = palloc(sizeof(Interval)); + Interval *p = palloc(sizeof(Interval)); p->day = INT_MAX; p->month = INT_MAX; @@ -202,7 +201,7 @@ tstz_dist(PG_FUNCTION_ARGS) r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi, PG_GETARG_DATUM(0), PG_GETARG_DATUM(1))); - PG_RETURN_INTERVAL_P( abs_interval(r) ); + PG_RETURN_INTERVAL_P(abs_interval(r)); } @@ -309,7 +308,7 @@ gbt_ts_distance(PG_FUNCTION_ARGS) key.upper = (GBT_NUMKEY *) &kkk->upper; PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo) ); } @@ -354,7 +353,7 @@ gbt_tstz_distance(PG_FUNCTION_ARGS) qqq = tstz_to_ts_gmt(query); PG_RETURN_FLOAT8( - gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo) + gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo) ); } diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c index 17440a191b..64c95854df 100644 --- a/contrib/btree_gist/btree_utils_num.c +++ b/contrib/btree_gist/btree_utils_num.c @@ 
-223,8 +223,8 @@ gbt_num_consistent(const GBT_NUMKEY_R *key, retval = (*tinfo->f_le) (query, key->upper); break; case BtreeGistNotEqualStrategyNumber: - retval = (! ((*tinfo->f_eq) (query, key->lower) && - (*tinfo->f_eq) (query, key->upper))) ? true : false; + retval = (!((*tinfo->f_eq) (query, key->lower) && + (*tinfo->f_eq) (query, key->upper))) ? true : false; break; default: retval = false; @@ -249,9 +249,9 @@ gbt_num_distance(const GBT_NUMKEY_R *key, if (tinfo->f_dist == NULL) elog(ERROR, "KNN search is not supported for btree_gist type %d", (int) tinfo->t); - if ( tinfo->f_le(query, key->lower) ) + if (tinfo->f_le(query, key->lower)) retval = tinfo->f_dist(query, key->lower); - else if ( tinfo->f_ge(query, key->upper) ) + else if (tinfo->f_ge(query, key->upper)) retval = tinfo->f_dist(query, key->upper); else retval = 0.0; diff --git a/contrib/btree_gist/btree_utils_num.h b/contrib/btree_gist/btree_utils_num.h index 243d3b5cb9..8935ed6630 100644 --- a/contrib/btree_gist/btree_utils_num.h +++ b/contrib/btree_gist/btree_utils_num.h @@ -46,7 +46,7 @@ typedef struct bool (*f_le) (const void *, const void *); /* less or equal */ bool (*f_lt) (const void *, const void *); /* less than */ int (*f_cmp) (const void *, const void *); /* key compare function */ - float8 (*f_dist) (const void *, const void *); /* key distance function */ + float8 (*f_dist) (const void *, const void *); /* key distance function */ } gbtree_ninfo; @@ -94,7 +94,7 @@ typedef struct #define GET_FLOAT_DISTANCE(t, arg1, arg2) Abs( ((float8) *((const t *) (arg1))) - ((float8) *((const t *) (arg2))) ) -#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0)) +#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0)) /* * check to see if a float4/8 val has underflowed or overflowed @@ -121,7 +121,7 @@ extern bool gbt_num_consistent(const GBT_NUMKEY_R *key, const void *query, const gbtree_ninfo *tinfo); extern float8 gbt_num_distance(const GBT_NUMKEY_R *key, const void *query, - bool is_leaf, const gbtree_ninfo *tinfo); + bool is_leaf, const gbtree_ninfo *tinfo); extern GIST_SPLITVEC *gbt_num_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v, const gbtree_ninfo *tinfo); diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c index 8f3173e499..d74013af88 100644 --- a/contrib/btree_gist/btree_utils_var.c +++ b/contrib/btree_gist/btree_utils_var.c @@ -598,7 +598,7 @@ gbt_var_consistent( || gbt_var_node_pf_match(key, query, tinfo); break; case BtreeGistNotEqualStrategyNumber: - retval = ! 
((*tinfo->f_eq) (query, key->lower) && (*tinfo->f_eq) (query, key->upper)); + retval = !((*tinfo->f_eq) (query, key->lower) && (*tinfo->f_eq) (query, key->upper)); break; default: retval = FALSE; diff --git a/contrib/dummy_seclabel/dummy_seclabel.c b/contrib/dummy_seclabel/dummy_seclabel.c index 974806f1b6..5deb43fa9b 100644 --- a/contrib/dummy_seclabel/dummy_seclabel.c +++ b/contrib/dummy_seclabel/dummy_seclabel.c @@ -18,7 +18,7 @@ PG_MODULE_MAGIC; /* Entrypoint of the module */ -void _PG_init(void); +void _PG_init(void); static void dummy_object_relabel(const ObjectAddress *object, const char *seclabel) diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index 6a84a00e8d..466c015107 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -45,17 +45,17 @@ struct FileFdwOption */ static struct FileFdwOption valid_options[] = { /* File options */ - { "filename", ForeignTableRelationId }, + {"filename", ForeignTableRelationId}, /* Format options */ /* oids option is not supported */ - { "format", ForeignTableRelationId }, - { "header", ForeignTableRelationId }, - { "delimiter", ForeignTableRelationId }, - { "quote", ForeignTableRelationId }, - { "escape", ForeignTableRelationId }, - { "null", ForeignTableRelationId }, - { "encoding", ForeignTableRelationId }, + {"format", ForeignTableRelationId}, + {"header", ForeignTableRelationId}, + {"delimiter", ForeignTableRelationId}, + {"quote", ForeignTableRelationId}, + {"escape", ForeignTableRelationId}, + {"null", ForeignTableRelationId}, + {"encoding", ForeignTableRelationId}, /* * force_quote is not supported by file_fdw because it's for COPY TO. @@ -68,7 +68,7 @@ static struct FileFdwOption valid_options[] = { */ /* Sentinel */ - { NULL, InvalidOid } + {NULL, InvalidOid} }; /* @@ -76,9 +76,9 @@ static struct FileFdwOption valid_options[] = { */ typedef struct FileFdwExecutionState { - char *filename; /* file to read */ - List *options; /* merged COPY options, excluding filename */ - CopyState cstate; /* state of reading file */ + char *filename; /* file to read */ + List *options; /* merged COPY options, excluding filename */ + CopyState cstate; /* state of reading file */ } FileFdwExecutionState; /* @@ -94,8 +94,8 @@ PG_FUNCTION_INFO_V1(file_fdw_validator); * FDW callback routines */ static FdwPlan *filePlanForeignScan(Oid foreigntableid, - PlannerInfo *root, - RelOptInfo *baserel); + PlannerInfo *root, + RelOptInfo *baserel); static void fileExplainForeignScan(ForeignScanState *node, ExplainState *es); static void fileBeginForeignScan(ForeignScanState *node, int eflags); static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node); @@ -109,8 +109,8 @@ static bool is_valid_option(const char *option, Oid context); static void fileGetOptions(Oid foreigntableid, char **filename, List **other_options); static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel, - const char *filename, - Cost *startup_cost, Cost *total_cost); + const char *filename, + Cost *startup_cost, Cost *total_cost); /* @@ -149,16 +149,16 @@ file_fdw_validator(PG_FUNCTION_ARGS) /* * Only superusers are allowed to set options of a file_fdw foreign table. - * This is because the filename is one of those options, and we don't - * want non-superusers to be able to determine which file gets read. + * This is because the filename is one of those options, and we don't want + * non-superusers to be able to determine which file gets read. 
* * Putting this sort of permissions check in a validator is a bit of a * crock, but there doesn't seem to be any other place that can enforce * the check more cleanly. * - * Note that the valid_options[] array disallows setting filename at - * any options level other than foreign table --- otherwise there'd - * still be a security hole. + * Note that the valid_options[] array disallows setting filename at any + * options level other than foreign table --- otherwise there'd still be a + * security hole. */ if (catalog == ForeignTableRelationId && !superuser()) ereport(ERROR, @@ -171,7 +171,7 @@ file_fdw_validator(PG_FUNCTION_ARGS) */ foreach(cell, options_list) { - DefElem *def = (DefElem *) lfirst(cell); + DefElem *def = (DefElem *) lfirst(cell); if (!is_valid_option(def->defname, catalog)) { @@ -276,7 +276,7 @@ fileGetOptions(Oid foreigntableid, prev = NULL; foreach(lc, options) { - DefElem *def = (DefElem *) lfirst(lc); + DefElem *def = (DefElem *) lfirst(lc); if (strcmp(def->defname, "filename") == 0) { @@ -302,7 +302,7 @@ filePlanForeignScan(Oid foreigntableid, PlannerInfo *root, RelOptInfo *baserel) { - FdwPlan *fdwplan; + FdwPlan *fdwplan; char *filename; List *options; @@ -313,7 +313,7 @@ filePlanForeignScan(Oid foreigntableid, fdwplan = makeNode(FdwPlan); estimate_costs(root, baserel, filename, &fdwplan->startup_cost, &fdwplan->total_cost); - fdwplan->fdw_private = NIL; /* not used */ + fdwplan->fdw_private = NIL; /* not used */ return fdwplan; } @@ -337,7 +337,7 @@ fileExplainForeignScan(ForeignScanState *node, ExplainState *es) /* Suppress file size if we're not showing cost details */ if (es->costs) { - struct stat stat_buf; + struct stat stat_buf; if (stat(filename, &stat_buf) == 0) ExplainPropertyLong("Foreign File Size", (long) stat_buf.st_size, @@ -368,8 +368,8 @@ fileBeginForeignScan(ForeignScanState *node, int eflags) &filename, &options); /* - * Create CopyState from FDW options. We always acquire all columns, - * so as to match the expected ScanTupleSlot signature. + * Create CopyState from FDW options. We always acquire all columns, so + * as to match the expected ScanTupleSlot signature. */ cstate = BeginCopyFrom(node->ss.ss_currentRelation, filename, @@ -398,7 +398,7 @@ fileIterateForeignScan(ForeignScanState *node) { FileFdwExecutionState *festate = (FileFdwExecutionState *) node->fdw_state; TupleTableSlot *slot = node->ss.ss_ScanTupleSlot; - bool found; + bool found; ErrorContextCallback errcontext; /* Set up callback to identify error line number. */ @@ -410,8 +410,8 @@ fileIterateForeignScan(ForeignScanState *node) /* * The protocol for loading a virtual tuple into a slot is first * ExecClearTuple, then fill the values/isnull arrays, then - * ExecStoreVirtualTuple. If we don't find another row in the file, - * we just skip the last step, leaving the slot empty as required. + * ExecStoreVirtualTuple. If we don't find another row in the file, we + * just skip the last step, leaving the slot empty as required. * * We can pass ExprContext = NULL because we read all columns from the * file, so no need to evaluate default expressions. 
@@ -471,17 +471,17 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel, const char *filename, Cost *startup_cost, Cost *total_cost) { - struct stat stat_buf; - BlockNumber pages; - int tuple_width; - double ntuples; - double nrows; - Cost run_cost = 0; - Cost cpu_per_tuple; + struct stat stat_buf; + BlockNumber pages; + int tuple_width; + double ntuples; + double nrows; + Cost run_cost = 0; + Cost cpu_per_tuple; /* - * Get size of the file. It might not be there at plan time, though, - * in which case we have to use a default estimate. + * Get size of the file. It might not be there at plan time, though, in + * which case we have to use a default estimate. */ if (stat(filename, &stat_buf) < 0) stat_buf.st_size = 10 * BLCKSZ; @@ -489,7 +489,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel, /* * Convert size to pages for use in I/O cost estimate below. */ - pages = (stat_buf.st_size + (BLCKSZ-1)) / BLCKSZ; + pages = (stat_buf.st_size + (BLCKSZ - 1)) / BLCKSZ; if (pages < 1) pages = 1; @@ -505,10 +505,9 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel, ntuples = clamp_row_est((double) stat_buf.st_size / (double) tuple_width); /* - * Now estimate the number of rows returned by the scan after applying - * the baserestrictinfo quals. This is pretty bogus too, since the - * planner will have no stats about the relation, but it's better than - * nothing. + * Now estimate the number of rows returned by the scan after applying the + * baserestrictinfo quals. This is pretty bogus too, since the planner + * will have no stats about the relation, but it's better than nothing. */ nrows = ntuples * clauselist_selectivity(root, @@ -523,7 +522,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel, baserel->rows = nrows; /* - * Now estimate costs. We estimate costs almost the same way as + * Now estimate costs. We estimate costs almost the same way as * cost_seqscan(), thus assuming that I/O costs are equivalent to a * regular table file of the same size. However, we take per-tuple CPU * costs as 10x of a seqscan, to account for the cost of parsing records. diff --git a/contrib/fuzzystrmatch/levenshtein.c b/contrib/fuzzystrmatch/levenshtein.c index 3d85d4175f..a84c46a4a4 100644 --- a/contrib/fuzzystrmatch/levenshtein.c +++ b/contrib/fuzzystrmatch/levenshtein.c @@ -23,7 +23,7 @@ */ #ifdef LEVENSHTEIN_LESS_EQUAL static int levenshtein_less_equal_internal(text *s, text *t, - int ins_c, int del_c, int sub_c, int max_d); + int ins_c, int del_c, int sub_c, int max_d); #else static int levenshtein_internal(text *s, text *t, int ins_c, int del_c, int sub_c); @@ -50,7 +50,7 @@ static int levenshtein_internal(text *s, text *t, * array. * * If max_d >= 0, we only need to provide an accurate answer when that answer - * is less than or equal to the bound. From any cell in the matrix, there is + * is less than or equal to the bound. From any cell in the matrix, there is * theoretical "minimum residual distance" from that cell to the last column * of the final row. This minimum residual distance is zero when the * untransformed portions of the strings are of equal length (because we might @@ -87,11 +87,13 @@ levenshtein_internal(text *s, text *t, /* * For levenshtein_less_equal_internal, we have real variables called - * start_column and stop_column; otherwise it's just short-hand for 0 - * and m. + * start_column and stop_column; otherwise it's just short-hand for 0 and + * m. 
*/ #ifdef LEVENSHTEIN_LESS_EQUAL - int start_column, stop_column; + int start_column, + stop_column; + #undef START_COLUMN #undef STOP_COLUMN #define START_COLUMN start_column @@ -139,16 +141,16 @@ levenshtein_internal(text *s, text *t, stop_column = m + 1; /* - * If max_d >= 0, determine whether the bound is impossibly tight. If so, + * If max_d >= 0, determine whether the bound is impossibly tight. If so, * return max_d + 1 immediately. Otherwise, determine whether it's tight * enough to limit the computation we must perform. If so, figure out * initial stop column. */ if (max_d >= 0) { - int min_theo_d; /* Theoretical minimum distance. */ - int max_theo_d; /* Theoretical maximum distance. */ - int net_inserts = n - m; + int min_theo_d; /* Theoretical minimum distance. */ + int max_theo_d; /* Theoretical maximum distance. */ + int net_inserts = n - m; min_theo_d = net_inserts < 0 ? -net_inserts * del_c : net_inserts * ins_c; @@ -162,20 +164,20 @@ levenshtein_internal(text *s, text *t, else if (ins_c + del_c > 0) { /* - * Figure out how much of the first row of the notional matrix - * we need to fill in. If the string is growing, the theoretical + * Figure out how much of the first row of the notional matrix we + * need to fill in. If the string is growing, the theoretical * minimum distance already incorporates the cost of deleting the - * number of characters necessary to make the two strings equal - * in length. Each additional deletion forces another insertion, - * so the best-case total cost increases by ins_c + del_c. - * If the string is shrinking, the minimum theoretical cost - * assumes no excess deletions; that is, we're starting no futher - * right than column n - m. If we do start further right, the - * best-case total cost increases by ins_c + del_c for each move - * right. + * number of characters necessary to make the two strings equal in + * length. Each additional deletion forces another insertion, so + * the best-case total cost increases by ins_c + del_c. If the + * string is shrinking, the minimum theoretical cost assumes no + * excess deletions; that is, we're starting no futher right than + * column n - m. If we do start further right, the best-case + * total cost increases by ins_c + del_c for each move right. */ - int slack_d = max_d - min_theo_d; - int best_column = net_inserts < 0 ? -net_inserts : 0; + int slack_d = max_d - min_theo_d; + int best_column = net_inserts < 0 ? -net_inserts : 0; + stop_column = best_column + (slack_d / (ins_c + del_c)) + 1; if (stop_column > m) stop_column = m + 1; @@ -185,15 +187,15 @@ levenshtein_internal(text *s, text *t, /* * In order to avoid calling pg_mblen() repeatedly on each character in s, - * we cache all the lengths before starting the main loop -- but if all the - * characters in both strings are single byte, then we skip this and use - * a fast-path in the main loop. If only one string contains multi-byte - * characters, we still build the array, so that the fast-path needn't - * deal with the case where the array hasn't been initialized. + * we cache all the lengths before starting the main loop -- but if all + * the characters in both strings are single byte, then we skip this and + * use a fast-path in the main loop. If only one string contains + * multi-byte characters, we still build the array, so that the fast-path + * needn't deal with the case where the array hasn't been initialized. 
*/ if (m != s_bytes || n != t_bytes) { - int i; + int i; const char *cp = s_data; s_char_len = (int *) palloc((m + 1) * sizeof(int)); @@ -214,8 +216,8 @@ levenshtein_internal(text *s, text *t, curr = prev + m; /* - * To transform the first i characters of s into the first 0 characters - * of t, we must perform i deletions. + * To transform the first i characters of s into the first 0 characters of + * t, we must perform i deletions. */ for (i = START_COLUMN; i < STOP_COLUMN; i++) prev[i] = i * del_c; @@ -228,6 +230,7 @@ levenshtein_internal(text *s, text *t, int y_char_len = n != t_bytes + 1 ? pg_mblen(y) : 1; #ifdef LEVENSHTEIN_LESS_EQUAL + /* * In the best case, values percolate down the diagonal unchanged, so * we must increment stop_column unless it's already on the right end @@ -241,10 +244,10 @@ levenshtein_internal(text *s, text *t, } /* - * The main loop fills in curr, but curr[0] needs a special case: - * to transform the first 0 characters of s into the first j - * characters of t, we must perform j insertions. However, if - * start_column > 0, this special case does not apply. + * The main loop fills in curr, but curr[0] needs a special case: to + * transform the first 0 characters of s into the first j characters + * of t, we must perform j insertions. However, if start_column > 0, + * this special case does not apply. */ if (start_column == 0) { @@ -285,7 +288,7 @@ levenshtein_internal(text *s, text *t, */ ins = prev[i] + ins_c; del = curr[i - 1] + del_c; - if (x[x_char_len-1] == y[y_char_len-1] + if (x[x_char_len - 1] == y[y_char_len - 1] && x_char_len == y_char_len && (x_char_len == 1 || rest_of_char_same(x, y, x_char_len))) sub = prev[i - 1]; @@ -331,6 +334,7 @@ levenshtein_internal(text *s, text *t, y += y_char_len; #ifdef LEVENSHTEIN_LESS_EQUAL + /* * This chunk of code represents a significant performance hit if used * in the case where there is no max_d bound. This is probably not @@ -348,15 +352,16 @@ levenshtein_internal(text *s, text *t, * string, so we want to find the value for zp where where (n - 1) * - j = (m - 1) - zp. */ - int zp = j - (n - m); + int zp = j - (n - m); /* Check whether the stop column can slide left. */ while (stop_column > 0) { - int ii = stop_column - 1; - int net_inserts = ii - zp; + int ii = stop_column - 1; + int net_inserts = ii - zp; + if (prev[ii] + (net_inserts > 0 ? net_inserts * ins_c : - -net_inserts * del_c) <= max_d) + -net_inserts * del_c) <= max_d) break; stop_column--; } @@ -364,14 +369,16 @@ levenshtein_internal(text *s, text *t, /* Check whether the start column can slide right. */ while (start_column < stop_column) { - int net_inserts = start_column - zp; + int net_inserts = start_column - zp; + if (prev[start_column] + (net_inserts > 0 ? net_inserts * ins_c : - -net_inserts * del_c) <= max_d) + -net_inserts * del_c) <= max_d) break; + /* - * We'll never again update these values, so we must make - * sure there's nothing here that could confuse any future + * We'll never again update these values, so we must make sure + * there's nothing here that could confuse any future * iteration of the outer loop. */ prev[start_column] = max_d + 1; diff --git a/contrib/hstore/hstore_gin.c b/contrib/hstore/hstore_gin.c index d55674c79f..2007801cf0 100644 --- a/contrib/hstore/hstore_gin.c +++ b/contrib/hstore/hstore_gin.c @@ -13,7 +13,7 @@ /* * When using a GIN index for hstore, we choose to index both keys and values. * The storage format is "text" values, with K, V, or N prepended to the string - * to indicate key, value, or null values. 
(As of 9.1 it might be better to + * to indicate key, value, or null values. (As of 9.1 it might be better to * store null values as nulls, but we'll keep it this way for on-disk * compatibility.) */ @@ -168,7 +168,7 @@ gin_consistent_hstore(PG_FUNCTION_ARGS) { /* * Index doesn't have information about correspondence of keys and - * values, so we need recheck. However, if not all the keys are + * values, so we need recheck. However, if not all the keys are * present, we can fail at once. */ *recheck = true; diff --git a/contrib/hstore/hstore_op.c b/contrib/hstore/hstore_op.c index cb6200ab1d..5b278c14ff 100644 --- a/contrib/hstore/hstore_op.c +++ b/contrib/hstore/hstore_op.c @@ -437,7 +437,7 @@ hstore_delete_hstore(PG_FUNCTION_ARGS) if (snullval != HS_VALISNULL(es2, j) || (!snullval && (svallen != HS_VALLEN(es2, j) - || memcmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0))) + || memcmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0))) { HS_COPYITEM(ed, bufd, pd, HS_KEY(es, ps, i), HS_KEYLEN(es, i), @@ -1000,7 +1000,7 @@ hstore_contains(PG_FUNCTION_ARGS) if (nullval != HS_VALISNULL(ve, idx) || (!nullval && (vallen != HS_VALLEN(ve, idx) - || memcmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen)))) + || memcmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen)))) res = false; } else diff --git a/contrib/intarray/_int_bool.c b/contrib/intarray/_int_bool.c index 072e8cc897..4e63f6d66c 100644 --- a/contrib/intarray/_int_bool.c +++ b/contrib/intarray/_int_bool.c @@ -98,7 +98,7 @@ gettoken(WORKSTATE *state, int4 *val) } else { - long lval; + long lval; nnn[innn] = '\0'; errno = 0; @@ -355,8 +355,8 @@ gin_bool_consistent(QUERYTYPE *query, bool *check) return FALSE; /* - * Set up data for checkcondition_gin. This must agree with the - * query extraction code in ginint4_queryextract. + * Set up data for checkcondition_gin. This must agree with the query + * extraction code in ginint4_queryextract. */ gcv.first = items; gcv.mapped_check = (bool *) palloc(sizeof(bool) * query->size); diff --git a/contrib/intarray/_int_gin.c b/contrib/intarray/_int_gin.c index 3ef5c4635a..9abe54e55f 100644 --- a/contrib/intarray/_int_gin.c +++ b/contrib/intarray/_int_gin.c @@ -34,8 +34,8 @@ ginint4_queryextract(PG_FUNCTION_ARGS) /* * If the query doesn't have any required primitive values (for - * instance, it's something like '! 42'), we have to do a full - * index scan. + * instance, it's something like '! 42'), we have to do a full index + * scan. */ if (query_has_required_values(query)) *searchMode = GIN_SEARCH_MODE_DEFAULT; @@ -95,7 +95,7 @@ ginint4_queryextract(PG_FUNCTION_ARGS) case RTOldContainsStrategyNumber: if (*nentries > 0) *searchMode = GIN_SEARCH_MODE_DEFAULT; - else /* everything contains the empty set */ + else /* everything contains the empty set */ *searchMode = GIN_SEARCH_MODE_ALL; break; default: @@ -116,6 +116,7 @@ ginint4_consistent(PG_FUNCTION_ARGS) bool *check = (bool *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); bool res = FALSE; diff --git a/contrib/intarray/_int_tool.c b/contrib/intarray/_int_tool.c index ddf07f042b..bfc55501db 100644 --- a/contrib/intarray/_int_tool.c +++ b/contrib/intarray/_int_tool.c @@ -183,7 +183,7 @@ rt__int_size(ArrayType *a, float *size) *size = (float) ARRNELEMS(a); } -/* Sort the given data (len >= 2). Return true if any duplicates found */ +/* Sort the given data (len >= 2). 
Return true if any duplicates found */ bool isort(int4 *a, int len) { @@ -195,7 +195,7 @@ isort(int4 *a, int len) bool r = FALSE; /* - * We use a simple insertion sort. While this is O(N^2) in the worst + * We use a simple insertion sort. While this is O(N^2) in the worst * case, it's quite fast if the input is already sorted or nearly so. * Also, for not-too-large inputs it's faster than more complex methods * anyhow. diff --git a/contrib/isn/ISBN.h b/contrib/isn/ISBN.h index c0301ced1e..dbda6fb724 100644 --- a/contrib/isn/ISBN.h +++ b/contrib/isn/ISBN.h @@ -988,4 +988,3 @@ const char *ISBN_range_new[][2] = { {"10-976000", "10-999999"}, {NULL, NULL}, }; - diff --git a/contrib/pg_archivecleanup/pg_archivecleanup.c b/contrib/pg_archivecleanup/pg_archivecleanup.c index 79892077c8..d96eef2c5a 100644 --- a/contrib/pg_archivecleanup/pg_archivecleanup.c +++ b/contrib/pg_archivecleanup/pg_archivecleanup.c @@ -25,9 +25,9 @@ #ifdef HAVE_GETOPT_H #include <getopt.h> #endif -#else /* WIN32 */ +#else /* WIN32 */ extern int getopt(int argc, char *const argv[], const char *optstring); -#endif /* ! WIN32 */ +#endif /* ! WIN32 */ extern char *optarg; extern int optind; diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 87cf8c55cf..0236b87498 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -137,7 +137,7 @@ typedef enum PGSS_TRACK_NONE, /* track no statements */ PGSS_TRACK_TOP, /* only top level statements */ PGSS_TRACK_ALL /* all statements, including nested ones */ -} PGSSTrackLevel; +} PGSSTrackLevel; static const struct config_enum_entry track_options[] = { diff --git a/contrib/pg_test_fsync/pg_test_fsync.c b/contrib/pg_test_fsync/pg_test_fsync.c index 49a7b3c2c0..305b3d0723 100644 --- a/contrib/pg_test_fsync/pg_test_fsync.c +++ b/contrib/pg_test_fsync/pg_test_fsync.c @@ -28,24 +28,28 @@ static const char *progname; -static int ops_per_test = 2000; -static char full_buf[XLOG_SEG_SIZE], *buf, *filename = FSYNC_FILENAME; -static struct timeval start_t, stop_t; - - -static void handle_args(int argc, char *argv[]); -static void prepare_buf(void); -static void test_open(void); -static void test_non_sync(void); -static void test_sync(int writes_per_op); -static void test_open_syncs(void); -static void test_open_sync(const char *msg, int writes_size); -static void test_file_descriptor_sync(void); +static int ops_per_test = 2000; +static char full_buf[XLOG_SEG_SIZE], + *buf, + *filename = FSYNC_FILENAME; +static struct timeval start_t, + stop_t; + + +static void handle_args(int argc, char *argv[]); +static void prepare_buf(void); +static void test_open(void); +static void test_non_sync(void); +static void test_sync(int writes_per_op); +static void test_open_syncs(void); +static void test_open_sync(const char *msg, int writes_size); +static void test_file_descriptor_sync(void); + #ifdef HAVE_FSYNC_WRITETHROUGH static int pg_fsync_writethrough(int fd); #endif -static void print_elapse(struct timeval start_t, struct timeval stop_t); -static void die(const char *str); +static void print_elapse(struct timeval start_t, struct timeval stop_t); +static void die(const char *str); int @@ -103,7 +107,7 @@ handle_args(int argc, char *argv[]) } while ((option = getopt_long(argc, argv, "f:o:", - long_options, &optindex)) != -1) + long_options, &optindex)) != -1) { switch (option) { @@ -176,7 +180,9 @@ test_open(void) static void test_sync(int writes_per_op) { - int tmpfile, ops, writes; + 
int tmpfile, + ops, + writes; bool fs_warning = false; if (writes_per_op == 1) @@ -353,7 +359,9 @@ test_open_syncs(void) static void test_open_sync(const char *msg, int writes_size) { - int tmpfile, ops, writes; + int tmpfile, + ops, + writes; printf(LABEL_FORMAT, msg); fflush(stdout); @@ -377,7 +385,6 @@ test_open_sync(const char *msg, int writes_size) close(tmpfile); print_elapse(start_t, stop_t); } - #else printf(NA_FORMAT, "n/a\n"); #endif @@ -386,22 +393,22 @@ test_open_sync(const char *msg, int writes_size) static void test_file_descriptor_sync(void) { - int tmpfile, ops; + int tmpfile, + ops; /* - * Test whether fsync can sync data written on a different - * descriptor for the same file. This checks the efficiency - * of multi-process fsyncs against the same file. - * Possibly this should be done with writethrough on platforms - * which support it. + * Test whether fsync can sync data written on a different descriptor for + * the same file. This checks the efficiency of multi-process fsyncs + * against the same file. Possibly this should be done with writethrough + * on platforms which support it. */ printf("\nTest if fsync on non-write file descriptor is honored:\n"); printf("(If the times are similar, fsync() can sync data written\n"); printf("on a different descriptor.)\n"); /* - * first write, fsync and close, which is the - * normal behavior without multiple descriptors + * first write, fsync and close, which is the normal behavior without + * multiple descriptors */ printf(LABEL_FORMAT, "write, fsync, close"); fflush(stdout); @@ -416,9 +423,10 @@ test_file_descriptor_sync(void) if (fsync(tmpfile) != 0) die("fsync failed"); close(tmpfile); + /* - * open and close the file again to be consistent - * with the following test + * open and close the file again to be consistent with the following + * test */ if ((tmpfile = open(filename, O_RDWR, 0)) == -1) die("could not open output file"); @@ -428,9 +436,8 @@ test_file_descriptor_sync(void) print_elapse(start_t, stop_t); /* - * Now open, write, close, open again and fsync - * This simulates processes fsyncing each other's - * writes. + * Now open, write, close, open again and fsync This simulates processes + * fsyncing each other's writes. 
*/ printf(LABEL_FORMAT, "write, close, fsync"); fflush(stdout); @@ -458,7 +465,8 @@ test_file_descriptor_sync(void) static void test_non_sync(void) { - int tmpfile, ops; + int tmpfile, + ops; /* * Test a simple write without fsync @@ -494,7 +502,6 @@ pg_fsync_writethrough(int fd) return -1; #endif } - #endif /* diff --git a/contrib/pg_trgm/trgm.h b/contrib/pg_trgm/trgm.h index f3644fcce7..61de5d89d1 100644 --- a/contrib/pg_trgm/trgm.h +++ b/contrib/pg_trgm/trgm.h @@ -51,8 +51,9 @@ uint32 trgm2int(trgm *ptr); #endif #define ISPRINTABLETRGM(t) ( ISPRINTABLECHAR( ((char*)(t)) ) && ISPRINTABLECHAR( ((char*)(t))+1 ) && ISPRINTABLECHAR( ((char*)(t))+2 ) ) -#define ISESCAPECHAR(x) (*(x) == '\\') /* Wildcard escape character */ -#define ISWILDCARDCHAR(x) (*(x) == '_' || *(x) == '%') /* Wildcard meta-character */ +#define ISESCAPECHAR(x) (*(x) == '\\') /* Wildcard escape character */ +#define ISWILDCARDCHAR(x) (*(x) == '_' || *(x) == '%') /* Wildcard + * meta-character */ typedef struct { @@ -105,4 +106,4 @@ TRGM *generate_wildcard_trgm(const char *str, int slen); float4 cnt_sml(TRGM *trg1, TRGM *trg2); bool trgm_contained_by(TRGM *trg1, TRGM *trg2); -#endif /* __TRGM_H__ */ +#endif /* __TRGM_H__ */ diff --git a/contrib/pg_trgm/trgm_gin.c b/contrib/pg_trgm/trgm_gin.c index aaca1f9737..43ac0b0c65 100644 --- a/contrib/pg_trgm/trgm_gin.c +++ b/contrib/pg_trgm/trgm_gin.c @@ -67,7 +67,7 @@ gin_extract_value_trgm(PG_FUNCTION_ARGS) ptr = GETARR(trg); for (i = 0; i < trglen; i++) { - int32 item = trgm2int(ptr); + int32 item = trgm2int(ptr); entries[i] = Int32GetDatum(item); ptr++; @@ -83,10 +83,11 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS) text *val = (text *) PG_GETARG_TEXT_P(0); int32 *nentries = (int32 *) PG_GETARG_POINTER(1); StrategyNumber strategy = PG_GETARG_UINT16(2); - /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */ - /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ - /* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */ - int32 *searchMode = (int32 *) PG_GETARG_POINTER(6); + + /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */ + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ + /* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */ + int32 *searchMode = (int32 *) PG_GETARG_POINTER(6); Datum *entries = NULL; TRGM *trg; int32 trglen; @@ -104,6 +105,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS) #endif /* FALL THRU */ case LikeStrategyNumber: + /* * For wildcard search we extract all the trigrams that every * potentially-matching string must include. 
@@ -112,7 +114,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS) break; default: elog(ERROR, "unrecognized strategy number: %d", strategy); - trg = NULL; /* keep compiler quiet */ + trg = NULL; /* keep compiler quiet */ break; } @@ -125,7 +127,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS) ptr = GETARR(trg); for (i = 0; i < trglen; i++) { - int32 item = trgm2int(ptr); + int32 item = trgm2int(ptr); entries[i] = Int32GetDatum(item); ptr++; @@ -146,9 +148,11 @@ gin_trgm_consistent(PG_FUNCTION_ARGS) { bool *check = (bool *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); + /* text *query = PG_GETARG_TEXT_P(2); */ int32 nkeys = PG_GETARG_INT32(3); - /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ + + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); bool res; int32 i, diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c index d83265c11c..b328a09f41 100644 --- a/contrib/pg_trgm/trgm_gist.c +++ b/contrib/pg_trgm/trgm_gist.c @@ -190,17 +190,18 @@ gtrgm_consistent(PG_FUNCTION_ARGS) GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); text *query = PG_GETARG_TEXT_P(1); StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + /* Oid subtype = PG_GETARG_OID(3); */ bool *recheck = (bool *) PG_GETARG_POINTER(4); TRGM *key = (TRGM *) DatumGetPointer(entry->key); TRGM *qtrg; bool res; char *cache = (char *) fcinfo->flinfo->fn_extra, - *cacheContents = cache + MAXALIGN(sizeof(StrategyNumber)); + *cacheContents = cache + MAXALIGN(sizeof(StrategyNumber)); /* * Store both the strategy number and extracted trigrams in cache, because - * trigram extraction is relatively CPU-expensive. We must include + * trigram extraction is relatively CPU-expensive. We must include * strategy number because trigram extraction depends on strategy. */ if (cache == NULL || strategy != *((StrategyNumber *) cache) || @@ -222,7 +223,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS) break; default: elog(ERROR, "unrecognized strategy number: %d", strategy); - qtrg = NULL; /* keep compiler quiet */ + qtrg = NULL; /* keep compiler quiet */ break; } @@ -251,20 +252,20 @@ gtrgm_consistent(PG_FUNCTION_ARGS) *recheck = false; if (GIST_LEAF(entry)) - { /* all leafs contains orig trgm */ - float4 tmpsml = cnt_sml(key, qtrg); + { /* all leafs contains orig trgm */ + float4 tmpsml = cnt_sml(key, qtrg); /* strange bug at freebsd 5.2.1 and gcc 3.3.3 */ res = (*(int *) &tmpsml == *(int *) &trgm_limit || tmpsml > trgm_limit) ? true : false; } else if (ISALLTRUE(key)) - { /* non-leaf contains signature */ + { /* non-leaf contains signature */ res = true; } else - { /* non-leaf contains signature */ - int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key)); - int4 len = ARRNELEM(qtrg); + { /* non-leaf contains signature */ + int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key)); + int4 len = ARRNELEM(qtrg); if (len == 0) res = false; @@ -286,20 +287,20 @@ gtrgm_consistent(PG_FUNCTION_ARGS) * nodes. 
*/ if (GIST_LEAF(entry)) - { /* all leafs contains orig trgm */ + { /* all leafs contains orig trgm */ res = trgm_contained_by(qtrg, key); } else if (ISALLTRUE(key)) - { /* non-leaf contains signature */ + { /* non-leaf contains signature */ res = true; } else - { /* non-leaf contains signature */ - int32 k, - tmp = 0, - len = ARRNELEM(qtrg); - trgm *ptr = GETARR(qtrg); - BITVECP sign = GETSIGN(key); + { /* non-leaf contains signature */ + int32 k, + tmp = 0, + len = ARRNELEM(qtrg); + trgm *ptr = GETARR(qtrg); + BITVECP sign = GETSIGN(key); res = true; for (k = 0; k < len; k++) @@ -328,6 +329,7 @@ gtrgm_distance(PG_FUNCTION_ARGS) GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); text *query = PG_GETARG_TEXT_P(1); StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + /* Oid subtype = PG_GETARG_OID(3); */ TRGM *key = (TRGM *) DatumGetPointer(entry->key); TRGM *qtrg; @@ -355,17 +357,17 @@ gtrgm_distance(PG_FUNCTION_ARGS) { case DistanceStrategyNumber: if (GIST_LEAF(entry)) - { /* all leafs contains orig trgm */ + { /* all leafs contains orig trgm */ res = 1.0 - cnt_sml(key, qtrg); } else if (ISALLTRUE(key)) - { /* all leafs contains orig trgm */ + { /* all leafs contains orig trgm */ res = 0.0; } else - { /* non-leaf contains signature */ - int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key)); - int4 len = ARRNELEM(qtrg); + { /* non-leaf contains signature */ + int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key)); + int4 len = ARRNELEM(qtrg); res = (len == 0) ? -1.0 : 1.0 - ((float8) count) / ((float8) len); } diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c index 52f9172f6d..dfb2df5048 100644 --- a/contrib/pg_trgm/trgm_op.c +++ b/contrib/pg_trgm/trgm_op.c @@ -273,9 +273,9 @@ get_wildcard_part(const char *str, int lenstr, const char *beginword = str; const char *endword; char *s = buf; - bool in_wildcard_meta = false; - bool in_escape = false; - int clen; + bool in_wildcard_meta = false; + bool in_escape = false; + int clen; /* * Find the first word character remembering whether last character was @@ -410,14 +410,14 @@ generate_wildcard_trgm(const char *str, int slen) { TRGM *trg; char *buf, - *buf2; + *buf2; trgm *tptr; int len, charlen, bytelen; const char *eword; - trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) * 3); + trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) *3); trg->flag = ARRKEY; SET_VARSIZE(trg, TRGMHDRSIZE); @@ -638,6 +638,7 @@ similarity_dist(PG_FUNCTION_ARGS) float4 res = DatumGetFloat4(DirectFunctionCall2(similarity, PG_GETARG_DATUM(0), PG_GETARG_DATUM(1))); + PG_RETURN_FLOAT4(1.0 - res); } diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c index 05aac8fde9..747244072d 100644 --- a/contrib/pg_upgrade/check.c +++ b/contrib/pg_upgrade/check.c @@ -212,7 +212,10 @@ check_cluster_versions(void) old_cluster.major_version = get_major_server_version(&old_cluster); new_cluster.major_version = get_major_server_version(&new_cluster); - /* We allow upgrades from/to the same major version for alpha/beta upgrades */ + /* + * We allow upgrades from/to the same major version for alpha/beta + * upgrades + */ if (GET_MAJOR_VERSION(old_cluster.major_version) < 803) pg_log(PG_FATAL, "This utility can only upgrade from PostgreSQL version 8.3 and later.\n"); @@ -516,7 +519,7 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster) } if (script) - fclose(script); + fclose(script); if (found) { diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c index 
78c75e8a84..3ac2180d49 100644 --- a/contrib/pg_upgrade/controldata.c +++ b/contrib/pg_upgrade/controldata.c @@ -505,8 +505,7 @@ check_control_data(ControlData *oldctrl, "\nOld and new pg_controldata date/time storage types do not match.\n"); /* - * This is a common 8.3 -> 8.4 upgrade problem, so we are more - * verbose + * This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose */ pg_log(PG_FATAL, "You will need to rebuild the new server with configure\n" diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c index 7095ba62a8..59a76bc8ae 100644 --- a/contrib/pg_upgrade/exec.c +++ b/contrib/pg_upgrade/exec.c @@ -15,7 +15,7 @@ static void check_data_dir(const char *pg_data); static void check_bin_dir(ClusterInfo *cluster); -static void validate_exec(const char *dir, const char *cmdName); +static void validate_exec(const char *dir, const char *cmdName); /* diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c index 0024b6ee00..f8f7233593 100644 --- a/contrib/pg_upgrade/file.c +++ b/contrib/pg_upgrade/file.c @@ -377,4 +377,5 @@ win32_pghardlink(const char *src, const char *dst) else return 0; } + #endif diff --git a/contrib/pg_upgrade/function.c b/contrib/pg_upgrade/function.c index c01ff046bb..322014cd23 100644 --- a/contrib/pg_upgrade/function.c +++ b/contrib/pg_upgrade/function.c @@ -21,13 +21,13 @@ void install_support_functions_in_new_db(const char *db_name) { - PGconn *conn = connectToServer(&new_cluster, db_name); - + PGconn *conn = connectToServer(&new_cluster, db_name); + /* suppress NOTICE of dropped objects */ PQclear(executeQueryOrDie(conn, "SET client_min_messages = warning;")); PQclear(executeQueryOrDie(conn, - "DROP SCHEMA IF EXISTS binary_upgrade CASCADE;")); + "DROP SCHEMA IF EXISTS binary_upgrade CASCADE;")); PQclear(executeQueryOrDie(conn, "RESET client_min_messages;")); @@ -42,31 +42,31 @@ install_support_functions_in_new_db(const char *db_name) "LANGUAGE C STRICT;")); PQclear(executeQueryOrDie(conn, "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_array_pg_type_oid(OID) " + "binary_upgrade.set_next_array_pg_type_oid(OID) " "RETURNS VOID " "AS '$libdir/pg_upgrade_support' " "LANGUAGE C STRICT;")); PQclear(executeQueryOrDie(conn, "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_toast_pg_type_oid(OID) " + "binary_upgrade.set_next_toast_pg_type_oid(OID) " "RETURNS VOID " "AS '$libdir/pg_upgrade_support' " "LANGUAGE C STRICT;")); PQclear(executeQueryOrDie(conn, "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_heap_pg_class_oid(OID) " + "binary_upgrade.set_next_heap_pg_class_oid(OID) " "RETURNS VOID " "AS '$libdir/pg_upgrade_support' " "LANGUAGE C STRICT;")); PQclear(executeQueryOrDie(conn, "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_index_pg_class_oid(OID) " + "binary_upgrade.set_next_index_pg_class_oid(OID) " "RETURNS VOID " "AS '$libdir/pg_upgrade_support' " "LANGUAGE C STRICT;")); PQclear(executeQueryOrDie(conn, "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_toast_pg_class_oid(OID) " + "binary_upgrade.set_next_toast_pg_class_oid(OID) " "RETURNS VOID " "AS '$libdir/pg_upgrade_support' " "LANGUAGE C STRICT;")); diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c index ceb1601cc6..f0cd8e5ede 100644 --- a/contrib/pg_upgrade/info.c +++ b/contrib/pg_upgrade/info.c @@ -13,9 +13,9 @@ static void create_rel_filename_map(const char *old_data, const char *new_data, - const DbInfo *old_db, const DbInfo *new_db, - const RelInfo *old_rel, const RelInfo *new_rel, - FileNameMap *map); + const 
DbInfo *old_db, const DbInfo *new_db, + const RelInfo *old_rel, const RelInfo *new_rel, + FileNameMap *map); static void get_db_infos(ClusterInfo *cluster); static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo); static void free_rel_infos(RelInfoArr *rel_arr); @@ -40,7 +40,7 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, if (old_db->rel_arr.nrels != new_db->rel_arr.nrels) pg_log(PG_FATAL, "old and new databases \"%s\" have a different number of relations\n", - old_db->db_name); + old_db->db_name); maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) * old_db->rel_arr.nrels); @@ -52,24 +52,24 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, if (old_rel->reloid != new_rel->reloid) pg_log(PG_FATAL, "Mismatch of relation id: database \"%s\", old relid %d, new relid %d\n", - old_db->db_name, old_rel->reloid, new_rel->reloid); + old_db->db_name, old_rel->reloid, new_rel->reloid); /* - * In pre-8.4, TOAST table names change during CLUSTER; in >= 8.4 - * TOAST relation names always use heap table oids, hence we - * cannot check relation names when upgrading from pre-8.4. + * In pre-8.4, TOAST table names change during CLUSTER; in >= 8.4 + * TOAST relation names always use heap table oids, hence we cannot + * check relation names when upgrading from pre-8.4. */ if (strcmp(old_rel->nspname, new_rel->nspname) != 0 || ((GET_MAJOR_VERSION(old_cluster.major_version) >= 804 || strcmp(old_rel->nspname, "pg_toast") != 0) && strcmp(old_rel->relname, new_rel->relname) != 0)) pg_log(PG_FATAL, "Mismatch of relation names: database \"%s\", " - "old rel %s.%s, new rel %s.%s\n", - old_db->db_name, old_rel->nspname, old_rel->relname, - new_rel->nspname, new_rel->relname); + "old rel %s.%s, new rel %s.%s\n", + old_db->db_name, old_rel->nspname, old_rel->relname, + new_rel->nspname, new_rel->relname); create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db, - old_rel, new_rel, maps + num_maps); + old_rel, new_rel, maps + num_maps); num_maps++; } @@ -85,9 +85,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, */ static void create_rel_filename_map(const char *old_data, const char *new_data, - const DbInfo *old_db, const DbInfo *new_db, - const RelInfo *old_rel, const RelInfo *new_rel, - FileNameMap *map) + const DbInfo *old_db, const DbInfo *new_db, + const RelInfo *old_rel, const RelInfo *new_rel, + FileNameMap *map) { if (strlen(old_rel->tablespace) == 0) { @@ -110,8 +110,8 @@ create_rel_filename_map(const char *old_data, const char *new_data, } /* - * old_relfilenode might differ from pg_class.oid (and hence - * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL. + * old_relfilenode might differ from pg_class.oid (and hence + * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL. */ map->old_relfilenode = old_rel->relfilenode; @@ -185,7 +185,9 @@ get_db_infos(ClusterInfo *cluster) int ntups; int tupnum; DbInfo *dbinfos; - int i_datname, i_oid, i_spclocation; + int i_datname, + i_oid, + i_spclocation; res = executeQueryOrDie(conn, "SELECT d.oid, d.datname, t.spclocation " @@ -241,15 +243,19 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) int num_rels = 0; char *nspname = NULL; char *relname = NULL; - int i_spclocation, i_nspname, i_relname, i_oid, i_relfilenode; + int i_spclocation, + i_nspname, + i_relname, + i_oid, + i_relfilenode; char query[QUERY_ALLOC]; /* * pg_largeobject contains user data that does not appear in pg_dumpall * --schema-only output, so we have to copy that system table heap and - * index. 
We could grab the pg_largeobject oids from template1, but - * it is easy to treat it as a normal table. - * Order by oid so we can join old/new structures efficiently. + * index. We could grab the pg_largeobject oids from template1, but it is + * easy to treat it as a normal table. Order by oid so we can join old/new + * structures efficiently. */ snprintf(query, sizeof(query), @@ -263,7 +269,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) " ((n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND " " c.oid >= %u) " " OR (n.nspname = 'pg_catalog' AND " - " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) " + " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) " /* we preserve pg_class.oid so we sort by it to match old/new */ "ORDER BY 1;", /* see the comment at the top of old_8_3_create_sequence_script() */ @@ -273,7 +279,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) FirstNormalObjectId, /* does pg_largeobject_metadata need to be migrated? */ (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ? - "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'"); + "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'"); res = executeQueryOrDie(conn, query); diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c index 061544cac8..e435aaef08 100644 --- a/contrib/pg_upgrade/pg_upgrade.c +++ b/contrib/pg_upgrade/pg_upgrade.c @@ -18,7 +18,7 @@ * FYI, while pg_class.oid and pg_class.relfilenode are intially the same * in a cluster, but they can diverge due to CLUSTER, REINDEX, or VACUUM * FULL. The new cluster will have matching pg_class.oid and - * pg_class.relfilenode values and be based on the old oid value. This can + * pg_class.relfilenode values and be based on the old oid value. This can * cause the old and new pg_class.relfilenode values to differ. In summary, * old and new pg_class.oid and new pg_class.relfilenode will have the * same value, and old pg_class.relfilenode might differ. @@ -34,7 +34,7 @@ */ - + #include "pg_upgrade.h" #ifdef HAVE_LANGINFO_H @@ -53,7 +53,8 @@ static void cleanup(void); /* This is the database used by pg_dumpall to restore global tables */ #define GLOBAL_DUMP_DB "postgres" -ClusterInfo old_cluster, new_cluster; +ClusterInfo old_cluster, + new_cluster; OSInfo os_info; int @@ -192,7 +193,7 @@ prepare_new_cluster(void) exec_prog(true, SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" " "--all --analyze >> %s 2>&1" SYSTEMQUOTE, - new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename); + new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename); check_ok(); /* @@ -205,7 +206,7 @@ prepare_new_cluster(void) exec_prog(true, SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" " "--all --freeze >> %s 2>&1" SYSTEMQUOTE, - new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename); + new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename); check_ok(); get_pg_database_relfilenode(&new_cluster); @@ -229,16 +230,16 @@ prepare_new_databases(void) prep_status("Creating databases in the new cluster"); /* - * Install support functions in the global-restore database - * to preserve pg_authid.oid. + * Install support functions in the global-restore database to preserve + * pg_authid.oid. */ install_support_functions_in_new_db(GLOBAL_DUMP_DB); /* * We have to create the databases first so we can install support - * functions in all the other databases. 
Ideally we could create - * the support functions in template1 but pg_dumpall creates database - * using the template0 template. + * functions in all the other databases. Ideally we could create the + * support functions in template1 but pg_dumpall creates database using + * the template0 template. */ exec_prog(true, SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on " diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h index 8f72ea80d7..5ca570eb15 100644 --- a/contrib/pg_upgrade/pg_upgrade.h +++ b/contrib/pg_upgrade/pg_upgrade.h @@ -85,6 +85,7 @@ typedef struct { char old_dir[MAXPGPATH]; char new_dir[MAXPGPATH]; + /* * old/new relfilenodes might differ for pg_largeobject(_metadata) indexes * due to VACUUM FULL or REINDEX. Other relfilenodes are preserved. @@ -92,7 +93,7 @@ typedef struct Oid old_relfilenode; Oid new_relfilenode; /* the rest are used only for logging and error reporting */ - char nspname[NAMEDATALEN]; /* namespaces */ + char nspname[NAMEDATALEN]; /* namespaces */ char relname[NAMEDATALEN]; } FileNameMap; @@ -180,7 +181,7 @@ typedef struct char *bindir; /* pathname for cluster's executable directory */ unsigned short port; /* port number where postmaster is waiting */ uint32 major_version; /* PG_VERSION of cluster */ - char major_version_str[64]; /* string PG_VERSION of cluster */ + char major_version_str[64]; /* string PG_VERSION of cluster */ Oid pg_database_oid; /* OID of pg_database relation */ char *libpath; /* pathname for cluster's pkglibdir */ char *tablespace_suffix; /* directory specification */ @@ -232,9 +233,10 @@ typedef struct /* * Global variables */ -extern LogOpts log_opts; +extern LogOpts log_opts; extern UserOpts user_opts; -extern ClusterInfo old_cluster, new_cluster; +extern ClusterInfo old_cluster, + new_cluster; extern OSInfo os_info; extern char scandir_file_pattern[]; @@ -246,8 +248,8 @@ void check_old_cluster(bool live_check, char **sequence_script_file_name); void check_new_cluster(void); void report_clusters_compatible(void); -void issue_warnings(char *sequence_script_file_name); -void output_completion_banner(char *deletion_script_file_name); +void issue_warnings(char *sequence_script_file_name); +void output_completion_banner(char *deletion_script_file_name); void check_cluster_versions(void); void check_cluster_compatibility(bool live_check); void create_script_for_old_cluster_deletion(char **deletion_script_file_name); @@ -309,11 +311,11 @@ typedef void *pageCnvCtx; int dir_matching_filenames(const struct dirent * scan_ent); int pg_scandir(const char *dirname, struct dirent *** namelist, - int (*selector) (const struct dirent *)); + int (*selector) (const struct dirent *)); const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src, const char *dst, bool force); const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src, - const char *dst); + const char *dst); void check_hard_link(void); @@ -329,10 +331,10 @@ void check_loadable_libraries(void); FileNameMap *gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, int *nmaps, const char *old_pgdata, const char *new_pgdata); -void get_db_and_rel_infos(ClusterInfo *cluster); +void get_db_and_rel_infos(ClusterInfo *cluster); void free_db_and_rel_infos(DbInfoArr *db_arr); -void print_maps(FileNameMap *maps, int n, - const char *db_name); +void print_maps(FileNameMap *maps, int n, + const char *db_name); /* option.c */ @@ -352,12 +354,12 @@ void init_tablespaces(void); /* server.c */ -PGconn *connectToServer(ClusterInfo *cluster, const char *db_name); 
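
The declarations just above and below this point are pg_upgrade's thin libpq wrappers: connectToServer() opens a session on one cluster's postmaster, and executeQueryOrDie() runs a printf-style query and aborts the whole upgrade on any error, which is why callers such as install_support_functions_in_new_db() earlier in this commit simply wrap each call in PQclear(). Their bodies live in server.c and are not part of this hunk; what follows is only a minimal sketch of the die-on-error pattern they imply, with the buffer size, function name, and error wording as illustrative assumptions rather than the actual server.c code:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include "libpq-fe.h"

/* Sketch only: run a formatted query, abort on anything but success. */
static PGresult *
execute_query_or_die_sketch(PGconn *conn, const char *fmt,...)
{
	char		query[8192];	/* assumed buffer size */
	va_list		args;
	PGresult   *result;
	ExecStatusType status;

	va_start(args, fmt);
	vsnprintf(query, sizeof(query), fmt, args);
	va_end(args);

	result = PQexec(conn, query);
	status = PQresultStatus(result);

	if (status != PGRES_TUPLES_OK && status != PGRES_COMMAND_OK)
	{
		/* the real code reports via pg_log(PG_FATAL, ...) */
		fprintf(stderr, "SQL command failed\n%s\n%s\n",
				query, PQerrorMessage(conn));
		PQclear(result);
		PQfinish(conn);
		exit(1);
	}
	return result;				/* caller is expected to PQclear() it */
}
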
-PGresult *executeQueryOrDie(PGconn *conn, const char *fmt,...); +PGconn *connectToServer(ClusterInfo *cluster, const char *db_name); +PGresult *executeQueryOrDie(PGconn *conn, const char *fmt,...); void start_postmaster(ClusterInfo *cluster, bool quiet); void stop_postmaster(bool fast, bool quiet); -uint32 get_major_server_version(ClusterInfo *cluster); +uint32 get_major_server_version(ClusterInfo *cluster); void check_for_libpq_envvars(void); @@ -380,14 +382,14 @@ unsigned int str2uint(const char *str); /* version.c */ void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, - bool check_mode); + bool check_mode); /* version_old_8_3.c */ void old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster); void old_8_3_check_for_tsquery_usage(ClusterInfo *cluster); -void old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode); -void old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode); +void old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode); +void old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode); void old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster, - bool check_mode); + bool check_mode); char *old_8_3_create_sequence_script(ClusterInfo *cluster); diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c index d111b13de9..9a0a3ac18d 100644 --- a/contrib/pg_upgrade/relfilenode.c +++ b/contrib/pg_upgrade/relfilenode.c @@ -30,7 +30,7 @@ char scandir_file_pattern[MAXPGPATH]; */ const char * transfer_all_new_dbs(DbInfoArr *old_db_arr, - DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata) + DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata) { int dbnum; const char *msg = NULL; @@ -39,7 +39,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, if (old_db_arr->ndbs != new_db_arr->ndbs) pg_log(PG_FATAL, "old and new clusters have a different number of databases\n"); - + for (dbnum = 0; dbnum < old_db_arr->ndbs; dbnum++) { DbInfo *old_db = &old_db_arr->dbs[dbnum]; @@ -50,8 +50,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, if (strcmp(old_db->db_name, new_db->db_name) != 0) pg_log(PG_FATAL, "old and new databases have different names: old \"%s\", new \"%s\"\n", - old_db->db_name, new_db->db_name); - + old_db->db_name, new_db->db_name); + n_maps = 0; mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata, new_pgdata); @@ -169,7 +169,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter, for (fileno = 0; fileno < numFiles; fileno++) { if (strncmp(namelist[fileno]->d_name, scandir_file_pattern, - strlen(scandir_file_pattern)) == 0) + strlen(scandir_file_pattern)) == 0) { snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir, namelist[fileno]->d_name); @@ -178,7 +178,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter, unlink(new_file); transfer_relfile(pageConverter, old_file, new_file, - maps[mapnum].nspname, maps[mapnum].relname); + maps[mapnum].nspname, maps[mapnum].relname); } } } @@ -196,7 +196,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter, for (fileno = 0; fileno < numFiles; fileno++) { if (strncmp(namelist[fileno]->d_name, scandir_file_pattern, - strlen(scandir_file_pattern)) == 0) + strlen(scandir_file_pattern)) == 0) { snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir, namelist[fileno]->d_name); @@ -205,7 +205,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter, unlink(new_file); transfer_relfile(pageConverter, old_file, new_file, - maps[mapnum].nspname, maps[mapnum].relname); + 
maps[mapnum].nspname, maps[mapnum].relname); } } } @@ -227,7 +227,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter, */ static void transfer_relfile(pageCnvCtx *pageConverter, const char *old_file, - const char *new_file, const char *nspname, const char *relname) + const char *new_file, const char *nspname, const char *relname) { const char *msg; @@ -249,7 +249,7 @@ transfer_relfile(pageCnvCtx *pageConverter, const char *old_file, if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL) pg_log(PG_FATAL, - "error while creating link from %s.%s (%s to %s): %s\n", + "error while creating link from %s.%s (%s to %s): %s\n", nspname, relname, old_file, new_file, msg); } return; diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c index a7d5787234..2a0f50eb2a 100644 --- a/contrib/pg_upgrade/server.c +++ b/contrib/pg_upgrade/server.c @@ -194,12 +194,12 @@ start_postmaster(ClusterInfo *cluster, bool quiet) * because it is being used by another process." so we have to send all * other output to 'nul'. * - * Using autovacuum=off disables cleanup vacuum and analyze, but - * freeze vacuums can still happen, so we set - * autovacuum_freeze_max_age to its maximum. We assume all datfrozenxid - * and relfrozen values are less than a gap of 2000000000 from the current - * xid counter, so autovacuum will not touch them. - */ + * Using autovacuum=off disables cleanup vacuum and analyze, but freeze + * vacuums can still happen, so we set autovacuum_freeze_max_age to its + * maximum. We assume all datfrozenxid and relfrozen values are less than + * a gap of 2000000000 from the current xid counter, so autovacuum will + * not touch them. + */ snprintf(cmd, sizeof(cmd), SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" " "-o \"-p %d -c autovacuum=off " @@ -251,7 +251,7 @@ stop_postmaster(bool fast, bool quiet) "\"%s\" 2>&1" SYSTEMQUOTE, bindir, #ifndef WIN32 - log_opts.filename, datadir, fast ? "-m fast" : "", log_opts.filename); + log_opts.filename, datadir, fast ? "-m fast" : "", log_opts.filename); #else DEVNULL, datadir, fast ? "-m fast" : "", DEVNULL); #endif diff --git a/contrib/pg_upgrade/tablespace.c b/contrib/pg_upgrade/tablespace.c index a575487621..6cdae51cf1 100644 --- a/contrib/pg_upgrade/tablespace.c +++ b/contrib/pg_upgrade/tablespace.c @@ -78,8 +78,8 @@ set_tablespace_directory_suffix(ClusterInfo *cluster) { /* This cluster has a version-specific subdirectory */ cluster->tablespace_suffix = pg_malloc(4 + - strlen(cluster->major_version_str) + - 10 /* OIDCHARS */ + 1); + strlen(cluster->major_version_str) + + 10 /* OIDCHARS */ + 1); /* The leading slash is needed to start a new directory. 
*/ sprintf(cluster->tablespace_suffix, "/PG_%s_%d", cluster->major_version_str, diff --git a/contrib/pg_upgrade/util.c b/contrib/pg_upgrade/util.c index 804aa0d1e5..9a6691ce75 100644 --- a/contrib/pg_upgrade/util.c +++ b/contrib/pg_upgrade/util.c @@ -12,7 +12,7 @@ #include <signal.h> -LogOpts log_opts; +LogOpts log_opts; /* * report_status() diff --git a/contrib/pg_upgrade/version_old_8_3.c b/contrib/pg_upgrade/version_old_8_3.c index 3ec4b59a05..0a60eec926 100644 --- a/contrib/pg_upgrade/version_old_8_3.c +++ b/contrib/pg_upgrade/version_old_8_3.c @@ -288,7 +288,7 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode) /* Rebuild all tsvector collumns with one ALTER TABLE command */ if (strcmp(PQgetvalue(res, rowno, i_nspname), nspname) != 0 || - strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0) + strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0) { if (strlen(nspname) != 0 || strlen(relname) != 0) fprintf(script, ";\n\n"); diff --git a/contrib/pg_upgrade_support/pg_upgrade_support.c b/contrib/pg_upgrade_support/pg_upgrade_support.c index 02d1512719..2c23cbab9d 100644 --- a/contrib/pg_upgrade_support/pg_upgrade_support.c +++ b/contrib/pg_upgrade_support/pg_upgrade_support.c @@ -178,9 +178,9 @@ create_empty_extension(PG_FUNCTION_ARGS) &textDatums, NULL, &ndatums); for (i = 0; i < ndatums; i++) { - text *txtname = DatumGetTextPP(textDatums[i]); - char *extName = text_to_cstring(txtname); - Oid extOid = get_extension_oid(extName, false); + text *txtname = DatumGetTextPP(textDatums[i]); + char *extName = text_to_cstring(txtname); + Oid extOid = get_extension_oid(extName, false); requiredExtensions = lappend_oid(requiredExtensions, extOid); } @@ -188,7 +188,7 @@ create_empty_extension(PG_FUNCTION_ARGS) InsertExtensionTuple(text_to_cstring(extName), GetUserId(), - get_namespace_oid(text_to_cstring(schemaName), false), + get_namespace_oid(text_to_cstring(schemaName), false), relocatable, text_to_cstring(extVersion), extConfig, diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c index 7c2ca6e84d..0a3e5fd928 100644 --- a/contrib/pgbench/pgbench.c +++ b/contrib/pgbench/pgbench.c @@ -69,7 +69,7 @@ typedef struct win32_pthread *pthread_t; typedef int pthread_attr_t; -static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); +static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg); static int pthread_join(pthread_t th, void **thread_return); #elif defined(ENABLE_THREAD_SAFETY) /* Use platform-dependent pthread capability */ @@ -87,7 +87,7 @@ static int pthread_join(pthread_t th, void **thread_return); typedef struct fork_pthread *pthread_t; typedef int pthread_attr_t; -static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); +static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg); static int pthread_join(pthread_t th, void **thread_return); #endif @@ -817,7 +817,7 @@ top: INSTR_TIME_SET_CURRENT(now); INSTR_TIME_ACCUM_DIFF(thread->exec_elapsed[cnum], - now, st->stmt_begin); + now, st->stmt_begin); thread->exec_count[cnum]++; } @@ -850,8 +850,8 @@ top: if (commands[st->state]->type == SQL_COMMAND) { /* - * Read and discard the query result; note this is not included - * in the statement latency numbers. + * Read and discard the query result; note this is not included in + * the statement latency numbers. 
*/ res = PQgetResult(st->con); switch (PQresultStatus(res)) @@ -1716,16 +1716,16 @@ printResults(int ttype, int normal_xacts, int nclients, for (i = 0; i < num_files; i++) { - Command **commands; + Command **commands; if (num_files > 1) - printf("statement latencies in milliseconds, file %d:\n", i+1); + printf("statement latencies in milliseconds, file %d:\n", i + 1); else printf("statement latencies in milliseconds:\n"); for (commands = sql_files[i]; *commands != NULL; commands++) { - Command *command = *commands; + Command *command = *commands; int cnum = command->command_num; double total_time; instr_time total_exec_elapsed; @@ -1737,7 +1737,7 @@ printResults(int ttype, int normal_xacts, int nclients, total_exec_count = 0; for (t = 0; t < nthreads; t++) { - TState *thread = &threads[t]; + TState *thread = &threads[t]; INSTR_TIME_ADD(total_exec_elapsed, thread->exec_elapsed[cnum]); @@ -2014,9 +2014,9 @@ main(int argc, char **argv) * is_latencies only works with multiple threads in thread-based * implementations, not fork-based ones, because it supposes that the * parent can see changes made to the per-thread execution stats by child - * threads. It seems useful enough to accept despite this limitation, - * but perhaps we should FIXME someday (by passing the stats data back - * up through the parent-to-child pipes). + * threads. It seems useful enough to accept despite this limitation, but + * perhaps we should FIXME someday (by passing the stats data back up + * through the parent-to-child pipes). */ #ifndef ENABLE_THREAD_SAFETY if (is_latencies && nthreads > 1) @@ -2161,7 +2161,7 @@ main(int argc, char **argv) threads = (TState *) xmalloc(sizeof(TState) * nthreads); for (i = 0; i < nthreads; i++) { - TState *thread = &threads[i]; + TState *thread = &threads[i]; thread->tid = i; thread->state = &state[nclients / nthreads * i]; @@ -2170,7 +2170,7 @@ main(int argc, char **argv) if (is_latencies) { /* Reserve memory for the thread to store per-command latencies */ - int t; + int t; thread->exec_elapsed = (instr_time *) xmalloc(sizeof(instr_time) * num_commands); @@ -2200,7 +2200,7 @@ main(int argc, char **argv) /* start threads */ for (i = 0; i < nthreads; i++) { - TState *thread = &threads[i]; + TState *thread = &threads[i]; INSTR_TIME_SET_CURRENT(thread->start_time); @@ -2472,7 +2472,7 @@ typedef struct fork_pthread static int pthread_create(pthread_t *thread, - pthread_attr_t *attr, + pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { @@ -2586,7 +2586,7 @@ typedef struct win32_pthread void *(*routine) (void *); void *arg; void *result; -} win32_pthread; +} win32_pthread; static unsigned __stdcall win32_pthread_run(void *arg) @@ -2600,7 +2600,7 @@ win32_pthread_run(void *arg) static int pthread_create(pthread_t *thread, - pthread_attr_t *attr, + pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c index afada2a0aa..fd284e0c07 100644 --- a/contrib/seg/seg.c +++ b/contrib/seg/seg.c @@ -356,7 +356,7 @@ gseg_picksplit(GistEntryVector *entryvec, { seg = (SEG *) DatumGetPointer(entryvec->vector[i].key); /* center calculation is done this way to avoid possible overflow */ - sort_items[i - 1].center = seg->lower*0.5f + seg->upper*0.5f; + sort_items[i - 1].center = seg->lower * 0.5f + seg->upper * 0.5f; sort_items[i - 1].index = i; sort_items[i - 1].data = seg; } diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c index 358a2643ca..22666b708e 100644 --- a/contrib/sepgsql/dml.c +++ b/contrib/sepgsql/dml.c @@ 
-59,7 +59,7 @@ fixup_whole_row_references(Oid relOid, Bitmapset *columns) result = bms_copy(columns); result = bms_del_member(result, index); - for (attno=1; attno <= natts; attno++) + for (attno = 1; attno <= natts; attno++) { tuple = SearchSysCache2(ATTNUM, ObjectIdGetDatum(relOid), @@ -108,6 +108,7 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns) while ((index = bms_first_member(tmpset)) > 0) { attno = index + FirstLowInvalidHeapAttributeNumber; + /* * whole-row-reference shall be fixed-up later */ @@ -158,14 +159,13 @@ check_relation_privileges(Oid relOid, bool result = true; /* - * Hardwired Policies: - * SE-PostgreSQL enforces - * - clients cannot modify system catalogs using DMLs - * - clients cannot reference/modify toast relations using DMLs + * Hardwired Policies: SE-PostgreSQL enforces - clients cannot modify + * system catalogs using DMLs - clients cannot reference/modify toast + * relations using DMLs */ if (sepgsql_getenforce() > 0) { - Oid relnamespace = get_rel_namespace(relOid); + Oid relnamespace = get_rel_namespace(relOid); if (IsSystemNamespace(relnamespace) && (required & (SEPG_DB_TABLE__UPDATE | @@ -242,7 +242,7 @@ check_relation_privileges(Oid relOid, { AttrNumber attnum; uint32 column_perms = 0; - ObjectAddress object; + ObjectAddress object; if (bms_is_member(index, selected)) column_perms |= SEPG_DB_COLUMN__SELECT; @@ -290,12 +290,12 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort) { ListCell *lr; - foreach (lr, rangeTabls) + foreach(lr, rangeTabls) { - RangeTblEntry *rte = lfirst(lr); - uint32 required = 0; - List *tableIds; - ListCell *li; + RangeTblEntry *rte = lfirst(lr); + uint32 required = 0; + List *tableIds; + ListCell *li; /* * Only regular relations shall be checked @@ -328,25 +328,24 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort) /* * If this RangeTblEntry is also supposed to reference inherited - * tables, we need to check security label of the child tables. - * So, we expand rte->relid into list of OIDs of inheritance - * hierarchy, then checker routine will be invoked for each - * relations. + * tables, we need to check security label of the child tables. So, we + * expand rte->relid into list of OIDs of inheritance hierarchy, then + * checker routine will be invoked for each relations. */ if (!rte->inh) tableIds = list_make1_oid(rte->relid); else tableIds = find_all_inheritors(rte->relid, NoLock, NULL); - foreach (li, tableIds) + foreach(li, tableIds) { Oid tableOid = lfirst_oid(li); Bitmapset *selectedCols; Bitmapset *modifiedCols; /* - * child table has different attribute numbers, so we need - * to fix up them. + * child table has different attribute numbers, so we need to fix + * up them. 
*/ selectedCols = fixup_inherited_columns(rte->relid, tableOid, rte->selectedCols); diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c index 5dc8a3ecaa..7797ccb199 100644 --- a/contrib/sepgsql/hooks.c +++ b/contrib/sepgsql/hooks.c @@ -29,17 +29,17 @@ PG_MODULE_MAGIC; /* * Declarations */ -void _PG_init(void); +void _PG_init(void); /* * Saved hook entries (if stacked) */ -static object_access_hook_type next_object_access_hook = NULL; -static ClientAuthentication_hook_type next_client_auth_hook = NULL; -static ExecutorCheckPerms_hook_type next_exec_check_perms_hook = NULL; -static needs_fmgr_hook_type next_needs_fmgr_hook = NULL; -static fmgr_hook_type next_fmgr_hook = NULL; -static ProcessUtility_hook_type next_ProcessUtility_hook = NULL; +static object_access_hook_type next_object_access_hook = NULL; +static ClientAuthentication_hook_type next_client_auth_hook = NULL; +static ExecutorCheckPerms_hook_type next_exec_check_perms_hook = NULL; +static needs_fmgr_hook_type next_needs_fmgr_hook = NULL; +static fmgr_hook_type next_fmgr_hook = NULL; +static ProcessUtility_hook_type next_ProcessUtility_hook = NULL; /* * GUC: sepgsql.permissive = (on|off) @@ -73,14 +73,14 @@ sepgsql_get_debug_audit(void) static void sepgsql_client_auth(Port *port, int status) { - char *context; + char *context; if (next_client_auth_hook) - (*next_client_auth_hook)(port, status); + (*next_client_auth_hook) (port, status); /* - * In the case when authentication failed, the supplied socket - * shall be closed soon, so we don't need to do anything here. + * In the case when authentication failed, the supplied socket shall be + * closed soon, so we don't need to do anything here. */ if (status != STATUS_OK) return; @@ -96,8 +96,8 @@ sepgsql_client_auth(Port *port, int status) sepgsql_set_client_label(context); /* - * Switch the current performing mode from INTERNAL to either - * DEFAULT or PERMISSIVE. + * Switch the current performing mode from INTERNAL to either DEFAULT or + * PERMISSIVE. */ if (sepgsql_permissive) sepgsql_set_mode(SEPGSQL_MODE_PERMISSIVE); @@ -113,12 +113,12 @@ sepgsql_client_auth(Port *port, int status) */ static void sepgsql_object_access(ObjectAccessType access, - Oid classId, - Oid objectId, - int subId) + Oid classId, + Oid objectId, + int subId) { if (next_object_access_hook) - (*next_object_access_hook)(access, classId, objectId, subId); + (*next_object_access_hook) (access, classId, objectId, subId); switch (access) { @@ -147,7 +147,7 @@ sepgsql_object_access(ObjectAccessType access, break; default: - elog(ERROR, "unexpected object access type: %d", (int)access); + elog(ERROR, "unexpected object access type: %d", (int) access); break; } } @@ -161,11 +161,11 @@ static bool sepgsql_exec_check_perms(List *rangeTabls, bool abort) { /* - * If security provider is stacking and one of them replied 'false' - * at least, we don't need to check any more. + * If security provider is stacking and one of them replied 'false' at + * least, we don't need to check any more. 
*/ if (next_exec_check_perms_hook && - !(*next_exec_check_perms_hook)(rangeTabls, abort)) + !(*next_exec_check_perms_hook) (rangeTabls, abort)) return false; if (!sepgsql_dml_privileges(rangeTabls, abort)) @@ -184,20 +184,19 @@ sepgsql_exec_check_perms(List *rangeTabls, bool abort) static bool sepgsql_needs_fmgr_hook(Oid functionId) { - char *old_label; - char *new_label; - char *function_label; + char *old_label; + char *new_label; + char *function_label; if (next_needs_fmgr_hook && - (*next_needs_fmgr_hook)(functionId)) + (*next_needs_fmgr_hook) (functionId)) return true; /* - * SELinux needs the function to be called via security_definer - * wrapper, if this invocation will take a domain-transition. - * We call these functions as trusted-procedure, if the security - * policy has a rule that switches security label of the client - * on execution. + * SELinux needs the function to be called via security_definer wrapper, + * if this invocation will take a domain-transition. We call these + * functions as trusted-procedure, if the security policy has a rule that + * switches security label of the client on execution. */ old_label = sepgsql_get_client_label(); new_label = sepgsql_proc_get_domtrans(functionId); @@ -210,9 +209,9 @@ sepgsql_needs_fmgr_hook(Oid functionId) /* * Even if not a trusted-procedure, this function should not be inlined - * unless the client has db_procedure:{execute} permission. - * Please note that it shall be actually failed later because of same - * reason with ACL_EXECUTE. + * unless the client has db_procedure:{execute} permission. Please note + * that it shall be actually failed later because of same reason with + * ACL_EXECUTE. */ function_label = sepgsql_get_label(ProcedureRelationId, functionId, 0); if (sepgsql_check_perms(sepgsql_get_client_label(), @@ -238,20 +237,21 @@ static void sepgsql_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private) { - struct { - char *old_label; - char *new_label; - Datum next_private; - } *stack; + struct + { + char *old_label; + char *new_label; + Datum next_private; + } *stack; switch (event) { case FHET_START: - stack = (void *)DatumGetPointer(*private); + stack = (void *) DatumGetPointer(*private); if (!stack) { - MemoryContext oldcxt; - const char *cur_label = sepgsql_get_client_label(); + MemoryContext oldcxt; + const char *cur_label = sepgsql_get_client_label(); oldcxt = MemoryContextSwitchTo(flinfo->fn_mcxt); stack = palloc(sizeof(*stack)); @@ -265,8 +265,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event, { /* * process:transition permission between old and new - * label, when user tries to switch security label of - * the client on execution of trusted procedure. + * label, when user tries to switch security label of the + * client on execution of trusted procedure. 
*/ sepgsql_check_perms(cur_label, stack->new_label, SEPG_CLASS_PROCESS, @@ -280,22 +280,22 @@ sepgsql_fmgr_hook(FmgrHookEventType event, stack->old_label = sepgsql_set_client_label(stack->new_label); if (next_fmgr_hook) - (*next_fmgr_hook)(event, flinfo, &stack->next_private); + (*next_fmgr_hook) (event, flinfo, &stack->next_private); break; case FHET_END: case FHET_ABORT: - stack = (void *)DatumGetPointer(*private); + stack = (void *) DatumGetPointer(*private); if (next_fmgr_hook) - (*next_fmgr_hook)(event, flinfo, &stack->next_private); + (*next_fmgr_hook) (event, flinfo, &stack->next_private); sepgsql_set_client_label(stack->old_label); stack->old_label = NULL; break; default: - elog(ERROR, "unexpected event type: %d", (int)event); + elog(ERROR, "unexpected event type: %d", (int) event); break; } } @@ -315,8 +315,8 @@ sepgsql_utility_command(Node *parsetree, char *completionTag) { if (next_ProcessUtility_hook) - (*next_ProcessUtility_hook)(parsetree, queryString, params, - isTopLevel, dest, completionTag); + (*next_ProcessUtility_hook) (parsetree, queryString, params, + isTopLevel, dest, completionTag); /* * Check command tag to avoid nefarious operations @@ -324,6 +324,7 @@ sepgsql_utility_command(Node *parsetree, switch (nodeTag(parsetree)) { case T_LoadStmt: + /* * We reject LOAD command across the board on enforcing mode, * because a binary module can arbitrarily override hooks. @@ -336,11 +337,12 @@ sepgsql_utility_command(Node *parsetree, } break; default: + /* - * Right now we don't check any other utility commands, - * because it needs more detailed information to make - * access control decision here, but we don't want to - * have two parse and analyze routines individually. + * Right now we don't check any other utility commands, because it + * needs more detailed information to make access control decision + * here, but we don't want to have two parse and analyze routines + * individually. */ break; } @@ -358,7 +360,7 @@ sepgsql_utility_command(Node *parsetree, void _PG_init(void) { - char *context; + char *context; /* * We allow to load the SE-PostgreSQL module on single-user-mode or @@ -367,12 +369,12 @@ _PG_init(void) if (IsUnderPostmaster) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("sepgsql must be loaded via shared_preload_libraries"))); + errmsg("sepgsql must be loaded via shared_preload_libraries"))); /* - * Check availability of SELinux on the platform. - * If disabled, we cannot activate any SE-PostgreSQL features, - * and we have to skip rest of initialization. + * Check availability of SELinux on the platform. If disabled, we cannot + * activate any SE-PostgreSQL features, and we have to skip rest of + * initialization. */ if (is_selinux_enabled() < 1) { @@ -383,8 +385,8 @@ _PG_init(void) /* * sepgsql.permissive = (on|off) * - * This variable controls performing mode of SE-PostgreSQL - * on user's session. + * This variable controls performing mode of SE-PostgreSQL on user's + * session. */ DefineCustomBoolVariable("sepgsql.permissive", "Turn on/off permissive mode in SE-PostgreSQL", @@ -400,10 +402,9 @@ _PG_init(void) /* * sepgsql.debug_audit = (on|off) * - * This variable allows users to turn on/off audit logs on access - * control decisions, independent from auditallow/auditdeny setting - * in the security policy. - * We intend to use this option for debugging purpose. + * This variable allows users to turn on/off audit logs on access control + * decisions, independent from auditallow/auditdeny setting in the + * security policy. 
We intend to use this option for debugging purpose. */ DefineCustomBoolVariable("sepgsql.debug_audit", "Turn on/off debug audit messages", @@ -419,13 +420,12 @@ _PG_init(void) /* * Set up dummy client label. * - * XXX - note that PostgreSQL launches background worker process - * like autovacuum without authentication steps. So, we initialize - * sepgsql_mode with SEPGSQL_MODE_INTERNAL, and client_label with - * the security context of server process. - * Later, it also launches background of user session. In this case, - * the process is always hooked on post-authentication, and we can - * initialize the sepgsql_mode and client_label correctly. + * XXX - note that PostgreSQL launches background worker process like + * autovacuum without authentication steps. So, we initialize sepgsql_mode + * with SEPGSQL_MODE_INTERNAL, and client_label with the security context + * of server process. Later, it also launches background of user session. + * In this case, the process is always hooked on post-authentication, and + * we can initialize the sepgsql_mode and client_label correctly. */ if (getcon_raw(&context) < 0) ereport(ERROR, diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c index 828512a961..669ee35ac3 100644 --- a/contrib/sepgsql/label.c +++ b/contrib/sepgsql/label.c @@ -38,7 +38,7 @@ * * security label of the client process */ -static char *client_label = NULL; +static char *client_label = NULL; char * sepgsql_get_client_label(void) @@ -49,7 +49,7 @@ sepgsql_get_client_label(void) char * sepgsql_set_client_label(char *new_label) { - char *old_label = client_label; + char *old_label = client_label; client_label = new_label; @@ -66,22 +66,22 @@ sepgsql_set_client_label(char *new_label) char * sepgsql_get_label(Oid classId, Oid objectId, int32 subId) { - ObjectAddress object; - char *label; + ObjectAddress object; + char *label; - object.classId = classId; - object.objectId = objectId; - object.objectSubId = subId; + object.classId = classId; + object.objectId = objectId; + object.objectSubId = subId; label = GetSecurityLabel(&object, SEPGSQL_LABEL_TAG); - if (!label || security_check_context_raw((security_context_t)label)) + if (!label || security_check_context_raw((security_context_t) label)) { - security_context_t unlabeled; + security_context_t unlabeled; if (security_get_initial_context_raw("unlabeled", &unlabeled) < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("SELinux: failed to get initial security label: %m"))); + errmsg("SELinux: failed to get initial security label: %m"))); PG_TRY(); { label = pstrdup(unlabeled); @@ -107,21 +107,22 @@ void sepgsql_object_relabel(const ObjectAddress *object, const char *seclabel) { /* - * validate format of the supplied security label, - * if it is security context of selinux. + * validate format of the supplied security label, if it is security + * context of selinux. 
*/ if (seclabel && security_check_context_raw((security_context_t) seclabel) < 0) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), - errmsg("SELinux: invalid security label: \"%s\"", seclabel))); + errmsg("SELinux: invalid security label: \"%s\"", seclabel))); + /* * Do actual permission checks for each object classes */ switch (object->classId) { case NamespaceRelationId: - sepgsql_schema_relabel(object->objectId, seclabel); + sepgsql_schema_relabel(object->objectId, seclabel); break; case RelationRelationId: if (object->objectSubId == 0) @@ -151,7 +152,7 @@ PG_FUNCTION_INFO_V1(sepgsql_getcon); Datum sepgsql_getcon(PG_FUNCTION_ARGS) { - char *client_label; + char *client_label; if (!sepgsql_is_enabled()) PG_RETURN_NULL(); @@ -171,9 +172,9 @@ PG_FUNCTION_INFO_V1(sepgsql_mcstrans_in); Datum sepgsql_mcstrans_in(PG_FUNCTION_ARGS) { - text *label = PG_GETARG_TEXT_P(0); - char *raw_label; - char *result; + text *label = PG_GETARG_TEXT_P(0); + char *raw_label; + char *result; if (!sepgsql_is_enabled()) ereport(ERROR, @@ -211,9 +212,9 @@ PG_FUNCTION_INFO_V1(sepgsql_mcstrans_out); Datum sepgsql_mcstrans_out(PG_FUNCTION_ARGS) { - text *label = PG_GETARG_TEXT_P(0); - char *qual_label; - char *result; + text *label = PG_GETARG_TEXT_P(0); + char *qual_label; + char *result; if (!sepgsql_is_enabled()) ereport(ERROR, @@ -250,8 +251,8 @@ static char * quote_object_name(const char *src1, const char *src2, const char *src3, const char *src4) { - StringInfoData result; - const char *temp; + StringInfoData result; + const char *temp; initStringInfo(&result); @@ -260,28 +261,28 @@ quote_object_name(const char *src1, const char *src2, temp = quote_identifier(src1); appendStringInfo(&result, "%s", temp); if (src1 != temp) - pfree((void *)temp); + pfree((void *) temp); } if (src2) { temp = quote_identifier(src2); appendStringInfo(&result, ".%s", temp); if (src2 != temp) - pfree((void *)temp); + pfree((void *) temp); } if (src3) { temp = quote_identifier(src3); appendStringInfo(&result, ".%s", temp); if (src3 != temp) - pfree((void *)temp); + pfree((void *) temp); } if (src4) { temp = quote_identifier(src4); appendStringInfo(&result, ".%s", temp); if (src4 != temp) - pfree((void *)temp); + pfree((void *) temp); } return result.data; } @@ -294,19 +295,19 @@ quote_object_name(const char *src1, const char *src2, * catalog OID. */ static void -exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId) +exec_object_restorecon(struct selabel_handle * sehnd, Oid catalogId) { - Relation rel; - SysScanDesc sscan; - HeapTuple tuple; - char *database_name = get_database_name(MyDatabaseId); - char *namespace_name; - Oid namespace_id; - char *relation_name; + Relation rel; + SysScanDesc sscan; + HeapTuple tuple; + char *database_name = get_database_name(MyDatabaseId); + char *namespace_name; + Oid namespace_id; + char *relation_name; /* - * Open the target catalog. We don't want to allow writable - * accesses by other session during initial labeling. + * Open the target catalog. We don't want to allow writable accesses by + * other session during initial labeling. 
*/ rel = heap_open(catalogId, AccessShareLock); @@ -314,18 +315,18 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId) SnapshotNow, 0, NULL); while (HeapTupleIsValid(tuple = systable_getnext(sscan))) { - Form_pg_namespace nspForm; - Form_pg_class relForm; - Form_pg_attribute attForm; - Form_pg_proc proForm; - char *objname; - int objtype = 1234; - ObjectAddress object; - security_context_t context; + Form_pg_namespace nspForm; + Form_pg_class relForm; + Form_pg_attribute attForm; + Form_pg_proc proForm; + char *objname; + int objtype = 1234; + ObjectAddress object; + security_context_t context; /* - * The way to determine object name depends on object classes. - * So, any branches set up `objtype', `objname' and `object' here. + * The way to determine object name depends on object classes. So, any + * branches set up `objtype', `objname' and `object' here. */ switch (catalogId) { @@ -409,7 +410,7 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId) default: elog(ERROR, "unexpected catalog id: %u", catalogId); - objname = NULL; /* for compiler quiet */ + objname = NULL; /* for compiler quiet */ break; } @@ -464,8 +465,8 @@ PG_FUNCTION_INFO_V1(sepgsql_restorecon); Datum sepgsql_restorecon(PG_FUNCTION_ARGS) { - struct selabel_handle *sehnd; - struct selinux_opt seopts; + struct selabel_handle *sehnd; + struct selinux_opt seopts; /* * SELinux has to be enabled on the running platform. @@ -474,19 +475,19 @@ sepgsql_restorecon(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("sepgsql is not currently enabled"))); + /* - * Check DAC permission. Only superuser can set up initial - * security labels, like root-user in filesystems + * Check DAC permission. Only superuser can set up initial security + * labels, like root-user in filesystems */ if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("SELinux: must be superuser to restore initial contexts"))); + errmsg("SELinux: must be superuser to restore initial contexts"))); /* - * Open selabel_lookup(3) stuff. It provides a set of mapping - * between an initial security label and object class/name due - * to the system setting. + * Open selabel_lookup(3) stuff. It provides a set of mapping between an + * initial security label and object class/name due to the system setting. */ if (PG_ARGISNULL(0)) { @@ -502,12 +503,12 @@ sepgsql_restorecon(PG_FUNCTION_ARGS) if (!sehnd) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("SELinux: failed to initialize labeling handle: %m"))); + errmsg("SELinux: failed to initialize labeling handle: %m"))); PG_TRY(); { /* - * Right now, we have no support labeling on the shared - * database objects, such as database, role, or tablespace. + * Right now, we have no support labeling on the shared database + * objects, such as database, role, or tablespace. 
*/ exec_object_restorecon(sehnd, NamespaceRelationId); exec_object_restorecon(sehnd, RelationRelationId); @@ -519,7 +520,7 @@ sepgsql_restorecon(PG_FUNCTION_ARGS) selabel_close(sehnd); PG_RE_THROW(); } - PG_END_TRY(); + PG_END_TRY(); selabel_close(sehnd); diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c index 5a0c4947f7..3b8bf23ba3 100644 --- a/contrib/sepgsql/proc.c +++ b/contrib/sepgsql/proc.c @@ -33,15 +33,15 @@ void sepgsql_proc_post_create(Oid functionId) { - Relation rel; - ScanKeyData skey; - SysScanDesc sscan; - HeapTuple tuple; - Oid namespaceId; - ObjectAddress object; - char *scontext; - char *tcontext; - char *ncontext; + Relation rel; + ScanKeyData skey; + SysScanDesc sscan; + HeapTuple tuple; + Oid namespaceId; + ObjectAddress object; + char *scontext; + char *tcontext; + char *ncontext; /* * Fetch namespace of the new procedure. Because pg_proc entry is not @@ -67,8 +67,8 @@ sepgsql_proc_post_create(Oid functionId) heap_close(rel, AccessShareLock); /* - * Compute a default security label when we create a new procedure - * object under the specified namespace. + * Compute a default security label when we create a new procedure object + * under the specified namespace. */ scontext = sepgsql_get_client_label(); tcontext = sepgsql_get_label(NamespaceRelationId, namespaceId, 0); @@ -144,9 +144,9 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel) char * sepgsql_proc_get_domtrans(Oid functionId) { - char *scontext = sepgsql_get_client_label(); - char *tcontext; - char *ncontext; + char *scontext = sepgsql_get_client_label(); + char *tcontext; + char *ncontext; tcontext = sepgsql_get_label(ProcedureRelationId, functionId, 0); diff --git a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c index ed5e3adc0e..963cfdf9f1 100644 --- a/contrib/sepgsql/relation.c +++ b/contrib/sepgsql/relation.c @@ -36,26 +36,27 @@ void sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum) { - char *scontext = sepgsql_get_client_label(); - char *tcontext; - char *ncontext; - ObjectAddress object; + char *scontext = sepgsql_get_client_label(); + char *tcontext; + char *ncontext; + ObjectAddress object; /* - * Only attributes within regular relation have individual - * security labels. + * Only attributes within regular relation have individual security + * labels. */ if (get_rel_relkind(relOid) != RELKIND_RELATION) return; /* - * Compute a default security label when we create a new procedure - * object under the specified namespace. + * Compute a default security label when we create a new procedure object + * under the specified namespace. 
*/ scontext = sepgsql_get_client_label(); tcontext = sepgsql_get_label(RelationRelationId, relOid, 0); ncontext = sepgsql_compute_create(scontext, tcontext, SEPG_CLASS_DB_COLUMN); + /* * Assign the default security label on a new procedure */ @@ -81,7 +82,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum, char *scontext = sepgsql_get_client_label(); char *tcontext; char *audit_name; - ObjectAddress object; + ObjectAddress object; if (get_rel_relkind(relOid) != RELKIND_RELATION) ereport(ERROR, @@ -127,21 +128,21 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum, void sepgsql_relation_post_create(Oid relOid) { - Relation rel; - ScanKeyData skey; - SysScanDesc sscan; - HeapTuple tuple; - Form_pg_class classForm; - ObjectAddress object; - uint16 tclass; - char *scontext; /* subject */ - char *tcontext; /* schema */ - char *rcontext; /* relation */ - char *ccontext; /* column */ + Relation rel; + ScanKeyData skey; + SysScanDesc sscan; + HeapTuple tuple; + Form_pg_class classForm; + ObjectAddress object; + uint16 tclass; + char *scontext; /* subject */ + char *tcontext; /* schema */ + char *rcontext; /* relation */ + char *ccontext; /* column */ /* - * Fetch catalog record of the new relation. Because pg_class entry is - * not visible right now, we need to scan the catalog using SnapshotSelf. + * Fetch catalog record of the new relation. Because pg_class entry is not + * visible right now, we need to scan the catalog using SnapshotSelf. */ rel = heap_open(RelationRelationId, AccessShareLock); @@ -166,11 +167,11 @@ sepgsql_relation_post_create(Oid relOid) else if (classForm->relkind == RELKIND_VIEW) tclass = SEPG_CLASS_DB_VIEW; else - goto out; /* No need to assign individual labels */ + goto out; /* No need to assign individual labels */ /* - * Compute a default security label when we create a new relation - * object under the specified namespace. + * Compute a default security label when we create a new relation object + * under the specified namespace. */ scontext = sepgsql_get_client_label(); tcontext = sepgsql_get_label(NamespaceRelationId, @@ -186,8 +187,8 @@ sepgsql_relation_post_create(Oid relOid) SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, rcontext); /* - * We also assigns a default security label on columns of the new - * regular tables. + * We also assigns a default security label on columns of the new regular + * tables. */ if (classForm->relkind == RELKIND_RELATION) { diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c index 8538d18ac9..0de89971fb 100644 --- a/contrib/sepgsql/schema.c +++ b/contrib/sepgsql/schema.c @@ -26,21 +26,21 @@ void sepgsql_schema_post_create(Oid namespaceId) { - char *scontext = sepgsql_get_client_label(); - char *tcontext; - char *ncontext; - ObjectAddress object; + char *scontext = sepgsql_get_client_label(); + char *tcontext; + char *ncontext; + ObjectAddress object; /* - * FIXME: Right now, we assume pg_database object has a fixed - * security label, because pg_seclabel does not support to store - * label of shared database objects. + * FIXME: Right now, we assume pg_database object has a fixed security + * label, because pg_seclabel does not support to store label of shared + * database objects. */ tcontext = "system_u:object_r:sepgsql_db_t:s0"; /* - * Compute a default security label when we create a new schema - * object under the working database. + * Compute a default security label when we create a new schema object + * under the working database. 
*/ ncontext = sepgsql_compute_create(scontext, tcontext, SEPG_CLASS_DB_SCHEMA); diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c index 03ba25cef0..1f5a97e878 100644 --- a/contrib/sepgsql/selinux.c +++ b/contrib/sepgsql/selinux.c @@ -29,255 +29,563 @@ */ static struct { - const char *class_name; - uint16 class_code; + const char *class_name; + uint16 class_code; struct { - const char *av_name; - uint32 av_code; - } av[32]; -} selinux_catalog[] = { + const char *av_name; + uint32 av_code; + } av[32]; +} selinux_catalog[] = + +{ { - "process", SEPG_CLASS_PROCESS, + "process", SEPG_CLASS_PROCESS, { - { "transition", SEPG_PROCESS__TRANSITION }, - { NULL, 0UL } + { + "transition", SEPG_PROCESS__TRANSITION + }, + { + NULL, 0UL + } } }, { - "file", SEPG_CLASS_FILE, + "file", SEPG_CLASS_FILE, { - { "read", SEPG_FILE__READ }, - { "write", SEPG_FILE__WRITE }, - { "create", SEPG_FILE__CREATE }, - { "getattr", SEPG_FILE__GETATTR }, - { "unlink", SEPG_FILE__UNLINK }, - { "rename", SEPG_FILE__RENAME }, - { "append", SEPG_FILE__APPEND }, - { NULL, 0UL } + { + "read", SEPG_FILE__READ + }, + { + "write", SEPG_FILE__WRITE + }, + { + "create", SEPG_FILE__CREATE + }, + { + "getattr", SEPG_FILE__GETATTR + }, + { + "unlink", SEPG_FILE__UNLINK + }, + { + "rename", SEPG_FILE__RENAME + }, + { + "append", SEPG_FILE__APPEND + }, + { + NULL, 0UL + } } }, { - "dir", SEPG_CLASS_DIR, + "dir", SEPG_CLASS_DIR, { - { "read", SEPG_DIR__READ }, - { "write", SEPG_DIR__WRITE }, - { "create", SEPG_DIR__CREATE }, - { "getattr", SEPG_DIR__GETATTR }, - { "unlink", SEPG_DIR__UNLINK }, - { "rename", SEPG_DIR__RENAME }, - { "search", SEPG_DIR__SEARCH }, - { "add_name", SEPG_DIR__ADD_NAME }, - { "remove_name", SEPG_DIR__REMOVE_NAME }, - { "rmdir", SEPG_DIR__RMDIR }, - { "reparent", SEPG_DIR__REPARENT }, - { NULL, 0UL } + { + "read", SEPG_DIR__READ + }, + { + "write", SEPG_DIR__WRITE + }, + { + "create", SEPG_DIR__CREATE + }, + { + "getattr", SEPG_DIR__GETATTR + }, + { + "unlink", SEPG_DIR__UNLINK + }, + { + "rename", SEPG_DIR__RENAME + }, + { + "search", SEPG_DIR__SEARCH + }, + { + "add_name", SEPG_DIR__ADD_NAME + }, + { + "remove_name", SEPG_DIR__REMOVE_NAME + }, + { + "rmdir", SEPG_DIR__RMDIR + }, + { + "reparent", SEPG_DIR__REPARENT + }, + { + NULL, 0UL + } } }, { - "lnk_file", SEPG_CLASS_LNK_FILE, + "lnk_file", SEPG_CLASS_LNK_FILE, { - { "read", SEPG_LNK_FILE__READ }, - { "write", SEPG_LNK_FILE__WRITE }, - { "create", SEPG_LNK_FILE__CREATE }, - { "getattr", SEPG_LNK_FILE__GETATTR }, - { "unlink", SEPG_LNK_FILE__UNLINK }, - { "rename", SEPG_LNK_FILE__RENAME }, - { NULL, 0UL } + { + "read", SEPG_LNK_FILE__READ + }, + { + "write", SEPG_LNK_FILE__WRITE + }, + { + "create", SEPG_LNK_FILE__CREATE + }, + { + "getattr", SEPG_LNK_FILE__GETATTR + }, + { + "unlink", SEPG_LNK_FILE__UNLINK + }, + { + "rename", SEPG_LNK_FILE__RENAME + }, + { + NULL, 0UL + } } }, { - "chr_file", SEPG_CLASS_CHR_FILE, + "chr_file", SEPG_CLASS_CHR_FILE, { - { "read", SEPG_CHR_FILE__READ }, - { "write", SEPG_CHR_FILE__WRITE }, - { "create", SEPG_CHR_FILE__CREATE }, - { "getattr", SEPG_CHR_FILE__GETATTR }, - { "unlink", SEPG_CHR_FILE__UNLINK }, - { "rename", SEPG_CHR_FILE__RENAME }, - { NULL, 0UL } + { + "read", SEPG_CHR_FILE__READ + }, + { + "write", SEPG_CHR_FILE__WRITE + }, + { + "create", SEPG_CHR_FILE__CREATE + }, + { + "getattr", SEPG_CHR_FILE__GETATTR + }, + { + "unlink", SEPG_CHR_FILE__UNLINK + }, + { + "rename", SEPG_CHR_FILE__RENAME + }, + { + NULL, 0UL + } } }, { - "blk_file", SEPG_CLASS_BLK_FILE, + "blk_file", SEPG_CLASS_BLK_FILE, { - { 
"read", SEPG_BLK_FILE__READ }, - { "write", SEPG_BLK_FILE__WRITE }, - { "create", SEPG_BLK_FILE__CREATE }, - { "getattr", SEPG_BLK_FILE__GETATTR }, - { "unlink", SEPG_BLK_FILE__UNLINK }, - { "rename", SEPG_BLK_FILE__RENAME }, - { NULL, 0UL } + { + "read", SEPG_BLK_FILE__READ + }, + { + "write", SEPG_BLK_FILE__WRITE + }, + { + "create", SEPG_BLK_FILE__CREATE + }, + { + "getattr", SEPG_BLK_FILE__GETATTR + }, + { + "unlink", SEPG_BLK_FILE__UNLINK + }, + { + "rename", SEPG_BLK_FILE__RENAME + }, + { + NULL, 0UL + } } }, { - "sock_file", SEPG_CLASS_SOCK_FILE, + "sock_file", SEPG_CLASS_SOCK_FILE, { - { "read", SEPG_SOCK_FILE__READ }, - { "write", SEPG_SOCK_FILE__WRITE }, - { "create", SEPG_SOCK_FILE__CREATE }, - { "getattr", SEPG_SOCK_FILE__GETATTR }, - { "unlink", SEPG_SOCK_FILE__UNLINK }, - { "rename", SEPG_SOCK_FILE__RENAME }, - { NULL, 0UL } + { + "read", SEPG_SOCK_FILE__READ + }, + { + "write", SEPG_SOCK_FILE__WRITE + }, + { + "create", SEPG_SOCK_FILE__CREATE + }, + { + "getattr", SEPG_SOCK_FILE__GETATTR + }, + { + "unlink", SEPG_SOCK_FILE__UNLINK + }, + { + "rename", SEPG_SOCK_FILE__RENAME + }, + { + NULL, 0UL + } } }, { - "fifo_file", SEPG_CLASS_FIFO_FILE, + "fifo_file", SEPG_CLASS_FIFO_FILE, { - { "read", SEPG_FIFO_FILE__READ }, - { "write", SEPG_FIFO_FILE__WRITE }, - { "create", SEPG_FIFO_FILE__CREATE }, - { "getattr", SEPG_FIFO_FILE__GETATTR }, - { "unlink", SEPG_FIFO_FILE__UNLINK }, - { "rename", SEPG_FIFO_FILE__RENAME }, - { NULL, 0UL } + { + "read", SEPG_FIFO_FILE__READ + }, + { + "write", SEPG_FIFO_FILE__WRITE + }, + { + "create", SEPG_FIFO_FILE__CREATE + }, + { + "getattr", SEPG_FIFO_FILE__GETATTR + }, + { + "unlink", SEPG_FIFO_FILE__UNLINK + }, + { + "rename", SEPG_FIFO_FILE__RENAME + }, + { + NULL, 0UL + } } }, { - "db_database", SEPG_CLASS_DB_DATABASE, + "db_database", SEPG_CLASS_DB_DATABASE, { - { "create", SEPG_DB_DATABASE__CREATE }, - { "drop", SEPG_DB_DATABASE__DROP }, - { "getattr", SEPG_DB_DATABASE__GETATTR }, - { "setattr", SEPG_DB_DATABASE__SETATTR }, - { "relabelfrom", SEPG_DB_DATABASE__RELABELFROM }, - { "relabelto", SEPG_DB_DATABASE__RELABELTO }, - { "access", SEPG_DB_DATABASE__ACCESS }, - { "load_module", SEPG_DB_DATABASE__LOAD_MODULE }, - { NULL, 0UL }, + { + "create", SEPG_DB_DATABASE__CREATE + }, + { + "drop", SEPG_DB_DATABASE__DROP + }, + { + "getattr", SEPG_DB_DATABASE__GETATTR + }, + { + "setattr", SEPG_DB_DATABASE__SETATTR + }, + { + "relabelfrom", SEPG_DB_DATABASE__RELABELFROM + }, + { + "relabelto", SEPG_DB_DATABASE__RELABELTO + }, + { + "access", SEPG_DB_DATABASE__ACCESS + }, + { + "load_module", SEPG_DB_DATABASE__LOAD_MODULE + }, + { + NULL, 0UL + }, } }, { - "db_schema", SEPG_CLASS_DB_SCHEMA, + "db_schema", SEPG_CLASS_DB_SCHEMA, { - { "create", SEPG_DB_SCHEMA__CREATE }, - { "drop", SEPG_DB_SCHEMA__DROP }, - { "getattr", SEPG_DB_SCHEMA__GETATTR }, - { "setattr", SEPG_DB_SCHEMA__SETATTR }, - { "relabelfrom", SEPG_DB_SCHEMA__RELABELFROM }, - { "relabelto", SEPG_DB_SCHEMA__RELABELTO }, - { "search", SEPG_DB_SCHEMA__SEARCH }, - { "add_name", SEPG_DB_SCHEMA__ADD_NAME }, - { "remove_name", SEPG_DB_SCHEMA__REMOVE_NAME }, - { NULL, 0UL }, + { + "create", SEPG_DB_SCHEMA__CREATE + }, + { + "drop", SEPG_DB_SCHEMA__DROP + }, + { + "getattr", SEPG_DB_SCHEMA__GETATTR + }, + { + "setattr", SEPG_DB_SCHEMA__SETATTR + }, + { + "relabelfrom", SEPG_DB_SCHEMA__RELABELFROM + }, + { + "relabelto", SEPG_DB_SCHEMA__RELABELTO + }, + { + "search", SEPG_DB_SCHEMA__SEARCH + }, + { + "add_name", SEPG_DB_SCHEMA__ADD_NAME + }, + { + "remove_name", SEPG_DB_SCHEMA__REMOVE_NAME + }, + { + 
NULL, 0UL + }, } }, { - "db_table", SEPG_CLASS_DB_TABLE, + "db_table", SEPG_CLASS_DB_TABLE, { - { "create", SEPG_DB_TABLE__CREATE }, - { "drop", SEPG_DB_TABLE__DROP }, - { "getattr", SEPG_DB_TABLE__GETATTR }, - { "setattr", SEPG_DB_TABLE__SETATTR }, - { "relabelfrom", SEPG_DB_TABLE__RELABELFROM }, - { "relabelto", SEPG_DB_TABLE__RELABELTO }, - { "select", SEPG_DB_TABLE__SELECT }, - { "update", SEPG_DB_TABLE__UPDATE }, - { "insert", SEPG_DB_TABLE__INSERT }, - { "delete", SEPG_DB_TABLE__DELETE }, - { "lock", SEPG_DB_TABLE__LOCK }, - { NULL, 0UL }, + { + "create", SEPG_DB_TABLE__CREATE + }, + { + "drop", SEPG_DB_TABLE__DROP + }, + { + "getattr", SEPG_DB_TABLE__GETATTR + }, + { + "setattr", SEPG_DB_TABLE__SETATTR + }, + { + "relabelfrom", SEPG_DB_TABLE__RELABELFROM + }, + { + "relabelto", SEPG_DB_TABLE__RELABELTO + }, + { + "select", SEPG_DB_TABLE__SELECT + }, + { + "update", SEPG_DB_TABLE__UPDATE + }, + { + "insert", SEPG_DB_TABLE__INSERT + }, + { + "delete", SEPG_DB_TABLE__DELETE + }, + { + "lock", SEPG_DB_TABLE__LOCK + }, + { + NULL, 0UL + }, } }, { - "db_sequence", SEPG_CLASS_DB_SEQUENCE, + "db_sequence", SEPG_CLASS_DB_SEQUENCE, { - { "create", SEPG_DB_SEQUENCE__CREATE }, - { "drop", SEPG_DB_SEQUENCE__DROP }, - { "getattr", SEPG_DB_SEQUENCE__GETATTR }, - { "setattr", SEPG_DB_SEQUENCE__SETATTR }, - { "relabelfrom", SEPG_DB_SEQUENCE__RELABELFROM }, - { "relabelto", SEPG_DB_SEQUENCE__RELABELTO }, - { "get_value", SEPG_DB_SEQUENCE__GET_VALUE }, - { "next_value", SEPG_DB_SEQUENCE__NEXT_VALUE }, - { "set_value", SEPG_DB_SEQUENCE__SET_VALUE }, - { NULL, 0UL }, + { + "create", SEPG_DB_SEQUENCE__CREATE + }, + { + "drop", SEPG_DB_SEQUENCE__DROP + }, + { + "getattr", SEPG_DB_SEQUENCE__GETATTR + }, + { + "setattr", SEPG_DB_SEQUENCE__SETATTR + }, + { + "relabelfrom", SEPG_DB_SEQUENCE__RELABELFROM + }, + { + "relabelto", SEPG_DB_SEQUENCE__RELABELTO + }, + { + "get_value", SEPG_DB_SEQUENCE__GET_VALUE + }, + { + "next_value", SEPG_DB_SEQUENCE__NEXT_VALUE + }, + { + "set_value", SEPG_DB_SEQUENCE__SET_VALUE + }, + { + NULL, 0UL + }, } }, { - "db_procedure", SEPG_CLASS_DB_PROCEDURE, + "db_procedure", SEPG_CLASS_DB_PROCEDURE, { - { "create", SEPG_DB_PROCEDURE__CREATE }, - { "drop", SEPG_DB_PROCEDURE__DROP }, - { "getattr", SEPG_DB_PROCEDURE__GETATTR }, - { "setattr", SEPG_DB_PROCEDURE__SETATTR }, - { "relabelfrom", SEPG_DB_PROCEDURE__RELABELFROM }, - { "relabelto", SEPG_DB_PROCEDURE__RELABELTO }, - { "execute", SEPG_DB_PROCEDURE__EXECUTE }, - { "entrypoint", SEPG_DB_PROCEDURE__ENTRYPOINT }, - { "install", SEPG_DB_PROCEDURE__INSTALL }, - { NULL, 0UL }, + { + "create", SEPG_DB_PROCEDURE__CREATE + }, + { + "drop", SEPG_DB_PROCEDURE__DROP + }, + { + "getattr", SEPG_DB_PROCEDURE__GETATTR + }, + { + "setattr", SEPG_DB_PROCEDURE__SETATTR + }, + { + "relabelfrom", SEPG_DB_PROCEDURE__RELABELFROM + }, + { + "relabelto", SEPG_DB_PROCEDURE__RELABELTO + }, + { + "execute", SEPG_DB_PROCEDURE__EXECUTE + }, + { + "entrypoint", SEPG_DB_PROCEDURE__ENTRYPOINT + }, + { + "install", SEPG_DB_PROCEDURE__INSTALL + }, + { + NULL, 0UL + }, } }, { - "db_column", SEPG_CLASS_DB_COLUMN, + "db_column", SEPG_CLASS_DB_COLUMN, { - { "create", SEPG_DB_COLUMN__CREATE }, - { "drop", SEPG_DB_COLUMN__DROP }, - { "getattr", SEPG_DB_COLUMN__GETATTR }, - { "setattr", SEPG_DB_COLUMN__SETATTR }, - { "relabelfrom", SEPG_DB_COLUMN__RELABELFROM }, - { "relabelto", SEPG_DB_COLUMN__RELABELTO }, - { "select", SEPG_DB_COLUMN__SELECT }, - { "update", SEPG_DB_COLUMN__UPDATE }, - { "insert", SEPG_DB_COLUMN__INSERT }, - { NULL, 0UL }, + { + "create", 
SEPG_DB_COLUMN__CREATE + }, + { + "drop", SEPG_DB_COLUMN__DROP + }, + { + "getattr", SEPG_DB_COLUMN__GETATTR + }, + { + "setattr", SEPG_DB_COLUMN__SETATTR + }, + { + "relabelfrom", SEPG_DB_COLUMN__RELABELFROM + }, + { + "relabelto", SEPG_DB_COLUMN__RELABELTO + }, + { + "select", SEPG_DB_COLUMN__SELECT + }, + { + "update", SEPG_DB_COLUMN__UPDATE + }, + { + "insert", SEPG_DB_COLUMN__INSERT + }, + { + NULL, 0UL + }, } }, { - "db_tuple", SEPG_CLASS_DB_TUPLE, + "db_tuple", SEPG_CLASS_DB_TUPLE, { - { "relabelfrom", SEPG_DB_TUPLE__RELABELFROM }, - { "relabelto", SEPG_DB_TUPLE__RELABELTO }, - { "select", SEPG_DB_TUPLE__SELECT }, - { "update", SEPG_DB_TUPLE__UPDATE }, - { "insert", SEPG_DB_TUPLE__INSERT }, - { "delete", SEPG_DB_TUPLE__DELETE }, - { NULL, 0UL }, + { + "relabelfrom", SEPG_DB_TUPLE__RELABELFROM + }, + { + "relabelto", SEPG_DB_TUPLE__RELABELTO + }, + { + "select", SEPG_DB_TUPLE__SELECT + }, + { + "update", SEPG_DB_TUPLE__UPDATE + }, + { + "insert", SEPG_DB_TUPLE__INSERT + }, + { + "delete", SEPG_DB_TUPLE__DELETE + }, + { + NULL, 0UL + }, } }, { - "db_blob", SEPG_CLASS_DB_BLOB, + "db_blob", SEPG_CLASS_DB_BLOB, { - { "create", SEPG_DB_BLOB__CREATE }, - { "drop", SEPG_DB_BLOB__DROP }, - { "getattr", SEPG_DB_BLOB__GETATTR }, - { "setattr", SEPG_DB_BLOB__SETATTR }, - { "relabelfrom", SEPG_DB_BLOB__RELABELFROM }, - { "relabelto", SEPG_DB_BLOB__RELABELTO }, - { "read", SEPG_DB_BLOB__READ }, - { "write", SEPG_DB_BLOB__WRITE }, - { "import", SEPG_DB_BLOB__IMPORT }, - { "export", SEPG_DB_BLOB__EXPORT }, - { NULL, 0UL }, + { + "create", SEPG_DB_BLOB__CREATE + }, + { + "drop", SEPG_DB_BLOB__DROP + }, + { + "getattr", SEPG_DB_BLOB__GETATTR + }, + { + "setattr", SEPG_DB_BLOB__SETATTR + }, + { + "relabelfrom", SEPG_DB_BLOB__RELABELFROM + }, + { + "relabelto", SEPG_DB_BLOB__RELABELTO + }, + { + "read", SEPG_DB_BLOB__READ + }, + { + "write", SEPG_DB_BLOB__WRITE + }, + { + "import", SEPG_DB_BLOB__IMPORT + }, + { + "export", SEPG_DB_BLOB__EXPORT + }, + { + NULL, 0UL + }, } }, { - "db_language", SEPG_CLASS_DB_LANGUAGE, + "db_language", SEPG_CLASS_DB_LANGUAGE, { - { "create", SEPG_DB_LANGUAGE__CREATE }, - { "drop", SEPG_DB_LANGUAGE__DROP }, - { "getattr", SEPG_DB_LANGUAGE__GETATTR }, - { "setattr", SEPG_DB_LANGUAGE__SETATTR }, - { "relabelfrom", SEPG_DB_LANGUAGE__RELABELFROM }, - { "relabelto", SEPG_DB_LANGUAGE__RELABELTO }, - { "implement", SEPG_DB_LANGUAGE__IMPLEMENT }, - { "execute", SEPG_DB_LANGUAGE__EXECUTE }, - { NULL, 0UL }, + { + "create", SEPG_DB_LANGUAGE__CREATE + }, + { + "drop", SEPG_DB_LANGUAGE__DROP + }, + { + "getattr", SEPG_DB_LANGUAGE__GETATTR + }, + { + "setattr", SEPG_DB_LANGUAGE__SETATTR + }, + { + "relabelfrom", SEPG_DB_LANGUAGE__RELABELFROM + }, + { + "relabelto", SEPG_DB_LANGUAGE__RELABELTO + }, + { + "implement", SEPG_DB_LANGUAGE__IMPLEMENT + }, + { + "execute", SEPG_DB_LANGUAGE__EXECUTE + }, + { + NULL, 0UL + }, } }, { - "db_view", SEPG_CLASS_DB_VIEW, + "db_view", SEPG_CLASS_DB_VIEW, { - { "create", SEPG_DB_VIEW__CREATE }, - { "drop", SEPG_DB_VIEW__DROP }, - { "getattr", SEPG_DB_VIEW__GETATTR }, - { "setattr", SEPG_DB_VIEW__SETATTR }, - { "relabelfrom", SEPG_DB_VIEW__RELABELFROM }, - { "relabelto", SEPG_DB_VIEW__RELABELTO }, - { "expand", SEPG_DB_VIEW__EXPAND }, - { NULL, 0UL }, + { + "create", SEPG_DB_VIEW__CREATE + }, + { + "drop", SEPG_DB_VIEW__DROP + }, + { + "getattr", SEPG_DB_VIEW__GETATTR + }, + { + "setattr", SEPG_DB_VIEW__SETATTR + }, + { + "relabelfrom", SEPG_DB_VIEW__RELABELFROM + }, + { + "relabelto", SEPG_DB_VIEW__RELABELTO + }, + { + "expand", SEPG_DB_VIEW__EXPAND + 
}, + { + NULL, 0UL + }, } }, }; @@ -316,7 +624,7 @@ sepgsql_get_mode(void) int sepgsql_set_mode(int new_mode) { - int old_mode = sepgsql_mode; + int old_mode = sepgsql_mode; sepgsql_mode = new_mode; @@ -367,10 +675,10 @@ sepgsql_audit_log(bool denied, uint32 audited, const char *audit_name) { - StringInfoData buf; - const char *class_name; - const char *av_name; - int i; + StringInfoData buf; + const char *class_name; + const char *av_name; + int i; /* lookup name of the object class */ Assert(tclass < SEPG_CLASS_MAX); @@ -380,7 +688,7 @@ sepgsql_audit_log(bool denied, initStringInfo(&buf); appendStringInfo(&buf, "%s {", (denied ? "denied" : "allowed")); - for (i=0; selinux_catalog[tclass].av[i].av_name; i++) + for (i = 0; selinux_catalog[tclass].av[i].av_name; i++) { if (audited & (1UL << i)) { @@ -418,14 +726,15 @@ void sepgsql_compute_avd(const char *scontext, const char *tcontext, uint16 tclass, - struct av_decision *avd) + struct av_decision * avd) { - const char *tclass_name; - security_class_t tclass_ex; - struct av_decision avd_ex; - int i, deny_unknown = security_deny_unknown(); + const char *tclass_name; + security_class_t tclass_ex; + struct av_decision avd_ex; + int i, + deny_unknown = security_deny_unknown(); - /* Get external code of the object class*/ + /* Get external code of the object class */ Assert(tclass < SEPG_CLASS_MAX); Assert(tclass == selinux_catalog[tclass].class_code); @@ -436,14 +745,13 @@ sepgsql_compute_avd(const char *scontext, { /* * If the current security policy does not support permissions - * corresponding to database objects, we fill up them with dummy - * data. + * corresponding to database objects, we fill up them with dummy data. * If security_deny_unknown() returns positive value, undefined * permissions should be denied. Otherwise, allowed */ avd->allowed = (security_deny_unknown() > 0 ? 0 : ~0); avd->auditallow = 0U; - avd->auditdeny = ~0U; + avd->auditdeny = ~0U; avd->flags = 0; return; @@ -453,8 +761,8 @@ sepgsql_compute_avd(const char *scontext, * Ask SELinux what is allowed set of permissions on a pair of the * security contexts and the given object class. */ - if (security_compute_av_flags_raw((security_context_t)scontext, - (security_context_t)tcontext, + if (security_compute_av_flags_raw((security_context_t) scontext, + (security_context_t) tcontext, tclass_ex, 0, &avd_ex) < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), @@ -464,17 +772,17 @@ sepgsql_compute_avd(const char *scontext, /* * SELinux returns its access control decision as a set of permissions - * represented in external code which depends on run-time environment. - * So, we need to translate it to the internal representation before - * returning results for the caller. + * represented in external code which depends on run-time environment. So, + * we need to translate it to the internal representation before returning + * results for the caller. 
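Aside: the translation that the preceding comment describes maps libselinux's run-time permission bits back onto fixed internal bit positions by permission name. A minimal standalone sketch of that pattern, assuming only the documented libselinux calls string_to_security_class() and string_to_av_perm(); the two-entry av_table below is an invented stand-in for sepgsql's much larger selinux_catalog:

#include <stdint.h>
#include <selinux/selinux.h>

/* invented stand-in for one class's entries in selinux_catalog */
static const struct
{
    const char *name;
    uint32_t    code;
}   av_table[] =
{
    {"read", (1U << 0)},
    {"write", (1U << 1)},
    {NULL, 0}
};

/* Fold an external 'allowed' vector into the internal bit positions. */
static uint32_t
translate_allowed(const char *class_name, access_vector_t allowed_ex)
{
    security_class_t tclass_ex = string_to_security_class(class_name);
    uint32_t    allowed = 0;
    int         i;

    for (i = 0; av_table[i].name; i++)
    {
        access_vector_t code_ex = string_to_av_perm(tclass_ex,
                                                    av_table[i].name);

        /* code_ex == 0 means the loaded policy lacks this permission */
        if (code_ex != 0 && (allowed_ex & code_ex) != 0)
            allowed |= av_table[i].code;
    }
    return allowed;
}

As the hunk above shows, the real function additionally consults security_deny_unknown() to decide whether permissions missing from the policy default to denied or allowed.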
*/ memset(avd, 0, sizeof(struct av_decision)); - for (i=0; selinux_catalog[tclass].av[i].av_name; i++) + for (i = 0; selinux_catalog[tclass].av[i].av_name; i++) { - access_vector_t av_code_ex; - const char *av_name = selinux_catalog[tclass].av[i].av_name; - uint32 av_code = selinux_catalog[tclass].av[i].av_code; + access_vector_t av_code_ex; + const char *av_name = selinux_catalog[tclass].av[i].av_name; + uint32 av_code = selinux_catalog[tclass].av[i].av_code; av_code_ex = string_to_av_perm(tclass_ex, av_name); if (av_code_ex == 0) @@ -524,23 +832,23 @@ sepgsql_compute_create(const char *scontext, const char *tcontext, uint16 tclass) { - security_context_t ncontext; - security_class_t tclass_ex; - const char *tclass_name; - char *result; + security_context_t ncontext; + security_class_t tclass_ex; + const char *tclass_name; + char *result; - /* Get external code of the object class*/ + /* Get external code of the object class */ Assert(tclass < SEPG_CLASS_MAX); tclass_name = selinux_catalog[tclass].class_name; tclass_ex = string_to_security_class(tclass_name); /* - * Ask SELinux what is the default context for the given object class - * on a pair of security contexts + * Ask SELinux what is the default context for the given object class on a + * pair of security contexts */ - if (security_compute_create_raw((security_context_t)scontext, - (security_context_t)tcontext, + if (security_compute_create_raw((security_context_t) scontext, + (security_context_t) tcontext, tclass_ex, &ncontext) < 0) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), @@ -549,8 +857,8 @@ sepgsql_compute_create(const char *scontext, scontext, tcontext, tclass_name))); /* - * libselinux returns malloc()'ed string, so we need to copy it - * on the palloc()'ed region. + * libselinux returns malloc()'ed string, so we need to copy it on the + * palloc()'ed region. */ PG_TRY(); { @@ -589,7 +897,7 @@ sepgsql_check_perms(const char *scontext, const char *audit_name, bool abort) { - struct av_decision avd; + struct av_decision avd; uint32 denied; uint32 audited; bool result = true; @@ -602,7 +910,7 @@ sepgsql_check_perms(const char *scontext, audited = (denied ? denied : required); else audited = (denied ? (denied & avd.auditdeny) - : (required & avd.auditallow)); + : (required & avd.auditallow)); if (denied && sepgsql_getenforce() > 0 && @@ -610,8 +918,8 @@ sepgsql_check_perms(const char *scontext, result = false; /* - * It records a security audit for the request, if needed. - * But, when SE-PgSQL performs 'internal' mode, it needs to keep silent. + * It records a security audit for the request, if needed. But, when + * SE-PgSQL performs 'internal' mode, it needs to keep silent. 
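Aside: stripped of the ereport plumbing, the sepgsql_check_perms() hunk above reduces to a few lines of bit arithmetic. A self-contained restatement of just that decision (the struct is a pared-down stand-in for struct av_decision, and debug_audit stands in for the debug-audit GUC):

#include <stdbool.h>
#include <stdint.h>

struct avd
{
    uint32_t    allowed;
    uint32_t    auditallow;
    uint32_t    auditdeny;
};

/* Returns true if access is granted; *audited receives the bits to log. */
static bool
check_perms(const struct avd *avd, uint32_t required, bool debug_audit,
            uint32_t *audited)
{
    uint32_t    denied = required & ~avd->allowed;

    if (debug_audit)
        *audited = denied ? denied : required;
    else
        *audited = denied ? (denied & avd->auditdeny)
                          : (required & avd->auditallow);

    return denied == 0;     /* enforcing/permissive handling omitted */
}

In the real function a nonzero denied only fails the check when sepgsql_getenforce() reports enforcing mode, as the surrounding diff shows.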
*/ if (audited && sepgsql_mode != SEPGSQL_MODE_INTERNAL) { diff --git a/contrib/sepgsql/sepgsql.h b/contrib/sepgsql/sepgsql.h index ba7b2d1597..71688ab784 100644 --- a/contrib/sepgsql/sepgsql.h +++ b/contrib/sepgsql/sepgsql.h @@ -218,33 +218,34 @@ extern bool sepgsql_get_debug_audit(void); /* * selinux.c */ -extern bool sepgsql_is_enabled(void); +extern bool sepgsql_is_enabled(void); extern int sepgsql_get_mode(void); extern int sepgsql_set_mode(int new_mode); extern bool sepgsql_getenforce(void); extern void sepgsql_audit_log(bool denied, - const char *scontext, - const char *tcontext, - uint16 tclass, - uint32 audited, - const char *audit_name); + const char *scontext, + const char *tcontext, + uint16 tclass, + uint32 audited, + const char *audit_name); extern void sepgsql_compute_avd(const char *scontext, - const char *tcontext, - uint16 tclass, - struct av_decision *avd); + const char *tcontext, + uint16 tclass, + struct av_decision * avd); extern char *sepgsql_compute_create(const char *scontext, - const char *tcontext, - uint16 tclass); + const char *tcontext, + uint16 tclass); extern bool sepgsql_check_perms(const char *scontext, - const char *tcontext, - uint16 tclass, - uint32 required, - const char *audit_name, - bool abort); + const char *tcontext, + uint16 tclass, + uint32 required, + const char *audit_name, + bool abort); + /* * label.c */ @@ -252,8 +253,8 @@ extern char *sepgsql_get_client_label(void); extern char *sepgsql_set_client_label(char *new_label); extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId); -extern void sepgsql_object_relabel(const ObjectAddress *object, - const char *seclabel); +extern void sepgsql_object_relabel(const ObjectAddress *object, + const char *seclabel); extern Datum sepgsql_getcon(PG_FUNCTION_ARGS); extern Datum sepgsql_mcstrans_in(PG_FUNCTION_ARGS); @@ -276,7 +277,7 @@ extern void sepgsql_schema_relabel(Oid namespaceId, const char *seclabel); */ extern void sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum); extern void sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum, - const char *seclabel); + const char *seclabel); extern void sepgsql_relation_post_create(Oid relOid); extern void sepgsql_relation_relabel(Oid relOid, const char *seclabel); @@ -287,4 +288,4 @@ extern void sepgsql_proc_post_create(Oid functionId); extern void sepgsql_proc_relabel(Oid functionId, const char *seclabel); extern char *sepgsql_proc_get_domtrans(Oid functionId); -#endif /* SEPGSQL_H */ +#endif /* SEPGSQL_H */ diff --git a/contrib/spi/moddatetime.c b/contrib/spi/moddatetime.c index f5a0d93ef5..d02560c298 100644 --- a/contrib/spi/moddatetime.c +++ b/contrib/spi/moddatetime.c @@ -84,7 +84,7 @@ moddatetime(PG_FUNCTION_ARGS) /* * This is where we check to see if the field we are supposed to update - * even exists. The above function must return -1 if name not found? + * even exists. The above function must return -1 if name not found? 
*/ if (attnum < 0) ereport(ERROR, diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index e92ab66491..44c600e134 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -61,7 +61,7 @@ static text *pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar *toptag, static xmlChar *pgxml_texttoxmlchar(text *textstring); static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar *xpath, - xpath_workspace *workspace); + xpath_workspace *workspace); static void cleanup_workspace(xpath_workspace *workspace); @@ -234,7 +234,7 @@ Datum xpath_nodeset(PG_FUNCTION_ARGS) { text *document = PG_GETARG_TEXT_P(0); - text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ + text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ xmlChar *toptag = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(2)); xmlChar *septag = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(3)); xmlChar *xpath; @@ -267,7 +267,7 @@ Datum xpath_list(PG_FUNCTION_ARGS) { text *document = PG_GETARG_TEXT_P(0); - text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ + text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ xmlChar *plainsep = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(2)); xmlChar *xpath; text *xpres; @@ -296,7 +296,7 @@ Datum xpath_string(PG_FUNCTION_ARGS) { text *document = PG_GETARG_TEXT_P(0); - text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ + text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ xmlChar *xpath; int32 pathsize; text *xpres; @@ -337,7 +337,7 @@ Datum xpath_number(PG_FUNCTION_ARGS) { text *document = PG_GETARG_TEXT_P(0); - text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ + text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ xmlChar *xpath; float4 fRes; xmlXPathObjectPtr res; @@ -369,7 +369,7 @@ Datum xpath_bool(PG_FUNCTION_ARGS) { text *document = PG_GETARG_TEXT_P(0); - text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ + text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ xmlChar *xpath; int bRes; xmlXPathObjectPtr res; diff --git a/contrib/xml2/xslt_proc.c b/contrib/xml2/xslt_proc.c index a90104d17a..f8f7d7263f 100644 --- a/contrib/xml2/xslt_proc.c +++ b/contrib/xml2/xslt_proc.c @@ -42,7 +42,6 @@ extern void pgxml_parser_init(void); /* local defs */ static const char **parse_params(text *paramstr); - #endif /* USE_LIBXSLT */ @@ -166,7 +165,7 @@ parse_params(text *paramstr) { max_params *= 2; params = (const char **) repalloc(params, - (max_params + 1) * sizeof(char *)); + (max_params + 1) * sizeof(char *)); } params[nparams++] = pos; pos = strstr(pos, nvsep); diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 6d608fed89..175e6ea2f2 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -350,7 +350,7 @@ nocachegetattr(HeapTuple tuple, * * check to see if any preceding bits are null... */ - int byte = attnum >> 3; + int byte = attnum >> 3; int finalbit = attnum & 0x07; /* check for nulls "before" final bit of last byte */ diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index 9ea87360f9..85c43199aa 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -237,7 +237,7 @@ nocache_index_getattr(IndexTuple tup, * Now check to see if any preceding bits are null... 
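Aside: in the heaptuple.c and indextuple.c hunks that follow, byte and finalbit are simply coordinates into the tuple's null bitmap, one bit per attribute with a set bit meaning "not null". A hedged sketch of the "any preceding attribute null?" test those functions perform (helper name invented; t_bits layout as in PostgreSQL):

#include <stdbool.h>
#include <stdint.h>

/* True if any attribute numbered below attnum is null. */
static bool
any_prior_null(const uint8_t *t_bits, int attnum)
{
    int     byte = attnum >> 3;         /* byte holding attnum's bit */
    int     finalbit = attnum & 0x07;   /* bit position within that byte */
    uint8_t mask = (uint8_t) ((1 << finalbit) - 1);
    int     i;

    for (i = 0; i < byte; i++)
        if (t_bits[i] != 0xFF)          /* any zero bit is a null */
            return true;

    /* check for nulls "before" final bit of last byte */
    return (t_bits[byte] & mask) != mask;
}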
*/ { - int byte = attnum >> 3; + int byte = attnum >> 3; int finalbit = attnum & 0x07; /* check for nulls "before" final bit of last byte */ diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c index ce9abae6aa..2de58604ee 100644 --- a/src/backend/access/gin/ginarrayproc.c +++ b/src/backend/access/gin/ginarrayproc.c @@ -82,7 +82,8 @@ ginqueryarrayextract(PG_FUNCTION_ARGS) ArrayType *array = PG_GETARG_ARRAYTYPE_P_COPY(0); int32 *nkeys = (int32 *) PG_GETARG_POINTER(1); StrategyNumber strategy = PG_GETARG_UINT16(2); - /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */ + + /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */ /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool **nullFlags = (bool **) PG_GETARG_POINTER(5); int32 *searchMode = (int32 *) PG_GETARG_POINTER(6); @@ -112,7 +113,7 @@ ginqueryarrayextract(PG_FUNCTION_ARGS) case GinContainsStrategy: if (nelems > 0) *searchMode = GIN_SEARCH_MODE_DEFAULT; - else /* everything contains the empty set */ + else /* everything contains the empty set */ *searchMode = GIN_SEARCH_MODE_ALL; break; case GinContainedStrategy: @@ -142,10 +143,13 @@ ginarrayconsistent(PG_FUNCTION_ARGS) { bool *check = (bool *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); + /* ArrayType *query = PG_GETARG_ARRAYTYPE_P(2); */ int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); + /* Datum *queryKeys = (Datum *) PG_GETARG_POINTER(6); */ bool *nullFlags = (bool *) PG_GETARG_POINTER(7); bool res; @@ -190,10 +194,11 @@ ginarrayconsistent(PG_FUNCTION_ARGS) case GinEqualStrategy: /* we will need recheck */ *recheck = true; + /* * Must have all elements in check[] true; no discrimination - * against nulls here. This is because array_contain_compare - * and array_eq handle nulls differently ... + * against nulls here. This is because array_contain_compare and + * array_eq handle nulls differently ... */ res = true; for (i = 0; i < nkeys; i++) diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c index f0c8c8e37f..9e5bab194d 100644 --- a/src/backend/access/gin/ginbulk.c +++ b/src/backend/access/gin/ginbulk.c @@ -80,8 +80,8 @@ ginAllocEntryAccumulator(void *arg) GinEntryAccumulator *ea; /* - * Allocate memory by rather big chunks to decrease overhead. We have - * no need to reclaim RBNodes individually, so this costs nothing. + * Allocate memory by rather big chunks to decrease overhead. We have no + * need to reclaim RBNodes individually, so this costs nothing. */ if (accum->entryallocator == NULL || accum->eas_used >= DEF_NENTRY) { @@ -108,7 +108,7 @@ ginInitBA(BuildAccumulator *accum) cmpEntryAccumulator, ginCombineData, ginAllocEntryAccumulator, - NULL, /* no freefunc needed */ + NULL, /* no freefunc needed */ (void *) accum); } @@ -145,8 +145,8 @@ ginInsertBAEntry(BuildAccumulator *accum, bool isNew; /* - * For the moment, fill only the fields of eatmp that will be looked at - * by cmpEntryAccumulator or ginCombineData. + * For the moment, fill only the fields of eatmp that will be looked at by + * cmpEntryAccumulator or ginCombineData. 
*/ eatmp.attnum = attnum; eatmp.key = key; diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index 4a1e754800..41dbe9fd11 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -21,13 +21,13 @@ int ginCompareItemPointers(ItemPointer a, ItemPointer b) { - BlockNumber ba = GinItemPointerGetBlockNumber(a); - BlockNumber bb = GinItemPointerGetBlockNumber(b); + BlockNumber ba = GinItemPointerGetBlockNumber(a); + BlockNumber bb = GinItemPointerGetBlockNumber(b); if (ba == bb) { - OffsetNumber oa = GinItemPointerGetOffsetNumber(a); - OffsetNumber ob = GinItemPointerGetOffsetNumber(b); + OffsetNumber oa = GinItemPointerGetOffsetNumber(a); + OffsetNumber ob = GinItemPointerGetOffsetNumber(b); if (oa == ob) return 0; @@ -383,6 +383,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda Page page = BufferGetPage(buf); int sizeofitem = GinSizeOfDataPageItem(page); int cnt = 0; + /* these must be static so they can be returned to caller */ static XLogRecData rdata[3]; static ginxlogInsert data; @@ -474,6 +475,7 @@ dataSplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRe Size pageSize = PageGetPageSize(lpage); Size freeSpace; uint32 nCopied = 1; + /* these must be static so they can be returned to caller */ static ginxlogSplit data; static XLogRecData rdata[4]; diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c index 9749a1be78..fa134f9fc3 100644 --- a/src/backend/access/gin/ginentrypage.c +++ b/src/backend/access/gin/ginentrypage.c @@ -98,11 +98,11 @@ GinFormTuple(GinState *ginstate, if (errorTooBig) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("index row size %lu exceeds maximum %lu for index \"%s\"", - (unsigned long) newsize, - (unsigned long) Min(INDEX_SIZE_MASK, - GinMaxItemSize), - RelationGetRelationName(ginstate->index)))); + errmsg("index row size %lu exceeds maximum %lu for index \"%s\"", + (unsigned long) newsize, + (unsigned long) Min(INDEX_SIZE_MASK, + GinMaxItemSize), + RelationGetRelationName(ginstate->index)))); pfree(itup); return NULL; } @@ -164,7 +164,7 @@ GinShortenTuple(IndexTuple itup, uint32 nipd) * Form a non-leaf entry tuple by copying the key data from the given tuple, * which can be either a leaf or non-leaf entry tuple. * - * Any posting list in the source tuple is not copied. The specified child + * Any posting list in the source tuple is not copied. The specified child * block number is inserted into t_tid. 
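Aside: the ginCompareItemPointers() hunk above is a plain two-level comparison, ordering first by block number and then by offset. The same logic with PostgreSQL's typedefs reduced to fixed-width integers:

#include <stdint.h>

typedef struct
{
    uint32_t    block;      /* BlockNumber */
    uint16_t    offset;     /* OffsetNumber */
} item_ptr;

/* -1 / 0 / +1, as qsort-style comparators expect. */
static int
compare_item_pointers(const item_ptr *a, const item_ptr *b)
{
    if (a->block != b->block)
        return (a->block < b->block) ? -1 : 1;
    if (a->offset != b->offset)
        return (a->offset < b->offset) ? -1 : 1;
    return 0;
}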
*/ static IndexTuple @@ -225,7 +225,7 @@ entryIsMoveRight(GinBtree btree, Page page) key = gintuple_get_key(btree->ginstate, itup, &category); if (ginCompareAttEntries(btree->ginstate, - btree->entryAttnum, btree->entryKey, btree->entryCategory, + btree->entryAttnum, btree->entryKey, btree->entryCategory, attnum, key, category) > 0) return TRUE; @@ -488,6 +488,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd Page page = BufferGetPage(buf); OffsetNumber placed; int cnt = 0; + /* these must be static so they can be returned to caller */ static XLogRecData rdata[3]; static ginxlogInsert data; @@ -561,6 +562,7 @@ entrySplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogR Page lpage = PageGetTempPageCopy(BufferGetPage(lbuf)); Page rpage = BufferGetPage(rbuf); Size pageSize = PageGetPageSize(lpage); + /* these must be static so they can be returned to caller */ static XLogRecData rdata[2]; static ginxlogSplit data; diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index 9960c786c9..82419e37ac 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -88,9 +88,9 @@ writeListPage(Relation index, Buffer buffer, GinPageGetOpaque(page)->rightlink = rightlink; /* - * tail page may contain only whole row(s) or final part of row placed - * on previous pages (a "row" here meaning all the index tuples generated - * for one heap tuple) + * tail page may contain only whole row(s) or final part of row placed on + * previous pages (a "row" here meaning all the index tuples generated for + * one heap tuple) */ if (rightlink == InvalidBlockNumber) { @@ -437,7 +437,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) * Create temporary index tuples for a single indexable item (one index column * for the heap tuple specified by ht_ctid), and append them to the array * in *collector. They will subsequently be written out using - * ginHeapTupleFastInsert. Note that to guarantee consistent state, all + * ginHeapTupleFastInsert. Note that to guarantee consistent state, all * temp tuples for a given heap tuple must be written in one call to * ginHeapTupleFastInsert. */ @@ -475,8 +475,8 @@ ginHeapTupleFastCollect(GinState *ginstate, } /* - * Build an index tuple for each key value, and add to array. In - * pending tuples we just stick the heap TID into t_tid. + * Build an index tuple for each key value, and add to array. In pending + * tuples we just stick the heap TID into t_tid. 
*/ for (i = 0; i < nentries; i++) { @@ -665,7 +665,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, { IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i)); OffsetNumber curattnum; - Datum curkey; + Datum curkey; GinNullCategory curcategory; /* Check for change of heap TID or attnum */ @@ -830,7 +830,7 @@ ginInsertCleanup(GinState *ginstate, */ ginBeginBAScan(&accum); while ((list = ginGetBAEntry(&accum, - &attnum, &key, &category, &nlist)) != NULL) + &attnum, &key, &category, &nlist)) != NULL) { ginEntryInsert(ginstate, attnum, key, category, list, nlist, NULL); @@ -867,7 +867,7 @@ ginInsertCleanup(GinState *ginstate, ginBeginBAScan(&accum); while ((list = ginGetBAEntry(&accum, - &attnum, &key, &category, &nlist)) != NULL) + &attnum, &key, &category, &nlist)) != NULL) ginEntryInsert(ginstate, attnum, key, category, list, nlist, NULL); } diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index e07dc0a6ce..a4771654a6 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -40,8 +40,8 @@ static bool callConsistentFn(GinState *ginstate, GinScanKey key) { /* - * If we're dealing with a dummy EVERYTHING key, we don't want to call - * the consistentFn; just claim it matches. + * If we're dealing with a dummy EVERYTHING key, we don't want to call the + * consistentFn; just claim it matches. */ if (key->searchMode == GIN_SEARCH_MODE_EVERYTHING) { @@ -174,14 +174,14 @@ scanPostingTree(Relation index, GinScanEntry scanEntry, /* * Collects TIDs into scanEntry->matchBitmap for all heap tuples that - * match the search entry. This supports three different match modes: + * match the search entry. This supports three different match modes: * * 1. Partial-match support: scan from current point until the - * comparePartialFn says we're done. + * comparePartialFn says we're done. * 2. SEARCH_MODE_ALL: scan from current point (which should be first - * key for the current attnum) until we hit null items or end of attnum + * key for the current attnum) until we hit null items or end of attnum * 3. SEARCH_MODE_EVERYTHING: scan from current point (which should be first - * key for the current attnum) until we hit end of attnum + * key for the current attnum) until we hit end of attnum * * Returns true if done, false if it's necessary to restart scan from scratch */ @@ -189,7 +189,7 @@ static bool collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, GinScanEntry scanEntry) { - OffsetNumber attnum; + OffsetNumber attnum; Form_pg_attribute attr; /* Initialize empty bitmap result */ @@ -253,8 +253,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, cmp = DatumGetInt32(FunctionCall4(&btree->ginstate->comparePartialFn[attnum - 1], scanEntry->queryKey, idatum, - UInt16GetDatum(scanEntry->strategy), - PointerGetDatum(scanEntry->extra_data))); + UInt16GetDatum(scanEntry->strategy), + PointerGetDatum(scanEntry->extra_data))); if (cmp > 0) return true; @@ -269,7 +269,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, /* * In ALL mode, we are not interested in null items, so we can * stop if we get to a null-item placeholder (which will be the - * last entry for a given attnum). We do want to include NULL_KEY + * last entry for a given attnum). We do want to include NULL_KEY * and EMPTY_ITEM entries, though. 
*/ if (icategory == GIN_CAT_NULL_ITEM) @@ -287,8 +287,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, * We should unlock current page (but not unpin) during tree scan * to prevent deadlock with vacuum processes. * - * We save current entry value (idatum) to be able to re-find - * our tuple after re-locking + * We save current entry value (idatum) to be able to re-find our + * tuple after re-locking */ if (icategory == GIN_CAT_NORM_KEY) idatum = datumCopy(idatum, attr->attbyval, attr->attlen); @@ -442,11 +442,11 @@ restartScanEntry: Page page; /* - * We should unlock entry page before touching posting tree - * to prevent deadlocks with vacuum processes. Because entry is - * never deleted from page and posting tree is never reduced to - * the posting list, we can unlock page after getting BlockNumber - * of root of posting tree. + * We should unlock entry page before touching posting tree to + * prevent deadlocks with vacuum processes. Because entry is never + * deleted from page and posting tree is never reduced to the + * posting list, we can unlock page after getting BlockNumber of + * root of posting tree. */ LockBuffer(stackEntry->buffer, GIN_UNLOCK); needUnlock = FALSE; @@ -596,7 +596,7 @@ entryGetNextItem(GinState *ginstate, GinScanEntry entry) if (!ItemPointerIsValid(&entry->curItem) || ginCompareItemPointers(&entry->curItem, - entry->list + entry->offset - 1) == 0) + entry->list + entry->offset - 1) == 0) { /* * First pages are deleted or empty, or we found exact @@ -656,10 +656,10 @@ entryGetItem(GinState *ginstate, GinScanEntry entry) } /* - * Reset counter to the beginning of entry->matchResult. - * Note: entry->offset is still greater than - * matchResult->ntuples if matchResult is lossy. So, on next - * call we will get next result from TIDBitmap. + * Reset counter to the beginning of entry->matchResult. Note: + * entry->offset is still greater than matchResult->ntuples if + * matchResult is lossy. So, on next call we will get next + * result from TIDBitmap. */ entry->offset = 0; } @@ -745,10 +745,10 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key) /* * Find the minimum of the active entry curItems. * - * Note: a lossy-page entry is encoded by a ItemPointer with max value - * for offset (0xffff), so that it will sort after any exact entries - * for the same page. So we'll prefer to return exact pointers not - * lossy pointers, which is good. + * Note: a lossy-page entry is encoded by a ItemPointer with max value for + * offset (0xffff), so that it will sort after any exact entries for the + * same page. So we'll prefer to return exact pointers not lossy + * pointers, which is good. */ ItemPointerSetMax(&minItem); @@ -782,28 +782,27 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key) /* * Lossy-page entries pose a problem, since we don't know the correct - * entryRes state to pass to the consistentFn, and we also don't know - * what its combining logic will be (could be AND, OR, or even NOT). - * If the logic is OR then the consistentFn might succeed for all - * items in the lossy page even when none of the other entries match. + * entryRes state to pass to the consistentFn, and we also don't know what + * its combining logic will be (could be AND, OR, or even NOT). If the + * logic is OR then the consistentFn might succeed for all items in the + * lossy page even when none of the other entries match. 
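Aside: the lossy-page encoding mentioned above (offset 0xffff, the maximum OffsetNumber) is exactly what makes a whole-page pointer sort after every exact pointer for the same page under the comparison sketched earlier. A small illustration using the same reduced item-pointer type (helper names invented):

#include <stdint.h>

typedef struct
{
    uint32_t    block;
    uint16_t    offset;
} item_ptr;

#define LOSSY_OFFSET 0xffff     /* max offset => sorts after exact entries */

/* Build a pointer that stands for "every tuple on this heap page". */
static item_ptr
make_lossy(uint32_t block)
{
    item_ptr    p = {block, LOSSY_OFFSET};

    return p;
}

static int
is_lossy(const item_ptr *p)
{
    return p->offset == LOSSY_OFFSET;
}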
* * If we have a single lossy-page entry then we check to see if the - * consistentFn will succeed with only that entry TRUE. If so, - * we return a lossy-page pointer to indicate that the whole heap - * page must be checked. (On subsequent calls, we'll do nothing until - * minItem is past the page altogether, thus ensuring that we never return - * both regular and lossy pointers for the same page.) + * consistentFn will succeed with only that entry TRUE. If so, we return + * a lossy-page pointer to indicate that the whole heap page must be + * checked. (On subsequent calls, we'll do nothing until minItem is past + * the page altogether, thus ensuring that we never return both regular + * and lossy pointers for the same page.) * - * This idea could be generalized to more than one lossy-page entry, - * but ideally lossy-page entries should be infrequent so it would - * seldom be the case that we have more than one at once. So it - * doesn't seem worth the extra complexity to optimize that case. - * If we do find more than one, we just punt and return a lossy-page - * pointer always. + * This idea could be generalized to more than one lossy-page entry, but + * ideally lossy-page entries should be infrequent so it would seldom be + * the case that we have more than one at once. So it doesn't seem worth + * the extra complexity to optimize that case. If we do find more than + * one, we just punt and return a lossy-page pointer always. * - * Note that only lossy-page entries pointing to the current item's - * page should trigger this processing; we might have future lossy - * pages in the entry array, but they aren't relevant yet. + * Note that only lossy-page entries pointing to the current item's page + * should trigger this processing; we might have future lossy pages in the + * entry array, but they aren't relevant yet. */ ItemPointerSetLossyPage(&curPageLossy, GinItemPointerGetBlockNumber(&key->curItem)); @@ -853,15 +852,14 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key) } /* - * At this point we know that we don't need to return a lossy - * whole-page pointer, but we might have matches for individual exact - * item pointers, possibly in combination with a lossy pointer. Our - * strategy if there's a lossy pointer is to try the consistentFn both - * ways and return a hit if it accepts either one (forcing the hit to - * be marked lossy so it will be rechecked). An exception is that - * we don't need to try it both ways if the lossy pointer is in a - * "hidden" entry, because the consistentFn's result can't depend on - * that. + * At this point we know that we don't need to return a lossy whole-page + * pointer, but we might have matches for individual exact item pointers, + * possibly in combination with a lossy pointer. Our strategy if there's + * a lossy pointer is to try the consistentFn both ways and return a hit + * if it accepts either one (forcing the hit to be marked lossy so it will + * be rechecked). An exception is that we don't need to try it both ways + * if the lossy pointer is in a "hidden" entry, because the consistentFn's + * result can't depend on that. * * Prepare entryRes array to be passed to consistentFn. 
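Aside: the "try the consistentFn both ways" strategy described above is mechanical once the entryRes array is in hand. A sketch with the consistent function reduced to a callback; the names and signature are illustrative, not GIN's actual support-function API:

#include <stdbool.h>

typedef bool (*consistent_fn) (const bool *entry_res, int nentries);

/*
 * Evaluate a key whose entry lossy_idx is a lossy page pointer: accept if
 * the consistent function matches with that entry assumed either true or
 * false, forcing recheck on any hit.
 */
static bool
matches_with_lossy(consistent_fn consistent, bool *entry_res, int nentries,
                   int lossy_idx, bool *recheck)
{
    entry_res[lossy_idx] = true;
    if (consistent(entry_res, nentries))
    {
        *recheck = true;
        return true;
    }
    entry_res[lossy_idx] = false;
    if (consistent(entry_res, nentries))
    {
        *recheck = true;
        return true;
    }
    return false;
}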
*/ @@ -960,7 +958,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast, keyGetItem(&so->ginstate, so->tempCtx, key); if (key->isFinished) - return false; /* finished one of keys */ + return false; /* finished one of keys */ if (ginCompareItemPointers(&key->curItem, item) < 0) *item = key->curItem; @@ -975,7 +973,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast, * that exact TID, or a lossy reference to the same page. * * This logic works only if a keyGetItem stream can never contain both - * exact and lossy pointers for the same page. Else we could have a + * exact and lossy pointers for the same page. Else we could have a * case like * * stream 1 stream 2 @@ -1011,8 +1009,8 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast, break; /* - * No hit. Update myAdvancePast to this TID, so that on the next - * pass we'll move to the next possible entry. + * No hit. Update myAdvancePast to this TID, so that on the next pass + * we'll move to the next possible entry. */ myAdvancePast = *item; } @@ -1118,8 +1116,8 @@ scanGetCandidate(IndexScanDesc scan, pendingPosition *pos) /* * Now pos->firstOffset points to the first tuple of current heap - * row, pos->lastOffset points to the first tuple of next heap - * row (or to the end of page) + * row, pos->lastOffset points to the first tuple of next heap row + * (or to the end of page) */ break; } @@ -1181,7 +1179,7 @@ matchPartialInPendingList(GinState *ginstate, Page page, entry->queryKey, datum[off - 1], UInt16GetDatum(entry->strategy), - PointerGetDatum(entry->extra_data))); + PointerGetDatum(entry->extra_data))); if (cmp == 0) return true; else if (cmp > 0) @@ -1227,8 +1225,8 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) memset(pos->hasMatchKey, FALSE, so->nkeys); /* - * Outer loop iterates over multiple pending-list pages when a single - * heap row has entries spanning those pages. + * Outer loop iterates over multiple pending-list pages when a single heap + * row has entries spanning those pages. */ for (;;) { @@ -1322,11 +1320,11 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) if (res == 0) { /* - * Found exact match (there can be only one, except - * in EMPTY_QUERY mode). + * Found exact match (there can be only one, except in + * EMPTY_QUERY mode). * - * If doing partial match, scan forward from - * here to end of page to check for matches. + * If doing partial match, scan forward from here to + * end of page to check for matches. * * See comment above about tuple's ordering. */ @@ -1355,13 +1353,12 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) if (StopLow >= StopHigh && entry->isPartialMatch) { /* - * No exact match on this page. If doing partial - * match, scan from the first tuple greater than - * target value to end of page. Note that since we - * don't remember whether the comparePartialFn told us - * to stop early on a previous page, we will uselessly - * apply comparePartialFn to the first tuple on each - * subsequent page. + * No exact match on this page. If doing partial match, + * scan from the first tuple greater than target value to + * end of page. Note that since we don't remember whether + * the comparePartialFn told us to stop early on a + * previous page, we will uselessly apply comparePartialFn + * to the first tuple on each subsequent page. 
*/ key->entryRes[j] = matchPartialInPendingList(&so->ginstate, diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index af5068906f..3e32af94a9 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -97,7 +97,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems) * Adds array of item pointers to tuple's posting list, or * creates posting tree and tuple pointing to tree in case * of not enough space. Max size of tuple is defined in - * GinFormTuple(). Returns a new, modified index tuple. + * GinFormTuple(). Returns a new, modified index tuple. * items[] must be in sorted order with no duplicates. */ static IndexTuple @@ -195,14 +195,14 @@ buildFreshLeafTuple(GinState *ginstate, BlockNumber postingRoot; /* - * Build posting-tree-only result tuple. We do this first so as - * to fail quickly if the key is too big. + * Build posting-tree-only result tuple. We do this first so as to + * fail quickly if the key is too big. */ res = GinFormTuple(ginstate, attnum, key, category, NULL, 0, true); /* - * Initialize posting tree with as many TIDs as will fit on the - * first page. + * Initialize posting tree with as many TIDs as will fit on the first + * page. */ postingRoot = createPostingTree(ginstate->index, items, @@ -361,7 +361,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values, ginBeginBAScan(&buildstate->accum); while ((list = ginGetBAEntry(&buildstate->accum, - &attnum, &key, &category, &nlist)) != NULL) + &attnum, &key, &category, &nlist)) != NULL) { /* there could be many entries, so be willing to abort here */ CHECK_FOR_INTERRUPTS(); diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c index 25f60e15a0..37b08c0df6 100644 --- a/src/backend/access/gin/ginscan.c +++ b/src/backend/access/gin/ginscan.c @@ -199,7 +199,7 @@ ginFillScanKey(GinScanOpaque so, OffsetNumber attnum, break; default: elog(ERROR, "unexpected searchMode: %d", searchMode); - queryCategory = 0; /* keep compiler quiet */ + queryCategory = 0; /* keep compiler quiet */ break; } isPartialMatch = false; @@ -294,8 +294,8 @@ ginNewScanKey(IndexScanDesc scan) int32 searchMode = GIN_SEARCH_MODE_DEFAULT; /* - * We assume that GIN-indexable operators are strict, so a null - * query argument means an unsatisfiable query. + * We assume that GIN-indexable operators are strict, so a null query + * argument means an unsatisfiable query. */ if (skey->sk_flags & SK_ISNULL) { @@ -315,8 +315,8 @@ ginNewScanKey(IndexScanDesc scan) PointerGetDatum(&searchMode))); /* - * If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL; - * note in particular we don't allow extractQueryFn to select + * If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL; note + * in particular we don't allow extractQueryFn to select * GIN_SEARCH_MODE_EVERYTHING. */ if (searchMode < GIN_SEARCH_MODE_DEFAULT || @@ -344,20 +344,20 @@ ginNewScanKey(IndexScanDesc scan) * If the extractQueryFn didn't create a nullFlags array, create one, * assuming that everything's non-null. Otherwise, run through the * array and make sure each value is exactly 0 or 1; this ensures - * binary compatibility with the GinNullCategory representation. - * While at it, detect whether any null keys are present. + * binary compatibility with the GinNullCategory representation. While + * at it, detect whether any null keys are present. 
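Aside: the nullFlags normalization described in the ginscan.c hunk above exists because PostgreSQL's bool is a plain char, so an extension's extractQueryFn can hand back nonzero values other than 1. A standalone restatement (unsigned char is used deliberately to model that):

#include <stdbool.h>

/*
 * Clamp each flag to exactly 0 or 1 and report whether any null key is
 * present, mirroring the loop in the hunk above.
 */
static bool
normalize_null_flags(unsigned char *null_flags, int n)
{
    bool    has_null = false;
    int     i;

    for (i = 0; i < n; i++)
    {
        if (null_flags[i])
        {
            null_flags[i] = 1;  /* not any other nonzero value */
            has_null = true;
        }
    }
    return has_null;
}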
*/ if (nullFlags == NULL) nullFlags = (bool *) palloc0(nQueryValues * sizeof(bool)); else { - int32 j; + int32 j; for (j = 0; j < nQueryValues; j++) { if (nullFlags[j]) { - nullFlags[j] = true; /* not any other nonzero value */ + nullFlags[j] = true; /* not any other nonzero value */ hasNullQuery = true; } } @@ -387,11 +387,11 @@ ginNewScanKey(IndexScanDesc scan) /* * If the index is version 0, it may be missing null and placeholder * entries, which would render searches for nulls and full-index scans - * unreliable. Throw an error if so. + * unreliable. Throw an error if so. */ if (hasNullQuery && !so->isVoidRes) { - GinStatsData ginStats; + GinStatsData ginStats; ginGetStats(scan->indexRelation, &ginStats); if (ginStats.ginVersion < 1) @@ -410,6 +410,7 @@ ginrescan(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1); + /* remaining arguments are ignored */ GinScanOpaque so = (GinScanOpaque) scan->opaque; diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c index 392c12d47a..716cf3a734 100644 --- a/src/backend/access/gin/ginutil.c +++ b/src/backend/access/gin/ginutil.c @@ -70,7 +70,7 @@ initGinState(GinState *state, Relation index) * However, we may have a collatable storage type for a noncollatable * indexed data type (for instance, hstore uses text index entries). * If there's no index collation then specify default collation in - * case the comparison function needs one. This is harmless if the + * case the comparison function needs one. This is harmless if the * comparison function doesn't care about collation, so we just do it * unconditionally. (We could alternatively call get_typcollation, * but that seems like expensive overkill --- there aren't going to be @@ -359,9 +359,9 @@ cmpEntries(const void *a, const void *b, void *arg) aa->datum, bb->datum)); /* - * Detect if we have any duplicates. If there are equal keys, qsort - * must compare them at some point, else it wouldn't know whether one - * should go before or after the other. + * Detect if we have any duplicates. If there are equal keys, qsort must + * compare them at some point, else it wouldn't know whether one should go + * before or after the other. */ if (res == 0) data->haveDups = true; @@ -422,9 +422,9 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum, /* * If the extractValueFn didn't create a nullFlags array, create one, - * assuming that everything's non-null. Otherwise, run through the - * array and make sure each value is exactly 0 or 1; this ensures - * binary compatibility with the GinNullCategory representation. + * assuming that everything's non-null. Otherwise, run through the array + * and make sure each value is exactly 0 or 1; this ensures binary + * compatibility with the GinNullCategory representation. */ if (nullFlags == NULL) nullFlags = (bool *) palloc0(*nentries * sizeof(bool)); @@ -440,8 +440,8 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum, * If there's more than one key, sort and unique-ify. * * XXX Using qsort here is notationally painful, and the overhead is - * pretty bad too. For small numbers of keys it'd likely be better to - * use a simple insertion sort. + * pretty bad too. For small numbers of keys it'd likely be better to use + * a simple insertion sort. 
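Aside: the sort-and-unique-ify step that the XXX comment above complains about has the usual shape, and the dedup loop in the hunk that follows relies on duplicates being adjacent after qsort. A generic standalone version over an int array:

#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int     ia = *(const int *) a;
    int     ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

/* Sort entries[] and squeeze out duplicates; returns the new count. */
static int
sort_and_uniq(int *entries, int n)
{
    int     i,
            j;

    if (n <= 1)
        return n;
    qsort(entries, n, sizeof(int), cmp_int);
    j = 1;
    for (i = 1; i < n; i++)
    {
        if (cmp_int(&entries[i - 1], &entries[i]) != 0)
            entries[j++] = entries[i];
    }
    return j;
}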
*/ if (*nentries > 1) { @@ -470,7 +470,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum, j = 1; for (i = 1; i < *nentries; i++) { - if (cmpEntries(&keydata[i-1], &keydata[i], &arg) != 0) + if (cmpEntries(&keydata[i - 1], &keydata[i], &arg) != 0) { entries[j] = keydata[i].datum; nullFlags[j] = keydata[i].isnull; @@ -533,9 +533,9 @@ ginoptions(PG_FUNCTION_ARGS) void ginGetStats(Relation index, GinStatsData *stats) { - Buffer metabuffer; - Page metapage; - GinMetaPageData *metadata; + Buffer metabuffer; + Page metapage; + GinMetaPageData *metadata; metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO); LockBuffer(metabuffer, GIN_SHARE); @@ -560,9 +560,9 @@ ginGetStats(Relation index, GinStatsData *stats) void ginUpdateStats(Relation index, const GinStatsData *stats) { - Buffer metabuffer; - Page metapage; - GinMetaPageData *metadata; + Buffer metabuffer; + Page metapage; + GinMetaPageData *metadata; metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO); LockBuffer(metabuffer, GIN_EXCLUSIVE); @@ -580,9 +580,9 @@ ginUpdateStats(Relation index, const GinStatsData *stats) if (RelationNeedsWAL(index)) { - XLogRecPtr recptr; - ginxlogUpdateMeta data; - XLogRecData rdata; + XLogRecPtr recptr; + ginxlogUpdateMeta data; + XLogRecData rdata; data.node = index->rd_node; data.ntuples = 0; diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 41ad382df0..79c54f16b8 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -783,7 +783,7 @@ ginvacuumcleanup(PG_FUNCTION_ARGS) { idxStat.nEntryPages++; - if ( GinPageIsLeaf(page) ) + if (GinPageIsLeaf(page)) idxStat.nEntries += PageGetMaxOffsetNumber(page); } diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index e410959b85..c954bcb12f 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -388,7 +388,7 @@ ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record) else { OffsetNumber i, - *tod; + *tod; IndexTuple itup = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogVacuumPage)); tod = (OffsetNumber *) palloc(sizeof(OffsetNumber) * PageGetMaxOffsetNumber(page)); @@ -513,10 +513,10 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record) if (!XLByteLE(lsn, PageGetLSN(page))) { OffsetNumber l, - off = (PageIsEmpty(page)) ? FirstOffsetNumber : - OffsetNumberNext(PageGetMaxOffsetNumber(page)); + off = (PageIsEmpty(page)) ? FirstOffsetNumber : + OffsetNumberNext(PageGetMaxOffsetNumber(page)); int i, - tupsize; + tupsize; IndexTuple tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta)); for (i = 0; i < data->ntuples; i++) diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 9529413e80..fae3464600 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -34,8 +34,8 @@ typedef struct /* A List of these is used represent a split-in-progress. */ typedef struct { - Buffer buf; /* the split page "half" */ - IndexTuple downlink; /* downlink for this half. */ + Buffer buf; /* the split page "half" */ + IndexTuple downlink; /* downlink for this half. */ } GISTPageSplitInfo; /* non-export function prototypes */ @@ -306,13 +306,13 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, bool is_split; /* - * Refuse to modify a page that's incompletely split. This should - * not happen because we finish any incomplete splits while we walk - * down the tree. 
However, it's remotely possible that another - * concurrent inserter splits a parent page, and errors out before - * completing the split. We will just throw an error in that case, - * and leave any split we had in progress unfinished too. The next - * insert that comes along will clean up the mess. + * Refuse to modify a page that's incompletely split. This should not + * happen because we finish any incomplete splits while we walk down the + * tree. However, it's remotely possible that another concurrent inserter + * splits a parent page, and errors out before completing the split. We + * will just throw an error in that case, and leave any split we had in + * progress unfinished too. The next insert that comes along will clean up + * the mess. */ if (GistFollowRight(page)) elog(ERROR, "concurrent GiST page split was incomplete"); @@ -338,7 +338,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, SplitedPageLayout *dist = NULL, *ptr; BlockNumber oldrlink = InvalidBlockNumber; - GistNSN oldnsn = { 0, 0 }; + GistNSN oldnsn = {0, 0}; SplitedPageLayout rootpg; BlockNumber blkno = BufferGetBlockNumber(buffer); bool is_rootsplit; @@ -364,8 +364,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, /* * Set up pages to work with. Allocate new buffers for all but the - * leftmost page. The original page becomes the new leftmost page, - * and is just replaced with the new contents. + * leftmost page. The original page becomes the new leftmost page, and + * is just replaced with the new contents. * * For a root-split, allocate new buffers for all child pages, the * original page is overwritten with new root page containing @@ -414,8 +414,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, if (is_rootsplit) { IndexTuple *downlinks; - int ndownlinks = 0; - int i; + int ndownlinks = 0; + int i; rootpg.buffer = buffer; rootpg.page = PageGetTempPageCopySpecial(BufferGetPage(rootpg.buffer)); @@ -443,6 +443,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, for (ptr = dist; ptr; ptr = ptr->next) { GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo)); + si->buf = ptr->buffer; si->downlink = ptr->itup; *splitinfo = lappend(*splitinfo, si); @@ -455,7 +456,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, */ for (ptr = dist; ptr; ptr = ptr->next) { - char *data = (char *) (ptr->list); + char *data = (char *) (ptr->list); + for (i = 0; i < ptr->block.num; i++) { if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber) @@ -495,8 +497,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, MarkBufferDirty(leftchildbuf); /* - * The first page in the chain was a temporary working copy meant - * to replace the old page. Copy it over the old page. + * The first page in the chain was a temporary working copy meant to + * replace the old page. Copy it over the old page. */ PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer)); dist->page = BufferGetPage(dist->buffer); @@ -518,8 +520,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, * Return the new child buffers to the caller. * * If this was a root split, we've already inserted the downlink - * pointers, in the form of a new root page. Therefore we can - * release all the new buffers, and keep just the root page locked. + * pointers, in the form of a new root page. Therefore we can release + * all the new buffers, and keep just the root page locked. 
*/ if (is_rootsplit) { @@ -572,20 +574,20 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate, /* * If we inserted the downlink for a child page, set NSN and clear - * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know - * to follow the rightlink if and only if they looked at the parent page + * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to + * follow the rightlink if and only if they looked at the parent page * before we inserted the downlink. * * Note that we do this *after* writing the WAL record. That means that - * the possible full page image in the WAL record does not include - * these changes, and they must be replayed even if the page is restored - * from the full page image. There's a chicken-and-egg problem: if we - * updated the child pages first, we wouldn't know the recptr of the WAL - * record we're about to write. + * the possible full page image in the WAL record does not include these + * changes, and they must be replayed even if the page is restored from + * the full page image. There's a chicken-and-egg problem: if we updated + * the child pages first, we wouldn't know the recptr of the WAL record + * we're about to write. */ if (BufferIsValid(leftchildbuf)) { - Page leftpg = BufferGetPage(leftchildbuf); + Page leftpg = BufferGetPage(leftchildbuf); GistPageGetOpaque(leftpg)->nsn = recptr; GistClearFollowRight(leftpg); @@ -636,8 +638,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) stack->buffer = ReadBuffer(state.r, stack->blkno); /* - * Be optimistic and grab shared lock first. Swap it for an - * exclusive lock later if we need to update the page. + * Be optimistic and grab shared lock first. Swap it for an exclusive + * lock later if we need to update the page. */ if (!xlocked) { @@ -650,9 +652,9 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) Assert(!RelationNeedsWAL(state.r) || !XLogRecPtrIsInvalid(stack->lsn)); /* - * If this page was split but the downlink was never inserted to - * the parent because the inserting backend crashed before doing - * that, fix that now. + * If this page was split but the downlink was never inserted to the + * parent because the inserting backend crashed before doing that, fix + * that now. */ if (GistFollowRight(stack->page)) { @@ -680,8 +682,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) /* * Concurrent split detected. There's no guarantee that the * downlink for this page is consistent with the tuple we're - * inserting anymore, so go back to parent and rechoose the - * best child. + * inserting anymore, so go back to parent and rechoose the best + * child. */ UnlockReleaseBuffer(stack->buffer); xlocked = false; @@ -696,7 +698,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) * Find the child node that has the minimum insertion penalty. */ BlockNumber childblkno; - IndexTuple newtup; + IndexTuple newtup; GISTInsertStack *item; stack->childoffnum = gistchoose(state.r, stack->page, itup, giststate); @@ -722,8 +724,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) if (newtup) { /* - * Swap shared lock for an exclusive one. Beware, the page - * may change while we unlock/lock the page... + * Swap shared lock for an exclusive one. Beware, the page may + * change while we unlock/lock the page... 
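Aside: the "swap shared lock for an exclusive one" comments in the gistdoinsert() hunks all follow the same unlock/relock/revalidate pattern, since another backend may modify the page during the unlocked window. An illustrative sketch only; the lock functions are extern stand-ins, and the real code revalidates by rechecking the page LSN and split flags:

#include <stdbool.h>
#include <stdint.h>

typedef int Buffer;

/* stand-ins for LockBuffer(buf, GIST_UNLOCK / GIST_EXCLUSIVE) etc. */
extern void unlock_buffer(Buffer buf);
extern void lock_buffer_exclusive(Buffer buf);
extern uint64_t page_lsn(Buffer buf);

/*
 * Upgrade to an exclusive lock. Returns false if the page changed while
 * unlocked, in which case the caller must recheck or restart its descent.
 */
static bool
upgrade_to_exclusive(Buffer buf, uint64_t lsn_seen)
{
    unlock_buffer(buf);
    lock_buffer_exclusive(buf);
    return page_lsn(buf) == lsn_seen;
}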
*/ if (!xlocked) { @@ -738,6 +740,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) continue; } } + /* * Update the tuple. * @@ -752,8 +755,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) stack->childoffnum, InvalidBuffer)) { /* - * If this was a root split, the root page continues to - * be the parent and the updated tuple went to one of the + * If this was a root split, the root page continues to be + * the parent and the updated tuple went to one of the * child pages, so we just need to retry from the root * page. */ @@ -779,13 +782,13 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) { /* * Leaf page. Insert the new key. We've already updated all the - * parents on the way down, but we might have to split the page - * if it doesn't fit. gistinserthere() will take care of that. + * parents on the way down, but we might have to split the page if + * it doesn't fit. gistinserthere() will take care of that. */ /* - * Swap shared lock for an exclusive one. Be careful, the page - * may change while we unlock/lock the page... + * Swap shared lock for an exclusive one. Be careful, the page may + * change while we unlock/lock the page... */ if (!xlocked) { @@ -798,8 +801,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) if (stack->blkno == GIST_ROOT_BLKNO) { /* - * the only page that can become inner instead of leaf - * is the root page, so for root we should recheck it + * the only page that can become inner instead of leaf is + * the root page, so for root we should recheck it */ if (!GistPageIsLeaf(stack->page)) { @@ -1059,21 +1062,23 @@ static IndexTuple gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate, GISTInsertStack *stack) { - Page page = BufferGetPage(buf); + Page page = BufferGetPage(buf); OffsetNumber maxoff; OffsetNumber offset; - IndexTuple downlink = NULL; + IndexTuple downlink = NULL; maxoff = PageGetMaxOffsetNumber(page); for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset)) { IndexTuple ituple = (IndexTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); + if (downlink == NULL) downlink = CopyIndexTuple(ituple); else { - IndexTuple newdownlink; + IndexTuple newdownlink; + newdownlink = gistgetadjusted(rel, downlink, ituple, giststate); if (newdownlink) @@ -1082,19 +1087,18 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate, } /* - * If the page is completely empty, we can't form a meaningful - * downlink for it. But we have to insert a downlink for the page. - * Any key will do, as long as its consistent with the downlink of - * parent page, so that we can legally insert it to the parent. - * A minimal one that matches as few scans as possible would be best, - * to keep scans from doing useless work, but we don't know how to - * construct that. So we just use the downlink of the original page - * that was split - that's as far from optimal as it can get but will - * do.. + * If the page is completely empty, we can't form a meaningful downlink + * for it. But we have to insert a downlink for the page. Any key will do, + * as long as its consistent with the downlink of parent page, so that we + * can legally insert it to the parent. A minimal one that matches as few + * scans as possible would be best, to keep scans from doing useless work, + * but we don't know how to construct that. 
So we just use the downlink of + * the original page that was split - that's as far from optimal as it can + * get but will do.. */ if (!downlink) { - ItemId iid; + ItemId iid; LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE); gistFindCorrectParent(rel, stack); @@ -1131,13 +1135,13 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate) buf = stack->buffer; /* - * Read the chain of split pages, following the rightlinks. Construct - * a downlink tuple for each page. + * Read the chain of split pages, following the rightlinks. Construct a + * downlink tuple for each page. */ for (;;) { GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo)); - IndexTuple downlink; + IndexTuple downlink; page = BufferGetPage(buf); @@ -1182,8 +1186,8 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack, IndexTuple *tuples, int ntup, OffsetNumber oldoffnum, Buffer leftchild) { - List *splitinfo; - bool is_split; + List *splitinfo; + bool is_split; is_split = gistplacetopage(state, giststate, stack->buffer, tuples, ntup, oldoffnum, @@ -1204,21 +1208,21 @@ static void gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack, GISTSTATE *giststate, List *splitinfo) { - ListCell *lc; - List *reversed; + ListCell *lc; + List *reversed; GISTPageSplitInfo *right; GISTPageSplitInfo *left; - IndexTuple tuples[2]; + IndexTuple tuples[2]; /* A split always contains at least two halves */ Assert(list_length(splitinfo) >= 2); /* - * We need to insert downlinks for each new page, and update the - * downlink for the original (leftmost) page in the split. Begin at - * the rightmost page, inserting one downlink at a time until there's - * only two pages left. Finally insert the downlink for the last new - * page and update the downlink for the original page as one operation. + * We need to insert downlinks for each new page, and update the downlink + * for the original (leftmost) page in the split. Begin at the rightmost + * page, inserting one downlink at a time until there's only two pages + * left. Finally insert the downlink for the last new page and update the + * downlink for the original page as one operation. */ /* for convenience, create a copy of the list in reverse order */ @@ -1231,7 +1235,7 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack, LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE); gistFindCorrectParent(state->r, stack); - while(list_length(reversed) > 2) + while (list_length(reversed) > 2) { right = (GISTPageSplitInfo *) linitial(reversed); left = (GISTPageSplitInfo *) lsecond(reversed); @@ -1386,7 +1390,7 @@ initGISTstate(GISTSTATE *giststate, Relation index) /* opclasses are not required to provide a Distance method */ if (OidIsValid(index_getprocid(index, i + 1, GIST_DISTANCE_PROC))) fmgr_info_copy(&(giststate->distanceFn[i]), - index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC), + index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC), CurrentMemoryContext); else giststate->distanceFn[i].fn_oid = InvalidOid; diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 8355081553..e4488a925d 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -32,7 +32,7 @@ * * On success return for a heap tuple, *recheck_p is set to indicate * whether recheck is needed. We recheck if any of the consistent() functions - * request it. recheck is not interesting when examining a non-leaf entry, + * request it. 
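The reversed-list walk that gistfinishsplit's comment describes can be sketched as below. The gistinserttuples() parameter list here is an assumption (the hunk headers truncate the real signature); the loop shape matches the while (list_length(reversed) > 2) visible in this hunk:

    /* lcons prepends, so this builds a reversed copy of splitinfo */
    reversed = NIL;
    foreach(lc, splitinfo)
        reversed = lcons(lfirst(lc), reversed);

    while (list_length(reversed) > 2)
    {
        GISTPageSplitInfo *right = (GISTPageSplitInfo *) linitial(reversed);
        GISTPageSplitInfo *left = (GISTPageSplitInfo *) lsecond(reversed);

        /* insert the downlink for 'right'; passing left->buf lets the
         * insertion clear F_FOLLOW_RIGHT on its left sibling */
        gistinserttuples(state, stack->parent, giststate,
                         &right->downlink, 1, InvalidOffsetNumber,
                         left->buf);
        reversed = list_delete_first(reversed);
    }
    /* two entries remain: add the last new downlink and replace the
     * original page's downlink in a single operation */

Working right-to-left preserves the invariant that any page not yet linked from the parent is still reachable through rightlinks from a page that is.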
recheck is not interesting when examining a non-leaf entry, * since we must visit the lower index page if there's any doubt. * * If we are doing an ordered scan, so->distances[] is filled with distance @@ -62,15 +62,15 @@ gistindex_keytest(IndexScanDesc scan, *recheck_p = false; /* - * If it's a leftover invalid tuple from pre-9.1, treat it as a match - * with minimum possible distances. This means we'll always follow it - * to the referenced page. + * If it's a leftover invalid tuple from pre-9.1, treat it as a match with + * minimum possible distances. This means we'll always follow it to the + * referenced page. */ if (GistTupleIsInvalid(tuple)) { - int i; + int i; - if (GistPageIsLeaf(page)) /* shouldn't happen */ + if (GistPageIsLeaf(page)) /* shouldn't happen */ elog(ERROR, "invalid GIST tuple found on leaf page"); for (i = 0; i < scan->numberOfOrderBys; i++) so->distances[i] = -get_float8_infinity(); @@ -191,8 +191,8 @@ gistindex_keytest(IndexScanDesc scan, * always be zero, but might as well pass it for possible future * use.) * - * Note that Distance functions don't get a recheck argument. - * We can't tolerate lossy distance calculations on leaf tuples; + * Note that Distance functions don't get a recheck argument. We + * can't tolerate lossy distance calculations on leaf tuples; * there is no opportunity to re-sort the tuples afterwards. */ dist = FunctionCall4(&key->sk_func, @@ -223,7 +223,7 @@ gistindex_keytest(IndexScanDesc scan, * ntids: if not NULL, gistgetbitmap's output tuple counter * * If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap - * tuples should be reported directly into the bitmap. If they are NULL, + * tuples should be reported directly into the bitmap. If they are NULL, * we're doing a plain or ordered indexscan. For a plain indexscan, heap * tuple TIDs are returned into so->pageData[]. For an ordered indexscan, * heap tuple TIDs are pushed into individual search queue items. @@ -525,8 +525,8 @@ gistgettuple(PG_FUNCTION_ARGS) /* * While scanning a leaf page, ItemPointers of matching heap * tuples are stored in so->pageData. If there are any on - * this page, we fall out of the inner "do" and loop around - * to return them. + * this page, we fall out of the inner "do" and loop around to + * return them. 
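The tbm/ntids branching described above (amgetbitmap versus plain or ordered scans) reduces to a small dispatch per matching leaf tuple. A sketch, where the pageData element field names are assumptions inferred from this hunk's references to so->pageData:

    if (tbm != NULL)
    {
        /* bitmap scan: report the heap TID straight into the bitmap */
        tbm_add_tuples(tbm, &it->t_tid, 1, recheck);
        (*ntids)++;
    }
    else
    {
        /* plain scan: stash the TID; gistgettuple hands it out later */
        so->pageData[so->nPageData].heapPtr = it->t_tid;
        so->pageData[so->nPageData].recheck = recheck;
        so->nPageData++;
    }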
*/ gistScanPage(scan, item, so->curTreeItem->distances, NULL, NULL); diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c index 86a5d90f95..43c4b1251b 100644 --- a/src/backend/access/gist/gistproc.c +++ b/src/backend/access/gist/gistproc.c @@ -904,7 +904,7 @@ gist_point_compress(PG_FUNCTION_ARGS) PG_RETURN_POINTER(entry); } -#define point_point_distance(p1,p2) \ +#define point_point_distance(p1,p2) \ DatumGetFloat8(DirectFunctionCall2(point_distance, \ PointPGetDatum(p1), PointPGetDatum(p2))) @@ -949,8 +949,8 @@ computeDistance(bool isLeaf, BOX *box, Point *point) else { /* closest point will be a vertex */ - Point p; - double subresult; + Point p; + double subresult; result = point_point_distance(point, &box->low); diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 0a125e772d..67308ed37e 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -57,9 +57,9 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg) /* * If new item is heap tuple, it goes to front of chain; otherwise insert - * it before the first index-page item, so that index pages are visited - * in LIFO order, ensuring depth-first search of index pages. See - * comments in gist_private.h. + * it before the first index-page item, so that index pages are visited in + * LIFO order, ensuring depth-first search of index pages. See comments + * in gist_private.h. */ if (GISTSearchItemIsHeap(*newitem)) { @@ -136,6 +136,7 @@ gistrescan(PG_FUNCTION_ARGS) IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); ScanKey key = (ScanKey) PG_GETARG_POINTER(1); ScanKey orderbys = (ScanKey) PG_GETARG_POINTER(3); + /* nkeys and norderbys arguments are ignored */ GISTScanOpaque so = (GISTScanOpaque) scan->opaque; int i; @@ -164,8 +165,8 @@ gistrescan(PG_FUNCTION_ARGS) scan->numberOfKeys * sizeof(ScanKeyData)); /* - * Modify the scan key so that the Consistent method is called for - * all comparisons. The original operator is passed to the Consistent + * Modify the scan key so that the Consistent method is called for all + * comparisons. The original operator is passed to the Consistent * function in the form of its strategy number, which is available * from the sk_strategy field, and its subtype from the sk_subtype * field. Also, preserve sk_func.fn_collation which is the input diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index 6736fd166c..e8bbd564c7 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -503,11 +503,12 @@ gistFormTuple(GISTSTATE *giststate, Relation r, } res = index_form_tuple(giststate->tupdesc, compatt, isnull); + /* * The offset number on tuples on internal pages is unused. For historical * reasons, it is set 0xffff. 
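computeDistance() in the gistproc.c hunk above classifies a point against a box: zero inside, a perpendicular gap when only one axis falls outside, and a vertex distance at the corners. The same result can be written as one clamped-delta computation; this reuses the Pt/Box stand-ins (and <math.h>) from the union sketch earlier, and is an equivalent formulation, not the patched function:

    static double
    point_box_distance(const Pt *p, const Box *b)
    {
        double dx = 0.0, dy = 0.0;

        if (p->x < b->low.x)   dx = b->low.x - p->x;
        if (p->x > b->high.x)  dx = p->x - b->high.x;
        if (p->y < b->low.y)   dy = b->low.y - p->y;
        if (p->y > b->high.y)  dy = p->y - b->high.y;

        return sqrt(dx * dx + dy * dy);   /* 0.0 when the point is inside */
    }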
*/ - ItemPointerSetOffsetNumber( &(res->t_tid), 0xffff); + ItemPointerSetOffsetNumber(&(res->t_tid), 0xffff); return res; } diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index 0f406e16c4..51354c1c18 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -41,12 +41,12 @@ static void gistRedoClearFollowRight(RelFileNode node, XLogRecPtr lsn, BlockNumber leftblkno) { - Buffer buffer; + Buffer buffer; buffer = XLogReadBuffer(node, leftblkno, false); if (BufferIsValid(buffer)) { - Page page = (Page) BufferGetPage(buffer); + Page page = (Page) BufferGetPage(buffer); /* * Note that we still update the page even if page LSN is equal to the @@ -103,6 +103,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record) { int i; OffsetNumber *todelete = (OffsetNumber *) data; + data += sizeof(OffsetNumber) * xldata->ntodelete; for (i = 0; i < xldata->ntodelete; i++) @@ -115,12 +116,14 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record) if (data - begin < record->xl_len) { OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber : - OffsetNumberNext(PageGetMaxOffsetNumber(page)); + OffsetNumberNext(PageGetMaxOffsetNumber(page)); + while (data - begin < record->xl_len) { - IndexTuple itup = (IndexTuple) data; + IndexTuple itup = (IndexTuple) data; Size sz = IndexTupleSize(itup); OffsetNumber l; + data += sz; l = PageAddItem(page, (Item) itup, sz, off, false, false); @@ -418,7 +421,7 @@ gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf, SplitedPageLayout *ptr; int npage = 0, cur; - XLogRecPtr recptr; + XLogRecPtr recptr; for (ptr = dist; ptr; ptr = ptr->next) npage++; @@ -540,8 +543,8 @@ gistXLogUpdate(RelFileNode node, Buffer buffer, } /* - * Include a full page image of the child buf. (only necessary if - * a checkpoint happened since the child page was split) + * Include a full page image of the child buf. (only necessary if a + * checkpoint happened since the child page was split) */ if (BufferIsValid(leftchildbuf)) { diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index f19e5627f8..4cb29b2bb4 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -413,6 +413,7 @@ hashrescan(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1); + /* remaining arguments are ignored */ HashScanOpaque so = (HashScanOpaque) scan->opaque; Relation rel = scan->indexRelation; diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 89697f6ff5..1fbd8b39b4 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -1070,7 +1070,7 @@ relation_close(Relation relation, LOCKMODE lockmode) * This is essentially relation_open plus check that the relation * is not an index nor a composite type. (The caller should also * check that it's not a view or foreign table before assuming it has - * storage.) + * storage.) * ---------------- */ Relation @@ -1922,8 +1922,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, /* * We're about to do the actual insert -- check for conflict at the - * relation or buffer level first, to avoid possibly having to roll - * back work we've just done. + * relation or buffer level first, to avoid possibly having to roll back + * work we've just done. 
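The recurring "check for conflict first" comments in the heapam.c hunks below encode an ordering rule for serializable-isolation checks: raise any serialization failure while the buffer is still unmodified, so nothing has to be undone. As a pattern (heaptup names the tuple about to be placed; a simplified sketch, not the full insert path):

    /* may ereport(ERROR, ...); the buffer has not been touched yet */
    CheckForSerializableConflictIn(relation, NULL, buffer);

    /* only now mutate the page */
    RelationPutHeapTuple(relation, buffer, heaptup);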
*/ CheckForSerializableConflictIn(relation, NULL, buffer); @@ -2228,8 +2228,8 @@ l1: } /* - * We're about to do the actual delete -- check for conflict first, - * to avoid possibly having to roll back work we've just done. + * We're about to do the actual delete -- check for conflict first, to + * avoid possibly having to roll back work we've just done. */ CheckForSerializableConflictIn(relation, &tp, buffer); @@ -2587,8 +2587,8 @@ l2: } /* - * We're about to do the actual update -- check for conflict first, - * to avoid possibly having to roll back work we've just done. + * We're about to do the actual update -- check for conflict first, to + * avoid possibly having to roll back work we've just done. */ CheckForSerializableConflictIn(relation, &oldtup, buffer); @@ -2737,8 +2737,8 @@ l2: } /* - * We're about to create the new tuple -- check for conflict first, - * to avoid possibly having to roll back work we've just done. + * We're about to create the new tuple -- check for conflict first, to + * avoid possibly having to roll back work we've just done. * * NOTE: For a tuple insert, we only need to check for table locks, since * predicate locking at the index level will cover ranges for anything @@ -3860,12 +3860,12 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, } /* - * Ignore tuples inserted by an aborted transaction or - * if the tuple was updated/deleted by the inserting transaction. + * Ignore tuples inserted by an aborted transaction or if the tuple was + * updated/deleted by the inserting transaction. * * Look for a committed hint bit, or if no xmin bit is set, check clog. - * This needs to work on both master and standby, where it is used - * to assess btree delete records. + * This needs to work on both master and standby, where it is used to + * assess btree delete records. */ if ((tuple->t_infomask & HEAP_XMIN_COMMITTED) || (!(tuple->t_infomask & HEAP_XMIN_COMMITTED) && @@ -3874,7 +3874,7 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, { if (xmax != xmin && TransactionIdFollows(xmax, *latestRemovedXid)) - *latestRemovedXid = xmax; + *latestRemovedXid = xmax; } /* *latestRemovedXid may still be invalid at end */ @@ -4158,8 +4158,8 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata); /* - * The page may be uninitialized. If so, we can't set the LSN - * and TLI because that would corrupt the page. + * The page may be uninitialized. If so, we can't set the LSN and TLI + * because that would corrupt the page. */ if (!PageIsNew(page)) { @@ -4352,8 +4352,8 @@ heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record) memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ); /* - * The page may be uninitialized. If so, we can't set the LSN - * and TLI because that would corrupt the page. + * The page may be uninitialized. If so, we can't set the LSN and TLI + * because that would corrupt the page. 
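Per the comment just above, the guard applied by log_newpage and heap_xlog_newpage presumably completes like this: an all-zero page must stay all-zero, because stamping it would write into its header and make it look initialized (PageSetTLI still existed in this era):

    if (!PageIsNew(page))
    {
        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }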
*/ if (!PageIsNew(page)) { diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 2849992528..72a69e52b0 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -150,7 +150,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock, Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, - struct BulkInsertStateData *bistate) + struct BulkInsertStateData * bistate) { bool use_fsm = !(options & HEAP_INSERT_SKIP_FSM); Buffer buffer = InvalidBuffer; diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index c710f1d316..e56140950a 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -131,7 +131,7 @@ typedef struct RewriteStateData * them */ HTAB *rs_unresolved_tups; /* unmatched A tuples */ HTAB *rs_old_new_tid_map; /* unmatched B tuples */ -} RewriteStateData; +} RewriteStateData; /* * The lookup keys for the hash tables are tuple TID and xmin (we must check @@ -277,7 +277,7 @@ end_heap_rewrite(RewriteState state) } /* - * If the rel is WAL-logged, must fsync before commit. We use heap_sync + * If the rel is WAL-logged, must fsync before commit. We use heap_sync * to ensure that the toast table gets fsync'd too. * * It's obvious that we must do this when not WAL-logging. It's less diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 88f73e8241..66af2c37c5 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -872,7 +872,7 @@ index_getprocinfo(Relation irel, procnum, attnum, RelationGetRelationName(irel)); fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt); - fmgr_info_set_collation(irel->rd_indcollation[attnum-1], locinfo); + fmgr_info_set_collation(irel->rd_indcollation[attnum - 1], locinfo); } return locinfo; diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 0dd745f19a..219f94fd0d 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -179,8 +179,8 @@ top: * The only conflict predicate locking cares about for indexes is when * an index tuple insert conflicts with an existing lock. Since the * actual location of the insert is hard to predict because of the - * random search used to prevent O(N^2) performance when there are many - * duplicate entries, we can just use the "first valid" page. + * random search used to prevent O(N^2) performance when there are + * many duplicate entries, we can just use the "first valid" page. */ CheckForSerializableConflictIn(rel, NULL, buf); /* do the insertion */ @@ -915,13 +915,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, /* * origpage is the original page to be split. leftpage is a temporary * buffer that receives the left-sibling data, which will be copied back - * into origpage on success. rightpage is the new page that receives - * the right-sibling data. If we fail before reaching the critical - * section, origpage hasn't been modified and leftpage is only workspace. - * In principle we shouldn't need to worry about rightpage either, - * because it hasn't been linked into the btree page structure; but to - * avoid leaving possibly-confusing junk behind, we are careful to rewrite - * rightpage as zeroes before throwing any error. + * into origpage on success. rightpage is the new page that receives the + * right-sibling data. 
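_bt_split's workspace strategy, as reworded above, keeps origpage untouched until the critical section. A rough sketch of the mechanics (page initialization and the actual item distribution are omitted):

    origpage  = BufferGetPage(buf);
    leftpage  = PageGetTempPage(origpage);   /* scratch copy for the left half */
    rightpage = BufferGetPage(rbuf);         /* freshly allocated right sibling */

    /* ... distribute items between leftpage and rightpage ... */

    START_CRIT_SECTION();
    /* origpage is only overwritten once nothing can fail anymore */
    PageRestoreTempPage(leftpage, origpage);
    /* ... XLOG the split, update sibling links ... */
    END_CRIT_SECTION();

An error before the critical section therefore leaves origpage valid, and rightpage is rewritten as zeroes on the error paths, as the memset in the next hunk shows.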
If we fail before reaching the critical section, + * origpage hasn't been modified and leftpage is only workspace. In + * principle we shouldn't need to worry about rightpage either, because it + * hasn't been linked into the btree page structure; but to avoid leaving + * possibly-confusing junk behind, we are careful to rewrite rightpage as + * zeroes before throwing any error. */ origpage = BufferGetPage(buf); leftpage = PageGetTempPage(origpage); @@ -1118,7 +1118,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, { memset(rightpage, 0, BufferGetPageSize(rbuf)); elog(ERROR, "right sibling's left-link doesn't match: " - "block %u links to %u instead of expected %u in index \"%s\"", + "block %u links to %u instead of expected %u in index \"%s\"", oopaque->btpo_next, sopaque->btpo_prev, origpagenumber, RelationGetRelationName(rel)); } diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 27964455f7..2477736281 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -1268,9 +1268,9 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack) /* * Check that the parent-page index items we're about to delete/overwrite - * contain what we expect. This can fail if the index has become - * corrupt for some reason. We want to throw any error before entering - * the critical section --- otherwise it'd be a PANIC. + * contain what we expect. This can fail if the index has become corrupt + * for some reason. We want to throw any error before entering the + * critical section --- otherwise it'd be a PANIC. * * The test on the target item is just an Assert because _bt_getstackbuf * should have guaranteed it has the expected contents. The test on the diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 7a0e1a9c25..6a7ddd7db4 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -220,7 +220,7 @@ btbuildempty(PG_FUNCTION_ARGS) metapage = (Page) palloc(BLCKSZ); _bt_initmetapage(metapage, P_NONE, 0); - /* Write the page. If archiving/streaming, XLOG it. */ + /* Write the page. If archiving/streaming, XLOG it. */ smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE, (char *) metapage, true); if (XLogIsNeeded()) @@ -403,6 +403,7 @@ btrescan(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1); + /* remaining arguments are ignored */ BTScanOpaque so = (BTScanOpaque) scan->opaque; diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index cb78a1bae1..91f8cadea5 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -65,7 +65,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, /* If index is empty and access = BT_READ, no root page is created. */ if (!BufferIsValid(*bufP)) { - PredicateLockRelation(rel); /* Nothing finer to lock exists. */ + PredicateLockRelation(rel); /* Nothing finer to lock exists. */ return (BTStack) NULL; } @@ -1364,7 +1364,7 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost) if (!BufferIsValid(buf)) { /* empty index... */ - PredicateLockRelation(rel); /* Nothing finer to lock exists. */ + PredicateLockRelation(rel); /* Nothing finer to lock exists. */ return InvalidBuffer; } @@ -1444,7 +1444,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) if (!BufferIsValid(buf)) { /* empty index... 
*/ - PredicateLockRelation(rel); /* Nothing finer to lock exists. */ + PredicateLockRelation(rel); /* Nothing finer to lock exists. */ so->currPos.buf = InvalidBuffer; return false; } diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index fd0e86a6aa..256a7f9f98 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -799,7 +799,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) /* * If the index is WAL-logged, we must fsync it down to disk before it's - * safe to commit the transaction. (For a non-WAL-logged index we don't + * safe to commit the transaction. (For a non-WAL-logged index we don't * care since the index will be uninteresting after a crash anyway.) * * It's obvious that we must do this when not WAL-logging the build. It's diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index add932d942..d448ba6a50 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -70,8 +70,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup) /* * We can use the cached (default) support procs since no cross-type - * comparison can be needed. The cached support proc entries have - * the right collation for the index, too. + * comparison can be needed. The cached support proc entries have the + * right collation for the index, too. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); arg = index_getattr(itup, i + 1, itupdesc, &null); @@ -120,8 +120,8 @@ _bt_mkscankey_nodata(Relation rel) /* * We can use the cached (default) support procs since no cross-type - * comparison can be needed. The cached support proc entries have - * the right collation for the index, too. + * comparison can be needed. The cached support proc entries have the + * right collation for the index, too. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); flags = SK_ISNULL | (indoption[i] << SK_BT_INDOPTION_SHIFT); diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 729c7b72e0..281268120e 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -120,7 +120,7 @@ typedef struct GlobalTransactionData TransactionId locking_xid; /* top-level XID of backend working on xact */ bool valid; /* TRUE if fully prepared */ char gid[GIDSIZE]; /* The GID assigned to the prepared xact */ -} GlobalTransactionData; +} GlobalTransactionData; /* * Two Phase Commit shared state. Access to this struct is protected @@ -1029,8 +1029,8 @@ EndPrepare(GlobalTransaction gxact) /* If we crash now, we have prepared: WAL replay will fix things */ /* - * Wake up all walsenders to send WAL up to the PREPARE record - * immediately if replication is enabled + * Wake up all walsenders to send WAL up to the PREPARE record immediately + * if replication is enabled */ if (max_wal_senders > 0) WalSndWakeup(); @@ -2043,8 +2043,8 @@ RecordTransactionCommitPrepared(TransactionId xid, /* * Wait for synchronous replication, if required. * - * Note that at this stage we have marked clog, but still show as - * running in the procarray and continue to hold locks. + * Note that at this stage we have marked clog, but still show as running + * in the procarray and continue to hold locks. */ SyncRepWaitForLSN(recptr); } @@ -2130,8 +2130,8 @@ RecordTransactionAbortPrepared(TransactionId xid, /* * Wait for synchronous replication, if required. 
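The twophase.c comments below ("Note that at this stage we have marked clog...") describe the commit-prepared sequence. A simplified sketch of the ordering, with error handling and critical sections omitted:

    recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT_PREPARED, rdata);
    XLogFlush(recptr);

    /* mark the transaction (and its subxacts) committed in pg_clog */
    TransactionIdCommitTree(xid, nchildren, children);

    /*
     * Only now wait for synchronous standbys: clog already says
     * "committed", but we still appear as running in the procarray
     * and still hold our locks, so no one sees inconsistent state.
     */
    SyncRepWaitForLSN(recptr);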
* - * Note that at this stage we have marked clog, but still show as - * running in the procarray and continue to hold locks. + * Note that at this stage we have marked clog, but still show as running + * in the procarray and continue to hold locks. */ SyncRepWaitForLSN(recptr); } diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index a828b3de48..500335bd6f 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -355,9 +355,9 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid) char *oldest_datname; /* - * We can be called when not inside a transaction, for example - * during StartupXLOG(). In such a case we cannot do database - * access, so we must just report the oldest DB's OID. + * We can be called when not inside a transaction, for example during + * StartupXLOG(). In such a case we cannot do database access, so we + * must just report the oldest DB's OID. * * Note: it's also possible that get_database_name fails and returns * NULL, for example because the database just got dropped. We'll diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 55aee87910..8a4c4eccd7 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -420,11 +420,11 @@ AssignTransactionId(TransactionState s) */ if (isSubXact && !TransactionIdIsValid(s->parent->transactionId)) { - TransactionState p = s->parent; - TransactionState *parents; - size_t parentOffset = 0; + TransactionState p = s->parent; + TransactionState *parents; + size_t parentOffset = 0; - parents = palloc(sizeof(TransactionState) * s->nestingLevel); + parents = palloc(sizeof(TransactionState) * s->nestingLevel); while (p != NULL && !TransactionIdIsValid(p->transactionId)) { parents[parentOffset++] = p; @@ -432,8 +432,8 @@ AssignTransactionId(TransactionState s) } /* - * This is technically a recursive call, but the recursion will - * never be more than one layer deep. + * This is technically a recursive call, but the recursion will never + * be more than one layer deep. */ while (parentOffset != 0) AssignTransactionId(parents[--parentOffset]); @@ -1037,16 +1037,17 @@ RecordTransactionCommit(void) /* * Check if we want to commit asynchronously. We can allow the XLOG flush * to happen asynchronously if synchronous_commit=off, or if the current - * transaction has not performed any WAL-logged operation. The latter case - * can arise if the current transaction wrote only to temporary and/or - * unlogged tables. In case of a crash, the loss of such a transaction - * will be irrelevant since temp tables will be lost anyway, and unlogged - * tables will be truncated. (Given the foregoing, you might think that it - * would be unnecessary to emit the XLOG record at all in this case, but we - * don't currently try to do that. It would certainly cause problems at - * least in Hot Standby mode, where the KnownAssignedXids machinery - * requires tracking every XID assignment. It might be OK to skip it only - * when wal_level < hot_standby, but for now we don't.) + * transaction has not performed any WAL-logged operation. The latter + * case can arise if the current transaction wrote only to temporary + * and/or unlogged tables. In case of a crash, the loss of such a + * transaction will be irrelevant since temp tables will be lost anyway, + * and unlogged tables will be truncated. 
(Given the foregoing, you might + * think that it would be unnecessary to emit the XLOG record at all in + * this case, but we don't currently try to do that. It would certainly + * cause problems at least in Hot Standby mode, where the + * KnownAssignedXids machinery requires tracking every XID assignment. It + * might be OK to skip it only when wal_level < hot_standby, but for now + * we don't.) * * However, if we're doing cleanup of any non-temp rels or committing any * command that wanted to force sync commit, then we must flush XLOG @@ -1130,8 +1131,8 @@ RecordTransactionCommit(void) /* * Wait for synchronous replication, if required. * - * Note that at this stage we have marked clog, but still show as - * running in the procarray and continue to hold locks. + * Note that at this stage we have marked clog, but still show as running + * in the procarray and continue to hold locks. */ SyncRepWaitForLSN(XactLastRecEnd); @@ -1785,10 +1786,10 @@ CommitTransaction(void) } /* - * The remaining actions cannot call any user-defined code, so it's - * safe to start shutting down within-transaction services. But note - * that most of this stuff could still throw an error, which would - * switch us into the transaction-abort path. + * The remaining actions cannot call any user-defined code, so it's safe + * to start shutting down within-transaction services. But note that most + * of this stuff could still throw an error, which would switch us into + * the transaction-abort path. */ /* Shut down the deferred-trigger manager */ @@ -1805,8 +1806,8 @@ CommitTransaction(void) /* * Mark serializable transaction as complete for predicate locking - * purposes. This should be done as late as we can put it and still - * allow errors to be raised for failure patterns found at commit. + * purposes. This should be done as late as we can put it and still allow + * errors to be raised for failure patterns found at commit. */ PreCommit_CheckForSerializationFailure(); @@ -1988,10 +1989,10 @@ PrepareTransaction(void) } /* - * The remaining actions cannot call any user-defined code, so it's - * safe to start shutting down within-transaction services. But note - * that most of this stuff could still throw an error, which would - * switch us into the transaction-abort path. + * The remaining actions cannot call any user-defined code, so it's safe + * to start shutting down within-transaction services. But note that most + * of this stuff could still throw an error, which would switch us into + * the transaction-abort path. */ /* Shut down the deferred-trigger manager */ @@ -2008,8 +2009,8 @@ PrepareTransaction(void) /* * Mark serializable transaction as complete for predicate locking - * purposes. This should be done as late as we can put it and still - * allow errors to be raised for failure patterns found at commit. + * purposes. This should be done as late as we can put it and still allow + * errors to be raised for failure patterns found at commit. 
*/ PreCommit_CheckForSerializationFailure(); diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index b31c79ebbd..9c45759661 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -64,7 +64,7 @@ /* File path names (all relative to $PGDATA) */ #define RECOVERY_COMMAND_FILE "recovery.conf" #define RECOVERY_COMMAND_DONE "recovery.done" -#define PROMOTE_SIGNAL_FILE "promote" +#define PROMOTE_SIGNAL_FILE "promote" /* User-settable parameters */ @@ -160,6 +160,7 @@ static XLogRecPtr LastRec; * known, need to check the shared state". */ static bool LocalRecoveryInProgress = true; + /* * Local copy of SharedHotStandbyActive variable. False actually means "not * known, need to check the shared state". @@ -355,10 +356,9 @@ typedef struct XLogCtlInsert /* * exclusiveBackup is true if a backup started with pg_start_backup() is * in progress, and nonExclusiveBackups is a counter indicating the number - * of streaming base backups currently in progress. forcePageWrites is - * set to true when either of these is non-zero. lastBackupStart is the - * latest checkpoint redo location used as a starting point for an online - * backup. + * of streaming base backups currently in progress. forcePageWrites is set + * to true when either of these is non-zero. lastBackupStart is the latest + * checkpoint redo location used as a starting point for an online backup. */ bool exclusiveBackup; int nonExclusiveBackups; @@ -388,7 +388,7 @@ typedef struct XLogCtlData XLogwrtResult LogwrtResult; uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */ TransactionId ckptXid; - XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */ + XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */ uint32 lastRemovedLog; /* latest removed/recycled XLOG segment */ uint32 lastRemovedSeg; @@ -425,9 +425,9 @@ typedef struct XLogCtlData bool SharedHotStandbyActive; /* - * recoveryWakeupLatch is used to wake up the startup process to - * continue WAL replay, if it is waiting for WAL to arrive or failover - * trigger file to appear. + * recoveryWakeupLatch is used to wake up the startup process to continue + * WAL replay, if it is waiting for WAL to arrive or failover trigger file + * to appear. */ Latch recoveryWakeupLatch; @@ -576,7 +576,7 @@ typedef struct xl_parameter_change /* logs restore point */ typedef struct xl_restore_point { - TimestampTz rp_time; + TimestampTz rp_time; char rp_name[MAXFNAMELEN]; } xl_restore_point; @@ -4272,27 +4272,29 @@ existsTimeLineHistory(TimeLineID probeTLI) static bool rescanLatestTimeLine(void) { - TimeLineID newtarget; + TimeLineID newtarget; + newtarget = findNewestTimeLine(recoveryTargetTLI); if (newtarget != recoveryTargetTLI) { /* * Determine the list of expected TLIs for the new TLI */ - List *newExpectedTLIs; + List *newExpectedTLIs; + newExpectedTLIs = readTimeLineHistory(newtarget); /* - * If the current timeline is not part of the history of the - * new timeline, we cannot proceed to it. + * If the current timeline is not part of the history of the new + * timeline, we cannot proceed to it. * * XXX This isn't foolproof: The new timeline might have forked from * the current one, but before the current recovery location. In that * case we will still switch to the new timeline and proceed replaying * from it even though the history doesn't match what we already * replayed. That's not good. 
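rescanLatestTimeLine's safety check, restated: a standby may only jump to a newer timeline if its current timeline appears in the new one's history, i.e. the new TLI is a descendant. A sketch of the guard (the log message wording here is an assumption):

    newtarget = findNewestTimeLine(recoveryTargetTLI);
    if (newtarget != recoveryTargetTLI)
    {
        List *newExpectedTLIs = readTimeLineHistory(newtarget);

        if (!list_member_int(newExpectedTLIs, (int) recoveryTargetTLI))
        {
            ereport(LOG,
                    (errmsg("new timeline %u is not a child of database system timeline %u",
                            newtarget, recoveryTargetTLI)));
            return false;        /* stay on the current timeline */
        }
        /* otherwise adopt newtarget and the new expected-TLI list */
    }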
We will likely notice at the next online - * checkpoint, as the TLI won't match what we expected, but it's - * not guaranteed. The admin needs to make sure that doesn't happen. + * checkpoint, as the TLI won't match what we expected, but it's not + * guaranteed. The admin needs to make sure that doesn't happen. */ if (!list_member_int(newExpectedTLIs, (int) recoveryTargetTLI)) @@ -4480,7 +4482,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, timestamptz_to_str(recoveryStopTime)); else if (recoveryTarget == RECOVERY_TARGET_NAME) snprintf(buffer, sizeof(buffer), - "%s%u\t%s\tat restore point \"%s\"\n", + "%s%u\t%s\tat restore point \"%s\"\n", (srcfd < 0) ? "" : "\n", parentTLI, xlogfname, @@ -4921,7 +4923,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source) { /* * If we haven't yet changed the boot_val default of -1, just let it - * be. We'll fix it when XLOGShmemSize is called. + * be. We'll fix it when XLOGShmemSize is called. */ if (XLOGbuffers == -1) return true; @@ -4954,8 +4956,8 @@ XLOGShmemSize(void) /* * If the value of wal_buffers is -1, use the preferred auto-tune value. * This isn't an amazingly clean place to do this, but we must wait till - * NBuffers has received its final value, and must do it before using - * the value of XLOGbuffers to do anything important. + * NBuffers has received its final value, and must do it before using the + * value of XLOGbuffers to do anything important. */ if (XLOGbuffers == -1) { @@ -5086,9 +5088,9 @@ BootStrapXLOG(void) /* * Set up information for the initial checkpoint record * - * The initial checkpoint record is written to the beginning of the - * WAL segment with logid=0 logseg=1. The very first WAL segment, 0/0, is - * not used, so that we can use 0/0 to mean "before any valid WAL segment". + * The initial checkpoint record is written to the beginning of the WAL + * segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not + * used, so that we can use 0/0 to mean "before any valid WAL segment". */ checkPoint.redo.xlogid = 0; checkPoint.redo.xrecoff = XLogSegSize + SizeOfXLogLongPHD; @@ -5219,8 +5221,8 @@ readRecoveryCommandFile(void) TimeLineID rtli = 0; bool rtliGiven = false; ConfigVariable *item, - *head = NULL, - *tail = NULL; + *head = NULL, + *tail = NULL; fd = AllocateFile(RECOVERY_COMMAND_FILE, "r"); if (fd == NULL) @@ -5236,7 +5238,7 @@ readRecoveryCommandFile(void) /* * Since we're asking ParseConfigFp() to error out at FATAL, there's no * need to check the return value. 
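The wal_buffers auto-tuning mentioned in the XLOGShmemSize hunk above sizes the WAL buffers from the final NBuffers value. A sketch of the computation; the 1/32 ratio follows the comment's intent, while the exact floor and cap used here are assumptions:

    if (XLOGbuffers == -1)          /* still the boot_val default */
    {
        int xbuffers = NBuffers / 32;          /* 1/32 of shared_buffers */

        if (xbuffers > XLOG_SEG_SIZE / XLOG_BLCKSZ)
            xbuffers = XLOG_SEG_SIZE / XLOG_BLCKSZ;    /* cap: one segment */
        if (xbuffers < 8)
            xbuffers = 8;                              /* modest floor */
        XLOGbuffers = xbuffers;
    }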
- */ + */ ParseConfigFp(fd, RECOVERY_COMMAND_FILE, 0, FATAL, &head, &tail); for (item = head; item; item = item->next) @@ -5312,7 +5314,7 @@ readRecoveryCommandFile(void) * this overrides recovery_target_time */ if (recoveryTarget == RECOVERY_TARGET_XID || - recoveryTarget == RECOVERY_TARGET_NAME) + recoveryTarget == RECOVERY_TARGET_NAME) continue; recoveryTarget = RECOVERY_TARGET_TIME; @@ -5321,7 +5323,7 @@ readRecoveryCommandFile(void) */ recoveryTargetTime = DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in, - CStringGetDatum(item->value), + CStringGetDatum(item->value), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); ereport(DEBUG2, @@ -5610,8 +5612,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) if (recoveryTarget == RECOVERY_TARGET_UNSET) { /* - * Save timestamp of latest transaction commit/abort if this is - * a transaction record + * Save timestamp of latest transaction commit/abort if this is a + * transaction record */ if (record->xl_rmid == RM_XACT_ID) SetLatestXTime(recordXtime); @@ -5636,8 +5638,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) else if (recoveryTarget == RECOVERY_TARGET_NAME) { /* - * There can be many restore points that share the same name, so we stop - * at the first one + * There can be many restore points that share the same name, so we + * stop at the first one */ stopsHere = (strcmp(recordRPName, recoveryTargetName) == 0); @@ -5699,14 +5701,14 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) strncpy(recoveryStopName, recordRPName, MAXFNAMELEN); ereport(LOG, - (errmsg("recovery stopping at restore point \"%s\", time %s", - recoveryStopName, - timestamptz_to_str(recoveryStopTime)))); + (errmsg("recovery stopping at restore point \"%s\", time %s", + recoveryStopName, + timestamptz_to_str(recoveryStopTime)))); } /* - * Note that if we use a RECOVERY_TARGET_TIME then we can stop - * at a restore point since they are timestamped, though the latest + * Note that if we use a RECOVERY_TARGET_TIME then we can stop at a + * restore point since they are timestamped, though the latest * transaction time is not updated. 
*/ if (record->xl_rmid == RM_XACT_ID && recoveryStopAfter) @@ -5732,7 +5734,7 @@ recoveryPausesHere(void) while (RecoveryIsPaused()) { - pg_usleep(1000000L); /* 1000 ms */ + pg_usleep(1000000L); /* 1000 ms */ HandleStartupProcInterrupts(); } } @@ -5742,7 +5744,7 @@ RecoveryIsPaused(void) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; - bool recoveryPause; + bool recoveryPause; SpinLockAcquire(&xlogctl->info_lck); recoveryPause = xlogctl->recoveryPause; @@ -5771,7 +5773,7 @@ pg_xlog_replay_pause(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to control recovery")))); + (errmsg("must be superuser to control recovery")))); if (!RecoveryInProgress()) ereport(ERROR, @@ -5793,7 +5795,7 @@ pg_xlog_replay_resume(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to control recovery")))); + (errmsg("must be superuser to control recovery")))); if (!RecoveryInProgress()) ereport(ERROR, @@ -5815,7 +5817,7 @@ pg_is_xlog_replay_paused(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to control recovery")))); + (errmsg("must be superuser to control recovery")))); if (!RecoveryInProgress()) ereport(ERROR, @@ -5870,7 +5872,7 @@ GetLatestXTime(void) Datum pg_last_xact_replay_timestamp(PG_FUNCTION_ARGS) { - TimestampTz xtime; + TimestampTz xtime; xtime = GetLatestXTime(); if (xtime == 0) @@ -6132,10 +6134,10 @@ StartupXLOG(void) InRecovery = true; /* force recovery even if SHUTDOWNED */ /* - * Make sure that REDO location exists. This may not be - * the case if there was a crash during an online backup, - * which left a backup_label around that references a WAL - * segment that's already been archived. + * Make sure that REDO location exists. This may not be the case + * if there was a crash during an online backup, which left a + * backup_label around that references a WAL segment that's + * already been archived. */ if (XLByteLT(checkPoint.redo, checkPointLoc)) { @@ -6150,7 +6152,7 @@ StartupXLOG(void) ereport(FATAL, (errmsg("could not locate required checkpoint record"), errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir))); - wasShutdown = false; /* keep compiler quiet */ + wasShutdown = false; /* keep compiler quiet */ } /* set flag to delete it later */ haveBackupLabel = true; @@ -6330,9 +6332,9 @@ StartupXLOG(void) /* * We're in recovery, so unlogged relations relations may be trashed - * and must be reset. This should be done BEFORE allowing Hot - * Standby connections, so that read-only backends don't try to - * read whatever garbage is left over from before. + * and must be reset. This should be done BEFORE allowing Hot Standby + * connections, so that read-only backends don't try to read whatever + * garbage is left over from before. */ ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP); @@ -6517,7 +6519,8 @@ StartupXLOG(void) if (recoveryStopsHere(record, &recoveryApply)) { /* - * Pause only if users can connect to send a resume message + * Pause only if users can connect to send a resume + * message */ if (recoveryPauseAtTarget && standbyState == STANDBY_SNAPSHOT_READY) { @@ -7003,8 +7006,8 @@ HotStandbyActive(void) { /* * We check shared state each time only until Hot Standby is active. 
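The HotStandbyActive() logic being reindented here is a cache-until-true pattern: the shared flag only ever goes from false to true, so once a backend has seen true it never needs the spinlock again. A sketch following the variables visible in this diff:

    if (LocalHotStandbyActive)
        return true;                /* sticky: cannot be de-activated */
    else
    {
        /* use volatile pointer to prevent code rearrangement */
        volatile XLogCtlData *xlogctl = XLogCtl;

        SpinLockAcquire(&xlogctl->info_lck);
        LocalHotStandbyActive = xlogctl->SharedHotStandbyActive;
        SpinLockRelease(&xlogctl->info_lck);

        return LocalHotStandbyActive;
    }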
We - * can't de-activate Hot Standby, so there's no need to keep checking after - * the shared variable has once been seen true. + * can't de-activate Hot Standby, so there's no need to keep checking + * after the shared variable has once been seen true. */ if (LocalHotStandbyActive) return true; @@ -7429,14 +7432,14 @@ LogCheckpointEnd(bool restartpoint) */ longest_secs = (long) (CheckpointStats.ckpt_longest_sync / 1000000); longest_usecs = CheckpointStats.ckpt_longest_sync - - (uint64) longest_secs * 1000000; + (uint64) longest_secs *1000000; average_sync_time = 0; - if (CheckpointStats.ckpt_sync_rels > 0) + if (CheckpointStats.ckpt_sync_rels > 0) average_sync_time = CheckpointStats.ckpt_agg_sync_time / CheckpointStats.ckpt_sync_rels; average_secs = (long) (average_sync_time / 1000000); - average_usecs = average_sync_time - (uint64) average_secs * 1000000; + average_usecs = average_sync_time - (uint64) average_secs *1000000; if (restartpoint) elog(LOG, "restartpoint complete: wrote %d buffers (%.1f%%); " @@ -8241,9 +8244,9 @@ RequestXLogSwitch(void) XLogRecPtr XLogRestorePoint(const char *rpName) { - XLogRecPtr RecPtr; - XLogRecData rdata; - xl_restore_point xlrec; + XLogRecPtr RecPtr; + XLogRecData rdata; + xl_restore_point xlrec; xlrec.rp_time = GetCurrentTimestamp(); strncpy(xlrec.rp_name, rpName, MAXFNAMELEN); @@ -8257,7 +8260,7 @@ XLogRestorePoint(const char *rpName) ereport(LOG, (errmsg("restore point \"%s\" created at %X/%X", - rpName, RecPtr.xlogid, RecPtr.xrecoff))); + rpName, RecPtr.xlogid, RecPtr.xrecoff))); return RecPtr; } @@ -8643,7 +8646,7 @@ get_sync_bit(int method) /* * Optimize writes by bypassing kernel cache with O_DIRECT when using - * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are + * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are * disabled, otherwise the archive command or walsender process will read * the WAL soon after writing it, which is guaranteed to cause a physical * read if we bypassed the kernel cache. We also skip the @@ -8775,7 +8778,7 @@ pg_start_backup(PG_FUNCTION_ARGS) text *backupid = PG_GETARG_TEXT_P(0); bool fast = PG_GETARG_BOOL(1); char *backupidstr; - XLogRecPtr startpoint; + XLogRecPtr startpoint; char startxlogstr[MAXFNAMELEN]; backupidstr = text_to_cstring(backupid); @@ -8791,7 +8794,7 @@ pg_start_backup(PG_FUNCTION_ARGS) * do_pg_start_backup is the workhorse of the user-visible pg_start_backup() * function. It creates the necessary starting checkpoint and constructs the * backup label file. - * + * * There are two kind of backups: exclusive and non-exclusive. An exclusive * backup is started with pg_start_backup(), and there can be only one active * at a time. The backup label file of an exclusive backup is written to @@ -8826,7 +8829,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) if (!superuser() && !is_authenticated_user_replication_role()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser or replication role to run a backup"))); + errmsg("must be superuser or replication role to run a backup"))); if (RecoveryInProgress()) ereport(ERROR, @@ -8897,25 +8900,27 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) /* Ensure we release forcePageWrites if fail below */ PG_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) BoolGetDatum(exclusive)); { - bool gotUniqueStartpoint = false; + bool gotUniqueStartpoint = false; + do { /* * Force a CHECKPOINT. 
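The odd-looking "(uint64) longest_secs *1000000" produced above (pgindent appears to mistake the cast for a declaration and glues the "*" to its operand) is still an ordinary multiplication. The computation it belongs to, in LogCheckpointEnd, just splits a microsecond total into whole seconds plus remainder, equivalently:

    uint64 sync_usecs = CheckpointStats.ckpt_longest_sync;  /* microseconds */
    long   secs  = (long) (sync_usecs / 1000000);
    int    usecs = (int) (sync_usecs - (uint64) secs * 1000000);
    /* usecs == sync_usecs % 1000000 */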
Aside from being necessary to prevent torn - * page problems, this guarantees that two successive backup runs will - * have different checkpoint positions and hence different history - * file names, even if nothing happened in between. + * page problems, this guarantees that two successive backup runs + * will have different checkpoint positions and hence different + * history file names, even if nothing happened in between. * - * We use CHECKPOINT_IMMEDIATE only if requested by user (via passing - * fast = true). Otherwise this can take awhile. + * We use CHECKPOINT_IMMEDIATE only if requested by user (via + * passing fast = true). Otherwise this can take awhile. */ RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT | (fast ? CHECKPOINT_IMMEDIATE : 0)); /* - * Now we need to fetch the checkpoint record location, and also its - * REDO pointer. The oldest point in WAL that would be needed to - * restore starting from the checkpoint is precisely the REDO pointer. + * Now we need to fetch the checkpoint record location, and also + * its REDO pointer. The oldest point in WAL that would be needed + * to restore starting from the checkpoint is precisely the REDO + * pointer. */ LWLockAcquire(ControlFileLock, LW_SHARED); checkpointloc = ControlFile->checkPoint; @@ -8923,16 +8928,15 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) LWLockRelease(ControlFileLock); /* - * If two base backups are started at the same time (in WAL - * sender processes), we need to make sure that they use - * different checkpoints as starting locations, because we use - * the starting WAL location as a unique identifier for the base - * backup in the end-of-backup WAL record and when we write the - * backup history file. Perhaps it would be better generate a - * separate unique ID for each backup instead of forcing another - * checkpoint, but taking a checkpoint right after another is - * not that expensive either because only few buffers have been - * dirtied yet. + * If two base backups are started at the same time (in WAL sender + * processes), we need to make sure that they use different + * checkpoints as starting locations, because we use the starting + * WAL location as a unique identifier for the base backup in the + * end-of-backup WAL record and when we write the backup history + * file. Perhaps it would be better generate a separate unique ID + * for each backup instead of forcing another checkpoint, but + * taking a checkpoint right after another is not that expensive + * either because only few buffers have been dirtied yet. */ LWLockAcquire(WALInsertLock, LW_SHARED); if (XLByteLT(XLogCtl->Insert.lastBackupStart, startpoint)) @@ -8941,13 +8945,13 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) gotUniqueStartpoint = true; } LWLockRelease(WALInsertLock); - } while(!gotUniqueStartpoint); + } while (!gotUniqueStartpoint); XLByteToSeg(startpoint, _logId, _logSeg); XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg); /* - * Construct backup label file + * Construct backup label file */ initStringInfo(&labelfbuf); @@ -8970,8 +8974,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) { /* * Check for existing backup label --- implies a backup is already - * running. (XXX given that we checked exclusiveBackup above, maybe - * it would be OK to just unlink any such label file?) + * running. (XXX given that we checked exclusiveBackup above, + * maybe it would be OK to just unlink any such label file?) 
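The labelfbuf assembled in do_pg_start_backup above becomes the backup_label file. A sketch of its construction; the exact set of lines written in this release is an assumption based on the familiar backup_label format:

    initStringInfo(&labelfbuf);
    appendStringInfo(&labelfbuf, "START WAL LOCATION: %X/%X (file %s)\n",
                     startpoint.xlogid, startpoint.xrecoff, xlogfilename);
    appendStringInfo(&labelfbuf, "CHECKPOINT LOCATION: %X/%X\n",
                     checkpointloc.xlogid, checkpointloc.xrecoff);
    appendStringInfo(&labelfbuf, "LABEL: %s\n", backupidstr);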
*/ if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0) { @@ -9018,7 +9022,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) static void pg_start_backup_callback(int code, Datum arg) { - bool exclusive = DatumGetBool(arg); + bool exclusive = DatumGetBool(arg); /* Update backup counters and forcePageWrites on failure */ LWLockAcquire(WALInsertLock, LW_EXCLUSIVE); @@ -9101,7 +9105,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) if (!superuser() && !is_authenticated_user_replication_role()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser or replication role to run a backup")))); + (errmsg("must be superuser or replication role to run a backup")))); if (RecoveryInProgress()) ereport(ERROR, @@ -9145,8 +9149,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) /* * Read the existing label file into memory. */ - struct stat statbuf; - int r; + struct stat statbuf; + int r; if (stat(BACKUP_LABEL_FILE, &statbuf)) { @@ -9197,7 +9201,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE))); - remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */ + remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */ /* * Write the backup-end xlog record @@ -9388,8 +9392,8 @@ pg_switch_xlog(PG_FUNCTION_ARGS) Datum pg_create_restore_point(PG_FUNCTION_ARGS) { - text *restore_name = PG_GETARG_TEXT_P(0); - char *restore_name_str; + text *restore_name = PG_GETARG_TEXT_P(0); + char *restore_name_str; XLogRecPtr restorepoint; char location[MAXFNAMELEN]; @@ -9407,7 +9411,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS) if (!XLogIsNeeded()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("WAL level not sufficient for creating a restore point"), + errmsg("WAL level not sufficient for creating a restore point"), errhint("wal_level must be set to \"archive\" or \"hot_standby\" at server start."))); restore_name_str = text_to_cstring(restore_name); @@ -9423,7 +9427,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS) * As a convenience, return the WAL location of the restore point record */ snprintf(location, sizeof(location), "%X/%X", - restorepoint.xlogid, restorepoint.xrecoff); + restorepoint.xlogid, restorepoint.xrecoff); PG_RETURN_TEXT_P(cstring_to_text(location)); } @@ -10177,8 +10181,8 @@ retry: } /* - * If it hasn't been long since last attempt, sleep - * to avoid busy-waiting. + * If it hasn't been long since last attempt, sleep to + * avoid busy-waiting. */ now = (pg_time_t) time(NULL); if ((now - last_fail_time) < 5) @@ -10404,7 +10408,7 @@ static bool CheckForStandbyTrigger(void) { struct stat stat_buf; - static bool triggered = false; + static bool triggered = false; if (triggered) return true; @@ -10446,8 +10450,8 @@ CheckPromoteSignal(void) if (stat(PROMOTE_SIGNAL_FILE, &stat_buf) == 0) { /* - * Since we are in a signal handler, it's not safe - * to elog. We silently ignore any error from unlink. + * Since we are in a signal handler, it's not safe to elog. We + * silently ignore any error from unlink. 
*/ unlink(PROMOTE_SIGNAL_FILE); return true; diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index aa3d59d4c9..693b634398 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -1011,8 +1011,8 @@ SetDefaultACLsInSchemas(InternalDefaultACL *iacls, List *nspnames) /* * Note that we must do the permissions check against the target - * role not the calling user. We require CREATE privileges, - * since without CREATE you won't be able to do anything using the + * role not the calling user. We require CREATE privileges, since + * without CREATE you won't be able to do anything using the * default privs anyway. */ iacls->nspid = get_namespace_oid(nspname, false); @@ -1707,7 +1707,7 @@ ExecGrant_Relation(InternalGrant *istmt) pg_class_tuple->relkind != RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a foreign table", + errmsg("\"%s\" is not a foreign table", NameStr(pg_class_tuple->relname)))); /* Adjust the default permissions based on object type */ @@ -1964,13 +1964,13 @@ ExecGrant_Relation(InternalGrant *istmt) this_privileges &= (AclMode) ACL_SELECT; } else if (pg_class_tuple->relkind == RELKIND_FOREIGN_TABLE && - this_privileges & ~((AclMode) ACL_SELECT)) + this_privileges & ~((AclMode) ACL_SELECT)) { /* Foreign tables have the same restriction as sequences. */ ereport(WARNING, - (errcode(ERRCODE_INVALID_GRANT_OPERATION), - errmsg("foreign table \"%s\" only supports SELECT column privileges", - NameStr(pg_class_tuple->relname)))); + (errcode(ERRCODE_INVALID_GRANT_OPERATION), + errmsg("foreign table \"%s\" only supports SELECT column privileges", + NameStr(pg_class_tuple->relname)))); this_privileges &= (AclMode) ACL_SELECT; } @@ -4768,7 +4768,7 @@ pg_extension_ownercheck(Oid ext_oid, Oid roleid) * Note: roles do not have owners per se; instead we use this test in * places where an ownership-like permissions test is needed for a role. * Be sure to apply it to the role trying to do the operation, not the - * role being operated on! Also note that this generally should not be + * role being operated on! Also note that this generally should not be * considered enough privilege if the target role is a superuser. * (We don't handle that consideration here because we want to give a * separate error message for such cases, so the caller has to deal with it.) diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c index 12935754bc..cbce0072de 100644 --- a/src/backend/catalog/catalog.c +++ b/src/backend/catalog/catalog.c @@ -80,11 +80,11 @@ forkname_to_number(char *forkName) /* * forkname_chars - * We use this to figure out whether a filename could be a relation - * fork (as opposed to an oddly named stray file that somehow ended - * up in the database directory). If the passed string begins with - * a fork name (other than the main fork name), we return its length, - * and set *fork (if not NULL) to the fork number. If not, we return 0. + * We use this to figure out whether a filename could be a relation + * fork (as opposed to an oddly named stray file that somehow ended + * up in the database directory). If the passed string begins with + * a fork name (other than the main fork name), we return its length, + * and set *fork (if not NULL) to the fork number. If not, we return 0. * * Note that the present coding assumes that there are no fork names which * are prefixes of other fork names. 
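forkname_chars() in the catalog.c hunk below matches a filename suffix against the table of fork names and reports how many characters it consumed. A self-contained toy version with a reduced fork table (the real table has more entries; index 0, the main fork, is deliberately skipped, as in the original):

    #include <string.h>

    static const char *const forkNames[] = { "main", "fsm", "vm" };
    #define MAX_FORKNUM 2

    /* length of the matching fork-name prefix of str, or 0 if none */
    static int
    forkname_chars_demo(const char *str, int *fork)
    {
        int forkNum;

        for (forkNum = 1; forkNum <= MAX_FORKNUM; forkNum++)
        {
            int len = (int) strlen(forkNames[forkNum]);

            if (strncmp(forkNames[forkNum], str, len) == 0)
            {
                if (fork)
                    *fork = forkNum;
                return len;
            }
        }
        return 0;
    }

So a stray file like "12345_fsm" is recognized by testing the part after the underscore: forkname_chars_demo("fsm", &f) returns 3 and sets f to 1.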
@@ -96,7 +96,8 @@ forkname_chars(const char *str, ForkNumber *fork) for (forkNum = 1; forkNum <= MAX_FORKNUM; forkNum++) { - int len = strlen(forkNames[forkNum]); + int len = strlen(forkNames[forkNum]); + if (strncmp(forkNames[forkNum], str, len) == 0) { if (fork) @@ -150,7 +151,7 @@ relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum) { /* OIDCHARS will suffice for an integer, too */ pathlen = 5 + OIDCHARS + 2 + OIDCHARS + 1 + OIDCHARS + 1 - + FORKNAMECHARS + 1; + + FORKNAMECHARS + 1; path = (char *) palloc(pathlen); if (forknum != MAIN_FORKNUM) snprintf(path, pathlen, "base/%u/t%d_%u_%s", @@ -167,8 +168,8 @@ relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum) if (backend == InvalidBackendId) { pathlen = 9 + 1 + OIDCHARS + 1 - + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 1 - + OIDCHARS + 1 + FORKNAMECHARS + 1; + + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 1 + + OIDCHARS + 1 + FORKNAMECHARS + 1; path = (char *) palloc(pathlen); if (forknum != MAIN_FORKNUM) snprintf(path, pathlen, "pg_tblspc/%u/%s/%u/%u_%s", @@ -184,8 +185,8 @@ relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum) { /* OIDCHARS will suffice for an integer, too */ pathlen = 9 + 1 + OIDCHARS + 1 - + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 2 - + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1; + + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 2 + + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1; path = (char *) palloc(pathlen); if (forknum != MAIN_FORKNUM) snprintf(path, pathlen, "pg_tblspc/%u/%s/%u/t%d_%u_%s", diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index de24ef7a09..ec9bb48c63 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -160,7 +160,7 @@ static const Oid object_classes[MAX_OCLASS] = { ForeignServerRelationId, /* OCLASS_FOREIGN_SERVER */ UserMappingRelationId, /* OCLASS_USER_MAPPING */ DefaultAclRelationId, /* OCLASS_DEFACL */ - ExtensionRelationId /* OCLASS_EXTENSION */ + ExtensionRelationId /* OCLASS_EXTENSION */ }; @@ -1021,8 +1021,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel) /* * Delete any comments or security labels associated with this object. - * (This is a convenient place to do these things, rather than having every - * object type know to do it.) + * (This is a convenient place to do these things, rather than having + * every object type know to do it.) */ DeleteComments(object->objectId, object->classId, object->objectSubId); DeleteSecurityLabel(object); @@ -1263,7 +1263,7 @@ recordDependencyOnExpr(const ObjectAddress *depender, * whereas 'behavior' is used for everything else. * * NOTE: the caller should ensure that a whole-table dependency on the - * specified relation is created separately, if one is needed. In particular, + * specified relation is created separately, if one is needed. In particular, * a whole-row Var "relation.*" will not cause this routine to emit any * dependency item. This is appropriate behavior for subexpressions of an * ordinary query, so other cases need to cope as necessary. @@ -1383,7 +1383,7 @@ find_expr_references_walker(Node *node, /* * A whole-row Var references no specific columns, so adds no new - * dependency. (We assume that there is a whole-table dependency + * dependency. (We assume that there is a whole-table dependency * arising from each underlying rangetable entry. 
While we could * record such a dependency when finding a whole-row Var that * references a relation directly, it's quite unclear how to extend @@ -1431,8 +1431,8 @@ find_expr_references_walker(Node *node, /* * We must also depend on the constant's collation: it could be - * different from the datatype's, if a CollateExpr was const-folded - * to a simple constant. However we can save work in the most common + * different from the datatype's, if a CollateExpr was const-folded to + * a simple constant. However we can save work in the most common * case where the collation is "default", since we know that's pinned. */ if (OidIsValid(con->constcollid) && @@ -1695,7 +1695,7 @@ find_expr_references_walker(Node *node, } foreach(ct, rte->funccolcollations) { - Oid collid = lfirst_oid(ct); + Oid collid = lfirst_oid(ct); if (OidIsValid(collid) && collid != DEFAULT_COLLATION_OID) @@ -2224,12 +2224,12 @@ getObjectDescription(const ObjectAddress *object) HeapTuple collTup; collTup = SearchSysCache1(COLLOID, - ObjectIdGetDatum(object->objectId)); + ObjectIdGetDatum(object->objectId)); if (!HeapTupleIsValid(collTup)) elog(ERROR, "cache lookup failed for collation %u", object->objectId); appendStringInfo(&buffer, _("collation %s"), - NameStr(((Form_pg_collation) GETSTRUCT(collTup))->collname)); + NameStr(((Form_pg_collation) GETSTRUCT(collTup))->collname)); ReleaseSysCache(collTup); break; } @@ -2796,7 +2796,7 @@ getObjectDescription(const ObjectAddress *object) char * getObjectDescriptionOids(Oid classid, Oid objid) { - ObjectAddress address; + ObjectAddress address; address.classId = classid; address.objectId = objid; diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 5d25ce9ec8..09b26a5c72 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -431,7 +431,7 @@ CheckAttributeNamesTypes(TupleDesc tupdesc, char relkind, CheckAttributeType(NameStr(tupdesc->attrs[i]->attname), tupdesc->attrs[i]->atttypid, tupdesc->attrs[i]->attcollation, - NIL, /* assume we're creating a new rowtype */ + NIL, /* assume we're creating a new rowtype */ allow_system_table_mods); } } @@ -497,7 +497,7 @@ CheckAttributeType(const char *attname, int i; /* - * Check for self-containment. Eventually we might be able to allow + * Check for self-containment. Eventually we might be able to allow * this (just return without complaint, if so) but it's not clear how * many other places would require anti-recursion defenses before it * would be safe to allow tables to contain their own rowtype. @@ -505,8 +505,8 @@ CheckAttributeType(const char *attname, if (list_member_oid(containing_rowtypes, atttypid)) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("composite type %s cannot be made a member of itself", - format_type_be(atttypid)))); + errmsg("composite type %s cannot be made a member of itself", + format_type_be(atttypid)))); containing_rowtypes = lcons_oid(atttypid, containing_rowtypes); @@ -541,15 +541,15 @@ CheckAttributeType(const char *attname, } /* - * This might not be strictly invalid per SQL standard, but it is - * pretty useless, and it cannot be dumped, so we must disallow it. + * This might not be strictly invalid per SQL standard, but it is pretty + * useless, and it cannot be dumped, so we must disallow it. 
*/ if (!OidIsValid(attcollation) && type_is_collatable(atttypid)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("no collation was derived for column \"%s\" with collatable type %s", - attname, format_type_be(atttypid)), - errhint("Use the COLLATE clause to set the collation explicitly."))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("no collation was derived for column \"%s\" with collatable type %s", + attname, format_type_be(atttypid)), + errhint("Use the COLLATE clause to set the collation explicitly."))); } /* @@ -921,7 +921,7 @@ AddNewRelationType(const char *typeName, -1, /* typmod */ 0, /* array dimensions for typBaseType */ false, /* Type NOT NULL */ - InvalidOid); /* typcollation */ + InvalidOid); /* typcollation */ } /* -------------------------------- @@ -992,9 +992,9 @@ heap_create_with_catalog(const char *relname, CheckAttributeNamesTypes(tupdesc, relkind, allow_system_table_mods); /* - * If the relation already exists, it's an error, unless the user specifies - * "IF NOT EXISTS". In that case, we just print a notice and do nothing - * further. + * If the relation already exists, it's an error, unless the user + * specifies "IF NOT EXISTS". In that case, we just print a notice and do + * nothing further. */ existing_relid = get_relname_relid(relname, relnamespace); if (existing_relid != InvalidOid) @@ -1004,7 +1004,7 @@ heap_create_with_catalog(const char *relname, ereport(NOTICE, (errcode(ERRCODE_DUPLICATE_TABLE), errmsg("relation \"%s\" already exists, skipping", - relname))); + relname))); heap_close(pg_class_desc, RowExclusiveLock); return InvalidOid; } @@ -1048,8 +1048,8 @@ heap_create_with_catalog(const char *relname, if (!OidIsValid(relid)) { /* - * Use binary-upgrade override for pg_class.oid/relfilenode, - * if supplied. + * Use binary-upgrade override for pg_class.oid/relfilenode, if + * supplied. */ if (OidIsValid(binary_upgrade_next_heap_pg_class_oid) && (relkind == RELKIND_RELATION || relkind == RELKIND_SEQUENCE || @@ -1183,7 +1183,7 @@ heap_create_with_catalog(const char *relname, -1, /* typmod */ 0, /* array dimensions for typBaseType */ false, /* Type NOT NULL */ - InvalidOid); /* typcollation */ + InvalidOid); /* typcollation */ pfree(relarrayname); } @@ -1285,12 +1285,12 @@ heap_create_with_catalog(const char *relname, register_on_commit_action(relid, oncommit); /* - * If this is an unlogged relation, it needs an init fork so that it - * can be correctly reinitialized on restart. Since we're going to - * do an immediate sync, we ony need to xlog this if archiving or - * streaming is enabled. And the immediate sync is required, because - * otherwise there's no guarantee that this will hit the disk before - * the next checkpoint moves the redo pointer. + * If this is an unlogged relation, it needs an init fork so that it can + * be correctly reinitialized on restart. Since we're going to do an + * immediate sync, we ony need to xlog this if archiving or streaming is + * enabled. And the immediate sync is required, because otherwise there's + * no guarantee that this will hit the disk before the next checkpoint + * moves the redo pointer. */ if (relpersistence == RELPERSISTENCE_UNLOGGED) { @@ -1654,8 +1654,8 @@ heap_drop_with_catalog(Oid relid) /* * There can no longer be anyone *else* touching the relation, but we - * might still have open queries or cursors, or pending trigger events, - * in our own session. + * might still have open queries or cursors, or pending trigger events, in + * our own session. 
*/ CheckTableNotInUse(rel, "DROP TABLE"); @@ -1664,8 +1664,8 @@ heap_drop_with_catalog(Oid relid) */ if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) { - Relation rel; - HeapTuple tuple; + Relation rel; + HeapTuple tuple; rel = heap_open(ForeignTableRelationId, RowExclusiveLock); @@ -1899,7 +1899,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr, CONSTRAINT_CHECK, /* Constraint Type */ false, /* Is Deferrable */ false, /* Is Deferred */ - true, /* Is Validated */ + true, /* Is Validated */ RelationGetRelid(rel), /* relation */ attNos, /* attrs in the constraint */ keycount, /* # attrs in the constraint */ diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 679255a199..1bf74b3d4f 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -187,18 +187,18 @@ index_check_primary_key(Relation heapRel, int i; /* - * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In - * CREATE TABLE, we have faith that the parser rejected multiple pkey - * clauses; and CREATE INDEX doesn't have a way to say PRIMARY KEY, so - * it's no problem either. + * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In CREATE + * TABLE, we have faith that the parser rejected multiple pkey clauses; + * and CREATE INDEX doesn't have a way to say PRIMARY KEY, so it's no + * problem either. */ if (is_alter_table && relationHasPrimaryKey(heapRel)) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("multiple primary keys for table \"%s\" are not allowed", - RelationGetRelationName(heapRel)))); + errmsg("multiple primary keys for table \"%s\" are not allowed", + RelationGetRelationName(heapRel)))); } /* @@ -222,7 +222,7 @@ index_check_primary_key(Relation heapRel, continue; atttuple = SearchSysCache2(ATTNUM, - ObjectIdGetDatum(RelationGetRelid(heapRel)), + ObjectIdGetDatum(RelationGetRelid(heapRel)), Int16GetDatum(attnum)); if (!HeapTupleIsValid(atttuple)) elog(ERROR, "cache lookup failed for attribute %d of relation %u", @@ -243,15 +243,14 @@ index_check_primary_key(Relation heapRel, } /* - * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child - * tables? Currently, since the PRIMARY KEY itself doesn't cascade, - * we don't cascade the notnull constraint(s) either; but this is - * pretty debatable. + * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child tables? + * Currently, since the PRIMARY KEY itself doesn't cascade, we don't + * cascade the notnull constraint(s) either; but this is pretty debatable. * - * XXX: possible future improvement: when being called from ALTER - * TABLE, it would be more efficient to merge this with the outer - * ALTER TABLE, so as to avoid two scans. But that seems to - * complicate DefineIndex's API unduly. + * XXX: possible future improvement: when being called from ALTER TABLE, + * it would be more efficient to merge this with the outer ALTER TABLE, so + * as to avoid two scans. But that seems to complicate DefineIndex's API + * unduly. */ if (cmds) AlterTableInternal(RelationGetRelid(heapRel), cmds, false); @@ -788,8 +787,8 @@ index_create(Relation heapRelation, if (!OidIsValid(indexRelationId)) { /* - * Use binary-upgrade override for pg_class.oid/relfilenode, - * if supplied. + * Use binary-upgrade override for pg_class.oid/relfilenode, if + * supplied. 
*/ if (OidIsValid(binary_upgrade_next_index_pg_class_oid)) { @@ -872,7 +871,7 @@ index_create(Relation heapRelation, * ---------------- */ UpdateIndexRelation(indexRelationId, heapRelationId, indexInfo, - collationObjectId, classObjectId, coloptions, isprimary, is_exclusion, + collationObjectId, classObjectId, coloptions, isprimary, is_exclusion, !deferrable, !concurrent); @@ -947,7 +946,7 @@ index_create(Relation heapRelation, /* * If there are no simply-referenced columns, give the index an - * auto dependency on the whole table. In most cases, this will + * auto dependency on the whole table. In most cases, this will * be redundant, but it might not be if the index expressions and * predicate contain no Vars or only whole-row Vars. */ @@ -1067,7 +1066,7 @@ index_create(Relation heapRelation, /* * Close the index; but we keep the lock that we acquired above until end - * of transaction. Closing the heap is caller's responsibility. + * of transaction. Closing the heap is caller's responsibility. */ index_close(indexRelation, NoLock); @@ -1176,8 +1175,8 @@ index_constraint_create(Relation heapRelation, /* * If the constraint is deferrable, create the deferred uniqueness - * checking trigger. (The trigger will be given an internal - * dependency on the constraint by CreateTrigger.) + * checking trigger. (The trigger will be given an internal dependency on + * the constraint by CreateTrigger.) */ if (deferrable) { @@ -1213,7 +1212,7 @@ index_constraint_create(Relation heapRelation, * have been so marked already, so no need to clear the flag in the other * case. * - * Note: this might better be done by callers. We do it here to avoid + * Note: this might better be done by callers. We do it here to avoid * exposing index_update_stats() globally, but that wouldn't be necessary * if relhaspkey went away. */ @@ -1235,10 +1234,10 @@ index_constraint_create(Relation heapRelation, */ if (update_pgindex && (mark_as_primary || deferrable)) { - Relation pg_index; - HeapTuple indexTuple; - Form_pg_index indexForm; - bool dirty = false; + Relation pg_index; + HeapTuple indexTuple; + Form_pg_index indexForm; + bool dirty = false; pg_index = heap_open(IndexRelationId, RowExclusiveLock); @@ -1303,8 +1302,8 @@ index_drop(Oid indexId) userIndexRelation = index_open(indexId, AccessExclusiveLock); /* - * There can no longer be anyone *else* touching the index, but we - * might still have open queries using it in our own session. + * There can no longer be anyone *else* touching the index, but we might + * still have open queries using it in our own session. */ CheckTableNotInUse(userIndexRelation, "DROP INDEX"); @@ -1739,7 +1738,8 @@ index_build(Relation heapRelation, */ if (heapRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED) { - RegProcedure ambuildempty = indexRelation->rd_am->ambuildempty; + RegProcedure ambuildempty = indexRelation->rd_am->ambuildempty; + RelationOpenSmgr(indexRelation); smgrcreate(indexRelation->rd_smgr, INIT_FORKNUM, false); OidFunctionCall1(ambuildempty, PointerGetDatum(indexRelation)); @@ -2410,7 +2410,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot) ivinfo.strategy = NULL; state.tuplesort = tuplesort_begin_datum(TIDOID, - TIDLessOperator, InvalidOid, false, + TIDLessOperator, InvalidOid, false, maintenance_work_mem, false); state.htups = state.itups = state.tups_inserted = 0; @@ -2834,7 +2834,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks) * use catalog indexes while collecting the list.) 
* * To avoid deadlocks, VACUUM FULL or CLUSTER on a system catalog must omit the - * REINDEX_CHECK_CONSTRAINTS flag. REINDEX should be used to rebuild an index + * REINDEX_CHECK_CONSTRAINTS flag. REINDEX should be used to rebuild an index * if constraint inconsistency is suspected. For optimal performance, other * callers should include the flag only after transforming the data in a manner * that risks a change in constraint validity. diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 734581e485..f8fd827693 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -2446,10 +2446,10 @@ CheckSetNamespace(Oid oldNspOid, Oid nspOid, Oid classid, Oid objid) if (oldNspOid == nspOid) ereport(ERROR, (classid == RelationRelationId ? - errcode(ERRCODE_DUPLICATE_TABLE) : + errcode(ERRCODE_DUPLICATE_TABLE) : classid == ProcedureRelationId ? - errcode(ERRCODE_DUPLICATE_FUNCTION) : - errcode(ERRCODE_DUPLICATE_OBJECT), + errcode(ERRCODE_DUPLICATE_FUNCTION) : + errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("%s is already in schema \"%s\"", getObjectDescriptionOids(classid, objid), get_namespace_name(nspOid)))); @@ -2458,7 +2458,7 @@ CheckSetNamespace(Oid oldNspOid, Oid nspOid, Oid classid, Oid objid) if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move objects into or out of temporary schemas"))); + errmsg("cannot move objects into or out of temporary schemas"))); /* same for TOAST schema */ if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE) @@ -2525,7 +2525,7 @@ QualifiedNameGetCreationNamespace(List *names, char **objname_p) /* * get_namespace_oid - given a namespace name, look up the OID * - * If missing_ok is false, throw an error if namespace name not found. If + * If missing_ok is false, throw an error if namespace name not found. If * true, just return InvalidOid. */ Oid @@ -2535,9 +2535,9 @@ get_namespace_oid(const char *nspname, bool missing_ok) oid = GetSysCacheOid1(NAMESPACENAME, CStringGetDatum(nspname)); if (!OidIsValid(oid) && !missing_ok) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("schema \"%s\" does not exist", nspname))); + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_SCHEMA), + errmsg("schema \"%s\" does not exist", nspname))); return oid; } @@ -2727,7 +2727,7 @@ GetTempNamespaceBackendId(Oid namespaceId) /* See if the namespace name starts with "pg_temp_" or "pg_toast_temp_" */ nspname = get_namespace_name(namespaceId); if (!nspname) - return InvalidBackendId; /* no such namespace? */ + return InvalidBackendId; /* no such namespace? */ if (strncmp(nspname, "pg_temp_", 8) == 0) result = atoi(nspname + 8); else if (strncmp(nspname, "pg_toast_temp_", 14) == 0) @@ -2798,8 +2798,8 @@ GetOverrideSearchPath(MemoryContext context) * * It's possible that newpath->useTemp is set but there is no longer any * active temp namespace, if the path was saved during a transaction that - * created a temp namespace and was later rolled back. In that case we just - * ignore useTemp. A plausible alternative would be to create a new temp + * created a temp namespace and was later rolled back. In that case we just + * ignore useTemp. A plausible alternative would be to create a new temp * namespace, but for existing callers that's not necessary because an empty * temp namespace wouldn't affect their results anyway. 
* @@ -3522,7 +3522,7 @@ check_search_path(char **newval, void **extra, GucSource source) if (source == PGC_S_TEST) ereport(NOTICE, (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("schema \"%s\" does not exist", curname))); + errmsg("schema \"%s\" does not exist", curname))); else { GUC_check_errdetail("schema \"%s\" does not exist", curname); diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 0d21d310a6..bf25091582 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -78,7 +78,7 @@ static Relation get_relation_by_qualified_name(ObjectType objtype, static ObjectAddress get_object_address_relobject(ObjectType objtype, List *objname, Relation *relp); static ObjectAddress get_object_address_attribute(ObjectType objtype, - List *objname, Relation *relp, LOCKMODE lockmode); + List *objname, Relation *relp, LOCKMODE lockmode); static ObjectAddress get_object_address_opcf(ObjectType objtype, List *objname, List *objargs); static bool object_exists(ObjectAddress address); @@ -108,8 +108,8 @@ ObjectAddress get_object_address(ObjectType objtype, List *objname, List *objargs, Relation *relp, LOCKMODE lockmode) { - ObjectAddress address; - Relation relation = NULL; + ObjectAddress address; + Relation relation = NULL; /* Some kind of lock must be taken. */ Assert(lockmode != NoLock); @@ -130,7 +130,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, case OBJECT_COLUMN: address = get_object_address_attribute(objtype, objname, &relation, - lockmode); + lockmode); break; case OBJECT_RULE: case OBJECT_TRIGGER: @@ -201,10 +201,10 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, break; case OBJECT_CAST: { - TypeName *sourcetype = (TypeName *) linitial(objname); - TypeName *targettype = (TypeName *) linitial(objargs); - Oid sourcetypeid = typenameTypeId(NULL, sourcetype); - Oid targettypeid = typenameTypeId(NULL, targettype); + TypeName *sourcetype = (TypeName *) linitial(objname); + TypeName *targettype = (TypeName *) linitial(objargs); + Oid sourcetypeid = typenameTypeId(NULL, sourcetype); + Oid targettypeid = typenameTypeId(NULL, targettype); address.classId = CastRelationId; address.objectId = @@ -242,8 +242,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, /* * If we're dealing with a relation or attribute, then the relation is - * already locked. If we're dealing with any other type of object, we need - * to lock it and then verify that it still exists. + * already locked. If we're dealing with any other type of object, we + * need to lock it and then verify that it still exists. 
*/ if (address.classId != RelationRelationId) { @@ -308,7 +308,7 @@ get_object_address_unqualified(ObjectType objtype, List *qualname) break; default: elog(ERROR, "unrecognized objtype: %d", (int) objtype); - msg = NULL; /* placate compiler */ + msg = NULL; /* placate compiler */ } ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -379,7 +379,7 @@ static Relation get_relation_by_qualified_name(ObjectType objtype, List *objname, LOCKMODE lockmode) { - Relation relation; + Relation relation; relation = relation_openrv(makeRangeVarFromNameList(objname), lockmode); switch (objtype) @@ -449,7 +449,7 @@ get_object_address_relobject(ObjectType objtype, List *objname, Relation *relp) nnames = list_length(objname); if (nnames < 2) { - Oid reloid; + Oid reloid; /* * For compatibility with very old releases, we sometimes allow users @@ -514,7 +514,7 @@ static ObjectAddress get_object_address_attribute(ObjectType objtype, List *objname, Relation *relp, LOCKMODE lockmode) { - ObjectAddress address; + ObjectAddress address; List *relname; Oid reloid; Relation relation; @@ -534,7 +534,7 @@ get_object_address_attribute(ObjectType objtype, List *objname, ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" of relation \"%s\" does not exist", - attname, RelationGetRelationName(relation)))); + attname, RelationGetRelationName(relation)))); *relp = relation; return address; @@ -584,8 +584,8 @@ object_exists(ObjectAddress address) int cache = -1; Oid indexoid = InvalidOid; Relation rel; - ScanKeyData skey[1]; - SysScanDesc sd; + ScanKeyData skey[1]; + SysScanDesc sd; bool found; /* Sub-objects require special treatment. */ @@ -609,9 +609,9 @@ object_exists(ObjectAddress address) /* * For object types that have a relevant syscache, we use it; for - * everything else, we'll have to do an index-scan. This switch - * sets either the cache to be used for the syscache lookup, or the - * index to be used for the index scan. + * everything else, we'll have to do an index-scan. This switch sets + * either the cache to be used for the syscache lookup, or the index to be + * used for the index scan. */ switch (address.classId) { @@ -664,6 +664,7 @@ object_exists(ObjectAddress address) cache = OPFAMILYOID; break; case LargeObjectRelationId: + /* * Weird backward compatibility hack: ObjectAddress notation uses * LargeObjectRelationId for large objects, but since PostgreSQL @@ -816,15 +817,15 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address, ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be owner of large object %u", - address.objectId))); + address.objectId))); break; case OBJECT_CAST: { /* We can only check permissions on the source/target types */ - TypeName *sourcetype = (TypeName *) linitial(objname); - TypeName *targettype = (TypeName *) linitial(objargs); - Oid sourcetypeid = typenameTypeId(NULL, sourcetype); - Oid targettypeid = typenameTypeId(NULL, targettype); + TypeName *sourcetype = (TypeName *) linitial(objname); + TypeName *targettype = (TypeName *) linitial(objargs); + Oid sourcetypeid = typenameTypeId(NULL, sourcetype); + Oid targettypeid = typenameTypeId(NULL, targettype); if (!pg_type_ownercheck(sourcetypeid, roleid) && !pg_type_ownercheck(targettypeid, roleid)) @@ -851,6 +852,7 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address, NameListToString(objname)); break; case OBJECT_ROLE: + /* * We treat roles as being "owned" by those with CREATEROLE priv, * except that superusers are only owned by superusers. 
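The get_object_address() hunks above lean on a specific ordering for non-relation objects: resolve the name, take the object lock, then re-verify existence so a DROP that committed between lookup and lock is caught. A rough standalone analogue of that ordering, with a pthread mutex and a toy name table standing in for the backend's object locks and syscache lookups (every name below is hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t catalog_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *demo_catalog[] = {"pg_am", "pg_proc", NULL};
static int	demo_dropped[2];	/* set by a concurrent "DROP" */

static int
name_to_id(const char *name)
{
	for (int i = 0; demo_catalog[i] != NULL; i++)
		if (!demo_dropped[i] && strcmp(demo_catalog[i], name) == 0)
			return i;
	return -1;
}

/*
 * Resolve, lock, then recheck.  Returns the id with the lock held, or
 * -1 if the object is gone, including "vanished after we looked it up
 * but before we got the lock", the race the recheck exists for.
 */
static int
get_object_locked(const char *name)
{
	int			id = name_to_id(name);	/* unlocked lookup */

	if (id < 0)
		return -1;
	pthread_mutex_lock(&catalog_lock);	/* "acquire object lock" */
	if (name_to_id(name) != id)	/* recheck under the lock */
	{
		pthread_mutex_unlock(&catalog_lock);
		return -1;
	}
	return id;					/* caller now holds the lock */
}

int
main(void)
{
	int			id = get_object_locked("pg_proc");

	if (id >= 0)
	{
		printf("locked object id %d\n", id);
		pthread_mutex_unlock(&catalog_lock);
	}
	return 0;
}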
diff --git a/src/backend/catalog/pg_collation.c b/src/backend/catalog/pg_collation.c index 708078463b..5b92a4c0c2 100644 --- a/src/backend/catalog/pg_collation.c +++ b/src/backend/catalog/pg_collation.c @@ -46,7 +46,9 @@ CollationCreate(const char *collname, Oid collnamespace, HeapTuple tup; Datum values[Natts_pg_collation]; bool nulls[Natts_pg_collation]; - NameData name_name, name_collate, name_ctype; + NameData name_name, + name_collate, + name_ctype; Oid oid; ObjectAddress myself, referenced; @@ -60,9 +62,9 @@ CollationCreate(const char *collname, Oid collnamespace, /* * Make sure there is no existing collation of same name & encoding. * - * This would be caught by the unique index anyway; we're just giving - * a friendlier error message. The unique index provides a backstop - * against race conditions. + * This would be caught by the unique index anyway; we're just giving a + * friendlier error message. The unique index provides a backstop against + * race conditions. */ if (SearchSysCacheExists3(COLLNAMEENCNSP, PointerGetDatum(collname), @@ -74,9 +76,9 @@ CollationCreate(const char *collname, Oid collnamespace, collname, pg_encoding_to_char(collencoding)))); /* - * Also forbid matching an any-encoding entry. This test of course is - * not backed up by the unique index, but it's not a problem since we - * don't support adding any-encoding entries after initdb. + * Also forbid matching an any-encoding entry. This test of course is not + * backed up by the unique index, but it's not a problem since we don't + * support adding any-encoding entries after initdb. */ if (SearchSysCacheExists3(COLLNAMEENCNSP, PointerGetDatum(collname), diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index 6619eed431..69979942af 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -799,10 +799,10 @@ get_constraint_oid(Oid relid, const char *conname, bool missing_ok) * the rel of interest are Vars with the indicated varno/varlevelsup. * * Currently we only check to see if the rel has a primary key that is a - * subset of the grouping_columns. We could also use plain unique constraints + * subset of the grouping_columns. We could also use plain unique constraints * if all their columns are known not null, but there's a problem: we need * to be able to represent the not-null-ness as part of the constraints added - * to *constraintDeps. FIXME whenever not-null constraints get represented + * to *constraintDeps. FIXME whenever not-null constraints get represented * in pg_constraint. */ bool @@ -852,7 +852,7 @@ check_functional_grouping(Oid relid, if (isNull) elog(ERROR, "null conkey for constraint %u", HeapTupleGetOid(tuple)); - arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */ + arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */ numkeys = ARR_DIMS(arr)[0]; if (ARR_NDIM(arr) != 1 || numkeys < 0 || diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index 2bb7bb3d5f..67aad86d4e 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -126,7 +126,7 @@ recordMultipleDependencies(const ObjectAddress *depender, /* * If we are executing a CREATE EXTENSION operation, mark the given object - * as being a member of the extension. Otherwise, do nothing. + * as being a member of the extension. Otherwise, do nothing. * * This must be called during creation of any user-definable object type * that could be a member of an extension. 
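check_functional_grouping(), whose header comment is reflowed in the pg_constraint.c hunk above, reduces the SQL functional-dependency question to a subset test: a primary key whose columns all appear among the grouping columns proves every other column of that rel is functionally determined, so, for example, grouping by a table's key lets its other columns appear ungrouped. A minimal sketch of that subset test, using plain int arrays in place of the backend's attribute-number lists; all names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool
contains_col(const int *cols, int ncols, int col)
{
	for (int i = 0; i < ncols; i++)
		if (cols[i] == col)
			return true;
	return false;
}

/* true if every primary-key column is among the grouping columns */
static bool
pkey_covered_by_grouping(const int *pkey_cols, int npkey,
						 const int *grouping_cols, int ngrouping)
{
	for (int i = 0; i < npkey; i++)
		if (!contains_col(grouping_cols, ngrouping, pkey_cols[i]))
			return false;
	return true;
}

int
main(void)
{
	int			pkey[] = {1};
	int			grouping[] = {1, 3};

	printf("%s\n", pkey_covered_by_grouping(pkey, 1, grouping, 2)
		   ? "key covered: grouping is functionally complete"
		   : "key not covered");
	return 0;
}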
@@ -136,7 +136,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object) { if (creating_extension) { - ObjectAddress extension; + ObjectAddress extension; extension.classId = ExtensionRelationId; extension.objectId = CurrentExtensionObject; @@ -155,7 +155,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object) * (possibly with some differences from before). * * If skipExtensionDeps is true, we do not delete any dependencies that - * show that the given object is a member of an extension. This avoids + * show that the given object is a member of an extension. This avoids * needing a lot of extra logic to fetch and recreate that dependency. */ long @@ -185,7 +185,7 @@ deleteDependencyRecordsFor(Oid classId, Oid objectId, while (HeapTupleIsValid(tup = systable_getnext(scan))) { if (skipExtensionDeps && - ((Form_pg_depend) GETSTRUCT(tup))->deptype == DEPENDENCY_EXTENSION) + ((Form_pg_depend) GETSTRUCT(tup))->deptype == DEPENDENCY_EXTENSION) continue; simple_heap_delete(depRel, &tup->t_self); diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c index e87a9311bd..08d8aa13f3 100644 --- a/src/backend/catalog/pg_enum.c +++ b/src/backend/catalog/pg_enum.c @@ -29,7 +29,7 @@ /* Potentially set by contrib/pg_upgrade_support functions */ -Oid binary_upgrade_next_pg_enum_oid = InvalidOid; +Oid binary_upgrade_next_pg_enum_oid = InvalidOid; static void RenumberEnumType(Relation pg_enum, HeapTuple *existing, int nelems); static int oid_cmp(const void *p1, const void *p2); @@ -58,9 +58,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals) num_elems = list_length(vals); /* - * We do not bother to check the list of values for duplicates --- if - * you have any, you'll get a less-than-friendly unique-index violation. - * It is probably not worth trying harder. + * We do not bother to check the list of values for duplicates --- if you + * have any, you'll get a less-than-friendly unique-index violation. It is + * probably not worth trying harder. */ pg_enum = heap_open(EnumRelationId, RowExclusiveLock); @@ -69,10 +69,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals) * Allocate OIDs for the enum's members. * * While this method does not absolutely guarantee that we generate no - * duplicate OIDs (since we haven't entered each oid into the table - * before allocating the next), trouble could only occur if the OID - * counter wraps all the way around before we finish. Which seems - * unlikely. + * duplicate OIDs (since we haven't entered each oid into the table before + * allocating the next), trouble could only occur if the OID counter wraps + * all the way around before we finish. Which seems unlikely. */ oids = (Oid *) palloc(num_elems * sizeof(Oid)); @@ -83,9 +82,10 @@ EnumValuesCreate(Oid enumTypeOid, List *vals) * tells the comparison functions the OIDs are in the correct sort * order and can be compared directly. */ - Oid new_oid; + Oid new_oid; - do { + do + { new_oid = GetNewOid(pg_enum); } while (new_oid & 1); oids[elemno] = new_oid; @@ -202,9 +202,9 @@ AddEnumLabel(Oid enumTypeOid, /* * Acquire a lock on the enum type, which we won't release until commit. * This ensures that two backends aren't concurrently modifying the same - * enum type. Without that, we couldn't be sure to get a consistent - * view of the enum members via the syscache. Note that this does not - * block other backends from inspecting the type; see comments for + * enum type. Without that, we couldn't be sure to get a consistent view + * of the enum members via the syscache. 
Note that this does not block + * other backends from inspecting the type; see comments for * RenumberEnumType. */ LockDatabaseObject(TypeRelationId, enumTypeOid, 0, ExclusiveLock); @@ -217,7 +217,7 @@ restart: /* Get the list of existing members of the enum */ list = SearchSysCacheList1(ENUMTYPOIDNAME, ObjectIdGetDatum(enumTypeOid)); - nelems = list->n_members; + nelems = list->n_members; /* Sort the existing members by enumsortorder */ existing = (HeapTuple *) palloc(nelems * sizeof(HeapTuple)); @@ -229,8 +229,8 @@ restart: if (neighbor == NULL) { /* - * Put the new label at the end of the list. - * No change to existing tuples is required. + * Put the new label at the end of the list. No change to existing + * tuples is required. */ if (nelems > 0) { @@ -244,10 +244,10 @@ restart: else { /* BEFORE or AFTER was specified */ - int nbr_index; - int other_nbr_index; - Form_pg_enum nbr_en; - Form_pg_enum other_nbr_en; + int nbr_index; + int other_nbr_index; + Form_pg_enum nbr_en; + Form_pg_enum other_nbr_en; /* Locate the neighbor element */ for (nbr_index = 0; nbr_index < nelems; nbr_index++) @@ -265,14 +265,14 @@ restart: nbr_en = (Form_pg_enum) GETSTRUCT(existing[nbr_index]); /* - * Attempt to assign an appropriate enumsortorder value: one less - * than the smallest member, one more than the largest member, - * or halfway between two existing members. + * Attempt to assign an appropriate enumsortorder value: one less than + * the smallest member, one more than the largest member, or halfway + * between two existing members. * * In the "halfway" case, because of the finite precision of float4, - * we might compute a value that's actually equal to one or the - * other of its neighbors. In that case we renumber the existing - * members and try again. + * we might compute a value that's actually equal to one or the other + * of its neighbors. In that case we renumber the existing members + * and try again. */ if (newValIsAfter) other_nbr_index = nbr_index + 1; @@ -291,10 +291,10 @@ restart: /* * On some machines, newelemorder may be in a register that's - * wider than float4. We need to force it to be rounded to - * float4 precision before making the following comparisons, - * or we'll get wrong results. (Such behavior violates the C - * standard, but fixing the compilers is out of our reach.) + * wider than float4. We need to force it to be rounded to float4 + * precision before making the following comparisons, or we'll get + * wrong results. (Such behavior violates the C standard, but + * fixing the compilers is out of our reach.) */ newelemorder = DatumGetFloat4(Float4GetDatum(newelemorder)); @@ -314,9 +314,9 @@ restart: if (OidIsValid(binary_upgrade_next_pg_enum_oid)) { /* - * Use binary-upgrade override for pg_enum.oid, if supplied. - * During binary upgrade, all pg_enum.oid's are set this way - * so they are guaranteed to be consistent. + * Use binary-upgrade override for pg_enum.oid, if supplied. During + * binary upgrade, all pg_enum.oid's are set this way so they are + * guaranteed to be consistent. */ if (neighbor != NULL) ereport(ERROR, @@ -337,7 +337,7 @@ restart: */ for (;;) { - bool sorts_ok; + bool sorts_ok; /* Get a new OID (different from all existing pg_enum tuples) */ newOid = GetNewOid(pg_enum); @@ -345,8 +345,8 @@ restart: /* * Detect whether it sorts correctly relative to existing * even-numbered labels of the enum. We can ignore existing - * labels with odd Oids, since a comparison involving one of - * those will not take the fast path anyway. 
+ * labels with odd Oids, since a comparison involving one of those + * will not take the fast path anyway. */ sorts_ok = true; for (i = 0; i < nelems; i++) @@ -385,9 +385,9 @@ restart: break; /* - * If it's odd, and sorts OK, loop back to get another OID - * and try again. Probably, the next available even OID - * will sort correctly too, so it's worth trying. + * If it's odd, and sorts OK, loop back to get another OID and + * try again. Probably, the next available even OID will sort + * correctly too, so it's worth trying. */ } else @@ -435,7 +435,7 @@ restart: * We avoid doing this unless absolutely necessary; in most installations * it will never happen. The reason is that updating existing pg_enum * entries creates hazards for other backends that are concurrently reading - * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could + * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could * see both old and new versions of an updated row as valid, or neither of * them, if the commit happens between scanning the two versions. It's * also quite likely for a concurrent scan to see an inconsistent set of @@ -510,10 +510,10 @@ oid_cmp(const void *p1, const void *p2) static int sort_order_cmp(const void *p1, const void *p2) { - HeapTuple v1 = *((const HeapTuple *) p1); - HeapTuple v2 = *((const HeapTuple *) p2); - Form_pg_enum en1 = (Form_pg_enum) GETSTRUCT(v1); - Form_pg_enum en2 = (Form_pg_enum) GETSTRUCT(v2); + HeapTuple v1 = *((const HeapTuple *) p1); + HeapTuple v2 = *((const HeapTuple *) p2); + Form_pg_enum en1 = (Form_pg_enum) GETSTRUCT(v1); + Form_pg_enum en2 = (Form_pg_enum) GETSTRUCT(v2); if (en1->enumsortorder < en2->enumsortorder) return -1; diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index 6138165cc3..47a8ff4d98 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -842,8 +842,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) if (!haspolyarg) { /* - * OK to do full precheck: analyze and rewrite the queries, - * then verify the result type. + * OK to do full precheck: analyze and rewrite the queries, then + * verify the result type. */ SQLFunctionParseInfoPtr pinfo; @@ -858,7 +858,7 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) querytree_sublist = pg_analyze_and_rewrite_params(parsetree, prosrc, - (ParserSetupHook) sql_fn_parser_setup, + (ParserSetupHook) sql_fn_parser_setup, pinfo); querytree_list = list_concat(querytree_list, querytree_sublist); diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index 06301c075b..9e35e73f9c 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -115,7 +115,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId) values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */ values[i++] = Int32GetDatum(-1); /* typtypmod */ values[i++] = Int32GetDatum(0); /* typndims */ - values[i++] = ObjectIdGetDatum(InvalidOid); /* typcollation */ + values[i++] = ObjectIdGetDatum(InvalidOid); /* typcollation */ nulls[i++] = true; /* typdefaultbin */ nulls[i++] = true; /* typdefault */ @@ -352,7 +352,7 @@ TypeCreate(Oid newTypeOid, values[i++] = ObjectIdGetDatum(baseType); /* typbasetype */ values[i++] = Int32GetDatum(typeMod); /* typtypmod */ values[i++] = Int32GetDatum(typNDims); /* typndims */ - values[i++] = ObjectIdGetDatum(typeCollation); /* typcollation */ + values[i++] = ObjectIdGetDatum(typeCollation); /* typcollation */ /* * initialize the default binary value for this type. 
Check for nulls of diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index 221f9f5c12..57987be2c0 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -119,7 +119,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence) break; default: elog(ERROR, "invalid relpersistence: %c", relpersistence); - return; /* placate compiler */ + return; /* placate compiler */ } srel = smgropen(rnode, backend); @@ -379,7 +379,7 @@ smgrDoPendingDeletes(bool isCommit) * *ptr is set to point to a freshly-palloc'd array of RelFileNodes. * If there are no relations to be deleted, *ptr is set to NULL. * - * Only non-temporary relations are included in the returned list. This is OK + * Only non-temporary relations are included in the returned list. This is OK * because the list is used only in contexts where temporary relations don't * matter: we're either writing to the two-phase state file (and transactions * that have touched temp tables can't be prepared) or we're writing to xlog diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c index 5d5496df98..452ca9bef0 100644 --- a/src/backend/catalog/toasting.c +++ b/src/backend/catalog/toasting.c @@ -279,7 +279,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptio list_make2("chunk_id", "chunk_seq"), BTREE_AM_OID, rel->rd_rel->reltablespace, - collationObjectId, classObjectId, coloptions, (Datum) 0, + collationObjectId, classObjectId, coloptions, (Datum) 0, true, false, false, false, true, false, false); diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 99fdd7dba3..215e21cae0 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -282,26 +282,26 @@ AlterObjectNamespace_oid(Oid classId, Oid objid, Oid nspOid) switch (getObjectClass(&dep)) { case OCLASS_CLASS: - { - Relation rel; - Relation classRel; + { + Relation rel; + Relation classRel; - rel = relation_open(objid, AccessExclusiveLock); - oldNspOid = RelationGetNamespace(rel); + rel = relation_open(objid, AccessExclusiveLock); + oldNspOid = RelationGetNamespace(rel); - classRel = heap_open(RelationRelationId, RowExclusiveLock); + classRel = heap_open(RelationRelationId, RowExclusiveLock); - AlterRelationNamespaceInternal(classRel, - objid, - oldNspOid, - nspOid, - true); + AlterRelationNamespaceInternal(classRel, + objid, + oldNspOid, + nspOid, + true); - heap_close(classRel, RowExclusiveLock); + heap_close(classRel, RowExclusiveLock); - relation_close(rel, NoLock); - break; - } + relation_close(rel, NoLock); + break; + } case OCLASS_PROC: oldNspOid = AlterFunctionNamespace_oid(objid, nspOid); @@ -386,9 +386,11 @@ AlterObjectNamespace(Relation rel, int oidCacheId, int nameCacheId, { Oid classId = RelationGetRelid(rel); Oid oldNspOid; - Datum name, namespace; - bool isnull; - HeapTuple tup, newtup; + Datum name, + namespace; + bool isnull; + HeapTuple tup, + newtup; Datum *values; bool *nulls; bool *replaces; @@ -410,7 +412,7 @@ AlterObjectNamespace(Relation rel, int oidCacheId, int nameCacheId, /* Permission checks ... 
superusers can always do it */ if (!superuser()) { - Datum owner; + Datum owner; Oid ownerId; AclResult aclresult; diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 774bb04471..dde301b89a 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -95,7 +95,7 @@ static void compute_index_stats(Relation onerel, double totalrows, HeapTuple *rows, int numrows, MemoryContext col_context); static VacAttrStats *examine_attribute(Relation onerel, int attnum, - Node *index_expr); + Node *index_expr); static int acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, double *totalrows, double *totaldeadrows); static double random_fract(void); @@ -160,8 +160,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0) ereport(LOG, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), - errmsg("skipping analyze of \"%s\" --- lock not available", - vacstmt->relation->relname))); + errmsg("skipping analyze of \"%s\" --- lock not available", + vacstmt->relation->relname))); } if (!onerel) return; @@ -853,10 +853,10 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr) /* * When analyzing an expression index, believe the expression tree's type * not the column datatype --- the latter might be the opckeytype storage - * type of the opclass, which is not interesting for our purposes. (Note: + * type of the opclass, which is not interesting for our purposes. (Note: * if we did anything with non-expression index columns, we'd need to * figure out where to get the correct type info from, but for now that's - * not a problem.) It's not clear whether anyone will care about the + * not a problem.) It's not clear whether anyone will care about the * typmod, but we store that too just in case. */ if (index_expr) diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 4c4f356e79..2cc2aaa8f6 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -718,7 +718,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, TransactionId OldestXmin; TransactionId FreezeXid; RewriteState rwstate; - bool use_sort; + bool use_sort; Tuplesortstate *tuplesort; double num_tuples = 0, tups_vacuumed = 0, @@ -813,11 +813,11 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid, use_wal); /* - * Decide whether to use an indexscan or seqscan-and-optional-sort to - * scan the OldHeap. We know how to use a sort to duplicate the ordering - * of a btree index, and will use seqscan-and-sort for that case if the - * planner tells us it's cheaper. Otherwise, always indexscan if an - * index is provided, else plain seqscan. + * Decide whether to use an indexscan or seqscan-and-optional-sort to scan + * the OldHeap. We know how to use a sort to duplicate the ordering of a + * btree index, and will use seqscan-and-sort for that case if the planner + * tells us it's cheaper. Otherwise, always indexscan if an index is + * provided, else plain seqscan. */ if (OldIndex != NULL && OldIndex->rd_rel->relam == BTREE_AM_OID) use_sort = plan_cluster_use_sort(OIDOldHeap, OIDOldIndex); @@ -869,8 +869,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, /* * Scan through the OldHeap, either in OldIndex order or sequentially; * copy each tuple into the NewHeap, or transiently to the tuplesort - * module. 
Note that we don't bother sorting dead tuples (they won't - * get to the new table anyway). + * module. Note that we don't bother sorting dead tuples (they won't get + * to the new table anyway). */ for (;;) { @@ -984,8 +984,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, heap_endscan(heapScan); /* - * In scan-and-sort mode, complete the sort, then read out all live - * tuples from the tuplestore and write them to the new relation. + * In scan-and-sort mode, complete the sort, then read out all live tuples + * from the tuplestore and write them to the new relation. */ if (tuplesort != NULL) { @@ -1554,7 +1554,7 @@ reform_and_rewrite_tuple(HeapTuple tuple, bool newRelHasOids, RewriteState rwstate) { HeapTuple copiedTuple; - int i; + int i; heap_deform_tuple(tuple, oldTupDesc, values, isnull); diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c index 2a6938fd04..7f8a108374 100644 --- a/src/backend/commands/collationcmds.c +++ b/src/backend/commands/collationcmds.c @@ -34,7 +34,7 @@ #include "utils/syscache.h" static void AlterCollationOwner_internal(Relation rel, Oid collationOid, - Oid newOwnerId); + Oid newOwnerId); /* * CREATE COLLATION @@ -46,10 +46,10 @@ DefineCollation(List *names, List *parameters) Oid collNamespace; AclResult aclresult; ListCell *pl; - DefElem *fromEl = NULL; - DefElem *localeEl = NULL; - DefElem *lccollateEl = NULL; - DefElem *lcctypeEl = NULL; + DefElem *fromEl = NULL; + DefElem *localeEl = NULL; + DefElem *lccollateEl = NULL; + DefElem *lcctypeEl = NULL; char *collcollate = NULL; char *collctype = NULL; Oid newoid; @@ -63,7 +63,7 @@ DefineCollation(List *names, List *parameters) foreach(pl, parameters) { - DefElem *defel = (DefElem *) lfirst(pl); + DefElem *defel = (DefElem *) lfirst(pl); DefElem **defelp; if (pg_strcasecmp(defel->defname, "from") == 0) @@ -97,7 +97,7 @@ DefineCollation(List *names, List *parameters) Oid collid; HeapTuple tp; - collid = get_collation_oid(defGetQualifiedName(fromEl), false); + collid = get_collation_oid(defGetQualifiedName(fromEl), false); tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for collation %u", collid); @@ -123,7 +123,7 @@ DefineCollation(List *names, List *parameters) if (!collcollate) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("parameter \"lc_collate\" parameter must be specified"))); + errmsg("parameter \"lc_collate\" parameter must be specified"))); if (!collctype) ereport(ERROR, @@ -391,7 +391,7 @@ AlterCollationNamespace(List *name, const char *newschema) Oid AlterCollationNamespace_oid(Oid collOid, Oid newNspOid) { - Oid oldNspOid; + Oid oldNspOid; Relation rel; char *collation_name; diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c index 3fbeefa018..d09bef0682 100644 --- a/src/backend/commands/comment.c +++ b/src/backend/commands/comment.c @@ -37,8 +37,8 @@ void CommentObject(CommentStmt *stmt) { - ObjectAddress address; - Relation relation; + ObjectAddress address; + Relation relation; /* * When loading a dump, we may see a COMMENT ON DATABASE for the old name @@ -46,12 +46,13 @@ CommentObject(CommentStmt *stmt) * (which is really pg_restore's fault, but for now we will work around * the problem here). Consensus is that the best fix is to treat wrong * database name as a WARNING not an ERROR; hence, the following special - * case. (If the length of stmt->objname is not 1, get_object_address will - * throw an error below; that's OK.) 
+ * case. (If the length of stmt->objname is not 1, get_object_address + * will throw an error below; that's OK.) */ if (stmt->objtype == OBJECT_DATABASE && list_length(stmt->objname) == 1) { - char *database = strVal(linitial(stmt->objname)); + char *database = strVal(linitial(stmt->objname)); + if (!OidIsValid(get_database_oid(database, true))) { ereport(WARNING, @@ -62,10 +63,10 @@ CommentObject(CommentStmt *stmt) } /* - * Translate the parser representation that identifies this object into - * an ObjectAddress. get_object_address() will throw an error if the - * object does not exist, and will also acquire a lock on the target - * to guard against concurrent DROP operations. + * Translate the parser representation that identifies this object into an + * ObjectAddress. get_object_address() will throw an error if the object + * does not exist, and will also acquire a lock on the target to guard + * against concurrent DROP operations. */ address = get_object_address(stmt->objtype, stmt->objname, stmt->objargs, &relation, ShareUpdateExclusiveLock); @@ -78,6 +79,7 @@ CommentObject(CommentStmt *stmt) switch (stmt->objtype) { case OBJECT_COLUMN: + /* * Allow comments only on columns of tables, views, composite * types, and foreign tables (which are the only relkinds for diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c index b5e4420ca8..2c1c6da900 100644 --- a/src/backend/commands/conversioncmds.c +++ b/src/backend/commands/conversioncmds.c @@ -335,7 +335,8 @@ AlterConversionOwner_internal(Relation rel, Oid conversionOid, Oid newOwnerId) void AlterConversionNamespace(List *name, const char *newschema) { - Oid convOid, nspOid; + Oid convOid, + nspOid; Relation rel; rel = heap_open(ConversionRelationId, RowExclusiveLock); @@ -361,7 +362,7 @@ AlterConversionNamespace(List *name, const char *newschema) Oid AlterConversionNamespace_oid(Oid convOid, Oid newNspOid) { - Oid oldNspOid; + Oid oldNspOid; Relation rel; rel = heap_open(ConversionRelationId, RowExclusiveLock); diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 3af0b09719..57429035e8 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -115,7 +115,7 @@ typedef struct CopyStateData char *quote; /* CSV quote char (must be 1 byte) */ char *escape; /* CSV escape char (must be 1 byte) */ List *force_quote; /* list of column names */ - bool force_quote_all; /* FORCE QUOTE *? */ + bool force_quote_all; /* FORCE QUOTE *? */ bool *force_quote_flags; /* per-column CSV FQ flags */ List *force_notnull; /* list of column names */ bool *force_notnull_flags; /* per-column CSV FNN flags */ @@ -161,8 +161,8 @@ typedef struct CopyStateData /* field raw data pointers found by COPY FROM */ - int max_fields; - char ** raw_fields; + int max_fields; + char **raw_fields; /* * Similarly, line_buf holds the whole input line being processed. 
The @@ -266,10 +266,10 @@ static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0"; /* non-export function prototypes */ static CopyState BeginCopy(bool is_from, Relation rel, Node *raw_query, - const char *queryString, List *attnamelist, List *options); + const char *queryString, List *attnamelist, List *options); static void EndCopy(CopyState cstate); static CopyState BeginCopyTo(Relation rel, Node *query, const char *queryString, - const char *filename, List *attnamelist, List *options); + const char *filename, List *attnamelist, List *options); static void EndCopyTo(CopyState cstate); static uint64 DoCopyTo(CopyState cstate); static uint64 CopyTo(CopyState cstate); @@ -278,8 +278,8 @@ static void CopyOneRowTo(CopyState cstate, Oid tupleOid, static uint64 CopyFrom(CopyState cstate); static bool CopyReadLine(CopyState cstate); static bool CopyReadLineText(CopyState cstate); -static int CopyReadAttributesText(CopyState cstate); -static int CopyReadAttributesCSV(CopyState cstate); +static int CopyReadAttributesText(CopyState cstate); +static int CopyReadAttributesCSV(CopyState cstate); static Datum CopyReadBinaryAttribute(CopyState cstate, int column_no, FmgrInfo *flinfo, Oid typioparam, int32 typmod, @@ -748,17 +748,17 @@ DoCopy(const CopyStmt *stmt, const char *queryString) if (stmt->relation) { - TupleDesc tupDesc; - AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT); - RangeTblEntry *rte; - List *attnums; - ListCell *cur; + TupleDesc tupDesc; + AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT); + RangeTblEntry *rte; + List *attnums; + ListCell *cur; Assert(!stmt->query); /* Open and lock the relation, using the appropriate lock type. */ rel = heap_openrv(stmt->relation, - (is_from ? RowExclusiveLock : AccessShareLock)); + (is_from ? RowExclusiveLock : AccessShareLock)); rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; @@ -770,8 +770,8 @@ DoCopy(const CopyStmt *stmt, const char *queryString) attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist); foreach(cur, attnums) { - int attno = lfirst_int(cur) - - FirstLowInvalidHeapAttributeNumber; + int attno = lfirst_int(cur) - + FirstLowInvalidHeapAttributeNumber; if (is_from) rte->modifiedCols = bms_add_member(rte->modifiedCols, attno); @@ -1136,8 +1136,8 @@ BeginCopy(bool is_from, cstate = (CopyStateData *) palloc0(sizeof(CopyStateData)); /* - * We allocate everything used by a cstate in a new memory context. - * This avoids memory leaks during repeated use of COPY in a query. + * We allocate everything used by a cstate in a new memory context. This + * avoids memory leaks during repeated use of COPY in a query. */ cstate->copycontext = AllocSetContextCreate(CurrentMemoryContext, "COPY", @@ -1300,9 +1300,9 @@ BeginCopy(bool is_from, cstate->file_encoding = pg_get_client_encoding(); /* - * Set up encoding conversion info. Even if the file and server - * encodings are the same, we must apply pg_any_to_server() to validate - * data in multibyte encodings. + * Set up encoding conversion info. Even if the file and server encodings + * are the same, we must apply pg_any_to_server() to validate data in + * multibyte encodings. 
 	 */
 	cstate->need_transcoding =
 		(cstate->file_encoding != GetDatabaseEncoding() ||
@@ -1552,8 +1552,8 @@ CopyTo(CopyState cstate)
 	 */
 	if (cstate->need_transcoding)
 		cstate->null_print_client = pg_server_to_any(cstate->null_print,
-										cstate->null_print_len,
-										cstate->file_encoding);
+													 cstate->null_print_len,
+													 cstate->file_encoding);
 
 	/* if a header has been requested send the line */
 	if (cstate->header_line)
@@ -2001,9 +2001,9 @@ CopyFrom(CopyState cstate)
 		{
 			slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
 
-			if (slot == NULL)	/* "do nothing" */
+			if (slot == NULL)	/* "do nothing" */
 				skip_tuple = true;
-			else	/* trigger might have changed tuple */
+			else				/* trigger might have changed tuple */
 				tuple = ExecMaterializeSlot(slot);
 		}
 
@@ -2159,7 +2159,7 @@ BeginCopyFrom(Relation rel,
 			{
 				/* Initialize expressions in copycontext. */
 				defexprs[num_defaults] = ExecInitExpr(
-								expression_planner((Expr *) defexpr), NULL);
+								 expression_planner((Expr *) defexpr), NULL);
 				defmap[num_defaults] = attnum - 1;
 				num_defaults++;
 			}
@@ -2255,7 +2255,7 @@ BeginCopyFrom(Relation rel,
 	if (!cstate->binary)
 	{
 		AttrNumber	attr_count = list_length(cstate->attnumlist);
-		int		nfields = cstate->file_has_oids ? (attr_count + 1) : attr_count;
+		int			nfields = cstate->file_has_oids ? (attr_count + 1) : attr_count;
 
 		cstate->max_fields = nfields;
 		cstate->raw_fields = (char **) palloc(nfields * sizeof(char *));
@@ -2291,7 +2291,7 @@ NextCopyFromRawFields(CopyState cstate, char ***fields, int *nfields)
 	{
 		cstate->cur_lineno++;
 		if (CopyReadLine(cstate))
-			return false;	/* done */
+			return false;		/* done */
 	}
 
 	cstate->cur_lineno++;
@@ -2300,9 +2300,9 @@ NextCopyFromRawFields(CopyState cstate, char ***fields, int *nfields)
 	done = CopyReadLine(cstate);
 
 	/*
-	 * EOF at start of line means we're done.  If we see EOF after
-	 * some characters, we act as though it was newline followed by
-	 * EOF, ie, process the line and then exit loop on next iteration.
+	 * EOF at start of line means we're done.  If we see EOF after some
+	 * characters, we act as though it was newline followed by EOF, ie,
+	 * process the line and then exit loop on next iteration.
	 */
 	if (done && cstate->line_buf.len == 0)
 		return false;
@@ -2341,7 +2341,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
 	FmgrInfo   *in_functions = cstate->in_functions;
 	Oid		   *typioparams = cstate->typioparams;
 	int			i;
-	int		nfields;
+	int			nfields;
 	bool		isnull;
 	bool		file_has_oids = cstate->file_has_oids;
 	int		   *defmap = cstate->defmap;
@@ -2456,18 +2456,18 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
 		if (fld_count == -1)
 		{
 			/*
-			 * Received EOF marker.  In a V3-protocol copy, wait for
-			 * the protocol-level EOF, and complain if it doesn't come
-			 * immediately.  This ensures that we correctly handle
-			 * CopyFail, if client chooses to send that now.
+			 * Received EOF marker.  In a V3-protocol copy, wait for the
+			 * protocol-level EOF, and complain if it doesn't come
+			 * immediately.  This ensures that we correctly handle CopyFail,
+			 * if client chooses to send that now.
 			 *
-			 * Note that we MUST NOT try to read more data in an
-			 * old-protocol copy, since there is no protocol-level EOF
-			 * marker then.  We could go either way for copy from file,
-			 * but choose to throw error if there's data after the EOF
-			 * marker, for consistency with the new-protocol case.
+			 * Note that we MUST NOT try to read more data in an old-protocol
+			 * copy, since there is no protocol-level EOF marker then.  We
+			 * could go either way for copy from file, but choose to throw
+			 * error if there's data after the EOF marker, for consistency
+			 * with the new-protocol case.
 			 */
-			char	dummy;
+			char		dummy;
 
 			if (cstate->copy_dest != COPY_OLD_FE &&
 				CopyGetData(cstate, &dummy, 1, 1) > 0)
@@ -2485,14 +2485,14 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
 
 		if (file_has_oids)
 		{
-			Oid		loaded_oid;
+			Oid			loaded_oid;
 
 			cstate->cur_attname = "oid";
 			loaded_oid =
 				DatumGetObjectId(CopyReadBinaryAttribute(cstate,
 														 0,
-											&cstate->oid_in_function,
-											cstate->oid_typioparam,
+												&cstate->oid_in_function,
+												cstate->oid_typioparam,
 														 -1,
 														 &isnull));
 			if (isnull || loaded_oid == InvalidOid)
@@ -2524,8 +2524,8 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
 
 	/*
 	 * Now compute and insert any defaults available for the columns not
-	 * provided by the input data.  Anything not processed here or above
-	 * will remain NULL.
+	 * provided by the input data.  Anything not processed here or above will
+	 * remain NULL.
 	 */
 	for (i = 0; i < num_defaults; i++)
 	{
@@ -3023,12 +3023,12 @@ GetDecimalFromHex(char hex)
  * performing de-escaping as needed.
  *
  * The input is in line_buf.  We use attribute_buf to hold the result
- * strings.  cstate->raw_fields[k] is set to point to the k'th attribute
- * string, or NULL when the input matches the null marker string.
+ * strings.  cstate->raw_fields[k] is set to point to the k'th attribute
+ * string, or NULL when the input matches the null marker string.
  * This array is expanded as necessary.
 *
- * (Note that the caller cannot check for nulls since the returned
- * string would be the post-de-escaping equivalent, which may look
+ * (Note that the caller cannot check for nulls since the returned
+ * string would be the post-de-escaping equivalent, which may look
 * the same as some valid data string.)
 *
 * delim is the column delimiter string (must be just one byte for now).
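The CopyReadAttributesText/CopyReadAttributesCSV hunks just below both touch the same growth pattern: when the parser runs out of slots in cstate->raw_fields, it doubles max_fields and repallocs the array, so the per-field cost stays amortized constant. A minimal standalone sketch of that doubling strategy, using plain malloc/realloc in place of the backend's palloc/repalloc (the names split_fields and the tab-separated test line are illustrative, not from the patch):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Split "line" on "delim", growing the field array by doubling, the
	 * way CopyReadAttributesText grows cstate->raw_fields.  Fields point
	 * into the modified line; returns the field count. */
	static int
	split_fields(char *line, char delim, char ***fields, int *max_fields)
	{
		int		fieldno = 0;
		char   *cur = line;

		for (;;)
		{
			char	   *end = strchr(cur, delim);

			if (fieldno >= *max_fields)
			{
				/* Out of slots: double the array, as copy.c does. */
				*max_fields *= 2;
				*fields = realloc(*fields, *max_fields * sizeof(char *));
			}
			(*fields)[fieldno++] = cur;
			if (end == NULL)
				break;
			*end = '\0';		/* terminate this field in place */
			cur = end + 1;
		}
		return fieldno;
	}

	int
	main(void)
	{
		char	line[] = "a\tb\tc\td";
		int		max_fields = 1;	/* deliberately small to force growth */
		char  **fields = malloc(max_fields * sizeof(char *));
		int		n = split_fields(line, '\t', &fields, &max_fields);

		for (int i = 0; i < n; i++)
			printf("field %d: %s\n", i, fields[i]);
		free(fields);
		return 0;
	}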
@@ -3090,8 +3090,8 @@ CopyReadAttributesText(CopyState cstate)
 		if (fieldno >= cstate->max_fields)
 		{
 			cstate->max_fields *= 2;
-			cstate->raw_fields =
-				repalloc(cstate->raw_fields, cstate->max_fields*sizeof(char *));
+			cstate->raw_fields =
+				repalloc(cstate->raw_fields, cstate->max_fields * sizeof(char *));
 		}
 
 		/* Remember start of field on both input and output sides */
@@ -3307,8 +3307,8 @@ CopyReadAttributesCSV(CopyState cstate)
 		if (fieldno >= cstate->max_fields)
 		{
 			cstate->max_fields *= 2;
-			cstate->raw_fields =
-				repalloc(cstate->raw_fields, cstate->max_fields*sizeof(char *));
+			cstate->raw_fields =
+				repalloc(cstate->raw_fields, cstate->max_fields * sizeof(char *));
 		}
 
 		/* Remember start of field on both input and output sides */
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 87d9e545b4..f319eb539c 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -680,8 +680,8 @@ createdb(const CreatedbStmt *stmt)
 void
 check_encoding_locale_matches(int encoding, const char *collate, const char *ctype)
 {
-	int		ctype_encoding = pg_get_encoding_from_locale(ctype, true);
-	int		collate_encoding = pg_get_encoding_from_locale(collate, true);
+	int			ctype_encoding = pg_get_encoding_from_locale(ctype, true);
+	int			collate_encoding = pg_get_encoding_from_locale(collate, true);
 
 	if (!(ctype_encoding == encoding ||
 		  ctype_encoding == PG_SQL_ASCII ||
@@ -1849,10 +1849,10 @@ get_database_oid(const char *dbname, bool missing_ok)
 	heap_close(pg_database, AccessShareLock);
 
 	if (!OidIsValid(oid) && !missing_ok)
-		ereport(ERROR,
-				(errcode(ERRCODE_UNDEFINED_DATABASE),
-				 errmsg("database \"%s\" does not exist",
-						dbname)));
+		ereport(ERROR,
+				(errcode(ERRCODE_UNDEFINED_DATABASE),
+				 errmsg("database \"%s\" does not exist",
+						dbname)));
 
 	return oid;
 }
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 1d9586f07d..7a361585bd 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -59,26 +59,26 @@ static void ExplainNode(PlanState *planstate, List *ancestors,
 			const char *relationship, const char *plan_name,
 			ExplainState *es);
 static void show_plan_tlist(PlanState *planstate, List *ancestors,
-			ExplainState *es);
+				ExplainState *es);
 static void show_expression(Node *node, const char *qlabel,
 				PlanState *planstate, List *ancestors,
 				bool useprefix, ExplainState *es);
 static void show_qual(List *qual, const char *qlabel,
-		PlanState *planstate, List *ancestors,
-		bool useprefix, ExplainState *es);
+		  PlanState *planstate, List *ancestors,
+		  bool useprefix, ExplainState *es);
 static void show_scan_qual(List *qual, const char *qlabel,
-		PlanState *planstate, List *ancestors,
-		ExplainState *es);
+			   PlanState *planstate, List *ancestors,
+			   ExplainState *es);
 static void show_upper_qual(List *qual, const char *qlabel,
-		PlanState *planstate, List *ancestors,
-		ExplainState *es);
+				PlanState *planstate, List *ancestors,
+				ExplainState *es);
 static void show_sort_keys(SortState *sortstate, List *ancestors,
-		ExplainState *es);
+			   ExplainState *es);
 static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
-		ExplainState *es);
+					   ExplainState *es);
 static void show_sort_keys_common(PlanState *planstate,
-		int nkeys, AttrNumber *keycols,
-		List *ancestors, ExplainState *es);
+					  int nkeys, AttrNumber *keycols,
+					  List *ancestors, ExplainState *es);
 static void show_sort_info(SortState *sortstate, ExplainState *es);
 static void show_hash_info(HashState *hashstate, ExplainState *es);
 static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es);
@@ -89,7 +89,7 @@ static void ExplainTargetRel(Plan *plan, Index rti, ExplainState *es);
 static void ExplainMemberNodes(List *plans, PlanState **planstates,
 				   List *ancestors, ExplainState *es);
 static void ExplainSubPlans(List *plans, List *ancestors,
-			const char *relationship, ExplainState *es);
+				const char *relationship, ExplainState *es);
 static void ExplainProperty(const char *qlabel, const char *value,
 				bool numeric, ExplainState *es);
 static void ExplainOpenGroup(const char *objtype, const char *labelname,
@@ -1358,7 +1358,7 @@ show_scan_qual(List *qual, const char *qlabel,
 {
 	bool		useprefix;
 
-	useprefix = (IsA(planstate->plan, SubqueryScan) || es->verbose);
+	useprefix = (IsA(planstate->plan, SubqueryScan) ||es->verbose);
 	show_qual(qual, qlabel, planstate, ancestors, useprefix, es);
 }
 
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 7c3e8107de..d848926ae5 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -56,8 +56,8 @@
 
 /* Globally visible state variables */
-bool creating_extension = false;
-Oid CurrentExtensionObject = InvalidOid;
+bool		creating_extension = false;
+Oid			CurrentExtensionObject = InvalidOid;
 
 /*
 * Internal data structure to hold the results of parsing a control file
@@ -66,8 +66,8 @@ typedef struct ExtensionControlFile
 {
 	char	   *name;			/* name of the extension */
 	char	   *directory;		/* directory for script files */
-	char	   *default_version;	/* default install target version, if any */
-	char	   *module_pathname;	/* string to substitute for MODULE_PATHNAME */
+	char	   *default_version;		/* default install target version, if any */
+	char	   *module_pathname;		/* string to substitute for MODULE_PATHNAME */
 	char	   *comment;		/* comment, if any */
 	char	   *schema;			/* target schema (allowed if !relocatable) */
 	bool		relocatable;	/* is ALTER EXTENSION SET SCHEMA supported? */
@@ -85,9 +85,9 @@ typedef struct ExtensionVersionInfo
 	List	   *reachable;		/* List of ExtensionVersionInfo's */
 	bool		installable;	/* does this version have an install script? */
 	/* working state for Dijkstra's algorithm: */
-	bool	distance_known;		/* is distance from start known yet? */
+	bool		distance_known; /* is distance from start known yet? */
 	int			distance;		/* current worst-case distance estimate */
-	struct ExtensionVersionInfo *previous;	/* current best predecessor */
+	struct ExtensionVersionInfo *previous;		/* current best predecessor */
 } ExtensionVersionInfo;
 
 /* Local functions */
@@ -107,7 +107,7 @@ static void ApplyExtensionUpdates(Oid extensionOid,
 /*
 * get_extension_oid - given an extension name, look up the OID
 *
- * If missing_ok is false, throw an error if extension name not found. If
+ * If missing_ok is false, throw an error if extension name not found.  If
 * true, just return InvalidOid.
 */
 Oid
@@ -142,10 +142,10 @@ get_extension_oid(const char *extname, bool missing_ok)
 	heap_close(rel, AccessShareLock);
 
 	if (!OidIsValid(result) && !missing_ok)
-		ereport(ERROR,
-				(errcode(ERRCODE_UNDEFINED_OBJECT),
-				 errmsg("extension \"%s\" does not exist",
-						extname)));
+		ereport(ERROR,
+				(errcode(ERRCODE_UNDEFINED_OBJECT),
+				 errmsg("extension \"%s\" does not exist",
+						extname)));
 
 	return result;
 }
@@ -237,8 +237,8 @@ check_valid_extension_name(const char *extensionname)
 	int			namelen = strlen(extensionname);
 
 	/*
-	 * Disallow empty names (the parser rejects empty identifiers anyway,
-	 * but let's check).
+	 * Disallow empty names (the parser rejects empty identifiers anyway, but
+	 * let's check).
 	 */
 	if (namelen == 0)
 		ereport(ERROR,
@@ -256,16 +256,16 @@ check_valid_extension_name(const char *extensionname)
 				 errdetail("Extension names must not contain \"--\".")));
 
 	/*
-	 * No leading or trailing dash either.  (We could probably allow this,
-	 * but it would require much care in filename parsing and would make
-	 * filenames visually if not formally ambiguous.  Since there's no
-	 * real-world use case, let's just forbid it.)
+	 * No leading or trailing dash either.  (We could probably allow this, but
+	 * it would require much care in filename parsing and would make filenames
+	 * visually if not formally ambiguous.  Since there's no real-world use
+	 * case, let's just forbid it.)
 	 */
 	if (extensionname[0] == '-' || extensionname[namelen - 1] == '-')
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 				 errmsg("invalid extension name: \"%s\"", extensionname),
-			errdetail("Extension names must not begin or end with \"-\".")));
+			 errdetail("Extension names must not begin or end with \"-\".")));
 
 	/*
 	 * No directory separators either (this is sufficient to prevent ".."
@@ -290,7 +290,7 @@ check_valid_version_name(const char *versionname)
 	if (namelen == 0)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-			  errmsg("invalid extension version name: \"%s\"", versionname),
+			 errmsg("invalid extension version name: \"%s\"", versionname),
 				 errdetail("Version names must not be empty.")));
 
 	/*
@@ -299,7 +299,7 @@ check_valid_version_name(const char *versionname)
 	if (strstr(versionname, "--"))
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-			  errmsg("invalid extension version name: \"%s\"", versionname),
+			 errmsg("invalid extension version name: \"%s\"", versionname),
 				 errdetail("Version names must not contain \"--\".")));
 
 	/*
@@ -308,8 +308,8 @@ check_valid_version_name(const char *versionname)
 	if (versionname[0] == '-' || versionname[namelen - 1] == '-')
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-			  errmsg("invalid extension version name: \"%s\"", versionname),
-			errdetail("Version names must not begin or end with \"-\".")));
+			 errmsg("invalid extension version name: \"%s\"", versionname),
+			 errdetail("Version names must not begin or end with \"-\".")));
 
 	/*
 	 * No directory separators either (this is sufficient to prevent ".."
@@ -318,7 +318,7 @@ check_valid_version_name(const char *versionname)
 	if (first_dir_separator(versionname) != NULL)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-			  errmsg("invalid extension version name: \"%s\"", versionname),
+			 errmsg("invalid extension version name: \"%s\"", versionname),
 				 errdetail("Version names must not contain directory separator characters.")));
 }
 
@@ -386,7 +386,7 @@ get_extension_script_directory(ExtensionControlFile *control)
 	get_share_path(my_exec_path, sharepath);
 	result = (char *) palloc(MAXPGPATH);
-	snprintf(result, MAXPGPATH, "%s/%s", sharepath, control->directory);
+	snprintf(result, MAXPGPATH, "%s/%s", sharepath, control->directory);
 
 	return result;
 }
@@ -434,7 +434,7 @@ get_extension_script_filename(ExtensionControlFile *control,
 
 /*
 * Parse contents of primary or auxiliary control file, and fill in
- * fields of *control.  We parse primary file if version == NULL,
+ * fields of *control.  We parse primary file if version == NULL,
 * else the optional auxiliary file for that version.
 *
 * Control files are supposed to be very short, half a dozen lines,
@@ -448,8 +448,8 @@ parse_extension_control_file(ExtensionControlFile *control,
 	char	   *filename;
 	FILE	   *file;
 	ConfigVariable *item,
-			*head = NULL,
-			*tail = NULL;
+			   *head = NULL,
+			   *tail = NULL;
 
 	/*
 	 * Locate the file to read.  Auxiliary files are optional.
@@ -553,8 +553,8 @@ parse_extension_control_file(ExtensionControlFile *control,
 				/* syntax error in name list */
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					errmsg("parameter \"%s\" must be a list of extension names",
-						   item->name)));
+				 errmsg("parameter \"%s\" must be a list of extension names",
+						item->name)));
 		}
 		else
@@ -632,12 +632,12 @@ static char *
 read_extension_script_file(const ExtensionControlFile *control,
 						   const char *filename)
 {
-	int		src_encoding;
-	int		dest_encoding = GetDatabaseEncoding();
-	bytea  *content;
+	int			src_encoding;
+	int			dest_encoding = GetDatabaseEncoding();
+	bytea	   *content;
 	char	   *src_str;
-	char   *dest_str;
-	int		len;
+	char	   *dest_str;
+	int			len;
 
 	content = read_binary_file(filename, 0, -1);
 
@@ -675,7 +675,7 @@ read_extension_script_file(const ExtensionControlFile *control,
 * filename is used only to report errors.
 *
 * Note: it's tempting to just use SPI to execute the string, but that does
- * not work very well.  The really serious problem is that SPI will parse,
+ * not work very well.  The really serious problem is that SPI will parse,
 * analyze, and plan the whole string before executing any of it; of course
 * this fails if there are any plannable statements referring to objects
 * created earlier in the script.  A lesser annoyance is that SPI insists
@@ -774,7 +774,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
 						 List *requiredSchemas,
 						 const char *schemaName, Oid schemaOid)
 {
-	char	*filename;
+	char	   *filename;
 	char	   *save_client_min_messages,
 			   *save_log_min_messages,
 			   *save_search_path;
@@ -809,8 +809,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
 	 * so that we won't spam the user with useless NOTICE messages from common
 	 * script actions like creating shell types.
 	 *
-	 * We use the equivalent of SET LOCAL to ensure the setting is undone
-	 * upon error.
+	 * We use the equivalent of SET LOCAL to ensure the setting is undone upon
+	 * error.
 	 */
 	save_client_min_messages =
 		pstrdup(GetConfigOption("client_min_messages", false));
@@ -832,8 +832,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
 	 * makes the target schema be the default creation target namespace.
 	 *
 	 * Note: it might look tempting to use PushOverrideSearchPath for this,
-	 * but we cannot do that.  We have to actually set the search_path GUC
-	 * in case the extension script examines or changes it.
+	 * but we cannot do that.  We have to actually set the search_path GUC in
+	 * case the extension script examines or changes it.
 	 */
 	save_search_path = pstrdup(GetConfigOption("search_path", false));
 
@@ -855,32 +855,32 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
 	/*
 	 * Set creating_extension and related variables so that
 	 * recordDependencyOnCurrentExtension and other functions do the right
-	 * things.  On failure, ensure we reset these variables.
+	 * things.  On failure, ensure we reset these variables.
 	 */
 	creating_extension = true;
 	CurrentExtensionObject = extensionOid;
 	PG_TRY();
 	{
-		char	*sql = read_extension_script_file(control, filename);
+		char	   *sql = read_extension_script_file(control, filename);
 
 		/*
 		 * If it's not relocatable, substitute the target schema name for
 		 * occcurrences of @extschema@.
 		 *
-		 * For a relocatable extension, we just run the script as-is.
-		 * There cannot be any need for @extschema@, else it wouldn't
-		 * be relocatable.
+		 * For a relocatable extension, we just run the script as-is. There
+		 * cannot be any need for @extschema@, else it wouldn't be
+		 * relocatable.
 		 */
 		if (!control->relocatable)
 		{
-			const char *qSchemaName = quote_identifier(schemaName);
+			const char *qSchemaName = quote_identifier(schemaName);
 
 			sql = text_to_cstring(
-				DatumGetTextPP(
-					DirectFunctionCall3(replace_text,
-										CStringGetTextDatum(sql),
-										CStringGetTextDatum("@extschema@"),
-										CStringGetTextDatum(qSchemaName))));
+								  DatumGetTextPP(
+								  DirectFunctionCall3(replace_text,
+													CStringGetTextDatum(sql),
+										 CStringGetTextDatum("@extschema@"),
+										CStringGetTextDatum(qSchemaName))));
 		}
 
 		/*
@@ -890,11 +890,11 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
 		if (control->module_pathname)
 		{
 			sql = text_to_cstring(
-				DatumGetTextPP(
-					DirectFunctionCall3(replace_text,
-										CStringGetTextDatum(sql),
-										CStringGetTextDatum("MODULE_PATHNAME"),
-										CStringGetTextDatum(control->module_pathname))));
+								  DatumGetTextPP(
+								  DirectFunctionCall3(replace_text,
+													CStringGetTextDatum(sql),
+									 CStringGetTextDatum("MODULE_PATHNAME"),
+						  CStringGetTextDatum(control->module_pathname))));
 		}
 
 		execute_sql_string(sql, filename);
@@ -1004,7 +1004,7 @@ get_ext_ver_list(ExtensionControlFile *control)
 	struct dirent *de;
 
 	location = get_extension_script_directory(control);
-	dir = AllocateDir(location);
+	dir = AllocateDir(location);
 	while ((de = ReadDir(dir, location)) != NULL)
 	{
 		char	   *vername;
@@ -1094,7 +1094,7 @@ identify_update_path(ExtensionControlFile *control,
 * is still good.
 *
 * Result is a List of names of versions to transition through (the initial
- * version is *not* included).  Returns NIL if no such path.
+ * version is *not* included).  Returns NIL if no such path.
 */
static List *
 find_update_path(List *evi_list,
@@ -1132,7 +1132,7 @@ find_update_path(List *evi_list,
 		foreach(lc, evi->reachable)
 		{
 			ExtensionVersionInfo *evi2 = (ExtensionVersionInfo *) lfirst(lc);
-			int		newdist;
+			int			newdist;
 
 			newdist = evi->distance + 1;
 			if (newdist < evi2->distance)
@@ -1178,10 +1178,10 @@ CreateExtension(CreateExtensionStmt *stmt)
 	DefElem    *d_schema = NULL;
 	DefElem    *d_new_version = NULL;
 	DefElem    *d_old_version = NULL;
-	char	*schemaName;
+	char	   *schemaName;
 	Oid			schemaOid;
-	char	*versionName;
-	char	*oldVersionName;
+	char	   *versionName;
+	char	   *oldVersionName;
 	Oid			extowner = GetUserId();
 	ExtensionControlFile *pcontrol;
 	ExtensionControlFile *control;
@@ -1195,10 +1195,10 @@ CreateExtension(CreateExtensionStmt *stmt)
 	check_valid_extension_name(stmt->extname);
 
 	/*
-	 * Check for duplicate extension name.  The unique index on
+	 * Check for duplicate extension name.  The unique index on
 	 * pg_extension.extname would catch this anyway, and serves as a backstop
-	 * in case of race conditions; but this is a friendlier error message,
-	 * and besides we need a check to support IF NOT EXISTS.
+	 * in case of race conditions; but this is a friendlier error message, and
+	 * besides we need a check to support IF NOT EXISTS.
 	 */
 	if (get_extension_oid(stmt->extname, true) != InvalidOid)
 	{
@@ -1218,8 +1218,8 @@ CreateExtension(CreateExtensionStmt *stmt)
 	}
 
 	/*
-	 * We use global variables to track the extension being created, so we
-	 * can create only one extension at the same time.
+	 * We use global variables to track the extension being created, so we can
+	 * create only one extension at the same time.
 	 */
 	if (creating_extension)
 		ereport(ERROR,
@@ -1306,8 +1306,8 @@ CreateExtension(CreateExtensionStmt *stmt)
 		if (list_length(updateVersions) == 1)
 		{
 			/*
-			 * Simple case where there's just one update script to run.
-			 * We will not need any follow-on update steps.
+			 * Simple case where there's just one update script to run. We
+			 * will not need any follow-on update steps.
 			 */
 			Assert(strcmp((char *) linitial(updateVersions), versionName) == 0);
 			updateVersions = NIL;
@@ -1351,9 +1351,9 @@ CreateExtension(CreateExtensionStmt *stmt)
 			strcmp(control->schema, schemaName) != 0)
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				errmsg("extension \"%s\" must be installed in schema \"%s\"",
-					   control->name,
-					   control->schema)));
+				 errmsg("extension \"%s\" must be installed in schema \"%s\"",
+						control->name,
+						control->schema)));
 
 		/* If the user is giving us the schema name, it must exist already */
 		schemaOid = get_namespace_oid(schemaName, false);
@@ -1362,7 +1362,7 @@ CreateExtension(CreateExtensionStmt *stmt)
 	{
 		/*
 		 * The extension is not relocatable and the author gave us a schema
-		 * for it.  We create the schema here if it does not already exist.
+		 * for it.  We create the schema here if it does not already exist.
 		 */
 		schemaName = control->schema;
 		schemaOid = get_namespace_oid(schemaName, true);
@@ -1380,13 +1380,13 @@ CreateExtension(CreateExtensionStmt *stmt)
 		 * Else, use the current default creation namespace, which is the
 		 * first explicit entry in the search_path.
 		 */
-		List	*search_path = fetch_search_path(false);
+		List	   *search_path = fetch_search_path(false);
 
-		if (search_path == NIL)	/* probably can't happen */
+		if (search_path == NIL) /* probably can't happen */
 			elog(ERROR, "there is no default creation target");
 		schemaOid = linitial_oid(search_path);
 		schemaName = get_namespace_name(schemaOid);
-		if (schemaName == NULL)	/* recently-deleted namespace? */
+		if (schemaName == NULL) /* recently-deleted namespace? */
 			elog(ERROR, "there is no default creation target");
 
 		list_free(search_path);
@@ -1397,13 +1397,13 @@ CreateExtension(CreateExtensionStmt *stmt)
 	 * extension script actually creates any objects there, it will fail if
 	 * the user doesn't have such permissions.  But there are cases such as
 	 * procedural languages where it's convenient to set schema = pg_catalog
-	 * yet we don't want to restrict the command to users with ACL_CREATE
-	 * for pg_catalog.
+	 * yet we don't want to restrict the command to users with ACL_CREATE for
+	 * pg_catalog.
 	 */
 
 	/*
-	 * Look up the prerequisite extensions, and build lists of their OIDs
-	 * and the OIDs of their target schemas.
+	 * Look up the prerequisite extensions, and build lists of their OIDs and
+	 * the OIDs of their target schemas.
 	 */
 	requiredExtensions = NIL;
 	requiredSchemas = NIL;
@@ -1453,8 +1453,8 @@ CreateExtension(CreateExtensionStmt *stmt)
 							 schemaName, schemaOid);
 
 	/*
-	 * If additional update scripts have to be executed, apply the updates
-	 * as though a series of ALTER EXTENSION UPDATE commands were given
+	 * If additional update scripts have to be executed, apply the updates as
+	 * though a series of ALTER EXTENSION UPDATE commands were given
 	 */
 	ApplyExtensionUpdates(extensionOid, pcontrol,
 						  versionName, updateVersions);
@@ -1653,7 +1653,7 @@ RemoveExtensionById(Oid extId)
 
 /*
 * This function lists the available extensions (one row per primary control
- * file in the control directory).  We parse each control file and report the
+ * file in the control directory).  We parse each control file and report the
 * interesting fields.
 *
 * The system view pg_available_extensions provides a user interface to this
@@ -1663,14 +1663,14 @@ RemoveExtensionById(Oid extId)
 Datum
 pg_available_extensions(PG_FUNCTION_ARGS)
 {
-	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	TupleDesc	tupdesc;
-	Tuplestorestate *tupstore;
-	MemoryContext per_query_ctx;
-	MemoryContext oldcontext;
-	char	   *location;
-	DIR		   *dir;
-	struct dirent *de;
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	TupleDesc	tupdesc;
+	Tuplestorestate *tupstore;
+	MemoryContext per_query_ctx;
+	MemoryContext oldcontext;
+	char	   *location;
+	DIR		   *dir;
+	struct dirent *de;
 
 	/* check to see if caller supports us returning a tuplestore */
 	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -1699,11 +1699,11 @@ pg_available_extensions(PG_FUNCTION_ARGS)
 	MemoryContextSwitchTo(oldcontext);
 
 	location = get_extension_control_directory();
-	dir = AllocateDir(location);
+	dir = AllocateDir(location);
 
 	/*
-	 * If the control directory doesn't exist, we want to silently return
-	 * an empty set.  Any other error will be reported by ReadDir.
+	 * If the control directory doesn't exist, we want to silently return an
+	 * empty set.  Any other error will be reported by ReadDir.
 	 */
 	if (dir == NULL && errno == ENOENT)
 	{
@@ -1762,7 +1762,7 @@ pg_available_extensions(PG_FUNCTION_ARGS)
 
 /*
 * This function lists the available extension versions (one row per
- * extension installation script).  For each version, we parse the related
+ * extension installation script).  For each version, we parse the related
 * control file(s) and report the interesting fields.
 *
 * The system view pg_available_extension_versions provides a user interface
@@ -1772,14 +1772,14 @@ pg_available_extensions(PG_FUNCTION_ARGS)
 Datum
 pg_available_extension_versions(PG_FUNCTION_ARGS)
 {
-	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	TupleDesc	tupdesc;
-	Tuplestorestate *tupstore;
-	MemoryContext per_query_ctx;
-	MemoryContext oldcontext;
-	char	   *location;
-	DIR		   *dir;
-	struct dirent *de;
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	TupleDesc	tupdesc;
+	Tuplestorestate *tupstore;
+	MemoryContext per_query_ctx;
+	MemoryContext oldcontext;
+	char	   *location;
+	DIR		   *dir;
+	struct dirent *de;
 
 	/* check to see if caller supports us returning a tuplestore */
 	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -1808,11 +1808,11 @@ pg_available_extension_versions(PG_FUNCTION_ARGS)
 	MemoryContextSwitchTo(oldcontext);
 
 	location = get_extension_control_directory();
-	dir = AllocateDir(location);
+	dir = AllocateDir(location);
 
 	/*
-	 * If the control directory doesn't exist, we want to silently return
-	 * an empty set.  Any other error will be reported by ReadDir.
+	 * If the control directory doesn't exist, we want to silently return an
+	 * empty set.  Any other error will be reported by ReadDir.
 	 */
 	if (dir == NULL && errno == ENOENT)
 	{
@@ -1867,7 +1867,7 @@ get_available_versions_for_extension(ExtensionControlFile *pcontrol,
 	struct dirent *de;
 
 	location = get_extension_script_directory(pcontrol);
-	dir = AllocateDir(location);
+	dir = AllocateDir(location);
 	/* Note this will fail if script directory doesn't exist */
 	while ((de = ReadDir(dir, location)) != NULL)
 	{
@@ -1962,11 +1962,11 @@ Datum
 pg_extension_update_paths(PG_FUNCTION_ARGS)
 {
 	Name		extname = PG_GETARG_NAME(0);
-	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	TupleDesc	tupdesc;
-	Tuplestorestate *tupstore;
-	MemoryContext per_query_ctx;
-	MemoryContext oldcontext;
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	TupleDesc	tupdesc;
+	Tuplestorestate *tupstore;
+	MemoryContext per_query_ctx;
+	MemoryContext oldcontext;
 	List	   *evi_list;
 	ExtensionControlFile *control;
 	ListCell   *lc1;
@@ -2079,8 +2079,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 	text	   *wherecond = PG_GETARG_TEXT_P(1);
 	char	   *tablename;
 	Relation	extRel;
-	ScanKeyData key[1];
-	SysScanDesc extScan;
+	ScanKeyData key[1];
+	SysScanDesc extScan;
 	HeapTuple	extTup;
 	Datum		arrayDatum;
 	Datum		elementDatum;
@@ -2092,8 +2092,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 	ArrayType  *a;
 
 	/*
-	 * We only allow this to be called from an extension's SQL script.
-	 * We shouldn't need any permissions check beyond that.
+	 * We only allow this to be called from an extension's SQL script. We
+	 * shouldn't need any permissions check beyond that.
 	 */
 	if (!creating_extension)
 		ereport(ERROR,
@@ -2103,8 +2103,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 
 	/*
 	 * Check that the table exists and is a member of the extension being
-	 * created.  This ensures that we don't need to register a dependency
-	 * to protect the extconfig entry.
+	 * created.  This ensures that we don't need to register a dependency to
+	 * protect the extconfig entry.
 	 */
 	tablename = get_rel_name(tableoid);
 	if (tablename == NULL)
@@ -2115,12 +2115,12 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 		CurrentExtensionObject)
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-		errmsg("table \"%s\" is not a member of the extension being created",
-			   tablename)));
+			 errmsg("table \"%s\" is not a member of the extension being created",
+					tablename)));
 
 	/*
-	 * Add the table OID and WHERE condition to the extension's extconfig
-	 * and extcondition arrays.
+	 * Add the table OID and WHERE condition to the extension's extconfig and
+	 * extcondition arrays.
 	 */
 
 	/* Find the pg_extension tuple */
@@ -2136,7 +2136,7 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 
 	extTup = systable_getnext(extScan);
 
-	if (!HeapTupleIsValid(extTup))	/* should not happen */
+	if (!HeapTupleIsValid(extTup))		/* should not happen */
 		elog(ERROR, "extension with oid %u does not exist",
 			 CurrentExtensionObject);
 
@@ -2162,7 +2162,7 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 		Assert(ARR_NDIM(a) == 1);
 		Assert(ARR_LBOUND(a)[0] == 1);
 
-		arrayIndex = ARR_DIMS(a)[0] + 1;	/* add after end */
+		arrayIndex = ARR_DIMS(a)[0] + 1;		/* add after end */
 
 		a = array_set(a, 1, &arrayIndex,
 					  elementDatum,
@@ -2193,7 +2193,7 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 		Assert(ARR_NDIM(a) == 1);
 		Assert(ARR_LBOUND(a)[0] == 1);
 
-		arrayIndex = ARR_DIMS(a)[0] + 1;	/* add after end */
+		arrayIndex = ARR_DIMS(a)[0] + 1;		/* add after end */
 
 		a = array_set(a, 1, &arrayIndex,
 					  elementDatum,
@@ -2231,12 +2231,12 @@ AlterExtensionNamespace(List *names, const char *newschema)
 	Oid			oldNspOid = InvalidOid;
 	AclResult	aclresult;
 	Relation	extRel;
-	ScanKeyData key[2];
-	SysScanDesc extScan;
+	ScanKeyData key[2];
+	SysScanDesc extScan;
 	HeapTuple	extTup;
 	Form_pg_extension extForm;
 	Relation	depRel;
-	SysScanDesc depScan;
+	SysScanDesc depScan;
 	HeapTuple	depTup;
 
 	if (list_length(names) != 1)
@@ -2275,7 +2275,7 @@ AlterExtensionNamespace(List *names, const char *newschema)
 
 	extTup = systable_getnext(extScan);
 
-	if (!HeapTupleIsValid(extTup))	/* should not happen */
+	if (!HeapTupleIsValid(extTup))		/* should not happen */
 		elog(ERROR, "extension with oid %u does not exist", extensionOid);
 
 	/* Copy tuple so we can modify it below */
@@ -2285,8 +2285,8 @@ AlterExtensionNamespace(List *names, const char *newschema)
 	systable_endscan(extScan);
 
 	/*
-	 * If the extension is already in the target schema, just silently
-	 * do nothing.
+	 * If the extension is already in the target schema, just silently do
+	 * nothing.
 	 */
 	if (extForm->extnamespace == nspOid)
 	{
@@ -2323,10 +2323,10 @@ AlterExtensionNamespace(List *names, const char *newschema)
 	{
 		Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
 		ObjectAddress dep;
-		Oid		dep_oldNspOid;
+		Oid			dep_oldNspOid;
 
 		/*
-		 * Ignore non-membership dependencies.  (Currently, the only other
+		 * Ignore non-membership dependencies.  (Currently, the only other
 		 * case we could see here is a normal dependency from another
 		 * extension.)
 		 */
@@ -2388,13 +2388,13 @@ void
 ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
 {
 	DefElem    *d_new_version = NULL;
-	char	*versionName;
-	char	*oldVersionName;
+	char	   *versionName;
+	char	   *oldVersionName;
 	ExtensionControlFile *control;
 	Oid			extensionOid;
 	Relation	extRel;
-	ScanKeyData key[1];
-	SysScanDesc extScan;
+	ScanKeyData key[1];
+	SysScanDesc extScan;
 	HeapTuple	extTup;
 	List	   *updateVersions;
 	Datum		datum;
@@ -2402,8 +2402,8 @@ ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
 	ListCell   *lc;
 
 	/*
-	 * We use global variables to track the extension being created, so we
-	 * can create/update only one extension at the same time.
+	 * We use global variables to track the extension being created, so we can
+	 * create/update only one extension at the same time.
 	 */
 	if (creating_extension)
 		ereport(ERROR,
@@ -2426,10 +2426,10 @@ ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
 	extTup = systable_getnext(extScan);
 
 	if (!HeapTupleIsValid(extTup))
-		ereport(ERROR,
-				(errcode(ERRCODE_UNDEFINED_OBJECT),
-				 errmsg("extension \"%s\" does not exist",
-						stmt->extname)));
+		ereport(ERROR,
+				(errcode(ERRCODE_UNDEFINED_OBJECT),
+				 errmsg("extension \"%s\" does not exist",
+						stmt->extname)));
 
 	extensionOid = HeapTupleGetOid(extTup);
 
@@ -2499,8 +2499,8 @@ ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
 	if (strcmp(oldVersionName, versionName) == 0)
 	{
 		ereport(NOTICE,
-		   (errmsg("version \"%s\" of extension \"%s\" is already installed",
-					versionName, stmt->extname)));
+			(errmsg("version \"%s\" of extension \"%s\" is already installed",
+					versionName, stmt->extname)));
 		return;
 	}
 
@@ -2545,8 +2545,8 @@ ApplyExtensionUpdates(Oid extensionOid,
 		List	   *requiredExtensions;
 		List	   *requiredSchemas;
 		Relation	extRel;
-		ScanKeyData key[1];
-		SysScanDesc extScan;
+		ScanKeyData key[1];
+		SysScanDesc extScan;
 		HeapTuple	extTup;
 		Form_pg_extension extForm;
 		Datum		values[Natts_pg_extension];
@@ -2573,7 +2573,7 @@ ApplyExtensionUpdates(Oid extensionOid,
 
 		extTup = systable_getnext(extScan);
 
-		if (!HeapTupleIsValid(extTup))	/* should not happen */
+		if (!HeapTupleIsValid(extTup))		/* should not happen */
 			elog(ERROR, "extension with oid %u does not exist",
 				 extensionOid);
 
@@ -2668,9 +2668,9 @@ ApplyExtensionUpdates(Oid extensionOid,
 								 schemaName, schemaOid);
 
 		/*
-		 * Update prior-version name and loop around.  Since execute_sql_string
-		 * did a final CommandCounterIncrement, we can update the pg_extension
-		 * row again.
+		 * Update prior-version name and loop around.  Since
+		 * execute_sql_string did a final CommandCounterIncrement, we can
+		 * update the pg_extension row again.
 		 */
 		oldVersionName = versionName;
 	}
@@ -2682,10 +2682,10 @@ ApplyExtensionUpdates(Oid extensionOid,
 void
 ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
 {
-	ObjectAddress extension;
-	ObjectAddress object;
-	Relation	relation;
-	Oid			oldExtension;
+	ObjectAddress extension;
+	ObjectAddress object;
+	Relation	relation;
+	Oid			oldExtension;
 
 	extension.classId = ExtensionRelationId;
 	extension.objectId = get_extension_oid(stmt->extname, false);
@@ -2697,10 +2697,10 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
 				   stmt->extname);
 
 	/*
-	 * Translate the parser representation that identifies the object into
-	 * an ObjectAddress.  get_object_address() will throw an error if the
-	 * object does not exist, and will also acquire a lock on the object to
-	 * guard against concurrent DROP and ALTER EXTENSION ADD/DROP operations.
+	 * Translate the parser representation that identifies the object into an
+	 * ObjectAddress.  get_object_address() will throw an error if the object
+	 * does not exist, and will also acquire a lock on the object to guard
+	 * against concurrent DROP and ALTER EXTENSION ADD/DROP operations.
 	 */
 	object = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
 								&relation, ShareUpdateExclusiveLock);
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index 13d6d882f8..21d52e06ba 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -586,8 +586,8 @@ AlterForeignDataWrapper(AlterFdwStmt *stmt)
 		 */
 		if (OidIsValid(fdwvalidator))
 			ereport(WARNING,
-			 (errmsg("changing the foreign-data wrapper validator can cause "
-					 "the options for dependent objects to become invalid")));
+			(errmsg("changing the foreign-data wrapper validator can cause "
+					"the options for dependent objects to become invalid")));
 	}
 	else
 	{
@@ -643,8 +643,8 @@ AlterForeignDataWrapper(AlterFdwStmt *stmt)
 		ObjectAddress referenced;
 
 		/*
-		 * Flush all existing dependency records of this FDW on functions;
-		 * we assume there can be none other than the ones we are fixing.
+		 * Flush all existing dependency records of this FDW on functions; we
+		 * assume there can be none other than the ones we are fixing.
 		 */
 		deleteDependencyRecordsForClass(ForeignDataWrapperRelationId, fdwId,
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index c8cbe035f0..03da168ff2 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -1665,7 +1665,7 @@ CreateCast(CreateCastStmt *stmt)
 		 * We also disallow creating binary-compatibility casts involving
 		 * domains.  Casting from a domain to its base type is already
 		 * allowed, and casting the other way ought to go through domain
-		 * coercion to permit constraint checking.  Again, if you're intent on
+		 * coercion to permit constraint checking.  Again, if you're intent on
 		 * having your own semantics for that, create a no-op cast function.
 		 *
 		 * NOTE: if we were to relax this, the above checks for composites
@@ -1830,7 +1830,7 @@ DropCast(DropCastStmt *stmt)
 Oid
 get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok)
 {
-	Oid		oid;
+	Oid			oid;
 
 	oid = GetSysCacheOid2(CASTSOURCETARGET,
 						  ObjectIdGetDatum(sourcetypeid),
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index cfcce55967..05e8234a0f 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -395,7 +395,7 @@ DefineIndex(RangeVar *heapRelation,
 	indexRelationId =
 		index_create(rel, indexRelationName, indexRelationId,
 					 indexInfo, indexColNames,
-					 accessMethodId, tablespaceId, collationObjectId, classObjectId,
+				   accessMethodId, tablespaceId, collationObjectId, classObjectId,
 					 coloptions, reloptions, primary,
 					 isconstraint, deferrable, initdeferred,
 					 allowSystemTableMods,
@@ -840,14 +840,14 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 		else
 		{
 			/* Index expression */
-			Node	*expr = attribute->expr;
+			Node	   *expr = attribute->expr;
 
 			Assert(expr != NULL);
 			atttype = exprType(expr);
 			attcollation = exprCollation(expr);
 
 			/*
-			 * Strip any top-level COLLATE clause.  This ensures that we treat
+			 * Strip any top-level COLLATE clause.  This ensures that we treat
 			 * "x COLLATE y" and "(x COLLATE y)" alike.
 			 */
 			while (IsA(expr, CollateExpr))
@@ -864,7 +864,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 			}
 			else
 			{
-				indexInfo->ii_KeyAttrNumbers[attn] = 0;	/* marks expression */
+				indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
 				indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
 													expr);
 
@@ -876,7 +876,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 				if (contain_subplans(expr))
 					ereport(ERROR,
 							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						errmsg("cannot use subquery in index expression")));
+					 errmsg("cannot use subquery in index expression")));
 				if (contain_agg_clause(expr))
 					ereport(ERROR,
 							(errcode(ERRCODE_GROUPING_ERROR),
@@ -904,8 +904,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 		/*
 		 * Check we have a collation iff it's a collatable type.  The only
 		 * expected failures here are (1) COLLATE applied to a noncollatable
-		 * type, or (2) index expression had an unresolved collation.  But
-		 * we might as well code this to be a complete consistency check.
+		 * type, or (2) index expression had an unresolved collation.  But we
+		 * might as well code this to be a complete consistency check.
 		 */
 		if (type_is_collatable(atttype))
 		{
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 68072dd421..aff5ac6ec4 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -126,7 +126,7 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname, bool missing_ok)
 
 	if (!HeapTupleIsValid(htup) && !missing_ok)
 	{
-		HeapTuple	amtup;
+		HeapTuple	amtup;
 
 		amtup = SearchSysCache1(AMOID, ObjectIdGetDatum(amID));
 		if (!HeapTupleIsValid(amtup))
@@ -134,8 +134,8 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname, bool missing_ok)
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_OBJECT),
 				 errmsg("operator family \"%s\" does not exist for access method \"%s\"",
-						NameListToString(opfamilyname),
-						NameStr(((Form_pg_am) GETSTRUCT(amtup))->amname))));
+					NameListToString(opfamilyname),
+					NameStr(((Form_pg_am) GETSTRUCT(amtup))->amname))));
 	}
 
 	return htup;
@@ -143,7 +143,7 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname, bool missing_ok)
 
 /*
 * get_opfamily_oid
- *	  find an opfamily OID by possibly qualified name
+ *	  find an opfamily OID by possibly qualified name
 *
 * If not found, returns InvalidOid if missing_ok, else throws error.
 */
@@ -202,7 +202,7 @@ OpClassCacheLookup(Oid amID, List *opclassname, bool missing_ok)
 
 	if (!HeapTupleIsValid(htup) && !missing_ok)
 	{
-		HeapTuple	amtup;
+		HeapTuple	amtup;
 
 		amtup = SearchSysCache1(AMOID, ObjectIdGetDatum(amID));
 		if (!HeapTupleIsValid(amtup))
@@ -219,7 +219,7 @@ OpClassCacheLookup(Oid amID, List *opclassname, bool missing_ok)
 
 /*
 * get_opclass_oid
- *	  find an opclass OID by possibly qualified name
+ *	  find an opclass OID by possibly qualified name
 *
 * If not found, returns InvalidOid if missing_ok, else throws error.
 */
@@ -1088,11 +1088,11 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
 	if (OidIsValid(member->sortfamily))
 	{
 		/*
-		 * Ordering op, check index supports that.  (We could perhaps also
+		 * Ordering op, check index supports that.  (We could perhaps also
 		 * check that the operator returns a type supported by the sortfamily,
 		 * but that seems more trouble than it's worth here.  If it does not,
-		 * the operator will never be matchable to any ORDER BY clause, but
-		 * no worse consequences can ensue.  Also, trying to check that would
+		 * the operator will never be matchable to any ORDER BY clause, but no
+		 * worse consequences can ensue.  Also, trying to check that would
 		 * create an ordering hazard during dump/reload: it's possible that
 		 * the family has been created but not yet populated with the required
 		 * operators.)
@@ -1108,8 +1108,8 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
 		if (!pg_am->amcanorderbyop)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-				errmsg("access method \"%s\" does not support ordering operators",
-					   NameStr(pg_am->amname))));
+			errmsg("access method \"%s\" does not support ordering operators",
+				   NameStr(pg_am->amname))));
 
 		ReleaseSysCache(amtup);
 	}
@@ -1276,7 +1276,7 @@ storeOperators(List *opfamilyname, Oid amoid,
 	foreach(l, operators)
 	{
 		OpFamilyMember *op = (OpFamilyMember *) lfirst(l);
-		char	oppurpose;
+		char		oppurpose;
 
 		/*
 		 * If adding to an existing family, check for conflict with an
@@ -1566,7 +1566,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
 	{
 		ereport(NOTICE,
 				(errmsg("operator class \"%s\" does not exist for access method \"%s\"",
-						NameListToString(stmt->opclassname), stmt->amname)));
+					  NameListToString(stmt->opclassname), stmt->amname)));
 		return;
 	}
 
@@ -1617,7 +1617,7 @@ RemoveOpFamily(RemoveOpFamilyStmt *stmt)
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_OBJECT),
 				 errmsg("operator family \"%s\" does not exist for access method \"%s\"",
-						NameListToString(stmt->opfamilyname), stmt->amname)));
+					NameListToString(stmt->opfamilyname), stmt->amname)));
 		return;
 	}
 
@@ -2029,7 +2029,7 @@ AlterOpClassNamespace(List *name, char *access_method, const char *newschema)
 Oid
 AlterOpClassNamespace_oid(Oid opclassOid, Oid newNspOid)
 {
-	Oid		oldNspOid;
+	Oid			oldNspOid;
 	Relation	rel;
 
 	rel = heap_open(OperatorClassRelationId, RowExclusiveLock);
@@ -2238,7 +2238,7 @@ AlterOpFamilyNamespace(List *name, char *access_method, const char *newschema)
 Oid
 AlterOpFamilyNamespace_oid(Oid opfamilyOid, Oid newNspOid)
 {
-	Oid		oldNspOid;
+	Oid			oldNspOid;
 	Relation	rel;
 
 	rel = heap_open(OperatorFamilyRelationId, RowExclusiveLock);
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index b4374a62f4..c99de4b240 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -464,7 +464,8 @@ AlterOperatorNamespace(List *names, List *argtypes, const char *newschema)
 	List	   *operatorName = names;
 	TypeName   *typeName1 = (TypeName *) linitial(argtypes);
 	TypeName   *typeName2 = (TypeName *) lsecond(argtypes);
-	Oid			operOid, nspOid;
+	Oid			operOid,
+				nspOid;
 	Relation	rel;
 
 	rel = heap_open(OperatorRelationId, RowExclusiveLock);
@@ -490,7 +491,7 @@ AlterOperatorNamespace(List *names, List *argtypes, const char *newschema)
 Oid
 AlterOperatorNamespace_oid(Oid operOid, Oid newNspOid)
 {
-	Oid		oldNspOid;
+	Oid			oldNspOid;
 	Relation	rel;
 
 	rel = heap_open(OperatorRelationId, RowExclusiveLock);
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 60aca3ce8e..89086aa371 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -255,10 +255,10 @@ PortalCleanup(Portal portal)
 	if (queryDesc)
 	{
 		/*
-		 * Reset the queryDesc before anything else.  This prevents us
-		 * from trying to shut down the executor twice, in case of an
-		 * error below.  The transaction abort mechanisms will take care
-		 * of resource cleanup in such a case.
+		 * Reset the queryDesc before anything else.  This prevents us from
+		 * trying to shut down the executor twice, in case of an error below.
+		 * The transaction abort mechanisms will take care of resource cleanup
+		 * in such a case.
 		 */
 		portal->queryDesc = NULL;
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index adbf5872f3..dfa2ab0026 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -382,7 +382,7 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
 
 	/* sizeof(ParamListInfoData) includes the first array element */
 	paramLI = (ParamListInfo)
 		palloc(sizeof(ParamListInfoData) +
-			   (num_params - 1) *sizeof(ParamExternData));
+			   (num_params - 1) * sizeof(ParamExternData));
 	/* we have static list of params, so no hooks needed */
 	paramLI->paramFetch = NULL;
 	paramLI->paramFetchArg = NULL;
diff --git a/src/backend/commands/seclabel.c b/src/backend/commands/seclabel.c
index 1c96b005d7..7afb7139a6 100644
--- a/src/backend/commands/seclabel.c
+++ b/src/backend/commands/seclabel.c
@@ -1,7 +1,7 @@
 /* -------------------------------------------------------------------------
 *
 * seclabel.c
- *	  routines to support security label feature.
+ *	  routines to support security label feature.
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
@@ -28,7 +28,7 @@
 typedef struct
 {
 	const char *provider_name;
-	check_object_relabel_type hook;
+	check_object_relabel_type hook;
 } LabelProvider;
 
 static List *label_provider_list = NIL;
@@ -42,9 +42,9 @@ void
 ExecSecLabelStmt(SecLabelStmt *stmt)
 {
 	LabelProvider *provider = NULL;
-	ObjectAddress address;
-	Relation	relation;
-	ListCell   *lc;
+	ObjectAddress address;
+	Relation	relation;
+	ListCell   *lc;
 
 	/*
 	 * Find the named label provider, or if none specified, check whether
@@ -55,16 +55,16 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
 		if (label_provider_list == NIL)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("no security label providers have been loaded")));
+				 errmsg("no security label providers have been loaded")));
 		if (lnext(list_head(label_provider_list)) != NULL)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("must specify provider when multiple security label providers have been loaded")));
+			 errmsg("must specify provider when multiple security label providers have been loaded")));
 		provider = (LabelProvider *) linitial(label_provider_list);
 	}
 	else
 	{
-		foreach (lc, label_provider_list)
+		foreach(lc, label_provider_list)
 		{
 			LabelProvider *lp = lfirst(lc);
 
@@ -82,10 +82,10 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
 	}
 
 	/*
-	 * Translate the parser representation which identifies this object
-	 * into an ObjectAddress.  get_object_address() will throw an error if
-	 * the object does not exist, and will also acquire a lock on the
-	 * target to guard against concurrent modifications.
+	 * Translate the parser representation which identifies this object into
+	 * an ObjectAddress.  get_object_address() will throw an error if the
+	 * object does not exist, and will also acquire a lock on the target to
+	 * guard against concurrent modifications.
 	 */
 	address = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
 								 &relation, ShareUpdateExclusiveLock);
@@ -98,6 +98,7 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
 	switch (stmt->objtype)
 	{
 		case OBJECT_COLUMN:
+
 			/*
 			 * Allow security labels only on columns of tables, views,
 			 * composite types, and foreign tables (which are the only
@@ -117,7 +118,7 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
 	}
 
 	/* Provider gets control here, may throw ERROR to veto new label. */
-	(*provider->hook)(&address, stmt->label);
+	(*provider->hook) (&address, stmt->label);
 
 	/* Apply new label. */
 	SetSecurityLabel(&address, provider->provider_name, stmt->label);
@@ -140,8 +141,8 @@ char *
 GetSecurityLabel(const ObjectAddress *object, const char *provider)
 {
 	Relation	pg_seclabel;
-	ScanKeyData keys[4];
-	SysScanDesc scan;
+	ScanKeyData keys[4];
+	SysScanDesc scan;
 	HeapTuple	tuple;
 	Datum		datum;
 	bool		isnull;
@@ -196,8 +197,8 @@ SetSecurityLabel(const ObjectAddress *object,
 				 const char *provider, const char *label)
 {
 	Relation	pg_seclabel;
-	ScanKeyData keys[4];
-	SysScanDesc scan;
+	ScanKeyData keys[4];
+	SysScanDesc scan;
 	HeapTuple	oldtup;
 	HeapTuple	newtup = NULL;
 	Datum		values[Natts_pg_seclabel];
@@ -281,8 +282,8 @@ void
 DeleteSecurityLabel(const ObjectAddress *object)
 {
 	Relation	pg_seclabel;
-	ScanKeyData skey[3];
-	SysScanDesc scan;
+	ScanKeyData skey[3];
+	SysScanDesc scan;
 	HeapTuple	oldtup;
 	int			nkeys;
 
@@ -323,8 +324,8 @@ DeleteSecurityLabel(const ObjectAddress *object)
 void
 register_label_provider(const char *provider_name, check_object_relabel_type hook)
 {
-	LabelProvider *provider;
-	MemoryContext oldcxt;
+	LabelProvider *provider;
+	MemoryContext oldcxt;
 
 	oldcxt = MemoryContextSwitchTo(TopMemoryContext);
 	provider = palloc(sizeof(LabelProvider));
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index bfa94a0c11..6a91a102dc 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -287,7 +287,7 @@ ResetSequence(Oid seq_relid)
 	seq->log_cnt = 1;
 
 	/*
-	 * Create a new storage file for the sequence.  We want to keep the
+	 * Create a new storage file for the sequence.  We want to keep the
 	 * sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs.
 	 */
 	RelationSetNewRelfilenode(seq_rel, InvalidTransactionId);
@@ -1037,7 +1037,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
 
 	/*
 	 * If the sequence has been transactionally replaced since we last saw it,
-	 * discard any cached-but-unissued values.  We do not touch the currval()
+	 * discard any cached-but-unissued values.  We do not touch the currval()
 	 * state, however.
 	 */
 	if (seqrel->rd_rel->relfilenode != elm->filenode)
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 886b656b43..790bc2a521 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -286,9 +286,9 @@ static void ATWrongRelkindError(Relation rel, int allowed_targets);
 static void ATSimpleRecursion(List **wqueue, Relation rel,
 				  AlterTableCmd *cmd, bool recurse, LOCKMODE lockmode);
 static void ATTypedTableRecursion(List **wqueue, Relation rel, AlterTableCmd *cmd,
-				LOCKMODE lockmode);
+					  LOCKMODE lockmode);
 static List *find_typed_table_dependencies(Oid typeOid, const char *typeName,
-				DropBehavior behavior);
+							  DropBehavior behavior);
 static void ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, bool recursing,
 				AlterTableCmd *cmd, LOCKMODE lockmode);
 static void ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
@@ -311,7 +311,7 @@ static void ATExecSetOptions(Relation rel, const char *colName,
 static void ATExecSetStorage(Relation rel, const char *colName,
 				 Node *newValue, LOCKMODE lockmode);
 static void ATPrepDropColumn(List **wqueue, Relation rel, bool recurse, bool recursing,
-				AlterTableCmd *cmd, LOCKMODE lockmode);
+				 AlterTableCmd *cmd, LOCKMODE lockmode);
 static void ATExecDropColumn(List **wqueue, Relation rel, const char *colName,
 				 DropBehavior behavior,
 				 bool recurse, bool recursing,
@@ -320,9 +320,9 @@ static void ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
 			   IndexStmt *stmt, bool is_rebuild, LOCKMODE lockmode);
 static void ATExecAddConstraint(List **wqueue,
 					AlteredTableInfo *tab, Relation rel,
-					Constraint *newConstraint, bool recurse, LOCKMODE lockmode);
+				 Constraint *newConstraint, bool recurse, LOCKMODE lockmode);
 static void ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
-					IndexStmt *stmt, LOCKMODE lockmode);
+						 IndexStmt *stmt, LOCKMODE lockmode);
 static void ATAddCheckConstraint(List **wqueue,
 					 AlteredTableInfo *tab, Relation rel,
 					 Constraint *constr,
@@ -339,7 +339,7 @@ static void ATPrepAlterColumnType(List **wqueue,
 				  AlterTableCmd *cmd, LOCKMODE lockmode);
 static bool ATColumnChangeRequiresRewrite(Node *expr, AttrNumber varattno);
 static void ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
-				AlterTableCmd *cmd, LOCKMODE lockmode);
+					  AlterTableCmd *cmd, LOCKMODE lockmode);
 static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode);
 static void ATPostAlterTypeParse(char *cmd, List **wqueue, LOCKMODE lockmode);
 static void change_owner_recurse_to_sequences(Oid relationOid,
@@ -351,7 +351,7 @@ static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel,
 static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode);
 static void ATExecSetRelOptions(Relation rel, List *defList, bool isReset, LOCKMODE lockmode);
 static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
-						   char fires_when, bool skip_system, LOCKMODE lockmode);
+					char fires_when, bool skip_system, LOCKMODE lockmode);
 static void ATExecEnableDisableRule(Relation rel, char *rulename,
 						char fires_when, LOCKMODE lockmode);
 static void ATPrepAddInherit(Relation child_rel);
@@ -412,7 +412,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
 	/*
 	 * Check consistency of arguments
 	 */
-	if (stmt->oncommit != ONCOMMIT_NOOP
+	if (stmt->oncommit != ONCOMMIT_NOOP
 		&& stmt->relation->relpersistence != RELPERSISTENCE_TEMP)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
@@ -547,7 +547,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
char relkind, Oid ownerId) if (relkind == RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("default values on foreign tables are not supported"))); + errmsg("default values on foreign tables are not supported"))); Assert(colDef->cooked_default == NULL); @@ -706,7 +706,7 @@ DropErrorMsgWrongType(const char *relname, char wrongkind, char rightkind) /* * RemoveRelations * Implements DROP TABLE, DROP INDEX, DROP SEQUENCE, DROP VIEW, - * DROP FOREIGN TABLE + * DROP FOREIGN TABLE */ void RemoveRelations(DropStmt *drop) @@ -1454,11 +1454,11 @@ MergeAttributes(List *schema, List *supers, char relpersistence, if (defCollId != attribute->attcollation) ereport(ERROR, (errcode(ERRCODE_COLLATION_MISMATCH), - errmsg("inherited column \"%s\" has a collation conflict", - attributeName), + errmsg("inherited column \"%s\" has a collation conflict", + attributeName), errdetail("\"%s\" versus \"%s\"", get_collation_name(defCollId), - get_collation_name(attribute->attcollation)))); + get_collation_name(attribute->attcollation)))); /* Copy storage parameter */ if (def->storage == 0) @@ -2061,8 +2061,8 @@ renameatt_internal(Oid myrelid, relkind != RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, view, composite type, index or foreign table", - RelationGetRelationName(targetrelation)))); + errmsg("\"%s\" is not a table, view, composite type, index or foreign table", + RelationGetRelationName(targetrelation)))); /* * permissions checking. only the owner of a class can change its schema. @@ -2138,7 +2138,7 @@ renameatt_internal(Oid myrelid, ListCell *lo; child_oids = find_typed_table_dependencies(targetrelation->rd_rel->reltype, - RelationGetRelationName(targetrelation), + RelationGetRelationName(targetrelation), behavior); foreach(lo, child_oids) @@ -2211,11 +2211,11 @@ void renameatt(Oid myrelid, RenameStmt *stmt) { renameatt_internal(myrelid, - stmt->subname, /* old att name */ - stmt->newname, /* new att name */ - interpretInhOption(stmt->relation->inhOpt), /* recursive? */ - false, /* recursing? */ - 0, /* expected inhcount */ + stmt->subname, /* old att name */ + stmt->newname, /* new att name */ + interpretInhOption(stmt->relation->inhOpt), /* recursive? */ + false, /* recursing? */ + 0, /* expected inhcount */ stmt->behavior); } @@ -2460,7 +2460,7 @@ void AlterTable(AlterTableStmt *stmt) { Relation rel; - LOCKMODE lockmode = AlterTableGetLockLevel(stmt->cmds); + LOCKMODE lockmode = AlterTableGetLockLevel(stmt->cmds); /* * Acquire same level of lock as already acquired during parsing. 
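pgindent only re-wraps comments and prototypes here, but the surrounding logic is the 9.1 change worth pausing on: AlterTable() now asks AlterTableGetLockLevel() for the weakest lock that is still safe for the entire subcommand list, as the hunks that follow show. That computation is a strongest-lock-wins fold; the sketch below condenses it to three representative cases, where the real switch (visible below) enumerates every AT_* subtype.

```c
#include "postgres.h"
#include "nodes/parsenodes.h"
#include "storage/lock.h"

/* Condensed restatement of the fold; not the backend's actual function. */
static LOCKMODE
get_lock_level_sketch(List *cmds)
{
	ListCell   *lcmd;
	LOCKMODE	lockmode = ShareUpdateExclusiveLock;	/* weakest considered */

	foreach(lcmd, cmds)
	{
		AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
		LOCKMODE	cmd_lockmode = AccessExclusiveLock; /* safe default */

		switch (cmd->subtype)
		{
			case AT_ColumnDefault:		/* affects writers only */
				cmd_lockmode = ShareRowExclusiveLock;
				break;
			case AT_SetStatistics:		/* maintenance strategy only */
				cmd_lockmode = ShareUpdateExclusiveLock;
				break;
			default:					/* anything else: be conservative */
				cmd_lockmode = AccessExclusiveLock;
				break;
		}
		if (cmd_lockmode > lockmode)	/* modes are ordered by strength */
			lockmode = cmd_lockmode;
	}
	return lockmode;
}
```

Because lock modes are plain integers ordered by strength, the `>` comparison is all it takes to accumulate the strictest requirement across subcommands.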
@@ -2531,7 +2531,7 @@ AlterTable(AlterTableStmt *stmt) } ATController(rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt), - lockmode); + lockmode); } /* @@ -2549,7 +2549,7 @@ void AlterTableInternal(Oid relid, List *cmds, bool recurse) { Relation rel; - LOCKMODE lockmode = AlterTableGetLockLevel(cmds); + LOCKMODE lockmode = AlterTableGetLockLevel(cmds); rel = relation_open(relid, lockmode); @@ -2581,31 +2581,33 @@ LOCKMODE AlterTableGetLockLevel(List *cmds) { ListCell *lcmd; - LOCKMODE lockmode = ShareUpdateExclusiveLock; + LOCKMODE lockmode = ShareUpdateExclusiveLock; foreach(lcmd, cmds) { AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); - LOCKMODE cmd_lockmode = AccessExclusiveLock; /* default for compiler */ + LOCKMODE cmd_lockmode = AccessExclusiveLock; /* default for compiler */ switch (cmd->subtype) { - /* - * Need AccessExclusiveLock for these subcommands because they - * affect or potentially affect both read and write operations. - * - * New subcommand types should be added here by default. - */ - case AT_AddColumn: /* may rewrite heap, in some cases and visible to SELECT */ - case AT_DropColumn: /* change visible to SELECT */ + /* + * Need AccessExclusiveLock for these subcommands because they + * affect or potentially affect both read and write + * operations. + * + * New subcommand types should be added here by default. + */ + case AT_AddColumn: /* may rewrite heap, in some cases and visible + * to SELECT */ + case AT_DropColumn: /* change visible to SELECT */ case AT_AddColumnToView: /* CREATE VIEW */ case AT_AlterColumnType: /* must rewrite heap */ case AT_DropConstraint: /* as DROP INDEX */ - case AT_AddOids: /* must rewrite heap */ - case AT_DropOids: /* calls AT_DropColumn */ + case AT_AddOids: /* must rewrite heap */ + case AT_DropOids: /* calls AT_DropColumn */ case AT_EnableAlwaysRule: /* may change SELECT rules */ case AT_EnableReplicaRule: /* may change SELECT rules */ - case AT_EnableRule: /* may change SELECT rules */ + case AT_EnableRule: /* may change SELECT rules */ case AT_DisableRule: /* may change SELECT rules */ case AT_ChangeOwner: /* change visible to SELECT */ case AT_SetTableSpace: /* must rewrite heap */ @@ -2615,12 +2617,12 @@ AlterTableGetLockLevel(List *cmds) cmd_lockmode = AccessExclusiveLock; break; - /* - * These subcommands affect write operations only. - */ + /* + * These subcommands affect write operations only. + */ case AT_ColumnDefault: - case AT_ProcessedConstraint: /* becomes AT_AddConstraint */ - case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */ + case AT_ProcessedConstraint: /* becomes AT_AddConstraint */ + case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */ case AT_EnableTrig: case AT_EnableAlwaysTrig: case AT_EnableReplicaTrig: @@ -2629,7 +2631,7 @@ AlterTableGetLockLevel(List *cmds) case AT_DisableTrig: case AT_DisableTrigAll: case AT_DisableTrigUser: - case AT_AddIndex: /* from ADD CONSTRAINT */ + case AT_AddIndex: /* from ADD CONSTRAINT */ case AT_AddIndexConstraint: cmd_lockmode = ShareRowExclusiveLock; break; @@ -2644,14 +2646,17 @@ AlterTableGetLockLevel(List *cmds) case CONSTR_EXCLUSION: case CONSTR_PRIMARY: case CONSTR_UNIQUE: + /* * Cases essentially the same as CREATE INDEX. We - * could reduce the lock strength to ShareLock if we - * can work out how to allow concurrent catalog updates. + * could reduce the lock strength to ShareLock if + * we can work out how to allow concurrent catalog + * updates. 
*/ cmd_lockmode = ShareRowExclusiveLock; break; case CONSTR_FOREIGN: + /* * We add triggers to both tables when we add a * Foreign Key, so the lock level must be at least @@ -2666,26 +2671,29 @@ AlterTableGetLockLevel(List *cmds) } break; - /* - * These subcommands affect inheritance behaviour. Queries started before us - * will continue to see the old inheritance behaviour, while queries started - * after we commit will see new behaviour. No need to prevent reads or writes - * to the subtable while we hook it up though. In both cases the parent table - * is locked with AccessShareLock. - */ + /* + * These subcommands affect inheritance behaviour. Queries + * started before us will continue to see the old inheritance + * behaviour, while queries started after we commit will see + * new behaviour. No need to prevent reads or writes to the + * subtable while we hook it up though. In both cases the + * parent table is locked with AccessShareLock. + */ case AT_AddInherit: case AT_DropInherit: cmd_lockmode = ShareUpdateExclusiveLock; break; - /* - * These subcommands affect general strategies for performance and maintenance, - * though don't change the semantic results from normal data reads and writes. - * Delaying an ALTER TABLE behind currently active writes only delays the point - * where the new strategy begins to take effect, so there is no benefit in waiting. - * In this case the minimum restriction applies: we don't currently allow - * concurrent catalog updates. - */ + /* + * These subcommands affect general strategies for performance + * and maintenance, though don't change the semantic results + * from normal data reads and writes. Delaying an ALTER TABLE + * behind currently active writes only delays the point where + * the new strategy begins to take effect, so there is no + * benefit in waiting. In this case the minimum restriction + * applies: we don't currently allow concurrent catalog + * updates. + */ case AT_SetStatistics: case AT_ClusterOn: case AT_DropCluster: @@ -2698,7 +2706,7 @@ AlterTableGetLockLevel(List *cmds) cmd_lockmode = ShareUpdateExclusiveLock; break; - default: /* oops */ + default: /* oops */ elog(ERROR, "unrecognized alter table type: %d", (int) cmd->subtype); break; @@ -2773,7 +2781,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, { case AT_AddColumn: /* ADD COLUMN */ ATSimplePermissions(rel, - ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE); + ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE); ATPrepAddColumn(wqueue, rel, recurse, recursing, cmd, lockmode); /* Recursion occurs during execution phase */ pass = AT_PASS_ADD_COL; @@ -2793,19 +2801,19 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, * substitutes default values into INSERTs before it expands * rules. */ - ATSimplePermissions(rel, ATT_TABLE|ATT_VIEW); + ATSimplePermissions(rel, ATT_TABLE | ATT_VIEW); ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); /* No command-specific prep needed */ pass = cmd->def ? 
AT_PASS_ADD_CONSTR : AT_PASS_DROP; break; case AT_DropNotNull: /* ALTER COLUMN DROP NOT NULL */ - ATSimplePermissions(rel, ATT_TABLE|ATT_FOREIGN_TABLE); + ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); /* No command-specific prep needed */ pass = AT_PASS_DROP; break; case AT_SetNotNull: /* ALTER COLUMN SET NOT NULL */ - ATSimplePermissions(rel, ATT_TABLE|ATT_FOREIGN_TABLE); + ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); /* No command-specific prep needed */ pass = AT_PASS_ADD_CONSTR; @@ -2818,7 +2826,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, break; case AT_SetOptions: /* ALTER COLUMN SET ( options ) */ case AT_ResetOptions: /* ALTER COLUMN RESET ( options ) */ - ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX); + ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX); /* This command never recurses */ pass = AT_PASS_MISC; break; @@ -2830,7 +2838,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, break; case AT_DropColumn: /* DROP COLUMN */ ATSimplePermissions(rel, - ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE); + ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE); ATPrepDropColumn(wqueue, rel, recurse, recursing, cmd, lockmode); /* Recursion occurs during execution phase */ pass = AT_PASS_DROP; @@ -2849,7 +2857,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, cmd->subtype = AT_AddConstraintRecurse; pass = AT_PASS_ADD_CONSTR; break; - case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */ + case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */ ATSimplePermissions(rel, ATT_TABLE); /* This command never recurses */ /* No command-specific prep needed */ @@ -2865,7 +2873,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, break; case AT_AlterColumnType: /* ALTER COLUMN TYPE */ ATSimplePermissions(rel, - ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE); + ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE); /* Performs own recursion */ ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd, lockmode); pass = AT_PASS_ALTER_TYPE; @@ -2904,14 +2912,14 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, pass = AT_PASS_DROP; break; case AT_SetTableSpace: /* SET TABLESPACE */ - ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX); + ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX); /* This command never recurses */ ATPrepSetTableSpace(tab, rel, cmd->name, lockmode); pass = AT_PASS_MISC; /* doesn't actually matter */ break; case AT_SetRelOptions: /* SET (...) */ case AT_ResetRelOptions: /* RESET (...) 
*/ - ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX); + ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX); /* This command never recurses */ /* No command-specific prep needed */ pass = AT_PASS_MISC; @@ -3072,11 +3080,11 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, break; case AT_DropColumn: /* DROP COLUMN */ ATExecDropColumn(wqueue, rel, cmd->name, - cmd->behavior, false, false, cmd->missing_ok, lockmode); + cmd->behavior, false, false, cmd->missing_ok, lockmode); break; case AT_DropColumnRecurse: /* DROP COLUMN with recursion */ ATExecDropColumn(wqueue, rel, cmd->name, - cmd->behavior, true, false, cmd->missing_ok, lockmode); + cmd->behavior, true, false, cmd->missing_ok, lockmode); break; case AT_AddIndex: /* ADD INDEX */ ATExecAddIndex(tab, rel, (IndexStmt *) cmd->def, false, lockmode); @@ -3092,7 +3100,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def, true, lockmode); break; - case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */ + case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */ ATExecAddIndexConstraint(tab, rel, (IndexStmt *) cmd->def, lockmode); break; case AT_ValidateConstraint: @@ -3156,7 +3164,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, case AT_EnableTrig: /* ENABLE TRIGGER name */ ATExecEnableDisableTrigger(rel, cmd->name, - TRIGGER_FIRES_ON_ORIGIN, false, lockmode); + TRIGGER_FIRES_ON_ORIGIN, false, lockmode); break; case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */ ATExecEnableDisableTrigger(rel, cmd->name, @@ -3164,7 +3172,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, break; case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */ ATExecEnableDisableTrigger(rel, cmd->name, - TRIGGER_FIRES_ON_REPLICA, false, lockmode); + TRIGGER_FIRES_ON_REPLICA, false, lockmode); break; case AT_DisableTrig: /* DISABLE TRIGGER name */ ATExecEnableDisableTrigger(rel, cmd->name, @@ -3172,7 +3180,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, break; case AT_EnableTrigAll: /* ENABLE TRIGGER ALL */ ATExecEnableDisableTrigger(rel, NULL, - TRIGGER_FIRES_ON_ORIGIN, false, lockmode); + TRIGGER_FIRES_ON_ORIGIN, false, lockmode); break; case AT_DisableTrigAll: /* DISABLE TRIGGER ALL */ ATExecEnableDisableTrigger(rel, NULL, @@ -3180,7 +3188,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, break; case AT_EnableTrigUser: /* ENABLE TRIGGER USER */ ATExecEnableDisableTrigger(rel, NULL, - TRIGGER_FIRES_ON_ORIGIN, true, lockmode); + TRIGGER_FIRES_ON_ORIGIN, true, lockmode); break; case AT_DisableTrigUser: /* DISABLE TRIGGER USER */ ATExecEnableDisableTrigger(rel, NULL, @@ -3254,8 +3262,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode) * (Eventually we'll probably need to check for composite type * dependencies even when we're just scanning the table without a * rewrite, but at the moment a composite type does not enforce any - * constraints, so it's not necessary/appropriate to enforce them - * just during ALTER.) + * constraints, so it's not necessary/appropriate to enforce them just + * during ALTER.) */ if (tab->newvals != NIL || tab->rewrite) { @@ -3386,8 +3394,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode) con->conid); /* - * No need to mark the constraint row as validated, - * we did that when we inserted the row earlier. + * No need to mark the constraint row as validated, we did + * that when we inserted the row earlier. 
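The ATExecCmd hunks above re-align the ATExecEnableDisableTrigger() call sites for the ORIGIN/REPLICA/ALWAYS trigger states. For orientation, this is roughly how a trigger's tgenabled state is tested against session_replication_role at fire time; a simplified, hypothetical predicate, not the backend's actual code:

```c
#include "postgres.h"
#include "catalog/pg_trigger.h"
#include "commands/trigger.h"

static bool
trigger_should_fire_sketch(char tgenabled, int replication_role)
{
	switch (tgenabled)
	{
		case TRIGGER_FIRES_ALWAYS:
			return true;
		case TRIGGER_DISABLED:
			return false;
		case TRIGGER_FIRES_ON_ORIGIN:	/* the default state */
			return replication_role != SESSION_REPLICATION_ROLE_REPLICA;
		case TRIGGER_FIRES_ON_REPLICA:
			return replication_role == SESSION_REPLICATION_ROLE_REPLICA;
		default:
			return false;
	}
}
```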
*/ heap_close(refrel, NoLock); @@ -3723,7 +3731,7 @@ ATGetQueueEntry(List **wqueue, Relation rel) static void ATSimplePermissions(Relation rel, int allowed_targets) { - int actual_target; + int actual_target; switch (rel->rd_rel->relkind) { @@ -3779,16 +3787,16 @@ ATWrongRelkindError(Relation rel, int allowed_targets) case ATT_TABLE: msg = _("\"%s\" is not a table"); break; - case ATT_TABLE|ATT_INDEX: + case ATT_TABLE | ATT_INDEX: msg = _("\"%s\" is not a table or index"); break; - case ATT_TABLE|ATT_VIEW: + case ATT_TABLE | ATT_VIEW: msg = _("\"%s\" is not a table or view"); break; - case ATT_TABLE|ATT_FOREIGN_TABLE: + case ATT_TABLE | ATT_FOREIGN_TABLE: msg = _("\"%s\" is not a table or foreign table"); break; - case ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE: + case ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE: msg = _("\"%s\" is not a table, composite type, or foreign table"); break; case ATT_VIEW: @@ -4032,7 +4040,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), errmsg("cannot alter type \"%s\" because it is the type of a typed table", typeName), - errhint("Use ALTER ... CASCADE to alter the typed tables too."))); + errhint("Use ALTER ... CASCADE to alter the typed tables too."))); else result = lappend_oid(result, HeapTupleGetOid(tuple)); } @@ -4103,9 +4111,9 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, /* * Are we adding the column to a recursion child? If so, check whether to - * merge with an existing definition for the column. If we do merge, - * we must not recurse. Children will already have the column, and - * recursing into them would mess up attinhcount. + * merge with an existing definition for the column. If we do merge, we + * must not recurse. Children will already have the column, and recursing + * into them would mess up attinhcount. */ if (colDef->inhcount > 0) { @@ -4133,10 +4141,10 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, ereport(ERROR, (errcode(ERRCODE_COLLATION_MISMATCH), errmsg("child table \"%s\" has different collation for column \"%s\"", - RelationGetRelationName(rel), colDef->colname), + RelationGetRelationName(rel), colDef->colname), errdetail("\"%s\" versus \"%s\"", get_collation_name(ccollid), - get_collation_name(childatt->attcollation)))); + get_collation_name(childatt->attcollation)))); /* If it's OID, child column must actually be OID */ if (isOid && childatt->attnum != ObjectIdAttributeNumber) @@ -4265,7 +4273,7 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, if (relkind == RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("default values on foreign tables are not supported"))); + errmsg("default values on foreign tables are not supported"))); rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault)); rawEnt->attnum = attribute.attnum; @@ -5170,10 +5178,11 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, elog(ERROR, "index \"%s\" is not unique", indexName); /* - * Determine name to assign to constraint. We require a constraint to + * Determine name to assign to constraint. We require a constraint to * have the same name as the underlying index; therefore, use the index's - * existing name as the default constraint name, and if the user explicitly - * gives some other name for the constraint, rename the index to match. 
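The re-wrapped ATExecAddIndexConstraint() comment above states the naming invariant for ADD CONSTRAINT ... USING INDEX: constraint and index must end up with the same name, defaulting to the index's, and the index is renamed if the user picked something else. A tiny hypothetical helper, in plain C, just to make the rule concrete:

```c
#include <stdbool.h>
#include <string.h>

/* Invented helper: pick the constraint name and report whether the
 * underlying index must be renamed to match it. */
static const char *
choose_constraint_name_sketch(const char *user_name, const char *index_name,
							  bool *need_index_rename)
{
	if (user_name == NULL)
	{
		*need_index_rename = false;
		return index_name;			/* default: reuse the index's name */
	}
	*need_index_rename = (strcmp(user_name, index_name) != 0);
	return user_name;				/* rename the index if the names differ */
}
```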
+ * existing name as the default constraint name, and if the user + * explicitly gives some other name for the constraint, rename the index + * to match. */ constraintName = stmt->idxname; if (constraintName == NULL) @@ -5216,7 +5225,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, */ static void ATExecAddConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, - Constraint *newConstraint, bool recurse, LOCKMODE lockmode) + Constraint *newConstraint, bool recurse, LOCKMODE lockmode) { Assert(IsA(newConstraint, Constraint)); @@ -5337,9 +5346,9 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, /* * If the constraint got merged with an existing constraint, we're done. - * We mustn't recurse to child tables in this case, because they've already - * got the constraint, and visiting them again would lead to an incorrect - * value for coninhcount. + * We mustn't recurse to child tables in this case, because they've + * already got the constraint, and visiting them again would lead to an + * incorrect value for coninhcount. */ if (newcons == NIL) return; @@ -5655,8 +5664,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, /* * Tell Phase 3 to check that the constraint is satisfied by existing rows - * We can skip this during table creation or if requested explicitly - * by specifying NOT VALID on an alter table statement. + * We can skip this during table creation or if requested explicitly by + * specifying NOT VALID on an alter table statement. */ if (!fkconstraint->skip_validation) { @@ -5718,8 +5727,8 @@ ATExecValidateConstraint(Relation rel, const char *constrName) if (!found) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("foreign key constraint \"%s\" of relation \"%s\" does not exist", - constrName, RelationGetRelationName(rel)))); + errmsg("foreign key constraint \"%s\" of relation \"%s\" does not exist", + constrName, RelationGetRelationName(rel)))); if (!con->convalidated) { @@ -5729,17 +5738,16 @@ ATExecValidateConstraint(Relation rel, const char *constrName) Relation refrel; /* - * Triggers are already in place on both tables, so a - * concurrent write that alters the result here is not - * possible. Normally we can run a query here to do the - * validation, which would only require AccessShareLock. - * In some cases, it is possible that we might need to - * fire triggers to perform the check, so we take a lock - * at RowShareLock level just in case. + * Triggers are already in place on both tables, so a concurrent write + * that alters the result here is not possible. Normally we can run a + * query here to do the validation, which would only require + * AccessShareLock. In some cases, it is possible that we might need + * to fire triggers to perform the check, so we take a lock at + * RowShareLock level just in case. */ refrel = heap_open(con->confrelid, RowShareLock); - validateForeignKeyConstraint((char *)constrName, rel, refrel, + validateForeignKeyConstraint((char *) constrName, rel, refrel, con->conindid, conid); @@ -6571,12 +6579,12 @@ ATPrepAlterColumnType(List **wqueue, if (tab->relkind == RELKIND_RELATION) { /* - * Set up an expression to transform the old data value to the new type. - * If a USING option was given, transform and use that expression, else - * just take the old value and try to coerce it. We do this first so that - * type incompatibility can be detected before we waste effort, and - * because we need the expression to be parsed against the original table - * rowtype. 
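Staying in ATPrepAlterColumnType(): when no USING expression is supplied, the hunks below fall back to a bare Var of the old column, so "transform" uniformly means "expression producing the new value". A sketch of that fallback, assuming the 9.1 makeVar() signature with its newly added collation argument:

```c
#include "postgres.h"
#include "nodes/makefuncs.h"

/* Invented wrapper around the makeVar() call visible in the hunk below. */
static Node *
default_transform_sketch(AttrNumber attnum, Oid atttypid,
						 int32 atttypmod, Oid attcollation)
{
	/* varno 1: the table being altered is the only range-table entry */
	return (Node *) makeVar(1, attnum, atttypid, atttypmod, attcollation, 0);
}
```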
+ * Set up an expression to transform the old data value to the new + * type. If a USING option was given, transform and use that + * expression, else just take the old value and try to coerce it. We + * do this first so that type incompatibility can be detected before + * we waste effort, and because we need the expression to be parsed + * against the original table rowtype. */ if (transform) { @@ -6596,13 +6604,13 @@ ATPrepAlterColumnType(List **wqueue, if (expression_returns_set(transform)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("transform expression must not return a set"))); + errmsg("transform expression must not return a set"))); /* No subplans or aggregates, either... */ if (pstate->p_hasSubLinks) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in transform expression"))); + errmsg("cannot use subquery in transform expression"))); if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), @@ -6615,7 +6623,7 @@ ATPrepAlterColumnType(List **wqueue, else { transform = (Node *) makeVar(1, attnum, - attTup->atttypid, attTup->atttypmod, attTup->attcollation, + attTup->atttypid, attTup->atttypmod, attTup->attcollation, 0); } @@ -6649,14 +6657,14 @@ ATPrepAlterColumnType(List **wqueue, else if (transform) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("ALTER TYPE USING is only supported on plain tables"))); + errmsg("ALTER TYPE USING is only supported on plain tables"))); if (tab->relkind == RELKIND_COMPOSITE_TYPE || tab->relkind == RELKIND_FOREIGN_TABLE) { /* - * For composite types, do this check now. Tables will check - * it later when the table is being rewritten. + * For composite types, do this check now. Tables will check it later + * when the table is being rewritten. */ find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL); } @@ -6699,7 +6707,7 @@ ATColumnChangeRequiresRewrite(Node *expr, AttrNumber varattno) for (;;) { /* only one varno, so no need to check that */ - if (IsA(expr, Var) && ((Var *) expr)->varattno == varattno) + if (IsA(expr, Var) &&((Var *) expr)->varattno == varattno) return false; else if (IsA(expr, RelabelType)) expr = (Node *) ((RelabelType *) expr)->arg; @@ -6924,13 +6932,14 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, break; case OCLASS_TRIGGER: + /* * A trigger can depend on a column because the column is * specified as an update target, or because the column is * used in the trigger's WHEN condition. The first case would * not require any extra work, but the second case would * require updating the WHEN expression, which will take a - * significant amount of new code. Since we can't easily tell + * significant amount of new code. Since we can't easily tell * which case applies, we punt for both. FIXME someday. 
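Note the `IsA(expr, Var) &&((Var *) expr)` hunk above: pgindent closes up the space after `&&` when the next token is a parenthesized typedef name, apparently mistaking the cast for a declaration; a long-standing wart of the tool, not an intended style. The function itself decides whether ALTER COLUMN TYPE can skip the table rewrite. Restated below, simplified to the two branches visible in the hunk (the real function also peels other no-op wrappers):

```c
#include "postgres.h"
#include "nodes/primnodes.h"

static bool
change_requires_rewrite_sketch(Node *expr, AttrNumber varattno)
{
	for (;;)
	{
		if (IsA(expr, Var) && ((Var *) expr)->varattno == varattno)
			return false;	/* old value used unchanged: no rewrite */
		else if (IsA(expr, RelabelType))
			expr = (Node *) ((RelabelType *) expr)->arg;	/* peel no-op cast */
		else
			return true;	/* anything else may change the stored bits */
	}
}
```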
*/ ereport(ERROR, @@ -7940,7 +7949,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, */ static void ATExecEnableDisableTrigger(Relation rel, char *trigname, - char fires_when, bool skip_system, LOCKMODE lockmode) + char fires_when, bool skip_system, LOCKMODE lockmode) { EnableDisableTrigger(rel, trigname, fires_when, skip_system); } @@ -8558,18 +8567,18 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode) static void ATExecGenericOptions(Relation rel, List *options) { - Relation ftrel; - ForeignServer *server; + Relation ftrel; + ForeignServer *server; ForeignDataWrapper *fdw; - HeapTuple tuple; - bool isnull; - Datum repl_val[Natts_pg_foreign_table]; - bool repl_null[Natts_pg_foreign_table]; - bool repl_repl[Natts_pg_foreign_table]; - Datum datum; - Form_pg_foreign_table tableform; - - if (options == NIL) + HeapTuple tuple; + bool isnull; + Datum repl_val[Natts_pg_foreign_table]; + bool repl_null[Natts_pg_foreign_table]; + bool repl_repl[Natts_pg_foreign_table]; + Datum datum; + Form_pg_foreign_table tableform; + + if (options == NIL) return; ftrel = heap_open(ForeignTableRelationId, RowExclusiveLock); @@ -8579,7 +8588,7 @@ ATExecGenericOptions(Relation rel, List *options) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("foreign table \"%s\" does not exist", - RelationGetRelationName(rel)))); + RelationGetRelationName(rel)))); tableform = (Form_pg_foreign_table) GETSTRUCT(tuple); server = GetForeignServer(tableform->ftserver); fdw = GetForeignDataWrapper(server->fdwid); @@ -8718,8 +8727,8 @@ AlterTableNamespace(RangeVar *relation, const char *newschema, default: ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, view, sequence, or foreign table", - RelationGetRelationName(rel)))); + errmsg("\"%s\" is not a table, view, sequence, or foreign table", + RelationGetRelationName(rel)))); } /* get schema OID and check its permissions */ @@ -8836,7 +8845,7 @@ AlterIndexNamespaces(Relation classRel, Relation rel, */ static void AlterSeqNamespaces(Relation classRel, Relation rel, - Oid oldNspOid, Oid newNspOid, const char *newNspName, LOCKMODE lockmode) + Oid oldNspOid, Oid newNspOid, const char *newNspName, LOCKMODE lockmode) { Relation depRel; SysScanDesc scan; diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 42a704beb1..3024dc4b64 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -559,7 +559,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) (errcode(ERRCODE_UNDEFINED_FILE), errmsg("directory \"%s\" does not exist", location), InRecovery ? errhint("Create this directory for the tablespace before " - "restarting the server."): 0)); + "restarting the server.") : 0)); else ereport(ERROR, (errcode_for_file_access(), @@ -573,8 +573,8 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) /* * Our theory for replaying a CREATE is to forcibly drop the target - * subdirectory if present, and then recreate it. This may be - * more work than needed, but it is simple to implement. + * subdirectory if present, and then recreate it. This may be more + * work than needed, but it is simple to implement. 
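ATExecGenericOptions() (re-indented above) updates pg_foreign_table through the usual repl_val / repl_null / repl_repl triple. A hedged sketch of that catalog-update idiom on the 9.1-era API; the helper name is invented, and CatalogUpdateIndexes() is that era's call for keeping catalog indexes in sync:

```c
#include "postgres.h"
#include "access/heapam.h"
#include "catalog/indexing.h"
#include "catalog/pg_foreign_table.h"
#include "utils/rel.h"

static void
update_ftoptions_sketch(Relation ftrel, HeapTuple tuple, Datum new_options)
{
	Datum		repl_val[Natts_pg_foreign_table];
	bool		repl_null[Natts_pg_foreign_table];
	bool		repl_repl[Natts_pg_foreign_table];
	HeapTuple	newtuple;

	memset(repl_val, 0, sizeof(repl_val));
	memset(repl_null, false, sizeof(repl_null));
	memset(repl_repl, false, sizeof(repl_repl));

	/* replace only ftoptions; every other column is carried over */
	repl_repl[Anum_pg_foreign_table_ftoptions - 1] = true;
	repl_val[Anum_pg_foreign_table_ftoptions - 1] = new_options;

	newtuple = heap_modify_tuple(tuple, RelationGetDescr(ftrel),
								 repl_val, repl_null, repl_repl);
	simple_heap_update(ftrel, &newtuple->t_self, newtuple);
	CatalogUpdateIndexes(ftrel, newtuple);
}
```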
*/ if (stat(location_with_version_dir, &st) == 0 && S_ISDIR(st.st_mode)) { @@ -1353,10 +1353,10 @@ get_tablespace_oid(const char *tablespacename, bool missing_ok) heap_close(rel, AccessShareLock); if (!OidIsValid(result) && !missing_ok) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace \"%s\" does not exist", - tablespacename))); + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("tablespace \"%s\" does not exist", + tablespacename))); return result; } diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 329d4d95f1..6b1ade8990 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -144,11 +144,11 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, referenced; /* - * ShareRowExclusiveLock is sufficient to prevent concurrent write activity - * to the relation, and thus to lock out any operations that might want to - * fire triggers on the relation. If we had ON SELECT triggers we would - * need to take an AccessExclusiveLock to add one of those, just as we do - * with ON SELECT rules. + * ShareRowExclusiveLock is sufficient to prevent concurrent write + * activity to the relation, and thus to lock out any operations that + * might want to fire triggers on the relation. If we had ON SELECT + * triggers we would need to take an AccessExclusiveLock to add one of + * those, just as we do with ON SELECT rules. */ rel = heap_openrv(stmt->relation, ShareRowExclusiveLock); @@ -244,7 +244,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, if (stmt->whenClause) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("INSTEAD OF triggers cannot have WHEN conditions"))); + errmsg("INSTEAD OF triggers cannot have WHEN conditions"))); if (stmt->columns != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -480,8 +480,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * can skip this for internally generated triggers, since the name * modification above should be sufficient. * - * NOTE that this is cool only because we have ShareRowExclusiveLock on the - * relation, so the trigger set won't be changing underneath us. + * NOTE that this is cool only because we have ShareRowExclusiveLock on + * the relation, so the trigger set won't be changing underneath us. */ if (!isInternal) { @@ -1036,8 +1036,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior, if (!OidIsValid(object.objectId)) { ereport(NOTICE, - (errmsg("trigger \"%s\" for table \"%s\" does not exist, skipping", - trigname, get_rel_name(relid)))); + (errmsg("trigger \"%s\" for table \"%s\" does not exist, skipping", + trigname, get_rel_name(relid)))); return; } @@ -1083,9 +1083,9 @@ RemoveTriggerById(Oid trigOid) /* * Open and lock the relation the trigger belongs to. As in - * CreateTrigger, this is sufficient to lock out all operations that - * could fire or add triggers; but it would need to be revisited if - * we had ON SELECT triggers. + * CreateTrigger, this is sufficient to lock out all operations that could + * fire or add triggers; but it would need to be revisited if we had ON + * SELECT triggers. */ relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid; @@ -1960,7 +1960,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. 
We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2035,7 +2035,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2378,7 +2378,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2461,7 +2461,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2891,7 +2891,7 @@ typedef struct AfterTriggerEventDataOneCtid { TriggerFlags ate_flags; /* status bits and offset to shared data */ ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */ -} AfterTriggerEventDataOneCtid; +} AfterTriggerEventDataOneCtid; #define SizeofTriggerEvent(evt) \ (((evt)->ate_flags & AFTER_TRIGGER_2CTIDS) ? \ diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c index 81f129dff6..80a30e180d 100644 --- a/src/backend/commands/tsearchcmds.c +++ b/src/backend/commands/tsearchcmds.c @@ -407,7 +407,8 @@ RenameTSParser(List *oldname, const char *newname) void AlterTSParserNamespace(List *name, const char *newschema) { - Oid prsId, nspOid; + Oid prsId, + nspOid; Relation rel; rel = heap_open(TSParserRelationId, RowExclusiveLock); @@ -429,7 +430,7 @@ AlterTSParserNamespace(List *name, const char *newschema) Oid AlterTSParserNamespace_oid(Oid prsId, Oid newNspOid) { - Oid oldNspOid; + Oid oldNspOid; Relation rel; rel = heap_open(TSParserRelationId, RowExclusiveLock); @@ -685,7 +686,8 @@ RenameTSDictionary(List *oldname, const char *newname) void AlterTSDictionaryNamespace(List *name, const char *newschema) { - Oid dictId, nspOid; + Oid dictId, + nspOid; Relation rel; rel = heap_open(TSDictionaryRelationId, RowExclusiveLock); @@ -708,7 +710,7 @@ AlterTSDictionaryNamespace(List *name, const char *newschema) Oid AlterTSDictionaryNamespace_oid(Oid dictId, Oid newNspOid) { - Oid oldNspOid; + Oid oldNspOid; Relation rel; rel = heap_open(TSDictionaryRelationId, RowExclusiveLock); @@ -1218,7 +1220,8 @@ RenameTSTemplate(List *oldname, const char *newname) void AlterTSTemplateNamespace(List *name, const char *newschema) { - Oid tmplId, nspOid; + Oid tmplId, + nspOid; Relation rel; rel = heap_open(TSTemplateRelationId, RowExclusiveLock); @@ -1240,7 +1243,7 @@ AlterTSTemplateNamespace(List *name, const char *newschema) Oid AlterTSTemplateNamespace_oid(Oid tmplId, Oid newNspOid) { - Oid oldNspOid; + Oid oldNspOid; Relation rel; rel = heap_open(TSTemplateRelationId, RowExclusiveLock); @@ -1668,7 +1671,8 @@ RenameTSConfiguration(List *oldname, const char *newname) void AlterTSConfigurationNamespace(List *name, 
const char *newschema) { - Oid cfgId, nspOid; + Oid cfgId, + nspOid; Relation rel; rel = heap_open(TSConfigRelationId, RowExclusiveLock); @@ -1691,7 +1695,7 @@ AlterTSConfigurationNamespace(List *name, const char *newschema) Oid AlterTSConfigurationNamespace_oid(Oid cfgId, Oid newNspOid) { - Oid oldNspOid; + Oid oldNspOid; Relation rel; rel = heap_open(TSConfigRelationId, RowExclusiveLock); diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 4c06d898a8..1a20b0d91b 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -138,7 +138,7 @@ DefineType(List *names, List *parameters) DefElem *byValueEl = NULL; DefElem *alignmentEl = NULL; DefElem *storageEl = NULL; - DefElem *collatableEl = NULL; + DefElem *collatableEl = NULL; Oid inputOid; Oid outputOid; Oid receiveOid = InvalidOid; @@ -537,7 +537,7 @@ DefineType(List *names, List *parameters) * now have TypeCreate do all the real work. * * Note: the pg_type.oid is stored in user tables as array elements (base - * types) in ArrayType and in composite types in DatumTupleFields. This + * types) in ArrayType and in composite types in DatumTupleFields. This * oid must be preserved by binary upgrades. */ typoid = @@ -1179,7 +1179,7 @@ DefineEnum(CreateEnumStmt *stmt) -1, /* typMod (Domains only) */ 0, /* Array dimensions of typbasetype */ false, /* Type NOT NULL */ - InvalidOid); /* typcollation */ + InvalidOid); /* typcollation */ /* Enter the enum's values into pg_enum */ EnumValuesCreate(enumTypeOid, stmt->vals); @@ -2416,7 +2416,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, CONSTRAINT_CHECK, /* Constraint Type */ false, /* Is Deferrable */ false, /* Is Deferred */ - true, /* Is Validated */ + true, /* Is Validated */ InvalidOid, /* not a relation constraint */ NULL, 0, diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index f13eb2891e..9c9164d3bc 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -84,7 +84,7 @@ CreateRole(CreateRoleStmt *stmt) bool createrole = false; /* Can this user create roles? */ bool createdb = false; /* Can the user create databases? */ bool canlogin = false; /* Can this user login? */ - bool isreplication = false; /* Is this a replication role? */ + bool isreplication = false; /* Is this a replication role? 
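CreateRole()'s REPLICATION attribute, new in 9.1 and visible above, is parsed with the standard DefElem scan: each option is matched by defname and rejected if given twice. A sketch of the loop shape; "isreplication" is believed to be the defname the 9.1 grammar emits for REPLICATION, and the helper itself is hypothetical:

```c
#include "postgres.h"
#include "nodes/parsenodes.h"

static void
scan_role_options_sketch(CreateRoleStmt *stmt)
{
	DefElem    *disreplication = NULL;
	ListCell   *option;

	foreach(option, stmt->options)
	{
		DefElem    *defel = (DefElem *) lfirst(option);

		if (strcmp(defel->defname, "isreplication") == 0)
		{
			if (disreplication)
				ereport(ERROR,
						(errcode(ERRCODE_SYNTAX_ERROR),
						 errmsg("conflicting or redundant options")));
			disreplication = defel;
		}
		/* ... every remaining option gets the same duplicate check ... */
	}
}
```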
*/ int connlimit = -1; /* maximum connections allowed */ List *addroleto = NIL; /* roles to make this a member of */ List *rolemembers = NIL; /* roles to be members of this role */ @@ -98,7 +98,7 @@ CreateRole(CreateRoleStmt *stmt) DefElem *dcreaterole = NULL; DefElem *dcreatedb = NULL; DefElem *dcanlogin = NULL; - DefElem *disreplication = NULL; + DefElem *disreplication = NULL; DefElem *dconnlimit = NULL; DefElem *daddroleto = NULL; DefElem *drolemembers = NULL; @@ -240,9 +240,10 @@ CreateRole(CreateRoleStmt *stmt) if (dissuper) { issuper = intVal(dissuper->arg) != 0; + /* - * Superusers get replication by default, but only if - * NOREPLICATION wasn't explicitly mentioned + * Superusers get replication by default, but only if NOREPLICATION + * wasn't explicitly mentioned */ if (!(disreplication && intVal(disreplication->arg) == 0)) isreplication = 1; @@ -287,7 +288,7 @@ CreateRole(CreateRoleStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to create replication users"))); + errmsg("must be superuser to create replication users"))); } else { @@ -384,8 +385,8 @@ CreateRole(CreateRoleStmt *stmt) tuple = heap_form_tuple(pg_authid_dsc, new_record, new_record_nulls); /* - * pg_largeobject_metadata contains pg_authid.oid's, so we - * use the binary-upgrade override, if specified. + * pg_largeobject_metadata contains pg_authid.oid's, so we use the + * binary-upgrade override, if specified. */ if (OidIsValid(binary_upgrade_next_pg_authid_oid)) { @@ -467,7 +468,7 @@ AlterRole(AlterRoleStmt *stmt) int createrole = -1; /* Can this user create roles? */ int createdb = -1; /* Can the user create databases? */ int canlogin = -1; /* Can this user login? */ - int isreplication = -1; /* Is this a replication role? */ + int isreplication = -1; /* Is this a replication role? */ int connlimit = -1; /* maximum connections allowed */ List *rolemembers = NIL; /* roles to be added/removed */ char *validUntil = NULL; /* time the login is valid until */ @@ -479,7 +480,7 @@ AlterRole(AlterRoleStmt *stmt) DefElem *dcreaterole = NULL; DefElem *dcreatedb = NULL; DefElem *dcanlogin = NULL; - DefElem *disreplication = NULL; + DefElem *disreplication = NULL; DefElem *dconnlimit = NULL; DefElem *drolemembers = NULL; DefElem *dvalidUntil = NULL; diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 1651aa94dc..90c413a988 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -527,7 +527,7 @@ vac_update_relstats(Relation relation, /* * If we have discovered that there are no indexes, then there's no - * primary key either. This could be done more thoroughly... + * primary key either. This could be done more thoroughly... */ if (pgcform->relhaspkey && !hasindex) { @@ -839,8 +839,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound, * There's a race condition here: the rel may have gone away since the * last time we saw it. If so, we don't need to vacuum it. * - * If we've been asked not to wait for the relation lock, acquire it - * first in non-blocking mode, before calling try_relation_open(). + * If we've been asked not to wait for the relation lock, acquire it first + * in non-blocking mode, before calling try_relation_open(). 
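The vacuum_rel() comment re-wrapped above describes the NOWAIT protocol: take the heavyweight lock conditionally first, and only then open the relation, with NoLock, since the lock is already held. Sketched against the 9.1 lmgr API:

```c
#include "postgres.h"
#include "access/heapam.h"
#include "nodes/parsenodes.h"
#include "storage/lmgr.h"

static Relation
open_for_vacuum_sketch(Oid relid, VacuumStmt *vacstmt, LOCKMODE lmode)
{
	Relation	onerel;

	if (!(vacstmt->options & VACOPT_NOWAIT))
		onerel = try_relation_open(relid, lmode);	/* may block */
	else if (ConditionalLockRelationOid(relid, lmode))
		onerel = try_relation_open(relid, NoLock);	/* lock already held */
	else
		onerel = NULL;								/* would block: skip rel */

	return onerel;
}
```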
*/ if (!(vacstmt->options & VACOPT_NOWAIT)) onerel = try_relation_open(relid, lmode); @@ -852,8 +852,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound, if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0) ereport(LOG, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), - errmsg("skipping vacuum of \"%s\" --- lock not available", - vacstmt->relation->relname))); + errmsg("skipping vacuum of \"%s\" --- lock not available", + vacstmt->relation->relname))); } if (!onerel) diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index a5c024cc19..9393fa0727 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -705,15 +705,16 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, PageSetAllVisible(page); SetBufferCommitInfoNeedsSave(buf); } + /* * It's possible for the value returned by GetOldestXmin() to move * backwards, so it's not wrong for us to see tuples that appear to * not be visible to everyone yet, while PD_ALL_VISIBLE is already * set. The real safe xmin value never moves backwards, but * GetOldestXmin() is conservative and sometimes returns a value - * that's unnecessarily small, so if we see that contradiction it - * just means that the tuples that we think are not visible to - * everyone yet actually are, and the PD_ALL_VISIBLE flag is correct. + * that's unnecessarily small, so if we see that contradiction it just + * means that the tuples that we think are not visible to everyone yet + * actually are, and the PD_ALL_VISIBLE flag is correct. * * There should never be dead tuples on a page with PD_ALL_VISIBLE * set, however. diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 2cec713089..5d0fbdfb40 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -132,8 +132,8 @@ check_datestyle(char **newval, void **extra, GucSource source) * We can't simply "return check_datestyle(...)" because we need * to handle constructs like "DEFAULT, ISO". */ - char *subval; - void *subextra = NULL; + char *subval; + void *subextra = NULL; subval = strdup(GetConfigOptionResetString("datestyle")); if (!subval) @@ -262,9 +262,9 @@ check_timezone(char **newval, void **extra, GucSource source) { /* * The boot_val given for TimeZone in guc.c is NULL. When we see this - * we just do nothing. If this isn't overridden from the config file + * we just do nothing. If this isn't overridden from the config file * then pg_timezone_initialize() will eventually select a default - * value from the environment. This hack has two purposes: to avoid + * value from the environment. This hack has two purposes: to avoid * wasting cycles loading values that might soon be overridden from * the config file, and to avoid trying to read the timezone files * during InitializeGUCOptions(). The latter doesn't work in an @@ -289,7 +289,7 @@ check_timezone(char **newval, void **extra, GucSource source) if (pg_strncasecmp(*newval, "interval", 8) == 0) { /* - * Support INTERVAL 'foo'. This is for SQL spec compliance, not + * Support INTERVAL 'foo'. This is for SQL spec compliance, not * because it has any actual real-world usefulness. */ const char *valueptr = *newval; @@ -391,13 +391,13 @@ check_timezone(char **newval, void **extra, GucSource source) * * Note: the result string should be something that we'd accept as input. * We use the numeric format for interval cases, because it's simpler to - * reload. 
In the named-timezone case, *newval is already OK and need not + * reload. In the named-timezone case, *newval is already OK and need not * be changed; it might not have the canonical casing, but that's taken * care of by show_timezone. */ if (myextra.HasCTZSet) { - char *result = (char *) malloc(64); + char *result = (char *) malloc(64); if (!result) return false; @@ -567,7 +567,7 @@ show_log_timezone(void) * We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and * we also always allow changes from read-write to read-only. However, * read-only may be changed to read-write only when in a top-level transaction - * that has not yet taken an initial snapshot. Can't do it in a hot standby + * that has not yet taken an initial snapshot. Can't do it in a hot standby * slave, either. */ bool @@ -719,7 +719,7 @@ check_transaction_deferrable(bool *newval, void **extra, GucSource source) * * We can't roll back the random sequence on error, and we don't want * config file reloads to affect it, so we only want interactive SET SEED - * commands to set it. We use the "extra" storage to ensure that rollbacks + * commands to set it. We use the "extra" storage to ensure that rollbacks * don't try to do the operation again. */ @@ -851,8 +851,8 @@ check_session_authorization(char **newval, void **extra, GucSource source) { /* * Can't do catalog lookups, so fail. The result of this is that - * session_authorization cannot be set in postgresql.conf, which - * seems like a good thing anyway, so we don't work hard to avoid it. + * session_authorization cannot be set in postgresql.conf, which seems + * like a good thing anyway, so we don't work hard to avoid it. */ return false; } @@ -977,7 +977,7 @@ const char * show_role(void) { /* - * Check whether SET ROLE is active; if not return "none". This is a + * Check whether SET ROLE is active; if not return "none". This is a * kluge to deal with the fact that SET SESSION AUTHORIZATION logically * resets SET ROLE to NONE, but we cannot set the GUC role variable from * assign_session_authorization (because we haven't got enough info to diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index 508fb23c9a..be681e3fd4 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -120,7 +120,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace) def->colname = pstrdup(tle->resname); def->typeName = makeTypeNameFromOid(exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr)); + exprTypmod((Node *) tle->expr)); def->inhcount = 0; def->is_local = true; def->is_not_null = false; @@ -130,6 +130,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace) def->cooked_default = NULL; def->collClause = NULL; def->collOid = exprCollation((Node *) tle->expr); + /* * It's possible that the column is of a collatable type but the * collation could not be resolved, so double-check. @@ -240,7 +241,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace) } else { - Oid relid; + Oid relid; /* * now set the parameters for keys/inheritance etc. All of these are @@ -437,8 +438,8 @@ DefineView(ViewStmt *stmt, const char *queryString) /* * Check for unsupported cases. These tests are redundant with ones in - * DefineQueryRewrite(), but that function will complain about a bogus - * ON SELECT rule, and we'd rather the message complain about a view. 
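variable.c is largely the 9.1-style GUC hook machinery being re-wrapped here (check_datestyle, check_timezone, check_session_authorization, ...). The check-hook contract: validate *newval, optionally leave a malloc'd parsed form in *extra for the assign hook, and return false to reject the setting. A minimal, entirely hypothetical hook showing the shape:

```c
#include "postgres.h"
#include "utils/guc.h"

static bool
check_my_setting_sketch(char **newval, void **extra, GucSource source)
{
	int		   *parsed = (int *) malloc(sizeof(int));

	if (!parsed)
		return false;				/* out of memory: reject */
	if (sscanf(*newval, "%d", parsed) != 1)
	{
		free(parsed);
		return false;				/* not a number: reject */
	}
	*extra = parsed;				/* the assign hook picks this up */
	return true;
}
```

The extra blob is malloc'd rather than palloc'd because GUC keeps and frees it outside any memory context.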
+ * DefineQueryRewrite(), but that function will complain about a bogus ON + * SELECT rule, and we'd rather the message complain about a view. */ if (viewParse->intoClause != NULL) ereport(ERROR, @@ -447,7 +448,7 @@ DefineView(ViewStmt *stmt, const char *queryString) if (viewParse->hasModifyingCTE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("views must not contain data-modifying statements in WITH"))); + errmsg("views must not contain data-modifying statements in WITH"))); /* * If a list of column names was given, run through and insert these into @@ -500,7 +501,7 @@ DefineView(ViewStmt *stmt, const char *queryString) if (view->relpersistence == RELPERSISTENCE_UNLOGGED) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("views cannot be unlogged because they do not have storage"))); + errmsg("views cannot be unlogged because they do not have storage"))); /* * Create the view relation diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index caa9faea87..86ec987019 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -19,7 +19,7 @@ * ExecutorRun accepts direction and count arguments that specify whether * the plan is to be executed forwards, backwards, and for how many tuples. * In some cases ExecutorRun may be called multiple times to process all - * the tuples for a plan. It is also acceptable to stop short of executing + * the tuples for a plan. It is also acceptable to stop short of executing * the whole plan (but only if it is a SELECT). * * ExecutorFinish must be called after the final ExecutorRun call and @@ -168,6 +168,7 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags) switch (queryDesc->operation) { case CMD_SELECT: + /* * SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to * mark tuples @@ -332,12 +333,12 @@ standard_ExecutorRun(QueryDesc *queryDesc, * ExecutorFinish * * This routine must be called after the last ExecutorRun call. - * It performs cleanup such as firing AFTER triggers. It is + * It performs cleanup such as firing AFTER triggers. It is * separate from ExecutorEnd because EXPLAIN ANALYZE needs to * include these actions in the total runtime. * * We provide a function hook variable that lets loadable plugins - * get control when ExecutorFinish is called. Such a plugin would + * get control when ExecutorFinish is called. Such a plugin would * normally call standard_ExecutorFinish(). * * ---------------------------------------------------------------- @@ -425,9 +426,9 @@ standard_ExecutorEnd(QueryDesc *queryDesc) Assert(estate != NULL); /* - * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. - * This Assert is needed because ExecutorFinish is new as of 9.1, and - * callers might forget to call it. + * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This + * Assert is needed because ExecutorFinish is new as of 9.1, and callers + * might forget to call it. 
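The execMain.c hunks above re-wrap the commentary around ExecutorFinish(), which 9.1 inserts between the last ExecutorRun() call and ExecutorEnd() so that AFTER-trigger time lands inside the window EXPLAIN ANALYZE measures. The caller-side sequence, sketched with the 9.1 signatures:

```c
#include "postgres.h"
#include "executor/executor.h"

static void
run_query_desc_sketch(QueryDesc *queryDesc)
{
	ExecutorStart(queryDesc, 0);
	ExecutorRun(queryDesc, ForwardScanDirection, 0L);	/* 0 = no row limit */
	ExecutorFinish(queryDesc);	/* new in 9.1: AFTER triggers fire here */
	ExecutorEnd(queryDesc);
}
```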
*/ Assert(estate->es_finished || (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY)); @@ -519,7 +520,7 @@ ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) foreach(l, rangeTable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); + RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); result = ExecCheckRTEPerms(rte); if (!result) @@ -533,8 +534,8 @@ ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) } if (ExecutorCheckPerms_hook) - result = (*ExecutorCheckPerms_hook)(rangeTable, - ereport_on_violation); + result = (*ExecutorCheckPerms_hook) (rangeTable, + ereport_on_violation); return result; } @@ -980,7 +981,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) void CheckValidResultRel(Relation resultRel, CmdType operation) { - TriggerDesc *trigDesc = resultRel->trigdesc; + TriggerDesc *trigDesc = resultRel->trigdesc; switch (resultRel->rd_rel->relkind) { @@ -1005,26 +1006,26 @@ CheckValidResultRel(Relation resultRel, CmdType operation) case CMD_INSERT: if (!trigDesc || !trigDesc->trig_insert_instead_row) ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot insert into view \"%s\"", - RelationGetRelationName(resultRel)), - errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger."))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot insert into view \"%s\"", + RelationGetRelationName(resultRel)), + errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger."))); break; case CMD_UPDATE: if (!trigDesc || !trigDesc->trig_update_instead_row) ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot update view \"%s\"", - RelationGetRelationName(resultRel)), - errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger."))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot update view \"%s\"", + RelationGetRelationName(resultRel)), + errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger."))); break; case CMD_DELETE: if (!trigDesc || !trigDesc->trig_delete_instead_row) ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot delete from view \"%s\"", - RelationGetRelationName(resultRel)), - errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger."))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot delete from view \"%s\"", + RelationGetRelationName(resultRel)), + errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger."))); break; default: elog(ERROR, "unrecognized CmdType: %d", (int) operation); @@ -1137,8 +1138,8 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) /* * Open the target relation's relcache entry. We assume that an * appropriate lock is still held by the backend from whenever the trigger - * event got queued, so we need take no new lock here. Also, we need - * not recheck the relkind, so no need for CheckValidResultRel. + * event got queued, so we need take no new lock here. Also, we need not + * recheck the relkind, so no need for CheckValidResultRel. */ rel = heap_open(relid, NoLock); @@ -1238,12 +1239,12 @@ ExecPostprocessPlan(EState *estate) /* * Run any secondary ModifyTable nodes to completion, in case the main - * query did not fetch all rows from them. (We do this to ensure that + * query did not fetch all rows from them. (We do this to ensure that * such nodes have predictable results.) 
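CheckValidResultRel() above encodes 9.1's rule for DML on views: each command is legal only when a matching INSTEAD OF row trigger exists, otherwise the ereport()s shown fire with their DO INSTEAD hints. Restated as a hypothetical predicate over the relcache's TriggerDesc:

```c
#include "postgres.h"
#include "nodes/nodes.h"
#include "utils/rel.h"

static bool
view_dml_allowed_sketch(TriggerDesc *trigDesc, CmdType operation)
{
	if (trigDesc == NULL)
		return false;			/* no triggers at all */
	switch (operation)
	{
		case CMD_INSERT:
			return trigDesc->trig_insert_instead_row;
		case CMD_UPDATE:
			return trigDesc->trig_update_instead_row;
		case CMD_DELETE:
			return trigDesc->trig_delete_instead_row;
		default:
			return false;
	}
}
```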
*/ foreach(lc, estate->es_auxmodifytables) { - PlanState *ps = (PlanState *) lfirst(lc); + PlanState *ps = (PlanState *) lfirst(lc); for (;;) { @@ -2220,9 +2221,9 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) * ExecInitSubPlan expects to be able to find these entries. Some of the * SubPlans might not be used in the part of the plan tree we intend to * run, but since it's not easy to tell which, we just initialize them - * all. (However, if the subplan is headed by a ModifyTable node, then - * it must be a data-modifying CTE, which we will certainly not need to - * re-run, so we can skip initializing it. This is just an efficiency + * all. (However, if the subplan is headed by a ModifyTable node, then it + * must be a data-modifying CTE, which we will certainly not need to + * re-run, so we can skip initializing it. This is just an efficiency * hack; it won't skip data-modifying CTEs for which the ModifyTable node * is not at the top.) */ diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index c153ca00db..5f0b58f43b 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -79,9 +79,9 @@ static Datum ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext, static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalParamExec(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalParamExtern(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static void init_fcache(Oid foid, Oid input_collation, FuncExprState *fcache, MemoryContext fcacheCxt, bool needDescForSets); static void ShutdownFuncExpr(Datum arg); @@ -1043,7 +1043,7 @@ ExecEvalParamExtern(ExprState *exprstate, ExprContext *econtext, ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("no value found for parameter %d", thisParamId))); - return (Datum) 0; /* keep compiler quiet */ + return (Datum) 0; /* keep compiler quiet */ } diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 7e84ccdd9c..0cbbe04d3b 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -1319,9 +1319,9 @@ retry: /* * Ordinarily, at this point the search should have found the originally * inserted tuple, unless we exited the loop early because of conflict. - * However, it is possible to define exclusion constraints for which - * that wouldn't be true --- for instance, if the operator is <>. - * So we no longer complain if found_self is still false. + * However, it is possible to define exclusion constraints for which that + * wouldn't be true --- for instance, if the operator is <>. So we no + * longer complain if found_self is still false. 
*/ econtext->ecxt_scantuple = save_scantuple; diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 70d126c521..9c867bbae2 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -81,7 +81,7 @@ typedef struct char *fname; /* function name (for error msgs) */ char *src; /* function body text (for error msgs) */ - SQLFunctionParseInfoPtr pinfo; /* data for parser callback hooks */ + SQLFunctionParseInfoPtr pinfo; /* data for parser callback hooks */ Oid rettype; /* actual return type */ int16 typlen; /* length of the return type */ @@ -119,7 +119,7 @@ typedef struct SQLFunctionParseInfo Oid *argtypes; /* resolved types of input arguments */ int nargs; /* number of input arguments */ Oid collation; /* function's input collation, if known */ -} SQLFunctionParseInfo; +} SQLFunctionParseInfo; /* non-export function prototypes */ @@ -255,7 +255,7 @@ sql_fn_param_ref(ParseState *pstate, ParamRef *pref) * Set up the per-query execution_state records for a SQL function. * * The input is a List of Lists of parsed and rewritten, but not planned, - * querytrees. The sublist structure denotes the original query boundaries. + * querytrees. The sublist structure denotes the original query boundaries. */ static List * init_execution_state(List *queryTree_list, @@ -299,8 +299,8 @@ init_execution_state(List *queryTree_list, ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), /* translator: %s is a SQL statement name */ - errmsg("%s is not allowed in a non-volatile function", - CreateCommandTag(stmt)))); + errmsg("%s is not allowed in a non-volatile function", + CreateCommandTag(stmt)))); /* OK, build the execution_state for this query */ newes = (execution_state *) palloc(sizeof(execution_state)); @@ -311,8 +311,8 @@ init_execution_state(List *queryTree_list, newes->next = NULL; newes->status = F_EXEC_START; - newes->setsResult = false; /* might change below */ - newes->lazyEval = false; /* might change below */ + newes->setsResult = false; /* might change below */ + newes->lazyEval = false; /* might change below */ newes->stmt = stmt; newes->qd = NULL; @@ -442,7 +442,7 @@ init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK) fcache->src = TextDatumGetCString(tmp); /* - * Parse and rewrite the queries in the function text. Use sublists to + * Parse and rewrite the queries in the function text. Use sublists to * keep track of the original query boundaries. But we also build a * "flat" list of the rewritten queries to pass to check_sql_fn_retval. 
* This is because the last canSetTag query determines the result type @@ -462,7 +462,7 @@ init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK) queryTree_sublist = pg_analyze_and_rewrite_params(parsetree, fcache->src, - (ParserSetupHook) sql_fn_parser_setup, + (ParserSetupHook) sql_fn_parser_setup, fcache->pinfo); queryTree_list = lappend(queryTree_list, queryTree_sublist); flat_query_list = list_concat(flat_query_list, @@ -657,7 +657,7 @@ postquel_sub_params(SQLFunctionCachePtr fcache, { /* sizeof(ParamListInfoData) includes the first array element */ paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) + - (nargs - 1) *sizeof(ParamExternData)); + (nargs - 1) * sizeof(ParamExternData)); /* we have static list of params, so no hooks needed */ paramLI->paramFetch = NULL; paramLI->paramFetchArg = NULL; @@ -748,8 +748,8 @@ fmgr_sql(PG_FUNCTION_ARGS) execution_state *es; TupleTableSlot *slot; Datum result; - List *eslist; - ListCell *eslc; + List *eslist; + ListCell *eslc; /* * Switch to context in which the fcache lives. This ensures that @@ -847,10 +847,10 @@ fmgr_sql(PG_FUNCTION_ARGS) * * In a non-read-only function, we rely on the fact that we'll never * suspend execution between queries of the function: the only reason to - * suspend execution before completion is if we are returning a row from - * a lazily-evaluated SELECT. So, when first entering this loop, we'll + * suspend execution before completion is if we are returning a row from a + * lazily-evaluated SELECT. So, when first entering this loop, we'll * either start a new query (and push a fresh snapshot) or re-establish - * the active snapshot from the existing query descriptor. If we need to + * the active snapshot from the existing query descriptor. If we need to * start a new query in a subsequent execution of the loop, either we need * a fresh snapshot (and pushed_snapshot is false) or the existing * snapshot is on the active stack and we can just bump its command ID. @@ -927,10 +927,10 @@ fmgr_sql(PG_FUNCTION_ARGS) es = (execution_state *) lfirst(eslc); /* - * Flush the current snapshot so that we will take a new one - * for the new query list. This ensures that new snaps are - * taken at original-query boundaries, matching the behavior - * of interactive execution. + * Flush the current snapshot so that we will take a new one for + * the new query list. This ensures that new snaps are taken at + * original-query boundaries, matching the behavior of interactive + * execution. */ if (pushed_snapshot) { @@ -1183,7 +1183,7 @@ ShutdownSQLFunction(Datum arg) { SQLFunctionCachePtr fcache = (SQLFunctionCachePtr) DatumGetPointer(arg); execution_state *es; - ListCell *lc; + ListCell *lc; foreach(lc, fcache->func_state) { @@ -1415,7 +1415,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, * the function that's calling it. * * XXX Note that if rettype is RECORD, the IsBinaryCoercible check - * will succeed for any composite restype. For the moment we rely on + * will succeed for any composite restype. For the moment we rely on * runtime type checking to catch any discrepancy, but it'd be nice to * do better at parse time. 
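The postquel_sub_params hunk above (and the _SPI_convert_params, copyParamList, and nodeAgg hunks later in this commit) corrects "(nargs - 1) *sizeof(ParamExternData)" to "(nargs - 1) * sizeof(ParamExternData)": the missing space, apparently an artifact of an earlier indent run, made the multiplication read like a pointer dereference. The "- 1" is there because these structs declare a one-element trailing array, so sizeof() already counts the first slot. A standalone sketch of the idiom; the struct and names are invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    int         nparams;
    int         params[1];      /* VARIABLE LENGTH ARRAY */
} IntParamList;                 /* VARIABLE LENGTH STRUCT */

static IntParamList *
make_param_list(int nargs)
{
    /* sizeof(IntParamList) already includes the first array element */
    IntParamList *list = malloc(sizeof(IntParamList) +
                                (nargs - 1) * sizeof(int));

    list->nparams = nargs;
    memset(list->params, 0, nargs * sizeof(int));
    return list;
}

int
main(void)
{
    IntParamList *list = make_param_list(4);
    int         i;

    for (i = 0; i < list->nparams; i++)
        list->params[i] = i * 10;
    for (i = 0; i < list->nparams; i++)
        printf("param %d = %d\n", i, list->params[i]);
    free(list);
    return 0;
}

C99's flexible array members (int params[];) are the modern spelling, but sources of this vintage target C89, hence the one-element-array convention and the "- 1" in every allocation.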
*/ @@ -1432,7 +1432,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, tle->expr = (Expr *) makeRelabelType(tle->expr, rettype, -1, - get_typcollation(rettype), + get_typcollation(rettype), COERCE_DONTCARE); /* Relabel is dangerous if sort/group or setop column */ if (tle->ressortgroupref != 0 || parse->setOperations) @@ -1536,7 +1536,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, tle->expr = (Expr *) makeRelabelType(tle->expr, atttype, -1, - get_typcollation(atttype), + get_typcollation(atttype), COERCE_DONTCARE); /* Relabel is dangerous if sort/group or setop column */ if (tle->ressortgroupref != 0 || parse->setOperations) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 51b1228c26..47555bab55 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -199,7 +199,7 @@ typedef struct AggStatePerAggData */ Tuplesortstate *sortstate; /* sort object, if DISTINCT or ORDER BY */ -} AggStatePerAggData; +} AggStatePerAggData; /* * AggStatePerGroupData - per-aggregate-per-group working state @@ -246,7 +246,7 @@ typedef struct AggHashEntryData TupleHashEntryData shared; /* common header for hash table entries */ /* per-aggregate transition status array - must be last! */ AggStatePerGroupData pergroup[1]; /* VARIABLE LENGTH ARRAY */ -} AggHashEntryData; /* VARIABLE LENGTH STRUCT */ +} AggHashEntryData; /* VARIABLE LENGTH STRUCT */ static void initialize_aggregates(AggState *aggstate, @@ -827,7 +827,7 @@ build_hash_table(AggState *aggstate) Assert(node->numGroups > 0); entrysize = sizeof(AggHashEntryData) + - (aggstate->numaggs - 1) *sizeof(AggStatePerGroupData); + (aggstate->numaggs - 1) * sizeof(AggStatePerGroupData); aggstate->hashtable = BuildTupleHashTable(node->numCols, node->grpColIdx, @@ -899,7 +899,7 @@ hash_agg_entry_size(int numAggs) /* This must match build_hash_table */ entrysize = sizeof(AggHashEntryData) + - (numAggs - 1) *sizeof(AggStatePerGroupData); + (numAggs - 1) * sizeof(AggStatePerGroupData); entrysize = MAXALIGN(entrysize); /* Account for hashtable overhead (assuming fill factor = 1) */ entrysize += 3 * sizeof(void *); diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c index 90ff0403ab..4de54ea55f 100644 --- a/src/backend/executor/nodeBitmapIndexscan.c +++ b/src/backend/executor/nodeBitmapIndexscan.c @@ -307,8 +307,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags) indexstate->biss_NumScanKeys); /* - * If no run-time keys to calculate, go ahead and pass the scankeys to - * the index AM. + * If no run-time keys to calculate, go ahead and pass the scankeys to the + * index AM. 
*/ if (indexstate->biss_NumRuntimeKeys == 0 && indexstate->biss_NumArrayKeys == 0) diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index c4309a981e..d50489c7f4 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -40,7 +40,7 @@ static TupleTableSlot * ForeignNext(ForeignScanState *node) { TupleTableSlot *slot; - ForeignScan *plan = (ForeignScan *) node->ss.ps.plan; + ForeignScan *plan = (ForeignScan *) node->ss.ps.plan; ExprContext *econtext = node->ss.ps.ps_ExprContext; MemoryContext oldcontext; diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 295563011f..1af98c81a6 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -960,13 +960,11 @@ void ExecPrepHashTableForUnmatched(HashJoinState *hjstate) { /* - *---------- - * During this scan we use the HashJoinState fields as follows: + * ---------- During this scan we use the HashJoinState fields as follows: * - * hj_CurBucketNo: next regular bucket to scan - * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums) - * hj_CurTuple: last tuple returned, or NULL to start next bucket - *---------- + * hj_CurBucketNo: next regular bucket to scan hj_CurSkewBucketNo: next + * skew bucket (an index into skewBucketNums) hj_CurTuple: last tuple + * returned, or NULL to start next bucket ---------- */ hjstate->hj_CurBucketNo = 0; hjstate->hj_CurSkewBucketNo = 0; @@ -1003,7 +1001,7 @@ ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext) } else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets) { - int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo]; + int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo]; hashTuple = hashtable->skewBucket[j]->tuples; hjstate->hj_CurSkewBucketNo++; @@ -1020,7 +1018,7 @@ ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext) /* insert hashtable's tuple into exec slot */ inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple), hjstate->hj_HashTupleSlot, - false); /* do not pfree */ + false); /* do not pfree */ econtext->ecxt_innertuple = inntuple; /* @@ -1091,7 +1089,7 @@ ExecHashTableResetMatchFlags(HashJoinTable hashtable) /* ... and the same for the skew buckets, if any */ for (i = 0; i < hashtable->nSkewBuckets; i++) { - int j = hashtable->skewBucketNums[i]; + int j = hashtable->skewBucketNums[i]; HashSkewBucket *skewBucket = hashtable->skewBucket[j]; for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next) diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index a6847c956f..7c02db94ad 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -113,6 +113,7 @@ ExecHashJoin(HashJoinState *node) switch (node->hj_JoinState) { case HJ_BUILD_HASHTABLE: + /* * First time through: build hash table for inner relation. */ @@ -123,12 +124,12 @@ ExecHashJoin(HashJoinState *node) * right/full join, we can quit without building the hash * table. However, for an inner join it is only a win to * check this when the outer relation's startup cost is less - * than the projected cost of building the hash - * table. Otherwise it's best to build the hash table first - * and see if the inner relation is empty. (When it's a left - * join, we should always make this check, since we aren't - * going to be able to skip the join on the strength of an - * empty inner relation anyway.) 
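ExecHashJoin, whose comments are reflowed above and below, is structured as a state machine: one outer for (;;) loop, a switch on hj_JoinState, and deliberate /* FALL THRU */ drops from one phase into the next (pgindent is also inserting a blank line after each case label here). A toy standalone model of that control structure; the phase names and the three-tuple outer relation are made up for the demo:

#include <stdio.h>

typedef enum
{
    HJ_BUILD,
    HJ_NEED_OUTER,
    HJ_SCAN,
    HJ_DONE
} JoinPhase;

int
main(void)
{
    JoinPhase   phase = HJ_BUILD;
    int         outer = 0;

    for (;;)
    {
        switch (phase)
        {
            case HJ_BUILD:
                printf("build inner hash table\n");
                phase = HJ_NEED_OUTER;
                /* FALL THRU */

            case HJ_NEED_OUTER:
                if (outer >= 3)
                {
                    phase = HJ_DONE;
                    break;
                }
                printf("fetch outer tuple %d\n", outer++);
                phase = HJ_SCAN;
                /* FALL THRU */

            case HJ_SCAN:
                printf("probe hash bucket for matches\n");
                phase = HJ_NEED_OUTER;
                break;

            case HJ_DONE:
                return 0;
        }
    }
}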
+ * than the projected cost of building the hash table. + * Otherwise it's best to build the hash table first and see + * if the inner relation is empty. (When it's a left join, we + * should always make this check, since we aren't going to be + * able to skip the join on the strength of an empty inner + * relation anyway.) * * If we are rescanning the join, we make use of information * gained on the previous scan: don't bother to try the @@ -185,8 +186,8 @@ ExecHashJoin(HashJoinState *node) return NULL; /* - * need to remember whether nbatch has increased since we began - * scanning the outer relation + * need to remember whether nbatch has increased since we + * began scanning the outer relation */ hashtable->nbatch_outstart = hashtable->nbatch; @@ -202,6 +203,7 @@ ExecHashJoin(HashJoinState *node) /* FALL THRU */ case HJ_NEED_NEW_OUTER: + /* * We don't have an outer tuple, try to get the next one */ @@ -250,7 +252,7 @@ ExecHashJoin(HashJoinState *node) Assert(batchno > hashtable->curbatch); ExecHashJoinSaveTuple(ExecFetchSlotMinimalTuple(outerTupleSlot), hashvalue, - &hashtable->outerBatchFile[batchno]); + &hashtable->outerBatchFile[batchno]); /* Loop around, staying in HJ_NEED_NEW_OUTER state */ continue; } @@ -261,6 +263,7 @@ ExecHashJoin(HashJoinState *node) /* FALL THRU */ case HJ_SCAN_BUCKET: + /* * Scan the selected hash bucket for matches to current outer */ @@ -296,8 +299,8 @@ ExecHashJoin(HashJoinState *node) } /* - * In a semijoin, we'll consider returning the first match, - * but after that we're done with this outer tuple. + * In a semijoin, we'll consider returning the first + * match, but after that we're done with this outer tuple. */ if (node->js.jointype == JOIN_SEMI) node->hj_JoinState = HJ_NEED_NEW_OUTER; @@ -320,10 +323,11 @@ ExecHashJoin(HashJoinState *node) break; case HJ_FILL_OUTER_TUPLE: + /* * The current outer tuple has run out of matches, so check - * whether to emit a dummy outer-join tuple. Whether we - * emit one or not, the next state is NEED_NEW_OUTER. + * whether to emit a dummy outer-join tuple. Whether we emit + * one or not, the next state is NEED_NEW_OUTER. */ node->hj_JoinState = HJ_NEED_NEW_OUTER; @@ -354,6 +358,7 @@ ExecHashJoin(HashJoinState *node) break; case HJ_FILL_INNER_TUPLES: + /* * We have finished a batch, but we are doing right/full join, * so any unmatched inner tuples in the hashtable have to be @@ -389,11 +394,12 @@ ExecHashJoin(HashJoinState *node) break; case HJ_NEED_NEW_BATCH: + /* * Try to advance to next batch. Done if there are no more. */ if (!ExecHashJoinNewBatch(node)) - return NULL; /* end of join */ + return NULL; /* end of join */ node->hj_JoinState = HJ_NEED_NEW_OUTER; break; @@ -783,7 +789,7 @@ ExecHashJoinNewBatch(HashJoinState *hjstate) } if (curbatch >= nbatch) - return false; /* no more batches */ + return false; /* no more batches */ hashtable->curbatch = curbatch; @@ -829,7 +835,7 @@ ExecHashJoinNewBatch(HashJoinState *hjstate) if (BufFileSeek(hashtable->outerBatchFile[curbatch], 0, 0L, SEEK_SET)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not rewind hash-join temporary file: %m"))); + errmsg("could not rewind hash-join temporary file: %m"))); } return true; @@ -944,14 +950,13 @@ ExecReScanHashJoin(HashJoinState *node) ExecHashTableResetMatchFlags(node->hj_HashTable); /* - * Also, we need to reset our state about the emptiness of - * the outer relation, so that the new scan of the outer will - * update it correctly if it turns out to be empty this time. 
- * (There's no harm in clearing it now because ExecHashJoin won't - * need the info. In the other cases, where the hash table - * doesn't exist or we are destroying it, we leave this state - * alone because ExecHashJoin will need it the first time - * through.) + * Also, we need to reset our state about the emptiness of the + * outer relation, so that the new scan of the outer will update + * it correctly if it turns out to be empty this time. (There's no + * harm in clearing it now because ExecHashJoin won't need the + * info. In the other cases, where the hash table doesn't exist + * or we are destroying it, we leave this state alone because + * ExecHashJoin will need it the first time through.) */ node->hj_OuterNotEmpty = false; diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 3b8741fc21..d8e59ca39e 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -212,7 +212,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext, /* * For each run-time key, extract the run-time expression and evaluate - * it with respect to the current context. We then stick the result + * it with respect to the current context. We then stick the result * into the proper scan key. * * Note: the result of the eval could be a pass-by-ref value that's @@ -605,16 +605,16 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) indexstate->iss_RelationDesc, estate->es_snapshot, indexstate->iss_NumScanKeys, - indexstate->iss_NumOrderByKeys); + indexstate->iss_NumOrderByKeys); /* - * If no run-time keys to calculate, go ahead and pass the scankeys to - * the index AM. + * If no run-time keys to calculate, go ahead and pass the scankeys to the + * index AM. */ if (indexstate->iss_NumRuntimeKeys == 0) index_rescan(indexstate->iss_ScanDesc, indexstate->iss_ScanKeys, indexstate->iss_NumScanKeys, - indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys); + indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys); /* * all done. @@ -703,11 +703,11 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid, scan_keys = (ScanKey) palloc(n_scan_keys * sizeof(ScanKeyData)); /* - * runtime_keys array is dynamically resized as needed. We handle it - * this way so that the same runtime keys array can be shared between - * indexquals and indexorderbys, which will be processed in separate - * calls of this function. Caller must be sure to pass in NULL/0 for - * first call. + * runtime_keys array is dynamically resized as needed. We handle it this + * way so that the same runtime keys array can be shared between + * indexquals and indexorderbys, which will be processed in separate calls + * of this function. Caller must be sure to pass in NULL/0 for first + * call. */ runtime_keys = *runtimeKeys; n_runtime_keys = max_runtime_keys = *numRuntimeKeys; diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c index edbe0558b7..85d1a6e27f 100644 --- a/src/backend/executor/nodeLimit.c +++ b/src/backend/executor/nodeLimit.c @@ -346,14 +346,14 @@ pass_down_bound(LimitState *node, PlanState *child_node) else if (IsA(child_node, ResultState)) { /* - * An extra consideration here is that if the Result is projecting - * a targetlist that contains any SRFs, we can't assume that every - * input tuple generates an output tuple, so a Sort underneath - * might need to return more than N tuples to satisfy LIMIT N. - * So we cannot use bounded sort. 
+ * An extra consideration here is that if the Result is projecting a + * targetlist that contains any SRFs, we can't assume that every input + * tuple generates an output tuple, so a Sort underneath might need to + * return more than N tuples to satisfy LIMIT N. So we cannot use + * bounded sort. * - * If Result supported qual checking, we'd have to punt on seeing - * a qual, too. Note that having a resconstantqual is not a + * If Result supported qual checking, we'd have to punt on seeing a + * qual, too. Note that having a resconstantqual is not a * showstopper: if that fails we're not getting any rows at all. */ if (outerPlanState(child_node) && diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index 2e08008807..d71278ebd7 100644 --- a/src/backend/executor/nodeLockRows.c +++ b/src/backend/executor/nodeLockRows.c @@ -291,7 +291,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) /* * Locate the ExecRowMark(s) that this node is responsible for, and - * construct ExecAuxRowMarks for them. (InitPlan should already have + * construct ExecAuxRowMarks for them. (InitPlan should already have * built the global list of ExecRowMarks.) */ lrstate->lr_arowMarks = NIL; diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index 73920f21c8..4ebe0cbe03 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -48,8 +48,8 @@ * contains integers which index into the slots array. These typedefs try to * clear it up, but they're only documentation. */ -typedef int SlotNumber; -typedef int HeapPosition; +typedef int SlotNumber; +typedef int HeapPosition; static void heap_insert_slot(MergeAppendState *node, SlotNumber new_slot); static void heap_siftup_slot(MergeAppendState *node); @@ -128,13 +128,13 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) * initialize sort-key information */ mergestate->ms_nkeys = node->numCols; - mergestate->ms_scankeys = palloc0(sizeof(ScanKeyData) * node->numCols); + mergestate->ms_scankeys = palloc0(sizeof(ScanKeyData) * node->numCols); for (i = 0; i < node->numCols; i++) { - Oid sortFunction; - bool reverse; - int flags; + Oid sortFunction; + bool reverse; + int flags; if (!get_compare_function_for_ordering_op(node->sortOperators[i], &sortFunction, &reverse)) @@ -187,8 +187,8 @@ ExecMergeAppend(MergeAppendState *node) if (!node->ms_initialized) { /* - * First time through: pull the first tuple from each subplan, - * and set up the heap. + * First time through: pull the first tuple from each subplan, and set + * up the heap. 
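The heap_insert_slot and heap_siftup_slot hunks just below respace the classic binary-heap index arithmetic: the parent of slot j is (j - 1) / 2, the left child of slot i is 2 * i + 1, and a "hole" is moved along the path instead of doing repeated swaps. The same logic extracted into a standalone toy, with plain ints standing in for tuple slots and no comparator indirection:

#include <stdio.h>

static int  heap[16];
static int  heap_size = 0;

static void
heap_insert(int v)
{
    int         j = heap_size++;    /* j is where the "hole" is */

    while (j > 0)
    {
        int         i = (j - 1) / 2;   /* parent */

        if (v >= heap[i])
            break;
        heap[j] = heap[i];
        j = i;
    }
    heap[j] = v;
}

static int
heap_extract_min(void)
{
    int         min = heap[0];
    int         v = heap[--heap_size]; /* last element re-sited from the top */
    int         i = 0;              /* i is where the "hole" is */

    for (;;)
    {
        int         j = 2 * i + 1;  /* left child */

        if (j >= heap_size)
            break;
        if (j + 1 < heap_size && heap[j + 1] < heap[j])
            j++;                    /* right child is smaller */
        if (v <= heap[j])
            break;
        heap[i] = heap[j];
        i = j;
    }
    heap[i] = v;
    return min;
}

int
main(void)
{
    int         input[] = {5, 1, 4, 2, 3};
    int         i;

    for (i = 0; i < 5; i++)
        heap_insert(input[i]);
    while (heap_size > 0)
        printf("%d ", heap_extract_min());
    printf("\n");               /* prints: 1 2 3 4 5 */
    return 0;
}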
*/ for (i = 0; i < node->ms_nplans; i++) { @@ -243,7 +243,7 @@ heap_insert_slot(MergeAppendState *node, SlotNumber new_slot) j = node->ms_heap_size++; /* j is where the "hole" is */ while (j > 0) { - int i = (j-1)/2; + int i = (j - 1) / 2; if (heap_compare_slots(node, new_slot, node->ms_heap[i]) >= 0) break; @@ -269,11 +269,11 @@ heap_siftup_slot(MergeAppendState *node) i = 0; /* i is where the "hole" is */ for (;;) { - int j = 2 * i + 1; + int j = 2 * i + 1; if (j >= n) break; - if (j+1 < n && heap_compare_slots(node, heap[j], heap[j+1]) > 0) + if (j + 1 < n && heap_compare_slots(node, heap[j], heap[j + 1]) > 0) j++; if (heap_compare_slots(node, heap[n], heap[j]) <= 0) break; @@ -298,13 +298,13 @@ heap_compare_slots(MergeAppendState *node, SlotNumber slot1, SlotNumber slot2) for (nkey = 0; nkey < node->ms_nkeys; nkey++) { - ScanKey scankey = node->ms_scankeys + nkey; - AttrNumber attno = scankey->sk_attno; - Datum datum1, - datum2; - bool isNull1, - isNull2; - int32 compare; + ScanKey scankey = node->ms_scankeys + nkey; + AttrNumber attno = scankey->sk_attno; + Datum datum1, + datum2; + bool isNull1, + isNull2; + int32 compare; datum1 = slot_getattr(s1, attno, &isNull1); datum2 = slot_getattr(s2, attno, &isNull2); diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 75c3a64535..ce5462e961 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -143,7 +143,7 @@ typedef struct MergeJoinClauseData bool reverse; /* if true, negate the cmpfn's output */ bool nulls_first; /* if true, nulls sort low */ FmgrInfo cmpfinfo; -} MergeJoinClauseData; +} MergeJoinClauseData; /* Result type for MJEvalOuterValues and MJEvalInnerValues */ typedef enum diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index f10f70a17d..c0eab4bf0d 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -544,7 +544,7 @@ ExecUpdate(ItemPointer tupleid, * * If we generate a new candidate tuple after EvalPlanQual testing, we * must loop back here and recheck constraints. (We don't need to - * redo triggers, however. If there are any BEFORE triggers then + * redo triggers, however. If there are any BEFORE triggers then * trigger.c will have done heap_lock_tuple to lock the correct tuple, * so there's no need to do them again.) */ @@ -608,11 +608,10 @@ lreplace:; /* * Note: instead of having to update the old index tuples associated - * with the heap tuple, all we do is form and insert new index - * tuples. This is because UPDATEs are actually DELETEs and INSERTs, - * and index tuple deletion is done later by VACUUM (see notes in - * ExecDelete). All we do here is insert new index tuples. -cim - * 9/27/89 + * with the heap tuple, all we do is form and insert new index tuples. + * This is because UPDATEs are actually DELETEs and INSERTs, and index + * tuple deletion is done later by VACUUM (see notes in ExecDelete). + * All we do here is insert new index tuples. -cim 9/27/89 */ /* @@ -713,7 +712,7 @@ ExecModifyTable(ModifyTableState *node) TupleTableSlot *planSlot; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; - HeapTupleHeader oldtuple = NULL; + HeapTupleHeader oldtuple = NULL; /* * If we've already completed processing, don't try to do more. We need @@ -740,7 +739,7 @@ ExecModifyTable(ModifyTableState *node) /* * es_result_relation_info must point to the currently active result - * relation while we are within this ModifyTable node. 
Even though + * relation while we are within this ModifyTable node. Even though * ModifyTable nodes can't be nested statically, they can be nested * dynamically (since our subplan could include a reference to a modifying * CTE). So we have to save and restore the caller's value. @@ -756,7 +755,7 @@ ExecModifyTable(ModifyTableState *node) for (;;) { /* - * Reset the per-output-tuple exprcontext. This is needed because + * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly * to do this below the top level of the plan, however. We might need * to rethink this later. @@ -806,7 +805,8 @@ ExecModifyTable(ModifyTableState *node) elog(ERROR, "ctid is NULL"); tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* be sure we don't free ctid!! */ + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! */ tupleid = &tuple_ctid; } else @@ -836,11 +836,11 @@ ExecModifyTable(ModifyTableState *node) break; case CMD_UPDATE: slot = ExecUpdate(tupleid, oldtuple, slot, planSlot, - &node->mt_epqstate, estate, node->canSetTag); + &node->mt_epqstate, estate, node->canSetTag); break; case CMD_DELETE: slot = ExecDelete(tupleid, oldtuple, planSlot, - &node->mt_epqstate, estate, node->canSetTag); + &node->mt_epqstate, estate, node->canSetTag); break; default: elog(ERROR, "unknown operation"); @@ -922,9 +922,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * call ExecInitNode on each of the plans to be executed and save the - * results into the array "mt_plans". This is also a convenient place - * to verify that the proposed target relations are valid and open their - * indexes for insertion of new index entries. Note we *must* set + * results into the array "mt_plans". This is also a convenient place to + * verify that the proposed target relations are valid and open their + * indexes for insertion of new index entries. Note we *must* set * estate->es_result_relation_info correctly while we initialize each * sub-plan; ExecContextForcesOids depends on that! */ @@ -944,7 +944,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * If there are indices on the result relation, open them and save * descriptors in the result relation info, so that we can add new - * index entries for the tuples we add/update. We need not do this + * index entries for the tuples we add/update. We need not do this * for a DELETE, however, since deletion doesn't affect indexes. */ if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex && @@ -1147,10 +1147,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it * to estate->es_auxmodifytables so that it will be run to completion by * ExecPostprocessPlan. (It'd actually work fine to add the primary - * ModifyTable node too, but there's no need.) Note the use of lcons - * not lappend: we need later-initialized ModifyTable nodes to be shut - * down before earlier ones. This ensures that we don't throw away - * RETURNING rows that need to be seen by a later CTE subplan. + * ModifyTable node too, but there's no need.) Note the use of lcons not + * lappend: we need later-initialized ModifyTable nodes to be shut down + * before earlier ones. This ensures that we don't throw away RETURNING + * rows that need to be seen by a later CTE subplan. 
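On "the use of lcons not lappend": prepending each ModifyTable node as it finishes initialization means a later walk of the list visits the newest node first, which is exactly the shutdown order the comment asks for. A toy cons list showing the effect; the node ids are illustrative:

#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
    int         id;
    struct Node *next;
} Node;

/* prepend, like lcons */
static Node *
cons(int id, Node *tail)
{
    Node       *n = malloc(sizeof(Node));

    n->id = id;
    n->next = tail;
    return n;
}

int
main(void)
{
    Node       *list = NULL;
    Node       *n;
    int         i;

    /* register nodes 0, 1, 2 in initialization order */
    for (i = 0; i < 3; i++)
        list = cons(i, list);

    /* iteration visits 2, 1, 0: later-initialized nodes shut down first */
    for (n = list; n != NULL; n = n->next)
        printf("shut down node %d\n", n->id);
    return 0;
}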
*/ if (!mtstate->canSetTag) estate->es_auxmodifytables = lcons(mtstate, diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c index 4893a6ea6d..e98bc0f5a3 100644 --- a/src/backend/executor/nodeNestloop.c +++ b/src/backend/executor/nodeNestloop.c @@ -137,9 +137,8 @@ ExecNestLoop(NestLoopState *node) node->nl_MatchedOuter = false; /* - * fetch the values of any outer Vars that must be passed to - * the inner scan, and store them in the appropriate PARAM_EXEC - * slots. + * fetch the values of any outer Vars that must be passed to the + * inner scan, and store them in the appropriate PARAM_EXEC slots. */ foreach(lc, nl->nestParams) { @@ -330,9 +329,9 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags) * * If we have no parameters to pass into the inner rel from the outer, * tell the inner child that cheap rescans would be good. If we do have - * such parameters, then there is no point in REWIND support at all in - * the inner child, because it will always be rescanned with fresh - * parameter values. + * such parameters, then there is no point in REWIND support at all in the + * inner child, because it will always be rescanned with fresh parameter + * values. */ outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags); if (node->nestParams == NIL) diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c index 84c051854b..12e1b9a585 100644 --- a/src/backend/executor/nodeRecursiveunion.c +++ b/src/backend/executor/nodeRecursiveunion.c @@ -29,7 +29,7 @@ typedef struct RUHashEntryData *RUHashEntry; typedef struct RUHashEntryData { TupleHashEntryData shared; /* common header for hash table entries */ -} RUHashEntryData; +} RUHashEntryData; /* diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index aa352d7822..9106f14873 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -76,7 +76,7 @@ typedef struct SetOpHashEntryData { TupleHashEntryData shared; /* common header for hash table entries */ SetOpStatePerGroupData pergroup; -} SetOpHashEntryData; +} SetOpHashEntryData; static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate); diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 5680efeb69..25d9298cef 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -92,7 +92,7 @@ typedef struct WindowStatePerFuncData int aggno; /* if so, index of its PerAggData */ WindowObject winobj; /* object used in window function API */ -} WindowStatePerFuncData; +} WindowStatePerFuncData; /* * For plain aggregate window functions, we also have one of these. diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index a717a0deea..6e723ca092 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -1787,8 +1787,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, * snapshot != InvalidSnapshot, read_only = true: use exactly the given * snapshot. * - * snapshot != InvalidSnapshot, read_only = false: use the given - * snapshot, modified by advancing its command ID before each querytree. + * snapshot != InvalidSnapshot, read_only = false: use the given snapshot, + * modified by advancing its command ID before each querytree. * * snapshot == InvalidSnapshot, read_only = true: use the entry-time * ActiveSnapshot, if any (if there isn't one, we run with no snapshot). 
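The comment being reflowed here enumerates four snapshot regimes keyed off two inputs, whether the caller supplied a snapshot and whether the call is read-only (the fourth case and the summary follow in the next hunk). Stripped of the actual snapshot manipulation, the dispatch is a two-bit decision table. A standalone sketch with invented enum names; the real _SPI_execute_plan would push or copy actual snapshots rather than return a tag:

#include <stdio.h>
#include <stdbool.h>

typedef enum
{
    SNAP_USE_GIVEN,             /* push caller's snapshot once, unmodified */
    SNAP_COPY_AND_ADVANCE,      /* push a copy; bump its command ID per query */
    SNAP_ENTRY_TIME,            /* reuse entry-time active snapshot, if any */
    SNAP_FRESH_PER_COMMAND      /* new snapshot per command, advanced per query */
} SnapPolicy;

/* encodes the four cases the comment spells out */
static SnapPolicy
choose_snapshot_policy(bool have_snapshot, bool read_only)
{
    if (have_snapshot)
        return read_only ? SNAP_USE_GIVEN : SNAP_COPY_AND_ADVANCE;
    return read_only ? SNAP_ENTRY_TIME : SNAP_FRESH_PER_COMMAND;
}

int
main(void)
{
    printf("%d %d %d %d\n",     /* prints: 0 1 2 3 */
           choose_snapshot_policy(true, true),
           choose_snapshot_policy(true, false),
           choose_snapshot_policy(false, true),
           choose_snapshot_policy(false, false));
    return 0;
}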
@@ -1797,8 +1797,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, * snapshot for each user command, and advance its command ID before each * querytree within the command. * - * In the first two cases, we can just push the snap onto the stack - * once for the whole plan list. + * In the first two cases, we can just push the snap onto the stack once + * for the whole plan list. */ if (snapshot != InvalidSnapshot) { @@ -2028,7 +2028,7 @@ _SPI_convert_params(int nargs, Oid *argtypes, /* sizeof(ParamListInfoData) includes the first array element */ paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) + - (nargs - 1) *sizeof(ParamExternData)); + (nargs - 1) * sizeof(ParamExternData)); /* we have static list of params, so no hooks needed */ paramLI->paramFetch = NULL; paramLI->paramFetchArg = NULL; diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 151ec5613b..d003b1206a 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -61,6 +61,7 @@ static int recv_and_check_password_packet(Port *port); #define IDENT_PORT 113 static int ident_inet(hbaPort *port); + #ifdef HAVE_UNIX_SOCKETS static int auth_peer(hbaPort *port); #endif @@ -182,7 +183,7 @@ static int pg_GSS_recvauth(Port *port); *---------------------------------------------------------------- */ #ifdef ENABLE_SSPI -typedef SECURITY_STATUS +typedef SECURITY_STATUS (WINAPI * QUERY_SECURITY_CONTEXT_TOKEN_FN) ( PCtxtHandle, void **); static int pg_SSPI_recvauth(Port *port); @@ -543,7 +544,7 @@ ClientAuthentication(Port *port) } #endif status = auth_peer(port); -#else /* HAVE_UNIX_SOCKETS */ +#else /* HAVE_UNIX_SOCKETS */ Assert(false); #endif break; @@ -598,7 +599,7 @@ ClientAuthentication(Port *port) } if (ClientAuthentication_hook) - (*ClientAuthentication_hook)(port, status); + (*ClientAuthentication_hook) (port, status); if (status == STATUS_OK) sendAuthRequest(port, AUTH_REQ_OK); @@ -844,7 +845,7 @@ pg_krb5_recvauth(Port *port) return ret; retval = krb5_recvauth(pg_krb5_context, &auth_context, - (krb5_pointer) & port->sock, pg_krb_srvnam, + (krb5_pointer) &port->sock, pg_krb_srvnam, pg_krb5_server, 0, pg_krb5_keytab, &ticket); if (retval) { @@ -1814,7 +1815,6 @@ auth_peer(hbaPort *port) } strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1); - #elif defined(SO_PEERCRED) /* Linux style: use getsockopt(SO_PEERCRED) */ struct ucred peercred; @@ -1843,7 +1843,6 @@ auth_peer(hbaPort *port) } strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1); - #elif defined(HAVE_GETPEERUCRED) /* Solaris > 10 */ uid_t uid; @@ -1879,7 +1878,6 @@ auth_peer(hbaPort *port) } strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1); - #elif defined(HAVE_STRUCT_CMSGCRED) || defined(HAVE_STRUCT_FCRED) || (defined(HAVE_STRUCT_SOCKCRED) && defined(LOCAL_CREDS)) struct msghdr msg; @@ -1947,7 +1945,6 @@ auth_peer(hbaPort *port) } strlcpy(ident_user, pw->pw_name, IDENT_USERNAME_MAX + 1); - #else ereport(LOG, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -2768,10 +2765,10 @@ CheckRADIUSAuth(Port *port) pg_freeaddrinfo_all(hint.ai_family, serveraddrs); /* - * Figure out at what time we should time out. We can't just use - * a single call to select() with a timeout, since somebody can - * be sending invalid packets to our port thus causing us to - * retry in a loop and never time out. + * Figure out at what time we should time out. 
We can't just use a single + * call to select() with a timeout, since somebody can be sending invalid + * packets to our port thus causing us to retry in a loop and never time + * out. */ gettimeofday(&endtime, NULL); endtime.tv_sec += RADIUS_TIMEOUT; @@ -2780,7 +2777,7 @@ CheckRADIUSAuth(Port *port) { struct timeval timeout; struct timeval now; - int64 timeoutval; + int64 timeoutval; gettimeofday(&now, NULL); timeoutval = (endtime.tv_sec * 1000000 + endtime.tv_usec) - (now.tv_sec * 1000000 + now.tv_usec); @@ -2820,12 +2817,12 @@ CheckRADIUSAuth(Port *port) /* * Attempt to read the response packet, and verify the contents. * - * Any packet that's not actually a RADIUS packet, or otherwise - * does not validate as an explicit reject, is just ignored and - * we retry for another packet (until we reach the timeout). This - * is to avoid the possibility to denial-of-service the login by - * flooding the server with invalid packets on the port that - * we're expecting the RADIUS response on. + * Any packet that's not actually a RADIUS packet, or otherwise does + * not validate as an explicit reject, is just ignored and we retry + * for another packet (until we reach the timeout). This is to avoid + * the possibility to denial-of-service the login by flooding the + * server with invalid packets on the port that we're expecting the + * RADIUS response on. */ addrsize = sizeof(remoteaddr); @@ -2846,12 +2843,12 @@ CheckRADIUSAuth(Port *port) { #ifdef HAVE_IPV6 ereport(LOG, - (errmsg("RADIUS response was sent from incorrect port: %i", - ntohs(remoteaddr.sin6_port)))); + (errmsg("RADIUS response was sent from incorrect port: %i", + ntohs(remoteaddr.sin6_port)))); #else ereport(LOG, - (errmsg("RADIUS response was sent from incorrect port: %i", - ntohs(remoteaddr.sin_port)))); + (errmsg("RADIUS response was sent from incorrect port: %i", + ntohs(remoteaddr.sin_port)))); #endif continue; } @@ -2885,12 +2882,12 @@ CheckRADIUSAuth(Port *port) */ cryptvector = palloc(packetlength + strlen(port->hba->radiussecret)); - memcpy(cryptvector, receivepacket, 4); /* code+id+length */ - memcpy(cryptvector + 4, packet->vector, RADIUS_VECTOR_LENGTH); /* request - * authenticator, from - * original packet */ - if (packetlength > RADIUS_HEADER_LENGTH) /* there may be no attributes - * at all */ + memcpy(cryptvector, receivepacket, 4); /* code+id+length */ + memcpy(cryptvector + 4, packet->vector, RADIUS_VECTOR_LENGTH); /* request + * authenticator, from + * original packet */ + if (packetlength > RADIUS_HEADER_LENGTH) /* there may be no + * attributes at all */ memcpy(cryptvector + RADIUS_HEADER_LENGTH, receive_buffer + RADIUS_HEADER_LENGTH, packetlength - RADIUS_HEADER_LENGTH); memcpy(cryptvector + packetlength, port->hba->radiussecret, strlen(port->hba->radiussecret)); @@ -2899,7 +2896,7 @@ CheckRADIUSAuth(Port *port) encryptedpassword)) { ereport(LOG, - (errmsg("could not perform MD5 encryption of received packet"))); + (errmsg("could not perform MD5 encryption of received packet"))); pfree(cryptvector); continue; } @@ -2925,9 +2922,9 @@ CheckRADIUSAuth(Port *port) else { ereport(LOG, - (errmsg("RADIUS response has invalid code (%i) for user \"%s\"", - receivepacket->code, port->user_name))); + (errmsg("RADIUS response has invalid code (%i) for user \"%s\"", + receivepacket->code, port->user_name))); continue; } - } /* while (true) */ + } /* while (true) */ } diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index 2def6cea89..fdc29aaa72 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c 
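Among the hba.c hunks that follow is hostname_match, whose suffix rule gets its variable declarations re-aligned: a pattern beginning with '.' matches any host name ending in that suffix, compared case-insensitively. A standalone rendering of those semantics, with POSIX strcasecmp standing in for the server's pg_strcasecmp:

#include <stdio.h>
#include <string.h>
#include <strings.h>

static int
hostname_match_sketch(const char *pattern, const char *actual)
{
    if (pattern[0] == '.')          /* suffix match */
    {
        size_t      plen = strlen(pattern);
        size_t      hlen = strlen(actual);

        if (hlen < plen)
            return 0;
        return strcasecmp(pattern, actual + (hlen - plen)) == 0;
    }
    return strcasecmp(pattern, actual) == 0;
}

int
main(void)
{
    /* 1: suffix matches */
    printf("%d\n", hostname_match_sketch(".example.com", "db1.example.com"));
    /* 0: bare domain is shorter than the suffix pattern */
    printf("%d\n", hostname_match_sketch(".example.com", "example.com"));
    /* 1: exact match is case-insensitive */
    printf("%d\n", hostname_match_sketch("db1.example.com", "DB1.example.com"));
    return 0;
}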
@@ -543,7 +543,7 @@ check_db(const char *dbname, const char *role, Oid roleid, char *param_str) } static bool -ipv4eq(struct sockaddr_in *a, struct sockaddr_in *b) +ipv4eq(struct sockaddr_in * a, struct sockaddr_in * b) { return (a->sin_addr.s_addr == b->sin_addr.s_addr); } @@ -551,9 +551,9 @@ ipv4eq(struct sockaddr_in *a, struct sockaddr_in *b) #ifdef HAVE_IPV6 static bool -ipv6eq(struct sockaddr_in6 *a, struct sockaddr_in6 *b) +ipv6eq(struct sockaddr_in6 * a, struct sockaddr_in6 * b) { - int i; + int i; for (i = 0; i < 16; i++) if (a->sin6_addr.s6_addr[i] != b->sin6_addr.s6_addr[i]) @@ -561,8 +561,7 @@ ipv6eq(struct sockaddr_in6 *a, struct sockaddr_in6 *b) return true; } - -#endif /* HAVE_IPV6 */ +#endif /* HAVE_IPV6 */ /* * Check whether host name matches pattern. @@ -572,8 +571,8 @@ hostname_match(const char *pattern, const char *actual_hostname) { if (pattern[0] == '.') /* suffix match */ { - size_t plen = strlen(pattern); - size_t hlen = strlen(actual_hostname); + size_t plen = strlen(pattern); + size_t hlen = strlen(actual_hostname); if (hlen < plen) return false; @@ -590,7 +589,8 @@ hostname_match(const char *pattern, const char *actual_hostname) static bool check_hostname(hbaPort *port, const char *hostname) { - struct addrinfo *gai_result, *gai; + struct addrinfo *gai_result, + *gai; int ret; bool found; @@ -632,7 +632,7 @@ check_hostname(hbaPort *port, const char *hostname) if (gai->ai_addr->sa_family == AF_INET) { if (ipv4eq((struct sockaddr_in *) gai->ai_addr, - (struct sockaddr_in *) &port->raddr.addr)) + (struct sockaddr_in *) & port->raddr.addr)) { found = true; break; @@ -642,7 +642,7 @@ check_hostname(hbaPort *port, const char *hostname) else if (gai->ai_addr->sa_family == AF_INET6) { if (ipv6eq((struct sockaddr_in6 *) gai->ai_addr, - (struct sockaddr_in6 *) &port->raddr.addr)) + (struct sockaddr_in6 *) & port->raddr.addr)) { found = true; break; @@ -974,8 +974,8 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline) (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("specifying both host name and CIDR mask is invalid: \"%s\"", token), - errcontext("line %d of configuration file \"%s\"", - line_num, HbaFileName))); + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); pfree(token); return false; } diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 3232e64d4a..b83a2efb69 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -85,7 +85,7 @@ #ifdef HAVE_UTIME_H #include <utime.h> #endif -#ifdef WIN32_ONLY_COMPILER /* mstcpip.h is missing on mingw */ +#ifdef WIN32_ONLY_COMPILER /* mstcpip.h is missing on mingw */ #include <mstcpip.h> #endif @@ -745,7 +745,7 @@ TouchSocketFile(void) */ /* -------------------------------- - * pq_set_nonblocking - set socket blocking/non-blocking + * pq_set_nonblocking - set socket blocking/non-blocking * * Sets the socket non-blocking if nonblocking is TRUE, or sets it * blocking otherwise. @@ -760,16 +760,17 @@ pq_set_nonblocking(bool nonblocking) #ifdef WIN32 pgwin32_noblock = nonblocking ? 1 : 0; #else + /* - * Use COMMERROR on failure, because ERROR would try to send the error - * to the client, which might require changing the mode again, leading - * to infinite recursion. + * Use COMMERROR on failure, because ERROR would try to send the error to + * the client, which might require changing the mode again, leading to + * infinite recursion. 
*/ if (nonblocking) { if (!pg_set_noblock(MyProcPort->sock)) ereport(COMMERROR, - (errmsg("could not set socket to non-blocking mode: %m"))); + (errmsg("could not set socket to non-blocking mode: %m"))); } else { @@ -903,18 +904,17 @@ pq_getbyte_if_available(unsigned char *c) { /* * Ok if no data available without blocking or interrupted (though - * EINTR really shouldn't happen with a non-blocking socket). - * Report other errors. + * EINTR really shouldn't happen with a non-blocking socket). Report + * other errors. */ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) r = 0; else { /* - * Careful: an ereport() that tries to write to the client - * would cause recursion to here, leading to stack overflow - * and core dump! This message must go *only* to the - * postmaster log. + * Careful: an ereport() that tries to write to the client would + * cause recursion to here, leading to stack overflow and core + * dump! This message must go *only* to the postmaster log. */ ereport(COMMERROR, (errcode_for_socket_access(), @@ -1219,8 +1219,8 @@ internal_flush(void) continue; /* Ok if we were interrupted */ /* - * Ok if no data writable without blocking, and the socket - * is in non-blocking mode. + * Ok if no data writable without blocking, and the socket is in + * non-blocking mode. */ if (errno == EAGAIN || errno == EWOULDBLOCK) @@ -1369,8 +1369,8 @@ fail: void pq_putmessage_noblock(char msgtype, const char *s, size_t len) { - int res; - int required; + int res; + int required; /* * Ensure we have enough space in the output buffer for the message header @@ -1383,7 +1383,8 @@ pq_putmessage_noblock(char msgtype, const char *s, size_t len) PqSendBufferSize = required; } res = pq_putmessage(msgtype, s, len); - Assert(res == 0); /* should not fail when the message fits in buffer */ + Assert(res == 0); /* should not fail when the message fits in + * buffer */ } @@ -1434,13 +1435,13 @@ pq_endcopyout(bool errorAbort) static int pq_setkeepaliveswin32(Port *port, int idle, int interval) { - struct tcp_keepalive ka; - DWORD retsize; + struct tcp_keepalive ka; + DWORD retsize; if (idle <= 0) - idle = 2 * 60 * 60; /* default = 2 hours */ + idle = 2 * 60 * 60; /* default = 2 hours */ if (interval <= 0) - interval = 1; /* default = 1 second */ + interval = 1; /* default = 1 second */ ka.onoff = 1; ka.keepalivetime = idle * 1000; @@ -1500,11 +1501,11 @@ pq_getkeepalivesidle(Port *port) elog(LOG, "getsockopt(TCP_KEEPALIVE) failed: %m"); port->default_keepalives_idle = -1; /* don't know */ } -#endif /* TCP_KEEPIDLE */ -#else /* WIN32 */ +#endif /* TCP_KEEPIDLE */ +#else /* WIN32 */ /* We can't get the defaults on Windows, so return "don't know" */ port->default_keepalives_idle = -1; -#endif /* WIN32 */ +#endif /* WIN32 */ } return port->default_keepalives_idle; @@ -1555,10 +1556,10 @@ pq_setkeepalivesidle(int idle, Port *port) #endif port->keepalives_idle = idle; -#else /* WIN32 */ +#else /* WIN32 */ return pq_setkeepaliveswin32(port, idle, port->keepalives_interval); #endif -#else /* TCP_KEEPIDLE || SIO_KEEPALIVE_VALS */ +#else /* TCP_KEEPIDLE || SIO_KEEPALIVE_VALS */ if (idle != 0) { elog(LOG, "setting the keepalive idle time is not supported"); @@ -1593,7 +1594,7 @@ pq_getkeepalivesinterval(Port *port) #else /* We can't get the defaults on Windows, so return "don't know" */ port->default_keepalives_interval = -1; -#endif /* WIN32 */ +#endif /* WIN32 */ } return port->default_keepalives_interval; @@ -1635,7 +1636,7 @@ pq_setkeepalivesinterval(int interval, Port *port) } port->keepalives_interval = 
interval; -#else /* WIN32 */ +#else /* WIN32 */ return pq_setkeepaliveswin32(port, port->keepalives_idle, interval); #endif #else diff --git a/src/backend/main/main.c b/src/backend/main/main.c index 43d182b4db..c4ef56dc6c 100644 --- a/src/backend/main/main.c +++ b/src/backend/main/main.c @@ -204,7 +204,7 @@ main(int argc, char *argv[]) /* * Place platform-specific startup hacks here. This is the right * place to put code that must be executed early in the launch of any new - * server process. Note that this code will NOT be executed when a backend + * server process. Note that this code will NOT be executed when a backend * or sub-bootstrap process is forked, unless we are in a fork/exec * environment (ie EXEC_BACKEND is defined). * @@ -218,8 +218,8 @@ startup_hacks(const char *progname) /* * On some platforms, unaligned memory accesses result in a kernel trap; * the default kernel behavior is to emulate the memory access, but this - * results in a significant performance penalty. We want PG never to - * make such unaligned memory accesses, so this code disables the kernel + * results in a significant performance penalty. We want PG never to make + * such unaligned memory accesses, so this code disables the kernel * emulation: unaligned accesses will result in SIGBUS instead. */ #ifdef NOFIXADE @@ -230,7 +230,7 @@ startup_hacks(const char *progname) #if defined(__alpha) /* no __alpha__ ? */ { - int buffer[] = {SSIN_UACPROC, UAC_SIGBUS | UAC_NOPRINT}; + int buffer[] = {SSIN_UACPROC, UAC_SIGBUS | UAC_NOPRINT}; if (setsysinfo(SSI_NVPAIRS, buffer, 1, (caddr_t) NULL, (unsigned long) NULL) < 0) @@ -238,7 +238,6 @@ startup_hacks(const char *progname) progname, strerror(errno)); } #endif /* __alpha */ - #endif /* NOFIXADE */ /* diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 0eac9826a4..c0d2294317 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -581,7 +581,7 @@ _copyForeignScan(ForeignScan *from) static FdwPlan * _copyFdwPlan(FdwPlan *from) { - FdwPlan *newnode = makeNode(FdwPlan); + FdwPlan *newnode = makeNode(FdwPlan); COPY_SCALAR_FIELD(startup_cost); COPY_SCALAR_FIELD(total_cost); @@ -1468,7 +1468,7 @@ _copyConvertRowtypeExpr(ConvertRowtypeExpr *from) static CollateExpr * _copyCollateExpr(CollateExpr *from) { - CollateExpr *newnode = makeNode(CollateExpr); + CollateExpr *newnode = makeNode(CollateExpr); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(collOid); @@ -2269,7 +2269,7 @@ _copyTypeCast(TypeCast *from) static CollateClause * _copyCollateClause(CollateClause *from) { - CollateClause *newnode = makeNode(CollateClause); + CollateClause *newnode = makeNode(CollateClause); COPY_NODE_FIELD(arg); COPY_NODE_FIELD(collname); diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index af1ccb7efe..0e57f6c6d7 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -675,10 +675,10 @@ exprCollation(Node *expr) coll = ((NullIfExpr *) expr)->opcollid; break; case T_ScalarArrayOpExpr: - coll = InvalidOid; /* result is always boolean */ + coll = InvalidOid; /* result is always boolean */ break; case T_BoolExpr: - coll = InvalidOid; /* result is always boolean */ + coll = InvalidOid; /* result is always boolean */ break; case T_SubLink: { @@ -736,7 +736,7 @@ exprCollation(Node *expr) coll = ((FieldSelect *) expr)->resultcollid; break; case T_FieldStore: - coll = InvalidOid; /* result is always composite */ + coll = InvalidOid; /* result is always composite */ break; case T_RelabelType: coll = 
((RelabelType *) expr)->resultcollid; @@ -748,7 +748,7 @@ exprCollation(Node *expr) coll = ((ArrayCoerceExpr *) expr)->resultcollid; break; case T_ConvertRowtypeExpr: - coll = InvalidOid; /* result is always composite */ + coll = InvalidOid; /* result is always composite */ break; case T_CollateExpr: coll = ((CollateExpr *) expr)->collOid; @@ -763,10 +763,10 @@ exprCollation(Node *expr) coll = ((ArrayExpr *) expr)->array_collid; break; case T_RowExpr: - coll = InvalidOid; /* result is always composite */ + coll = InvalidOid; /* result is always composite */ break; case T_RowCompareExpr: - coll = InvalidOid; /* result is always boolean */ + coll = InvalidOid; /* result is always boolean */ break; case T_CoalesceExpr: coll = ((CoalesceExpr *) expr)->coalescecollid; @@ -775,10 +775,11 @@ exprCollation(Node *expr) coll = ((MinMaxExpr *) expr)->minmaxcollid; break; case T_XmlExpr: + /* * XMLSERIALIZE returns text from non-collatable inputs, so its - * collation is always default. The other cases return boolean - * or XML, which are non-collatable. + * collation is always default. The other cases return boolean or + * XML, which are non-collatable. */ if (((XmlExpr *) expr)->op == IS_XMLSERIALIZE) coll = DEFAULT_COLLATION_OID; @@ -786,10 +787,10 @@ exprCollation(Node *expr) coll = InvalidOid; break; case T_NullTest: - coll = InvalidOid; /* result is always boolean */ + coll = InvalidOid; /* result is always boolean */ break; case T_BooleanTest: - coll = InvalidOid; /* result is always boolean */ + coll = InvalidOid; /* result is always boolean */ break; case T_CoerceToDomain: coll = ((CoerceToDomain *) expr)->resultcollid; @@ -801,7 +802,7 @@ exprCollation(Node *expr) coll = ((SetToDefault *) expr)->collation; break; case T_CurrentOfExpr: - coll = InvalidOid; /* result is always boolean */ + coll = InvalidOid; /* result is always boolean */ break; case T_PlaceHolderVar: coll = exprCollation((Node *) ((PlaceHolderVar *) expr)->phexpr); @@ -907,10 +908,10 @@ exprSetCollation(Node *expr, Oid collation) ((NullIfExpr *) expr)->opcollid = collation; break; case T_ScalarArrayOpExpr: - Assert(!OidIsValid(collation)); /* result is always boolean */ + Assert(!OidIsValid(collation)); /* result is always boolean */ break; case T_BoolExpr: - Assert(!OidIsValid(collation)); /* result is always boolean */ + Assert(!OidIsValid(collation)); /* result is always boolean */ break; case T_SubLink: #ifdef USE_ASSERT_CHECKING @@ -937,13 +938,13 @@ exprSetCollation(Node *expr, Oid collation) Assert(!OidIsValid(collation)); } } -#endif /* USE_ASSERT_CHECKING */ +#endif /* USE_ASSERT_CHECKING */ break; case T_FieldSelect: ((FieldSelect *) expr)->resultcollid = collation; break; case T_FieldStore: - Assert(!OidIsValid(collation)); /* result is always composite */ + Assert(!OidIsValid(collation)); /* result is always composite */ break; case T_RelabelType: ((RelabelType *) expr)->resultcollid = collation; @@ -955,7 +956,7 @@ exprSetCollation(Node *expr, Oid collation) ((ArrayCoerceExpr *) expr)->resultcollid = collation; break; case T_ConvertRowtypeExpr: - Assert(!OidIsValid(collation)); /* result is always composite */ + Assert(!OidIsValid(collation)); /* result is always composite */ break; case T_CaseExpr: ((CaseExpr *) expr)->casecollid = collation; @@ -964,10 +965,10 @@ exprSetCollation(Node *expr, Oid collation) ((ArrayExpr *) expr)->array_collid = collation; break; case T_RowExpr: - Assert(!OidIsValid(collation)); /* result is always composite */ + Assert(!OidIsValid(collation)); /* result is always composite */ 
break; case T_RowCompareExpr: - Assert(!OidIsValid(collation)); /* result is always boolean */ + Assert(!OidIsValid(collation)); /* result is always boolean */ break; case T_CoalesceExpr: ((CoalesceExpr *) expr)->coalescecollid = collation; @@ -981,10 +982,10 @@ exprSetCollation(Node *expr, Oid collation) (collation == InvalidOid)); break; case T_NullTest: - Assert(!OidIsValid(collation)); /* result is always boolean */ + Assert(!OidIsValid(collation)); /* result is always boolean */ break; case T_BooleanTest: - Assert(!OidIsValid(collation)); /* result is always boolean */ + Assert(!OidIsValid(collation)); /* result is always boolean */ break; case T_CoerceToDomain: ((CoerceToDomain *) expr)->resultcollid = collation; @@ -996,7 +997,7 @@ exprSetCollation(Node *expr, Oid collation) ((SetToDefault *) expr)->collation = collation; break; case T_CurrentOfExpr: - Assert(!OidIsValid(collation)); /* result is always boolean */ + Assert(!OidIsValid(collation)); /* result is always boolean */ break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr)); diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c index d6e6e6a2bd..62d766a282 100644 --- a/src/backend/nodes/params.c +++ b/src/backend/nodes/params.c @@ -43,7 +43,7 @@ copyParamList(ParamListInfo from) /* sizeof(ParamListInfoData) includes the first array element */ size = sizeof(ParamListInfoData) + - (from->numParams - 1) *sizeof(ParamExternData); + (from->numParams - 1) * sizeof(ParamExternData); retval = (ParamListInfo) palloc(size); retval->paramFetch = NULL; diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index dc2a23bb27..47ab08e502 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -66,7 +66,7 @@ static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte); + RangeTblEntry *rte); static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist); static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery, bool *differentTypes); @@ -413,11 +413,11 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, /* * We have to make child entries in the EquivalenceClass data - * structures as well. This is needed either if the parent - * participates in some eclass joins (because we will want to - * consider inner-indexscan joins on the individual children) - * or if the parent has useful pathkeys (because we should try - * to build MergeAppend paths that produce those sort orderings). + * structures as well. This is needed either if the parent + * participates in some eclass joins (because we will want to consider + * inner-indexscan joins on the individual children) or if the parent + * has useful pathkeys (because we should try to build MergeAppend + * paths that produce those sort orderings). */ if (rel->has_eclass_joins || has_useful_pathkeys(root, rel)) add_child_rel_equivalences(root, appinfo, rel, childrel); @@ -462,7 +462,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, /* Have we already seen this ordering? 
*/ foreach(lpk, all_child_pathkeys) { - List *existing_pathkeys = (List *) lfirst(lpk); + List *existing_pathkeys = (List *) lfirst(lpk); if (compare_pathkeys(existing_pathkeys, childkeys) == PATHKEYS_EQUAL) @@ -540,18 +540,18 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, /* * Next, build MergeAppend paths based on the collected list of child - * pathkeys. We consider both cheapest-startup and cheapest-total - * cases, ie, for each interesting ordering, collect all the cheapest - * startup subpaths and all the cheapest total paths, and build a - * MergeAppend path for each list. + * pathkeys. We consider both cheapest-startup and cheapest-total cases, + * ie, for each interesting ordering, collect all the cheapest startup + * subpaths and all the cheapest total paths, and build a MergeAppend path + * for each list. */ foreach(l, all_child_pathkeys) { - List *pathkeys = (List *) lfirst(l); - List *startup_subpaths = NIL; - List *total_subpaths = NIL; - bool startup_neq_total = false; - ListCell *lcr; + List *pathkeys = (List *) lfirst(l); + List *startup_subpaths = NIL; + List *total_subpaths = NIL; + bool startup_neq_total = false; + ListCell *lcr; /* Select the child paths for this ordering... */ foreach(lcr, live_childrels) @@ -581,8 +581,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, /* * Notice whether we actually have different paths for the - * "cheapest" and "total" cases; frequently there will be no - * point in two create_merge_append_path() calls. + * "cheapest" and "total" cases; frequently there will be no point + * in two create_merge_append_path() calls. */ if (cheapest_startup != cheapest_total) startup_neq_total = true; @@ -623,7 +623,7 @@ accumulate_append_subpath(List *subpaths, Path *path) { if (IsA(path, AppendPath)) { - AppendPath *apath = (AppendPath *) path; + AppendPath *apath = (AppendPath *) path; /* list_copy is important here to avoid sharing list substructure */ return list_concat(subpaths, list_copy(apath->subpaths)); diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 8f763b4369..e200dcf472 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -1096,7 +1096,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm) * accesses (XXX can't we refine that guess?) * * By default, we charge two operator evals per tuple comparison, which should - * be in the right ballpark in most cases. The caller can tweak this by + * be in the right ballpark in most cases. The caller can tweak this by * specifying nonzero comparison_cost; typically that's used for any extra * work that has to be done to prepare the inputs to the comparison operators. * @@ -1218,7 +1218,7 @@ cost_sort(Path *path, PlannerInfo *root, * Determines and returns the cost of a MergeAppend node. * * MergeAppend merges several pre-sorted input streams, using a heap that - * at any given instant holds the next tuple from each stream. If there + * at any given instant holds the next tuple from each stream. 
If there * are N streams, we need about N*log2(N) tuple comparisons to construct * the heap at startup, and then for each output tuple, about log2(N) * comparisons to delete the top heap entry and another log2(N) comparisons @@ -2909,7 +2909,7 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo, List *nrclauses; nrclauses = select_nonredundant_join_clauses(root, - path->joinrestrictinfo, + path->joinrestrictinfo, path->innerjoinpath); *indexed_join_quals = (nrclauses == NIL); } @@ -3185,7 +3185,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel, /* * Compute per-output-column width estimates by examining the subquery's - * targetlist. For any output that is a plain Var, get the width estimate + * targetlist. For any output that is a plain Var, get the width estimate * that was made while planning the subquery. Otherwise, fall back on a * datatype-based estimate. */ @@ -3210,7 +3210,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel, if (IsA(texpr, Var) && subroot->parse->setOperations == NULL) { - Var *var = (Var *) texpr; + Var *var = (Var *) texpr; RelOptInfo *subrel = find_base_rel(subroot, var->varno); item_width = subrel->attr_widths[var->varattno - subrel->min_attr]; @@ -3332,7 +3332,7 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan) * of estimating baserestrictcost, so we set that, and we also set up width * using what will be purely datatype-driven estimates from the targetlist. * There is no way to do anything sane with the rows value, so we just put - * a default estimate and hope that the wrapper can improve on it. The + * a default estimate and hope that the wrapper can improve on it. The * wrapper's PlanForeignScan function will be called momentarily. * * The rel's targetlist and restrictinfo list must have been constructed @@ -3396,8 +3396,8 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel) ndx = var->varattno - rel->min_attr; /* - * If it's a whole-row Var, we'll deal with it below after we - * have already cached as many attr widths as possible. + * If it's a whole-row Var, we'll deal with it below after we have + * already cache |