Diffstat (limited to 'contrib')
-rw-r--r--  contrib/amcheck/verify_heapam.c        | 34
-rw-r--r--  contrib/basic_archive/basic_archive.c  |  4
-rw-r--r--  contrib/dblink/dblink.c                |  2
-rw-r--r--  contrib/ltree/ltree_gist.c             |  2
-rw-r--r--  contrib/ltree/ltree_io.c               |  6
-rw-r--r--  contrib/ltree/ltxtquery_io.c           |  6
-rw-r--r--  contrib/pg_walinspect/pg_walinspect.c  |  4
-rw-r--r--  contrib/postgres_fdw/connection.c      |  2
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.c    | 53
-rw-r--r--  contrib/postgres_fdw/shippable.c       |  4
-rw-r--r--  contrib/test_decoding/test_decoding.c  |  4
11 files changed, 60 insertions, 61 deletions
diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c
index 34d73ad442..97f3253522 100644
--- a/contrib/amcheck/verify_heapam.c
+++ b/contrib/amcheck/verify_heapam.c
@@ -407,7 +407,7 @@ verify_heapam(PG_FUNCTION_ARGS)
OffsetNumber successor[MaxOffsetNumber];
bool lp_valid[MaxOffsetNumber];
bool xmin_commit_status_ok[MaxOffsetNumber];
- XidCommitStatus xmin_commit_status[MaxOffsetNumber];
+ XidCommitStatus xmin_commit_status[MaxOffsetNumber];
CHECK_FOR_INTERRUPTS();
@@ -444,7 +444,7 @@ verify_heapam(PG_FUNCTION_ARGS)
for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff;
ctx.offnum = OffsetNumberNext(ctx.offnum))
{
- BlockNumber nextblkno;
+ BlockNumber nextblkno;
OffsetNumber nextoffnum;
successor[ctx.offnum] = InvalidOffsetNumber;
@@ -484,9 +484,9 @@ verify_heapam(PG_FUNCTION_ARGS)
/*
* Since we've checked that this redirect points to a line
- * pointer between FirstOffsetNumber and maxoff, it should
- * now be safe to fetch the referenced line pointer. We expect
- * it to be LP_NORMAL; if not, that's corruption.
+ * pointer between FirstOffsetNumber and maxoff, it should now
+ * be safe to fetch the referenced line pointer. We expect it
+ * to be LP_NORMAL; if not, that's corruption.
*/
rditem = PageGetItemId(ctx.page, rdoffnum);
if (!ItemIdIsUsed(rditem))
@@ -610,8 +610,8 @@ verify_heapam(PG_FUNCTION_ARGS)
{
/*
* We should not have set successor[ctx.offnum] to a value
- * other than InvalidOffsetNumber unless that line pointer
- * is LP_NORMAL.
+ * other than InvalidOffsetNumber unless that line pointer is
+ * LP_NORMAL.
*/
Assert(ItemIdIsNormal(next_lp));
@@ -642,8 +642,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
- * If the next line pointer is a redirect, or if it's a tuple
- * but the XMAX of this tuple doesn't match the XMIN of the next
+ * If the next line pointer is a redirect, or if it's a tuple but
+ * the XMAX of this tuple doesn't match the XMIN of the next
* tuple, then the two aren't part of the same update chain and
* there is nothing more to do.
*/
@@ -667,8 +667,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
- * This tuple and the tuple to which it points seem to be part
- * of an update chain.
+ * This tuple and the tuple to which it points seem to be part of
+ * an update chain.
*/
predecessor[nextoffnum] = ctx.offnum;
@@ -721,8 +721,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
- * If the current tuple's xmin is aborted but the successor tuple's
- * xmin is in-progress or committed, that's corruption.
+ * If the current tuple's xmin is aborted but the successor
+ * tuple's xmin is in-progress or committed, that's corruption.
*/
if (xmin_commit_status_ok[ctx.offnum] &&
xmin_commit_status[ctx.offnum] == XID_ABORTED &&
@@ -1025,7 +1025,7 @@ check_tuple_visibility(HeapCheckContext *ctx, bool *xmin_commit_status_ok,
HeapTupleHeader tuphdr = ctx->tuphdr;
ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */
- *xmin_commit_status_ok = false; /* have not yet proven otherwise */
+ *xmin_commit_status_ok = false; /* have not yet proven otherwise */
/* If xmin is normal, it should be within valid range */
xmin = HeapTupleHeaderGetXmin(tuphdr);
@@ -1837,7 +1837,7 @@ check_tuple(HeapCheckContext *ctx, bool *xmin_commit_status_ok,
* therefore cannot check it.
*/
if (!check_tuple_visibility(ctx, xmin_commit_status_ok,
- xmin_commit_status))
+ xmin_commit_status))
return;
/*
@@ -1897,8 +1897,8 @@ FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx)
diff = (int32) (ctx->next_xid - xid);
/*
- * In cases of corruption we might see a 32bit xid that is before epoch
- * 0. We can't represent that as a 64bit xid, due to 64bit xids being
+ * In cases of corruption we might see a 32bit xid that is before epoch 0.
+ * We can't represent that as a 64bit xid, due to 64bit xids being
* unsigned integers, without the modulo arithmetic of 32bit xid. There's
* no really nice way to deal with that, but it works ok enough to use
* FirstNormalFullTransactionId in that case, as a freshly initdb'd
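An aside on the hunk above: the arithmetic the reflowed comment describes is easier to see in isolation. The sketch below is illustrative only — the function name and the fallback value 3 are assumptions, not the verify_heapam implementation. It widens a 32-bit xid using its modulo-2^32 distance from the next expected xid, and falls back to a fixed minimum when the result would land before epoch 0, mirroring the FirstNormalFullTransactionId fallback the comment mentions.

    #include <stdint.h>

    /*
     * Hypothetical sketch: widen a 32-bit xid to 64 bits relative to the
     * next full transaction id.  "diff" is the wraparound-aware distance,
     * so a corrupt xid that logically precedes epoch 0 shows up as a
     * distance larger than next_fxid itself.
     */
    static uint64_t
    widen_xid_sketch(uint64_t next_fxid, uint32_t xid)
    {
        uint32_t next_xid = (uint32_t) next_fxid;    /* low 32 bits */
        int32_t  diff = (int32_t) (next_xid - xid);  /* modulo-2^32 distance */

        if (diff >= 0)
        {
            if ((uint64_t) diff > next_fxid)
                return 3;   /* stand-in for FirstNormalFullTransactionId */
            return next_fxid - (uint64_t) diff;
        }
        return next_fxid + (uint64_t) (-(int64_t) diff);
    }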
diff --git a/contrib/basic_archive/basic_archive.c b/contrib/basic_archive/basic_archive.c
index cd852888ce..4d78c31859 100644
--- a/contrib/basic_archive/basic_archive.c
+++ b/contrib/basic_archive/basic_archive.c
@@ -407,8 +407,8 @@ basic_archive_shutdown(ArchiveModuleState *state)
MemoryContext basic_archive_context;
/*
- * If we didn't get to storing the pointer to our allocated state, we don't
- * have anything to clean up.
+ * If we didn't get to storing the pointer to our allocated state, we
+ * don't have anything to clean up.
*/
if (data == NULL)
return;
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 55f75eff36..3a3e916f9e 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -1287,7 +1287,7 @@ dblink_get_connections(PG_FUNCTION_ARGS)
if (astate)
PG_RETURN_DATUM(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
else
PG_RETURN_NULL();
}
diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c
index 21b7d02028..932f69bff2 100644
--- a/contrib/ltree/ltree_gist.c
+++ b/contrib/ltree/ltree_gist.c
@@ -43,7 +43,7 @@ ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
ltree *left, ltree *right)
{
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
- (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
+ (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size);
SET_VARSIZE(result, size);
diff --git a/contrib/ltree/ltree_io.c b/contrib/ltree/ltree_io.c
index 5dce70bd1a..0a12c77a62 100644
--- a/contrib/ltree/ltree_io.c
+++ b/contrib/ltree/ltree_io.c
@@ -175,7 +175,7 @@ Datum
ltree_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- ltree *res;
+ ltree *res;
if ((res = parse_ltree(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
@@ -584,7 +584,7 @@ parse_lquery(const char *buf, struct Node *escontext)
*/
static bool
finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos,
- struct Node *escontext)
+ struct Node *escontext)
{
if (is_lquery)
{
@@ -745,7 +745,7 @@ Datum
lquery_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- lquery *res;
+ lquery *res;
if ((res = parse_lquery(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c
index 0d29e15630..121fc55e46 100644
--- a/contrib/ltree/ltxtquery_io.c
+++ b/contrib/ltree/ltxtquery_io.c
@@ -186,8 +186,8 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long")));
- if (! pushquery(state, type, ltree_crc32_sz(strval, lenval),
- state->curop - state->op, lenval, flag))
+ if (!pushquery(state, type, ltree_crc32_sz(strval, lenval),
+ state->curop - state->op, lenval, flag))
return false;
while (state->curop - state->op + lenval + 1 >= state->lenop)
@@ -408,7 +408,7 @@ PG_FUNCTION_INFO_V1(ltxtq_in);
Datum
ltxtq_in(PG_FUNCTION_ARGS)
{
- ltxtquery *res;
+ ltxtquery *res;
if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL)
PG_RETURN_NULL();
diff --git a/contrib/pg_walinspect/pg_walinspect.c b/contrib/pg_walinspect/pg_walinspect.c
index 1cd3744d5d..796a74f322 100644
--- a/contrib/pg_walinspect/pg_walinspect.c
+++ b/contrib/pg_walinspect/pg_walinspect.c
@@ -252,8 +252,8 @@ GetWALBlockInfo(FunctionCallInfo fcinfo, XLogReaderState *record,
int block_id;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
RmgrData desc;
- const char *record_type;
- StringInfoData rec_desc;
+ const char *record_type;
+ StringInfoData rec_desc;
Assert(XLogRecHasAnyBlockRefs(record));
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index da32d503bc..d918ba89e1 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -61,7 +61,7 @@ typedef struct ConnCacheEntry
bool have_error; /* have any subxacts aborted in this xact? */
bool changing_xact_state; /* xact state change in process */
bool parallel_commit; /* do we commit (sub)xacts in parallel? */
- bool parallel_abort; /* do we abort (sub)xacts in parallel? */
+ bool parallel_abort; /* do we abort (sub)xacts in parallel? */
bool invalidated; /* true if reconnect is pending */
bool keep_connections; /* setting value of keep_connections
* server option */
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 95dbe8b06c..428ea3810f 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -2024,9 +2024,8 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
/*
* Should never get called when the insert is being performed on a table
- * that is also among the target relations of an UPDATE operation,
- * because postgresBeginForeignInsert() currently rejects such insert
- * attempts.
+ * that is also among the target relations of an UPDATE operation, because
+ * postgresBeginForeignInsert() currently rejects such insert attempts.
*/
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
@@ -5167,15 +5166,15 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
*/
if (method != ANALYZE_SAMPLE_OFF)
{
- bool can_tablesample;
+ bool can_tablesample;
reltuples = postgresGetAnalyzeInfoForForeignTable(relation,
&can_tablesample);
/*
- * Make sure we're not choosing TABLESAMPLE when the remote relation does
- * not support that. But only do this for "auto" - if the user explicitly
- * requested BERNOULLI/SYSTEM, it's better to fail.
+ * Make sure we're not choosing TABLESAMPLE when the remote relation
+ * does not support that. But only do this for "auto" - if the user
+ * explicitly requested BERNOULLI/SYSTEM, it's better to fail.
*/
if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
method = ANALYZE_SAMPLE_RANDOM;
@@ -5189,35 +5188,35 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
else
{
/*
- * All supported sampling methods require sampling rate,
- * not target rows directly, so we calculate that using
- * the remote reltuples value. That's imperfect, because
- * it might be off a good deal, but that's not something
- * we can (or should) address here.
+ * All supported sampling methods require sampling rate, not
+ * target rows directly, so we calculate that using the remote
+ * reltuples value. That's imperfect, because it might be off a
+ * good deal, but that's not something we can (or should) address
+ * here.
*
- * If reltuples is too low (i.e. when table grew), we'll
- * end up sampling more rows - but then we'll apply the
- * local sampling, so we get the expected sample size.
- * This is the same outcome as without remote sampling.
+ * If reltuples is too low (i.e. when table grew), we'll end up
+ * sampling more rows - but then we'll apply the local sampling,
+ * so we get the expected sample size. This is the same outcome as
+ * without remote sampling.
*
- * If reltuples is too high (e.g. after bulk DELETE), we
- * will end up sampling too few rows.
+ * If reltuples is too high (e.g. after bulk DELETE), we will end
+ * up sampling too few rows.
*
- * We can't really do much better here - we could try
- * sampling a bit more rows, but we don't know how off
- * the reltuples value is so how much is "a bit more"?
+ * We can't really do much better here - we could try sampling a
+ * bit more rows, but we don't know how off the reltuples value is
+ * so how much is "a bit more"?
*
- * Furthermore, the targrows value for partitions is
- * determined based on table size (relpages), which can
- * be off in different ways too. Adjusting the sampling
- * rate here might make the issue worse.
+ * Furthermore, the targrows value for partitions is determined
+ * based on table size (relpages), which can be off in different
+ * ways too. Adjusting the sampling rate here might make the issue
+ * worse.
*/
sample_frac = targrows / reltuples;
/*
* We should never get sampling rate outside the valid range
- * (between 0.0 and 1.0), because those cases should be covered
- * by the previous branch that sets ANALYZE_SAMPLE_OFF.
+ * (between 0.0 and 1.0), because those cases should be covered by
+ * the previous branch that sets ANALYZE_SAMPLE_OFF.
*/
Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
}
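An aside on the hunk above: the sampling-rate arithmetic being re-wrapped there is simply targrows / reltuples. A self-contained numeric sketch follows; the figures are made up for illustration, not taken from the patch.

    #include <stdio.h>

    int
    main(void)
    {
        double reltuples = 1000000.0;  /* hypothetical remote reltuples estimate */
        double targrows = 30000.0;     /* hypothetical local ANALYZE sample target */
        double sample_frac = targrows / reltuples;

        /* prints 0.0300: ask the remote server for roughly a 3% sample */
        printf("sample_frac = %.4f\n", sample_frac);
        return 0;
    }

If reltuples is stale and too low, sample_frac comes out too high and the surplus rows are trimmed by local sampling; if it is too high, too few rows come back — exactly the imprecision the comment accepts.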
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index eb33d2a993..07c11b75e9 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -183,7 +183,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
/* See if we already cached the result. */
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
if (!entry)
{
@@ -196,7 +196,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
* cache invalidation.
*/
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
entry->shippable = shippable;
}
diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c
index 628c6a2595..12d1d0505d 100644
--- a/contrib/test_decoding/test_decoding.c
+++ b/contrib/test_decoding/test_decoding.c
@@ -288,7 +288,7 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;
@@ -348,7 +348,7 @@ pg_decode_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;