commit    962df08da11205a2436c7e802fa16cd47b8a790e (master-pgindent)
tree      0338f441a66bc0c76bb37b7dfb590dd04c27bbf4
parent    8bbd0cce92be98de9f4f727b8bf66fe26e5831ea
author    Automatic pgindent  2023-04-23 21:50:53 +0000
committer Automatic pgindent  2023-04-23 21:50:53 +0000

    Automatic pgindent
212 files changed, 1018 insertions, 998 deletions
diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c index 34d73ad442..97f3253522 100644 --- a/contrib/amcheck/verify_heapam.c +++ b/contrib/amcheck/verify_heapam.c @@ -407,7 +407,7 @@ verify_heapam(PG_FUNCTION_ARGS) OffsetNumber successor[MaxOffsetNumber]; bool lp_valid[MaxOffsetNumber]; bool xmin_commit_status_ok[MaxOffsetNumber]; - XidCommitStatus xmin_commit_status[MaxOffsetNumber]; + XidCommitStatus xmin_commit_status[MaxOffsetNumber]; CHECK_FOR_INTERRUPTS(); @@ -444,7 +444,7 @@ verify_heapam(PG_FUNCTION_ARGS) for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff; ctx.offnum = OffsetNumberNext(ctx.offnum)) { - BlockNumber nextblkno; + BlockNumber nextblkno; OffsetNumber nextoffnum; successor[ctx.offnum] = InvalidOffsetNumber; @@ -484,9 +484,9 @@ verify_heapam(PG_FUNCTION_ARGS) /* * Since we've checked that this redirect points to a line - * pointer between FirstOffsetNumber and maxoff, it should - * now be safe to fetch the referenced line pointer. We expect - * it to be LP_NORMAL; if not, that's corruption. + * pointer between FirstOffsetNumber and maxoff, it should now + * be safe to fetch the referenced line pointer. We expect it + * to be LP_NORMAL; if not, that's corruption. */ rditem = PageGetItemId(ctx.page, rdoffnum); if (!ItemIdIsUsed(rditem)) @@ -610,8 +610,8 @@ verify_heapam(PG_FUNCTION_ARGS) { /* * We should not have set successor[ctx.offnum] to a value - * other than InvalidOffsetNumber unless that line pointer - * is LP_NORMAL. + * other than InvalidOffsetNumber unless that line pointer is + * LP_NORMAL. */ Assert(ItemIdIsNormal(next_lp)); @@ -642,8 +642,8 @@ verify_heapam(PG_FUNCTION_ARGS) } /* - * If the next line pointer is a redirect, or if it's a tuple - * but the XMAX of this tuple doesn't match the XMIN of the next + * If the next line pointer is a redirect, or if it's a tuple but + * the XMAX of this tuple doesn't match the XMIN of the next * tuple, then the two aren't part of the same update chain and * there is nothing more to do. */ @@ -667,8 +667,8 @@ verify_heapam(PG_FUNCTION_ARGS) } /* - * This tuple and the tuple to which it points seem to be part - * of an update chain. + * This tuple and the tuple to which it points seem to be part of + * an update chain. */ predecessor[nextoffnum] = ctx.offnum; @@ -721,8 +721,8 @@ verify_heapam(PG_FUNCTION_ARGS) } /* - * If the current tuple's xmin is aborted but the successor tuple's - * xmin is in-progress or committed, that's corruption. + * If the current tuple's xmin is aborted but the successor + * tuple's xmin is in-progress or committed, that's corruption. */ if (xmin_commit_status_ok[ctx.offnum] && xmin_commit_status[ctx.offnum] == XID_ABORTED && @@ -1025,7 +1025,7 @@ check_tuple_visibility(HeapCheckContext *ctx, bool *xmin_commit_status_ok, HeapTupleHeader tuphdr = ctx->tuphdr; ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */ - *xmin_commit_status_ok = false; /* have not yet proven otherwise */ + *xmin_commit_status_ok = false; /* have not yet proven otherwise */ /* If xmin is normal, it should be within valid range */ xmin = HeapTupleHeaderGetXmin(tuphdr); @@ -1837,7 +1837,7 @@ check_tuple(HeapCheckContext *ctx, bool *xmin_commit_status_ok, * therefore cannot check it. 
*/ if (!check_tuple_visibility(ctx, xmin_commit_status_ok, - xmin_commit_status)) + xmin_commit_status)) return; /* @@ -1897,8 +1897,8 @@ FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx) diff = (int32) (ctx->next_xid - xid); /* - * In cases of corruption we might see a 32bit xid that is before epoch - * 0. We can't represent that as a 64bit xid, due to 64bit xids being + * In cases of corruption we might see a 32bit xid that is before epoch 0. + * We can't represent that as a 64bit xid, due to 64bit xids being * unsigned integers, without the modulo arithmetic of 32bit xid. There's * no really nice way to deal with that, but it works ok enough to use * FirstNormalFullTransactionId in that case, as a freshly initdb'd diff --git a/contrib/basic_archive/basic_archive.c b/contrib/basic_archive/basic_archive.c index cd852888ce..4d78c31859 100644 --- a/contrib/basic_archive/basic_archive.c +++ b/contrib/basic_archive/basic_archive.c @@ -407,8 +407,8 @@ basic_archive_shutdown(ArchiveModuleState *state) MemoryContext basic_archive_context; /* - * If we didn't get to storing the pointer to our allocated state, we don't - * have anything to clean up. + * If we didn't get to storing the pointer to our allocated state, we + * don't have anything to clean up. */ if (data == NULL) return; diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 55f75eff36..3a3e916f9e 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -1287,7 +1287,7 @@ dblink_get_connections(PG_FUNCTION_ARGS) if (astate) PG_RETURN_DATUM(makeArrayResult(astate, - CurrentMemoryContext)); + CurrentMemoryContext)); else PG_RETURN_NULL(); } diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c index 21b7d02028..932f69bff2 100644 --- a/contrib/ltree/ltree_gist.c +++ b/contrib/ltree/ltree_gist.c @@ -43,7 +43,7 @@ ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen, ltree *left, ltree *right) { int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) + - (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0); + (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0); ltree_gist *result = palloc(size); SET_VARSIZE(result, size); diff --git a/contrib/ltree/ltree_io.c b/contrib/ltree/ltree_io.c index 5dce70bd1a..0a12c77a62 100644 --- a/contrib/ltree/ltree_io.c +++ b/contrib/ltree/ltree_io.c @@ -175,7 +175,7 @@ Datum ltree_in(PG_FUNCTION_ARGS) { char *buf = (char *) PG_GETARG_POINTER(0); - ltree *res; + ltree *res; if ((res = parse_ltree(buf, fcinfo->context)) == NULL) PG_RETURN_NULL(); @@ -584,7 +584,7 @@ parse_lquery(const char *buf, struct Node *escontext) */ static bool finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos, - struct Node *escontext) + struct Node *escontext) { if (is_lquery) { @@ -745,7 +745,7 @@ Datum lquery_in(PG_FUNCTION_ARGS) { char *buf = (char *) PG_GETARG_POINTER(0); - lquery *res; + lquery *res; if ((res = parse_lquery(buf, fcinfo->context)) == NULL) PG_RETURN_NULL(); diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c index 0d29e15630..121fc55e46 100644 --- a/contrib/ltree/ltxtquery_io.c +++ b/contrib/ltree/ltxtquery_io.c @@ -186,8 +186,8 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("word is too long"))); - if (! 
pushquery(state, type, ltree_crc32_sz(strval, lenval), - state->curop - state->op, lenval, flag)) + if (!pushquery(state, type, ltree_crc32_sz(strval, lenval), + state->curop - state->op, lenval, flag)) return false; while (state->curop - state->op + lenval + 1 >= state->lenop) @@ -408,7 +408,7 @@ PG_FUNCTION_INFO_V1(ltxtq_in); Datum ltxtq_in(PG_FUNCTION_ARGS) { - ltxtquery *res; + ltxtquery *res; if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL) PG_RETURN_NULL(); diff --git a/contrib/pg_walinspect/pg_walinspect.c b/contrib/pg_walinspect/pg_walinspect.c index 1cd3744d5d..796a74f322 100644 --- a/contrib/pg_walinspect/pg_walinspect.c +++ b/contrib/pg_walinspect/pg_walinspect.c @@ -252,8 +252,8 @@ GetWALBlockInfo(FunctionCallInfo fcinfo, XLogReaderState *record, int block_id; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; RmgrData desc; - const char *record_type; - StringInfoData rec_desc; + const char *record_type; + StringInfoData rec_desc; Assert(XLogRecHasAnyBlockRefs(record)); diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index da32d503bc..d918ba89e1 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -61,7 +61,7 @@ typedef struct ConnCacheEntry bool have_error; /* have any subxacts aborted in this xact? */ bool changing_xact_state; /* xact state change in process */ bool parallel_commit; /* do we commit (sub)xacts in parallel? */ - bool parallel_abort; /* do we abort (sub)xacts in parallel? */ + bool parallel_abort; /* do we abort (sub)xacts in parallel? */ bool invalidated; /* true if reconnect is pending */ bool keep_connections; /* setting value of keep_connections * server option */ diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index 95dbe8b06c..428ea3810f 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -2024,9 +2024,8 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo) /* * Should never get called when the insert is being performed on a table - * that is also among the target relations of an UPDATE operation, - * because postgresBeginForeignInsert() currently rejects such insert - * attempts. + * that is also among the target relations of an UPDATE operation, because + * postgresBeginForeignInsert() currently rejects such insert attempts. */ Assert(fmstate == NULL || fmstate->aux_fmstate == NULL); @@ -5167,15 +5166,15 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel, */ if (method != ANALYZE_SAMPLE_OFF) { - bool can_tablesample; + bool can_tablesample; reltuples = postgresGetAnalyzeInfoForForeignTable(relation, &can_tablesample); /* - * Make sure we're not choosing TABLESAMPLE when the remote relation does - * not support that. But only do this for "auto" - if the user explicitly - * requested BERNOULLI/SYSTEM, it's better to fail. + * Make sure we're not choosing TABLESAMPLE when the remote relation + * does not support that. But only do this for "auto" - if the user + * explicitly requested BERNOULLI/SYSTEM, it's better to fail. */ if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO)) method = ANALYZE_SAMPLE_RANDOM; @@ -5189,35 +5188,35 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel, else { /* - * All supported sampling methods require sampling rate, - * not target rows directly, so we calculate that using - * the remote reltuples value. 
That's imperfect, because - * it might be off a good deal, but that's not something - * we can (or should) address here. + * All supported sampling methods require sampling rate, not + * target rows directly, so we calculate that using the remote + * reltuples value. That's imperfect, because it might be off a + * good deal, but that's not something we can (or should) address + * here. * - * If reltuples is too low (i.e. when table grew), we'll - * end up sampling more rows - but then we'll apply the - * local sampling, so we get the expected sample size. - * This is the same outcome as without remote sampling. + * If reltuples is too low (i.e. when table grew), we'll end up + * sampling more rows - but then we'll apply the local sampling, + * so we get the expected sample size. This is the same outcome as + * without remote sampling. * - * If reltuples is too high (e.g. after bulk DELETE), we - * will end up sampling too few rows. + * If reltuples is too high (e.g. after bulk DELETE), we will end + * up sampling too few rows. * - * We can't really do much better here - we could try - * sampling a bit more rows, but we don't know how off - * the reltuples value is so how much is "a bit more"? + * We can't really do much better here - we could try sampling a + * bit more rows, but we don't know how off the reltuples value is + * so how much is "a bit more"? * - * Furthermore, the targrows value for partitions is - * determined based on table size (relpages), which can - * be off in different ways too. Adjusting the sampling - * rate here might make the issue worse. + * Furthermore, the targrows value for partitions is determined + * based on table size (relpages), which can be off in different + * ways too. Adjusting the sampling rate here might make the issue + * worse. */ sample_frac = targrows / reltuples; /* * We should never get sampling rate outside the valid range - * (between 0.0 and 1.0), because those cases should be covered - * by the previous branch that sets ANALYZE_SAMPLE_OFF. + * (between 0.0 and 1.0), because those cases should be covered by + * the previous branch that sets ANALYZE_SAMPLE_OFF. */ Assert(sample_frac >= 0.0 && sample_frac <= 1.0); } diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c index eb33d2a993..07c11b75e9 100644 --- a/contrib/postgres_fdw/shippable.c +++ b/contrib/postgres_fdw/shippable.c @@ -183,7 +183,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo) /* See if we already cached the result. */ entry = (ShippableCacheEntry *) - hash_search(ShippableCacheHash, &key, HASH_FIND, NULL); + hash_search(ShippableCacheHash, &key, HASH_FIND, NULL); if (!entry) { @@ -196,7 +196,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo) * cache invalidation. 
*/ entry = (ShippableCacheEntry *) - hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL); + hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL); entry->shippable = shippable; } diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index 628c6a2595..12d1d0505d 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -288,7 +288,7 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { TestDecodingData *data = ctx->output_plugin_private; TestDecodingTxnData *txndata = - MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); + MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); txndata->xact_wrote_changes = false; txn->output_plugin_private = txndata; @@ -348,7 +348,7 @@ pg_decode_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { TestDecodingData *data = ctx->output_plugin_private; TestDecodingTxnData *txndata = - MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); + MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); txndata->xact_wrote_changes = false; txn->output_plugin_private = txndata; diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index 41bf950a4a..21d7c2d4e9 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -689,8 +689,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) } /* - * If we found a scan key eliminating the range, no need to - * check additional ones. + * If we found a scan key eliminating the range, no need + * to check additional ones. */ if (!addrange) break; @@ -1212,7 +1212,7 @@ brin_build_desc(Relation rel) * Obtain BrinOpcInfo for each indexed column. While at it, accumulate * the number of columns stored, since the number is opclass-defined. */ - opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts); + opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts); for (keyno = 0; keyno < tupdesc->natts; keyno++) { FmgrInfo *opcInfoFn; diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 90cb3951fc..11cc431677 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -1717,7 +1717,7 @@ allocateReloptStruct(Size base, relopt_value *options, int numoptions) if (optstr->fill_cb) { const char *val = optval->isset ? optval->values.string_val : - optstr->default_isnull ? NULL : optstr->default_val; + optstr->default_isnull ? 
NULL : optstr->default_val; size += optstr->fill_cb(val, NULL); } @@ -1796,8 +1796,8 @@ fillRelOptions(void *rdopts, Size basesize, if (optstring->fill_cb) { Size size = - optstring->fill_cb(string_val, - (char *) rdopts + offset); + optstring->fill_cb(string_val, + (char *) rdopts + offset); if (size) { diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index b5c1754e78..516465f8b7 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1117,7 +1117,7 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate, for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset)) { IndexTuple ituple = (IndexTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (downlink == NULL) downlink = CopyIndexTuple(ituple); diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c index 95cbed4337..1423b4b047 100644 --- a/src/backend/access/gist/gistbuildbuffers.c +++ b/src/backend/access/gist/gistbuildbuffers.c @@ -598,7 +598,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate, { GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc); GISTNodeBuffer *newNodeBuffer; - int i = foreach_current_index(lc); + int i = foreach_current_index(lc); /* Decompress parent index tuple of node buffer page. */ gistDeCompressAtt(giststate, r, diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 7382b0921d..e2c9b5f069 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -657,7 +657,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage @@ -694,7 +694,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index 9a86fb3fef..dcd302d3de 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -125,7 +125,7 @@ gistRedoPageUpdateRecord(XLogReaderState *record) if (data - begin < datalen) { OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber : - OffsetNumberNext(PageGetMaxOffsetNumber(page)); + OffsetNumberNext(PageGetMaxOffsetNumber(page)); while (data - begin < datalen) { diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index d850edd1d5..37646cc9a1 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -289,7 +289,8 @@ hashtext(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; const char *keydata = VARDATA_ANY(key); size_t keylen = VARSIZE_ANY_EXHDR(key); @@ -304,8 +305,8 @@ hashtext(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. 
*/ result = hash_any((uint8_t *) buf, bsize + 1); @@ -343,7 +344,8 @@ hashtextextended(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; const char *keydata = VARDATA_ANY(key); size_t keylen = VARSIZE_ANY_EXHDR(key); @@ -357,8 +359,8 @@ hashtextextended(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. */ result = hash_any_extended((uint8_t *) buf, bsize + 1, PG_GETARG_INT64(1)); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index b300a4675e..8e60fb74a0 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2491,7 +2491,7 @@ static inline bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) { const uint16 interesting = - HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; + HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; if ((new_infomask & interesting) != (old_infomask & interesting)) return true; diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index e2e35b71ea..e76fb1dbdd 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -334,8 +334,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot, * Note: heap_update returns the tid (location) of the new tuple in the * t_self field. * - * If the update is not HOT, we must update all indexes. If the update - * is HOT, it could be that we updated summarized columns, so we either + * If the update is not HOT, we must update all indexes. If the update is + * HOT, it could be that we updated summarized columns, so we either * update only summarized indexes, or none at all. */ if (result != TM_Ok) diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index fb95c19e90..c275b08494 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -376,7 +376,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, if (use_fsm && i >= not_in_fsm_pages) { Size freespace = BufferGetPageSize(victim_buffers[i]) - - SizeOfPageHeaderData; + SizeOfPageHeaderData; RecordPageWithFreeSpace(relation, curBlock, freespace); } diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 3f0342351f..ea75c5399b 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -533,7 +533,7 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer) if (!TransactionIdIsValid(prstate->old_snap_xmin)) { TransactionId horizon = - GlobalVisTestNonRemovableHorizon(prstate->vistest); + GlobalVisTestNonRemovableHorizon(prstate->vistest); TransactionIdLimitedForOldSnapshots(horizon, prstate->rel, &prstate->old_snap_xmin, diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 0a9ebd22bd..f232cad592 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -1809,12 +1809,12 @@ retry: { /* * We have no freeze plans to execute, so there's no added cost - * from following the freeze path. That's why it was chosen. - * This is important in the case where the page only contains - * totally frozen tuples at this point (perhaps only following - * pruning). 
Such pages can be marked all-frozen in the VM by our - * caller, even though none of its tuples were newly frozen here - * (note that the "no freeze" path never sets pages all-frozen). + * from following the freeze path. That's why it was chosen. This + * is important in the case where the page only contains totally + * frozen tuples at this point (perhaps only following pruning). + * Such pages can be marked all-frozen in the VM by our caller, + * even though none of its tuples were newly frozen here (note + * that the "no freeze" path never sets pages all-frozen). * * We never increment the frozen_pages instrumentation counter * here, since it only counts pages with newly frozen tuples @@ -3113,8 +3113,8 @@ dead_items_max_items(LVRelState *vacrel) { int64 max_items; int vac_work_mem = IsAutoVacuumWorkerProcess() && - autovacuum_work_mem != -1 ? - autovacuum_work_mem : maintenance_work_mem; + autovacuum_work_mem != -1 ? + autovacuum_work_mem : maintenance_work_mem; if (vacrel->nindexes > 0) { diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index ac91d1a14d..7d54ec9c0f 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -626,7 +626,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend) static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks) { - Buffer buf; + Buffer buf; buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL, EB_CREATE_FORK_IF_NEEDED | diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 41aa1c4ccd..6be8915229 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -2947,7 +2947,7 @@ void _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate) { IndexBulkDeleteResult *stats = vstate->stats; - Relation heaprel = vstate->info->heaprel; + Relation heaprel = vstate->info->heaprel; Assert(stats->pages_newly_deleted >= vstate->npendingpages); @@ -3027,7 +3027,7 @@ _bt_pendingfsm_add(BTVacState *vstate, if (vstate->npendingpages > 0) { FullTransactionId lastsafexid = - vstate->pendingpages[vstate->npendingpages - 1].safexid; + vstate->pendingpages[vstate->npendingpages - 1].safexid; Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid)); } diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c index 7d12e0ef91..3922120d64 100644 --- a/src/backend/access/rmgrdesc/dbasedesc.c +++ b/src/backend/access/rmgrdesc/dbasedesc.c @@ -27,7 +27,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record) if (info == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) rec; + (xl_dbase_create_file_copy_rec *) rec; appendStringInfo(buf, "copy dir %u/%u to %u/%u", xlrec->src_tablespace_id, xlrec->src_db_id, @@ -36,7 +36,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record) else if (info == XLOG_DBASE_CREATE_WAL_LOG) { xl_dbase_create_wal_log_rec *xlrec = - (xl_dbase_create_wal_log_rec *) rec; + (xl_dbase_create_wal_log_rec *) rec; appendStringInfo(buf, "create dir %u/%u", xlrec->tablespace_id, xlrec->db_id); diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index 9ef4981ad1..246a6a6b85 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -120,7 +120,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) else { ginxlogInsertDataInternal *insertData = - (ginxlogInsertDataInternal *) payload; + 
(ginxlogInsertDataInternal *) payload; appendStringInfo(buf, " pitem: %u-%u/%u", PostingItemGetBlockNumber(&insertData->newitem), @@ -156,7 +156,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) else { ginxlogVacuumDataLeafPage *xlrec = - (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL); + (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL); desc_recompress_leaf(buf, &xlrec->data); } diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index f323699165..cbfaf0c00a 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -115,7 +115,7 @@ spgAllocSearchItem(SpGistScanOpaque so, bool isnull, double *distances) { /* allocate distance array only for non-NULL items */ SpGistSearchItem *item = - palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys)); + palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys)); item->isNull = isnull; @@ -130,7 +130,7 @@ static void spgAddStartItem(SpGistScanOpaque so, bool isnull) { SpGistSearchItem *startEntry = - spgAllocSearchItem(so, isnull, so->zeroDistances); + spgAllocSearchItem(so, isnull, so->zeroDistances); ItemPointerSet(&startEntry->heapPtr, isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO, @@ -768,7 +768,7 @@ spgTestLeafTuple(SpGistScanOpaque so, storeRes_func storeRes) { SpGistLeafTuple leafTuple = (SpGistLeafTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (leafTuple->tupstate != SPGIST_LIVE) { @@ -896,7 +896,7 @@ redirect: else /* page is inner */ { SpGistInnerTuple innerTuple = (SpGistInnerTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (innerTuple->tupstate != SPGIST_LIVE) { @@ -974,7 +974,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr, else { IndexOrderByDistance *distances = - palloc(sizeof(distances[0]) * so->numberOfOrderBys); + palloc(sizeof(distances[0]) * so->numberOfOrderBys); int i; for (i = 0; i < so->numberOfOrderBys; i++) diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c index a5e6c92f35..771438c8ce 100644 --- a/src/backend/access/table/tableam.c +++ b/src/backend/access/table/tableam.c @@ -112,7 +112,7 @@ TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key) { uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; Oid relid = RelationGetRelid(relation); Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); @@ -176,7 +176,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan) { Snapshot snapshot; uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; Assert(RelationGetRelid(relation) == pscan->phs_relid); diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index fe6698d5ff..abb022e067 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -3270,7 +3270,7 @@ multixact_redo(XLogReaderState *record) else if (info == XLOG_MULTIXACT_CREATE_ID) { xl_multixact_create *xlrec = - (xl_multixact_create *) XLogRecGetData(record); + (xl_multixact_create *) XLogRecGetData(record); TransactionId max_xid; int i; diff --git a/src/backend/access/transam/parallel.c 
b/src/backend/access/transam/parallel.c index 7133ec0b22..2b8bc2f58d 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -375,8 +375,8 @@ InitializeParallelDSM(ParallelContext *pcxt) shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace); /* - * Serialize the transaction snapshot if the transaction - * isolation level uses a transaction snapshot. + * Serialize the transaction snapshot if the transaction isolation + * level uses a transaction snapshot. */ if (IsolationUsesXactSnapshot()) { @@ -1497,8 +1497,8 @@ ParallelWorkerMain(Datum main_arg) RestoreClientConnectionInfo(clientconninfospace); /* - * Initialize SystemUser now that MyClientConnectionInfo is restored. - * Also ensure that auth_method is actually valid, aka authn_id is not NULL. + * Initialize SystemUser now that MyClientConnectionInfo is restored. Also + * ensure that auth_method is actually valid, aka authn_id is not NULL. */ if (MyClientConnectionInfo.authn_id) InitializeSystemUser(MyClientConnectionInfo.authn_id, diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 6a837e1539..8daaa535ed 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -3152,10 +3152,9 @@ CommitTransactionCommand(void) break; /* - * The user issued a SAVEPOINT inside a transaction block. - * Start a subtransaction. (DefineSavepoint already did - * PushTransaction, so as to have someplace to put the SUBBEGIN - * state.) + * The user issued a SAVEPOINT inside a transaction block. Start a + * subtransaction. (DefineSavepoint already did PushTransaction, + * so as to have someplace to put the SUBBEGIN state.) */ case TBLOCK_SUBBEGIN: StartSubTransaction(); @@ -4696,9 +4695,9 @@ RollbackAndReleaseCurrentSubTransaction(void) s = CurrentTransactionState; /* changed by pop */ Assert(s->blockState == TBLOCK_SUBINPROGRESS || - s->blockState == TBLOCK_INPROGRESS || - s->blockState == TBLOCK_IMPLICIT_INPROGRESS || - s->blockState == TBLOCK_STARTED); + s->blockState == TBLOCK_INPROGRESS || + s->blockState == TBLOCK_IMPLICIT_INPROGRESS || + s->blockState == TBLOCK_STARTED); } /* diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 63481d826f..408467df12 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5460,8 +5460,8 @@ StartupXLOG(void) missingContrecPtr = endOfRecoveryInfo->missingContrecPtr; /* - * Reset ps status display, so as no information related to recovery - * shows up. + * Reset ps status display, so as no information related to recovery shows + * up. */ set_ps_display(""); @@ -5596,9 +5596,9 @@ StartupXLOG(void) if (!XLogRecPtrIsInvalid(missingContrecPtr)) { /* - * We should only have a missingContrecPtr if we're not switching to - * a new timeline. When a timeline switch occurs, WAL is copied from - * the old timeline to the new only up to the end of the last complete + * We should only have a missingContrecPtr if we're not switching to a + * new timeline. When a timeline switch occurs, WAL is copied from the + * old timeline to the new only up to the end of the last complete * record, so there can't be an incomplete WAL record that we need to * disregard. 
*/ @@ -8494,7 +8494,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, */ if (rllen > datadirpathlen && strncmp(linkpath, DataDir, datadirpathlen) == 0 && - IS_DIR_SEP(linkpath[datadirpathlen])) + IS_DIR_SEP(linkpath[datadirpathlen])) relpath = pstrdup(linkpath + datadirpathlen + 1); /* diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index ea7e2f67af..54247e1d81 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -897,8 +897,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, * * XLogReader machinery is only able to handle records up to a certain * size (ignoring machine resource limitations), so make sure that we will - * not emit records larger than the sizes advertised to be supported. - * This cap is based on DecodeXLogRecordRequiredSpace(). + * not emit records larger than the sizes advertised to be supported. This + * cap is based on DecodeXLogRecordRequiredSpace(). */ if (total_len >= XLogRecordMaxSize) ereport(ERROR, diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c index 906e3d9469..539928cb85 100644 --- a/src/backend/access/transam/xlogprefetcher.c +++ b/src/backend/access/transam/xlogprefetcher.c @@ -569,7 +569,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) if (record_type == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) record->main_data; + (xl_dbase_create_file_copy_rec *) record->main_data; RelFileLocator rlocator = {InvalidOid, xlrec->db_id, InvalidRelFileNumber}; @@ -596,7 +596,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) if (record_type == XLOG_SMGR_CREATE) { xl_smgr_create *xlrec = (xl_smgr_create *) - record->main_data; + record->main_data; if (xlrec->forkNum == MAIN_FORKNUM) { @@ -624,7 +624,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) else if (record_type == XLOG_SMGR_TRUNCATE) { xl_smgr_truncate *xlrec = (xl_smgr_truncate *) - record->main_data; + record->main_data; /* * Don't consider prefetching anything in the truncated diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index cadea21b37..6eee1abccd 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -282,7 +282,7 @@ XLogRecPtr XLogReleasePreviousRecord(XLogReaderState *state) { DecodedXLogRecord *record; - XLogRecPtr next_lsn; + XLogRecPtr next_lsn; if (!state->record) return InvalidXLogRecPtr; diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index 188f6d6f85..4883fcb512 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -3215,7 +3215,7 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *readBuf) { XLogPageReadPrivate *private = - (XLogPageReadPrivate *) xlogreader->private_data; + (XLogPageReadPrivate *) xlogreader->private_data; int emode = private->emode; uint32 targetPageOff; XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY; diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c index 5baea7535b..45be21131c 100644 --- a/src/backend/backup/basebackup.c +++ b/src/backend/backup/basebackup.c @@ -1609,10 +1609,10 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename, * * There's no guarantee that this will 
actually * happen, though: the torn write could take an - * arbitrarily long time to complete. Retrying multiple - * times wouldn't fix this problem, either, though - * it would reduce the chances of it happening in - * practice. The only real fix here seems to be to + * arbitrarily long time to complete. Retrying + * multiple times wouldn't fix this problem, either, + * though it would reduce the chances of it happening + * in practice. The only real fix here seems to be to * have some kind of interlock that allows us to wait * until we can be certain that no write to the block * is in progress. Since we don't have any such thing diff --git a/src/backend/backup/basebackup_copy.c b/src/backend/backup/basebackup_copy.c index 73a3f4a970..1db80cde1b 100644 --- a/src/backend/backup/basebackup_copy.c +++ b/src/backend/backup/basebackup_copy.c @@ -350,6 +350,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli) tupdesc = CreateTemplateTupleDesc(2); TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0); + /* * int8 may seem like a surprising data type for this, but in theory int4 * would not be wide enough for this, as TimeLineID is unsigned. @@ -360,7 +361,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli) tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual); /* Data row */ - values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr))); + values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr))); values[1] = Int64GetDatum(tli); do_tup_output(tstate, values, nulls); diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 45cdcd3dc6..bc2ad773c9 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -3389,8 +3389,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask, result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)); /* - * Check if ACL_MAINTAIN is being checked and, if so, and not already set as - * part of the result, then check if the user is a member of the + * Check if ACL_MAINTAIN is being checked and, if so, and not already set + * as part of the result, then check if the user is a member of the * pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH * MATERIALIZED VIEW, and REINDEX on all relations. */ diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c index feddff654e..522da0ac85 100644 --- a/src/backend/catalog/indexing.c +++ b/src/backend/catalog/indexing.c @@ -148,8 +148,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple, #endif /* USE_ASSERT_CHECKING */ /* - * Skip insertions into non-summarizing indexes if we only need - * to update summarizing indexes. + * Skip insertions into non-summarizing indexes if we only need to + * update summarizing indexes. 
*/ if (onlySummarized && !indexInfo->ii_Summarizing) continue; diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 14e57adee2..51d5ba669d 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -3838,7 +3838,7 @@ recomputeNamespacePath(void) if (OidIsValid(namespaceId) && !list_member_oid(oidlist, namespaceId) && object_aclcheck(NamespaceRelationId, namespaceId, roleid, - ACL_USAGE) == ACLCHECK_OK && + ACL_USAGE) == ACLCHECK_OK && InvokeNamespaceSearchHook(namespaceId, false)) oidlist = lappend_oid(oidlist, namespaceId); } @@ -3866,7 +3866,7 @@ recomputeNamespacePath(void) if (OidIsValid(namespaceId) && !list_member_oid(oidlist, namespaceId) && object_aclcheck(NamespaceRelationId, namespaceId, roleid, - ACL_USAGE) == ACLCHECK_OK && + ACL_USAGE) == ACLCHECK_OK && InvokeNamespaceSearchHook(namespaceId, false)) oidlist = lappend_oid(oidlist, namespaceId); } @@ -4002,7 +4002,7 @@ InitTempTableNamespace(void) * temp table creation request is made by someone with appropriate rights. */ if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CREATE_TEMP) != ACLCHECK_OK) + ACL_CREATE_TEMP) != ACLCHECK_OK) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to create temporary tables in database \"%s\"", diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index 792b0ef414..95918a77a1 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -625,7 +625,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId, /* not in catalogs, different from operator, so make shell */ aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(otherNamespace)); diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 64d326f073..91c7f3426f 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -1414,6 +1414,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior) /* FALLTHROUGH */ case SHARED_DEPENDENCY_OWNER: + /* * Save it for deletion below, if it's a local object or a * role grant. 
Other shared objects, such as databases, diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 10f28f94bc..e95dc31bde 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -231,7 +231,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) if (OidIsValid(namespaceId)) { aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -1035,7 +1035,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) AclResult aclresult; aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c index c91fe66d9b..2969a2bb21 100644 --- a/src/backend/commands/collationcmds.c +++ b/src/backend/commands/collationcmds.c @@ -270,8 +270,8 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e */ if (!IsBinaryUpgrade) { - char *langtag = icu_language_tag(colliculocale, - icu_validation_level); + char *langtag = icu_language_tag(colliculocale, + icu_validation_level); if (langtag && strcmp(colliculocale, langtag) != 0) { @@ -476,17 +476,18 @@ AlterCollation(AlterCollationStmt *stmt) Datum pg_collation_actual_version(PG_FUNCTION_ARGS) { - Oid collid = PG_GETARG_OID(0); - char provider; - char *locale; - char *version; - Datum datum; + Oid collid = PG_GETARG_OID(0); + char provider; + char *locale; + char *version; + Datum datum; if (collid == DEFAULT_COLLATION_OID) { /* retrieve from pg_database */ HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId)); + if (!HeapTupleIsValid(dbtup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -506,7 +507,8 @@ pg_collation_actual_version(PG_FUNCTION_ARGS) { /* retrieve from pg_collation */ - HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); + HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); + if (!HeapTupleIsValid(colltp)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -657,11 +659,10 @@ create_collation_from_locale(const char *locale, int nspid, Oid collid; /* - * Some systems have locale names that don't consist entirely of - * ASCII letters (such as "bokmål" or "français"). - * This is pretty silly, since we need the locale itself to - * interpret the non-ASCII characters. We can't do much with - * those, so we filter them out. + * Some systems have locale names that don't consist entirely of ASCII + * letters (such as "bokmål" or "français"). This is pretty + * silly, since we need the locale itself to interpret the non-ASCII + * characters. We can't do much with those, so we filter them out. */ if (!pg_is_ascii(locale)) { @@ -681,19 +682,18 @@ create_collation_from_locale(const char *locale, int nspid, return -1; } if (enc == PG_SQL_ASCII) - return -1; /* C/POSIX are already in the catalog */ + return -1; /* C/POSIX are already in the catalog */ /* count valid locales found in operating system */ (*nvalidp)++; /* - * Create a collation named the same as the locale, but quietly - * doing nothing if it already exists. This is the behavior we - * need even at initdb time, because some versions of "locale -a" - * can report the same locale name more than once. 
And it's - * convenient for later import runs, too, since you just about - * always want to add on new locales without a lot of chatter - * about existing ones. + * Create a collation named the same as the locale, but quietly doing + * nothing if it already exists. This is the behavior we need even at + * initdb time, because some versions of "locale -a" can report the same + * locale name more than once. And it's convenient for later import runs, + * too, since you just about always want to add on new locales without a + * lot of chatter about existing ones. */ collid = CollationCreate(locale, nspid, GetUserId(), COLLPROVIDER_LIBC, true, enc, @@ -995,8 +995,8 @@ pg_import_system_collations(PG_FUNCTION_ARGS) param.nvalidp = &nvalid; /* - * Enumerate the locales that are either installed on or supported - * by the OS. + * Enumerate the locales that are either installed on or supported by + * the OS. */ if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL, (LPARAM) ¶m, NULL)) diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 2e242eeff2..99d4080ea9 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -259,7 +259,7 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath) List *rlocatorlist = NIL; LockRelId relid; Snapshot snapshot; - SMgrRelation smgr; + SMgrRelation smgr; BufferAccessStrategy bstrategy; /* Get pg_class relfilenumber. */ @@ -1065,8 +1065,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) */ if (!IsBinaryUpgrade && dbiculocale != src_iculocale) { - char *langtag = icu_language_tag(dbiculocale, - icu_validation_level); + char *langtag = icu_language_tag(dbiculocale, + icu_validation_level); if (langtag && strcmp(dbiculocale, langtag) != 0) { @@ -1219,7 +1219,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) dst_deftablespace = get_tablespace_oid(tablespacename, false); /* check permissions */ aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, tablespacename); @@ -1406,8 +1406,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) * If we're going to be reading data for the to-be-created database into * shared_buffers, take a lock on it. Nobody should know that this * database exists yet, but it's good to maintain the invariant that an - * AccessExclusiveLock on the database is sufficient to drop all - * of its buffers without worrying about more being read later. + * AccessExclusiveLock on the database is sufficient to drop all of its + * buffers without worrying about more being read later. 
* * Note that we need to do this before entering the * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback @@ -1933,7 +1933,7 @@ movedb(const char *dbname, const char *tblspcname) * Permission checks */ aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, tblspcname); @@ -3110,7 +3110,7 @@ dbase_redo(XLogReaderState *record) if (info == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) XLogRecGetData(record); + (xl_dbase_create_file_copy_rec *) XLogRecGetData(record); char *src_path; char *dst_path; char *parent_path; @@ -3182,7 +3182,7 @@ dbase_redo(XLogReaderState *record) else if (info == XLOG_DBASE_CREATE_WAL_LOG) { xl_dbase_create_wal_log_rec *xlrec = - (xl_dbase_create_wal_log_rec *) XLogRecGetData(record); + (xl_dbase_create_wal_log_rec *) XLogRecGetData(record); char *dbpath; char *parent_path; diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c index 82bda15889..469a6c2ee9 100644 --- a/src/backend/commands/dropcmds.c +++ b/src/backend/commands/dropcmds.c @@ -493,6 +493,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) case OBJECT_TABLE: case OBJECT_TABLESPACE: case OBJECT_VIEW: + /* * These are handled elsewhere, so if someone gets here the code * is probably wrong or should be revisited. diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 5334c503e1..15f9bddcdf 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -1523,7 +1523,7 @@ ExplainNode(PlanState *planstate, List *ancestors, { BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan; const char *indexname = - explain_get_index_name(bitmapindexscan->indexid); + explain_get_index_name(bitmapindexscan->indexid); if (es->format == EXPLAIN_FORMAT_TEXT) appendStringInfo(es->str, " on %s", @@ -3008,7 +3008,7 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate, for (n = 0; n < incrsortstate->shared_info->num_workers; n++) { IncrementalSortInfo *incsort_info = - &incrsortstate->shared_info->sinfo[n]; + &incrsortstate->shared_info->sinfo[n]; /* * If a worker hasn't processed any sort groups at all, then @@ -4212,7 +4212,7 @@ ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es) { ListCell *cell; const char *label = - (list_length(css->custom_ps) != 1 ? "children" : "child"); + (list_length(css->custom_ps) != 1 ? 
"children" : "child"); foreach(cell, css->custom_ps) ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es); diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 69f66dfe7d..127a3a590c 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -151,7 +151,7 @@ compute_return_type(TypeName *returnType, Oid languageOid, namespaceId = QualifiedNameGetCreationNamespace(returnType->names, &typname); aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -2117,7 +2117,7 @@ ExecuteDoStmt(ParseState *pstate, DoStmt *stmt, bool atomic) AclResult aclresult; aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_LANGUAGE, NameStr(languageStruct->lanname)); diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index e6ee99e51f..a5168c9f09 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -748,7 +748,7 @@ DefineIndex(Oid relationId, AclResult aclresult; aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -780,7 +780,7 @@ DefineIndex(Oid relationId, AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(tablespaceId)); @@ -2708,7 +2708,7 @@ ExecReindex(ParseState *pstate, ReindexStmt *stmt, bool isTopLevel) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(params.tablespaceOid)); @@ -3066,11 +3066,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, /* * The table can be reindexed if the user has been granted MAINTAIN on * the table or one of its partition ancestors or the user is a - * superuser, the table owner, or the database/schema owner (but in the - * latter case, only if it's not a shared relation). pg_class_aclcheck - * includes the superuser case, and depending on objectKind we already - * know that the user has permission to run REINDEX on this database or - * schema per the permission checks at the beginning of this routine. + * superuser, the table owner, or the database/schema owner (but in + * the latter case, only if it's not a shared relation). + * pg_class_aclcheck includes the superuser case, and depending on + * objectKind we already know that the user has permission to run + * REINDEX on this database or schema per the permission checks at the + * beginning of this routine. 
*/ if (classtuple->relisshared && pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK && @@ -3312,7 +3313,7 @@ ReindexMultipleInternal(List *relids, ReindexParams *params) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(params->tablespaceOid)); diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index 90de935267..db347a64cf 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -382,7 +382,7 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId) * no special case for them. */ aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index 56eafbff10..bc7e0cbddf 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -604,9 +604,9 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)"); /* - * We don't want to allow unprivileged users to be able to trigger attempts - * to access arbitrary network destinations, so require the user to have - * been specifically authorized to create subscriptions. + * We don't want to allow unprivileged users to be able to trigger + * attempts to access arbitrary network destinations, so require the user + * to have been specifically authorized to create subscriptions. */ if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION)) ereport(ERROR, @@ -629,10 +629,10 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, * exempt a subscription from this requirement. */ if (!opts.passwordrequired && !superuser_arg(owner)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* * If built with appropriate switch, whine when regression-testing @@ -1111,8 +1111,8 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, if (!sub->passwordrequired && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* Lock the subscription so nobody else can do anything with it. 
*/ LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock); @@ -1825,8 +1825,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) if (!form->subpasswordrequired && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* Must be able to become new owner */ check_can_set_role(GetUserId(), newOwnerId); @@ -1835,8 +1835,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) * current owner must have CREATE on database * * This is consistent with how ALTER SCHEMA ... OWNER TO works, but some - * other object types behave differently (e.g. you can't give a table to - * a user who lacks CREATE privileges on a schema). + * other object types behave differently (e.g. you can't give a table to a + * user who lacks CREATE privileges on a schema). */ aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), ACL_CREATE); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 343fe61115..750b0332da 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -806,7 +806,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(tablespaceId)); @@ -1931,7 +1931,7 @@ ExecuteTruncateGuts(List *explicit_rels, resultRelInfo = resultRelInfos; foreach(cell, rels) { - UserContext ucxt; + UserContext ucxt; if (run_as_table_owner) SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner, @@ -2143,7 +2143,7 @@ ExecuteTruncateGuts(List *explicit_rels, resultRelInfo = resultRelInfos; foreach(cell, rels) { - UserContext ucxt; + UserContext ucxt; if (run_as_table_owner) SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner, @@ -2635,7 +2635,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, if (CompressionMethodIsValid(attribute->attcompression)) { const char *compression = - GetCompressionMethodName(attribute->attcompression); + GetCompressionMethodName(attribute->attcompression); if (def->compression == NULL) def->compression = pstrdup(compression); @@ -13947,7 +13947,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock /* New owner must have CREATE privilege on namespace */ aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceOid)); @@ -14377,7 +14377,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(view_query, true); + view_query_is_auto_updatable(view_query, true); if (view_updatable_error) ereport(ERROR, @@ -14656,7 +14656,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != 
ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(new_tablespaceoid)); @@ -17134,7 +17134,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, if (IsA(stmt, RenameStmt)) { aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(classform->relnamespace)); diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 3dfbf6a917..13b0dee146 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -1278,7 +1278,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source) /* Check permissions, similarly complaining only if interactive */ aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) { if (source >= PGC_S_INTERACTIVE) @@ -1408,7 +1408,7 @@ PrepareTempTablespaces(void) /* Check permissions similarly */ aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) continue; diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 3440dbc440..216482095d 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -734,7 +734,7 @@ DefineDomain(CreateDomainStmt *stmt) /* Check we have creation rights in target namespace */ aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(domainNamespace)); @@ -3743,8 +3743,8 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype) /* New owner must have CREATE privilege on namespace */ aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace, - newOwnerId, - ACL_CREATE); + newOwnerId, + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(typTup->typnamespace)); diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 707114bdd0..d63d3c58ca 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -86,7 +86,7 @@ typedef struct int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256; char *createrole_self_grant = ""; bool createrole_self_grant_enabled = false; -GrantRoleOptions createrole_self_grant_options; +GrantRoleOptions createrole_self_grant_options; /* Hook to check passwords in CreateRole() and AlterRole() */ check_password_hook_type check_password_hook = NULL; @@ -169,7 +169,7 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) DefElem *dadminmembers = NULL; DefElem *dvalidUntil = NULL; DefElem *dbypassRLS = NULL; - GrantRoleOptions popt; + GrantRoleOptions popt; /* The defaults can vary depending on the original statement type */ switch (stmt->stmt_type) @@ -535,8 +535,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) * * The grantor of record for this implicit grant is the bootstrap * superuser, which means that the CREATEROLE user cannot revoke the - * grant. They can however grant the created role back to themselves - * with different options, since they enjoy ADMIN OPTION on it. + * grant. They can however grant the created role back to themselves with + * different options, since they enjoy ADMIN OPTION on it. 
*/ if (!superuser()) { @@ -561,8 +561,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) BOOTSTRAP_SUPERUSERID, &poptself); /* - * We must make the implicit grant visible to the code below, else - * the additional grants will fail. + * We must make the implicit grant visible to the code below, else the + * additional grants will fail. */ CommandCounterIncrement(); @@ -585,8 +585,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) * Add the specified members to this new role. adminmembers get the admin * option, rolemembers don't. * - * NB: No permissions check is required here. If you have enough rights - * to create a role, you can add any members you like. + * NB: No permissions check is required here. If you have enough rights to + * create a role, you can add any members you like. */ AddRoleMems(currentUserId, stmt->role, roleid, rolemembers, roleSpecsToIds(rolemembers), @@ -647,7 +647,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt) DefElem *dbypassRLS = NULL; Oid roleid; Oid currentUserId = GetUserId(); - GrantRoleOptions popt; + GrantRoleOptions popt; check_rolespec_name(stmt->role, _("Cannot alter reserved roles.")); @@ -862,7 +862,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt) */ if (dissuper) { - bool should_be_super = boolVal(dissuper->arg); + bool should_be_super = boolVal(dissuper->arg); if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID) ereport(ERROR, @@ -1021,9 +1021,9 @@ AlterRoleSet(AlterRoleSetStmt *stmt) shdepLockAndCheckObject(AuthIdRelationId, roleid); /* - * To mess with a superuser you gotta be superuser; otherwise you - * need CREATEROLE plus admin option on the target role; unless you're - * just trying to change your own settings + * To mess with a superuser you gotta be superuser; otherwise you need + * CREATEROLE plus admin option on the target role; unless you're just + * trying to change your own settings */ if (roleform->rolsuper) { @@ -1037,7 +1037,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt) else { if ((!have_createrole_privilege() || - !is_admin_of_role(GetUserId(), roleid)) + !is_admin_of_role(GetUserId(), roleid)) && roleid != GetUserId()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), @@ -1490,14 +1490,14 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt) Oid grantor; List *grantee_ids; ListCell *item; - GrantRoleOptions popt; + GrantRoleOptions popt; Oid currentUserId = GetUserId(); /* Parse options list. */ InitGrantRoleOptions(&popt); foreach(item, stmt->opt) { - DefElem *opt = (DefElem *) lfirst(item); + DefElem *opt = (DefElem *) lfirst(item); char *optval = defGetString(opt); if (strcmp(opt->defname, "admin") == 0) @@ -1546,8 +1546,8 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt) /* * Step through all of the granted roles and add, update, or remove * entries in pg_auth_members as appropriate. If stmt->is_grant is true, - * we are adding new grants or, if they already exist, updating options - * on those grants. If stmt->is_grant is false, we are revoking grants or + * we are adding new grants or, if they already exist, updating options on + * those grants. If stmt->is_grant is false, we are revoking grants or * removing options from them. */ foreach(item, stmt->granted_roles) @@ -1848,8 +1848,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, ObjectIdGetDatum(grantorId)); /* - * If we found a tuple, update it with new option values, unless - * there are no changes, in which case issue a WARNING. 
+ * If we found a tuple, update it with new option values, unless there + * are no changes, in which case issue a WARNING. * * If we didn't find a tuple, just insert one. */ @@ -1932,8 +1932,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, popt->inherit; else { - HeapTuple mrtup; - Form_pg_authid mrform; + HeapTuple mrtup; + Form_pg_authid mrform; mrtup = SearchSysCache1(AUTHOID, memberid); if (!HeapTupleIsValid(mrtup)) @@ -2332,8 +2332,8 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, /* * If popt.specified == 0, we're revoking the grant entirely; otherwise, * we expect just one bit to be set, and we're revoking the corresponding - * option. As of this writing, there's no syntax that would allow for - * an attempt to revoke multiple options at once, and the logic below + * option. As of this writing, there's no syntax that would allow for an + * attempt to revoke multiple options at once, and the logic below * wouldn't work properly if such syntax were added, so assert that our * caller isn't trying to do that. */ @@ -2365,7 +2365,7 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, } else { - bool revoke_admin_option_only; + bool revoke_admin_option_only; /* * Revoking the grant entirely, or ADMIN option on a grant, @@ -2572,7 +2572,7 @@ check_createrole_self_grant(char **newval, void **extra, GucSource source) void assign_createrole_self_grant(const char *newval, void *extra) { - unsigned options = * (unsigned *) extra; + unsigned options = *(unsigned *) extra; createrole_self_grant_enabled = (options != 0); createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index ff98c773f5..9bd77546b9 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -437,7 +437,7 @@ DefineView(ViewStmt *stmt, const char *queryString, if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(viewParse, true); + view_query_is_auto_updatable(viewParse, true); if (view_updatable_error) ereport(ERROR, diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c index dcf56446c7..5e52d5ece2 100644 --- a/src/backend/executor/execExpr.c +++ b/src/backend/executor/execExpr.c @@ -1214,8 +1214,8 @@ ExecInitExprRec(Expr *node, ExprState *state, /* Check permission to call function */ aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid, - GetUserId(), - ACL_EXECUTE); + GetUserId(), + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(cmpfuncid)); @@ -1224,8 +1224,8 @@ ExecInitExprRec(Expr *node, ExprState *state, if (OidIsValid(opexpr->hashfuncid)) { aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid, - GetUserId(), - ACL_EXECUTE); + GetUserId(), + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(opexpr->hashfuncid)); @@ -3602,7 +3602,7 @@ ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase, * column sorted on. 
*/ TargetEntry *source_tle = - (TargetEntry *) linitial(pertrans->aggref->args); + (TargetEntry *) linitial(pertrans->aggref->args); Assert(list_length(pertrans->aggref->args) == 1); diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index 4cd46f1717..7561e64dfc 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1647,7 +1647,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerGroup pergroup_allaggs = - aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff]; + aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff]; if (pergroup_allaggs == NULL) EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull); @@ -1672,7 +1672,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1700,7 +1700,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1718,7 +1718,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1735,7 +1735,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -1756,7 +1756,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -1773,7 +1773,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c index da28e5e40c..1d82b64b89 100644 --- a/src/backend/executor/execIndexing.c +++ b/src/backend/executor/execIndexing.c @@ -354,8 +354,8 @@ 
ExecInsertIndexTuples(ResultRelInfo *resultRelInfo, continue; /* - * Skip processing of non-summarizing indexes if we only - * update summarizing indexes + * Skip processing of non-summarizing indexes if we only update + * summarizing indexes */ if (onlySummarizing && !indexInfo->ii_Summarizing) continue; diff --git a/src/backend/executor/execSRF.c b/src/backend/executor/execSRF.c index d09a7758dc..73bf9152a4 100644 --- a/src/backend/executor/execSRF.c +++ b/src/backend/executor/execSRF.c @@ -260,7 +260,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, if (first_time) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); rsinfo.setResult = tupstore; @@ -290,7 +290,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, if (tupdesc == NULL) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); /* * This is the first non-NULL result from the @@ -395,7 +395,7 @@ no_function_result: if (rsinfo.setResult == NULL) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); rsinfo.setResult = tupstore; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 3aab5a0e80..f3a522571a 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -3692,7 +3692,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) /* Check permission to call aggregate function */ aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(), - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_AGGREGATE, get_func_name(aggref->aggfnoid)); @@ -3759,7 +3759,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(finalfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(finalfn_oid)); @@ -3768,7 +3768,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(serialfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(serialfn_oid)); @@ -3777,7 +3777,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(deserialfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(deserialfn_oid)); diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 5fd1c5553b..ac3eb32d97 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -1327,7 +1327,7 @@ ExecParallelHashRepartitionFirst(HashJoinTable hashtable) else { size_t tuple_size = - MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); + MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); /* It belongs in a later batch. 
*/ hashtable->batches[batchno].estimated_size += tuple_size; @@ -1369,7 +1369,7 @@ ExecParallelHashRepartitionRest(HashJoinTable hashtable) for (i = 1; i < old_nbatch; ++i) { ParallelHashJoinBatch *shared = - NthParallelHashJoinBatch(old_batches, i); + NthParallelHashJoinBatch(old_batches, i); old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared), ParallelWorkerNumber + 1, @@ -3317,7 +3317,7 @@ ExecHashTableDetachBatch(HashJoinTable hashtable) while (DsaPointerIsValid(batch->chunks)) { HashMemoryChunk chunk = - dsa_get_address(hashtable->area, batch->chunks); + dsa_get_address(hashtable->area, batch->chunks); dsa_pointer next = chunk->next.shared; dsa_free(hashtable->area, batch->chunks); diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index 0a3f32f731..b29a8ff48b 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -1170,7 +1170,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate) { SharedTuplestoreAccessor *inner_tuples; Barrier *batch_barrier = - &hashtable->batches[batchno].shared->batch_barrier; + &hashtable->batches[batchno].shared->batch_barrier; switch (BarrierAttach(batch_barrier)) { @@ -1558,7 +1558,7 @@ ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt) { int plan_node_id = state->js.ps.plan->plan_node_id; ParallelHashJoinState *pstate = - shm_toc_lookup(pcxt->toc, plan_node_id, false); + shm_toc_lookup(pcxt->toc, plan_node_id, false); /* * It would be possible to reuse the shared hash table in single-batch @@ -1593,7 +1593,7 @@ ExecHashJoinInitializeWorker(HashJoinState *state, HashState *hashNode; int plan_node_id = state->js.ps.plan->plan_node_id; ParallelHashJoinState *pstate = - shm_toc_lookup(pwcxt->toc, plan_node_id, false); + shm_toc_lookup(pwcxt->toc, plan_node_id, false); /* Attach to the space for shared temporary files. */ SharedFileSetAttach(&pstate->fileset, pwcxt->seg); diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c index 12bc22f33c..0994b2c113 100644 --- a/src/backend/executor/nodeIncrementalSort.c +++ b/src/backend/executor/nodeIncrementalSort.c @@ -1007,9 +1007,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags) if (incrsortstate->ss.ps.instrument != NULL) { IncrementalSortGroupInfo *fullsortGroupInfo = - &incrsortstate->incsort_info.fullsortGroupInfo; + &incrsortstate->incsort_info.fullsortGroupInfo; IncrementalSortGroupInfo *prefixsortGroupInfo = - &incrsortstate->incsort_info.prefixsortGroupInfo; + &incrsortstate->incsort_info.prefixsortGroupInfo; fullsortGroupInfo->groupCount = 0; fullsortGroupInfo->maxDiskSpaceUsed = 0; diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 6aa8c03def..a4b53b0474 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -111,7 +111,7 @@ typedef struct UpdateContext { bool updated; /* did UPDATE actually occur? */ bool crossPartUpdate; /* was it a cross-partition update? */ - TU_UpdateIndexes updateIndexes; /* Which index updates are required? */ + TU_UpdateIndexes updateIndexes; /* Which index updates are required? 
*/ /* * Lock mode to acquire on the latest tuple version before performing @@ -882,7 +882,7 @@ ExecInsert(ModifyTableContext *context, { TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor); TupleDesc plan_tdesc = - CreateTupleDescCopy(planSlot->tts_tupleDescriptor); + CreateTupleDescCopy(planSlot->tts_tupleDescriptor); resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] = MakeSingleTupleTableSlot(tdesc, slot->tts_ops); diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c index 0c6c912778..791cbd2372 100644 --- a/src/backend/executor/nodeTableFuncscan.c +++ b/src/backend/executor/nodeTableFuncscan.c @@ -352,7 +352,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc) int colno; Datum value; int ordinalitycol = - ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; + ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; /* * Install the document as a possibly-toasted Datum into the tablefunc diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 3ac581a711..8bf15e7236 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -2580,7 +2580,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) /* Check permission to call window function */ aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(), - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(wfunc->winfnoid)); @@ -2819,7 +2819,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (!OidIsValid(aggform->aggminvtransfn)) use_ma_code = false; /* sine qua non */ else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY && - aggform->aggfinalmodify != AGGMODIFY_READ_ONLY) + aggform->aggfinalmodify != AGGMODIFY_READ_ONLY) use_ma_code = true; /* decision forced by safety */ else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) use_ma_code = false; /* non-moving frame head */ @@ -2869,7 +2869,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, ReleaseSysCache(procTuple); aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(transfn_oid)); @@ -2878,7 +2878,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (OidIsValid(invtransfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(invtransfn_oid)); @@ -2888,7 +2888,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (OidIsValid(finalfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(finalfn_oid)); diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 256632c985..33975687b3 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -3345,7 +3345,7 @@ SPI_register_trigger_data(TriggerData *tdata) if (tdata->tg_newtable) { EphemeralNamedRelation enr = - palloc(sizeof(EphemeralNamedRelationData)); + palloc(sizeof(EphemeralNamedRelationData)); int rc; enr->md.name = tdata->tg_trigger->tgnewtable; @@ -3362,7 +3362,7 @@ SPI_register_trigger_data(TriggerData 
*tdata) if (tdata->tg_oldtable) { EphemeralNamedRelation enr = - palloc(sizeof(EphemeralNamedRelationData)); + palloc(sizeof(EphemeralNamedRelationData)); int rc; enr->md.name = tdata->tg_trigger->tgoldtable; diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c index a8b73a9cf1..812a86d62c 100644 --- a/src/backend/jit/llvm/llvmjit.c +++ b/src/backend/jit/llvm/llvmjit.c @@ -52,7 +52,7 @@ typedef struct LLVMJitHandle LLVMOrcJITStackRef stack; LLVMOrcModuleHandle orc_handle; #endif -} LLVMJitHandle; +} LLVMJitHandle; /* types & functions commonly needed for JITing */ @@ -110,8 +110,8 @@ static LLVMOrcJITStackRef llvm_opt3_orc; static void llvm_release_context(JitContext *context); static void llvm_session_initialize(void); static void llvm_shutdown(int code, Datum arg); -static void llvm_compile_module(LLVMJitContext *context); -static void llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module); +static void llvm_compile_module(LLVMJitContext * context); +static void llvm_optimize_module(LLVMJitContext * context, LLVMModuleRef module); static void llvm_create_types(void); static uint64_t llvm_resolve_symbol(const char *name, void *ctx); @@ -227,7 +227,7 @@ llvm_release_context(JitContext *context) * Return module which may be modified, e.g. by creating new functions. */ LLVMModuleRef -llvm_mutable_module(LLVMJitContext *context) +llvm_mutable_module(LLVMJitContext * context) { llvm_assert_in_fatal_section(); @@ -273,7 +273,7 @@ llvm_expand_funcname(struct LLVMJitContext *context, const char *basename) * code to be optimized and emitted, do so first. */ void * -llvm_get_function(LLVMJitContext *context, const char *funcname) +llvm_get_function(LLVMJitContext * context, const char *funcname) { #if LLVM_VERSION_MAJOR > 11 || \ defined(HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN) && HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN @@ -493,7 +493,7 @@ llvm_copy_attributes(LLVMValueRef v_from, LLVMValueRef v_to) * Return a callable LLVMValueRef for fcinfo. */ LLVMValueRef -llvm_function_reference(LLVMJitContext *context, +llvm_function_reference(LLVMJitContext * context, LLVMBuilderRef builder, LLVMModuleRef mod, FunctionCallInfo fcinfo) @@ -556,7 +556,7 @@ llvm_function_reference(LLVMJitContext *context, * Optimize code in module using the flags set in context. */ static void -llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module) +llvm_optimize_module(LLVMJitContext * context, LLVMModuleRef module) { LLVMPassManagerBuilderRef llvm_pmb; LLVMPassManagerRef llvm_mpm; @@ -627,7 +627,7 @@ llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module) * Emit code for the currently pending module. */ static void -llvm_compile_module(LLVMJitContext *context) +llvm_compile_module(LLVMJitContext * context) { LLVMJitHandle *handle; MemoryContext oldcontext; @@ -799,9 +799,9 @@ llvm_session_initialize(void) LLVMInitializeNativeAsmParser(); /* - * When targeting an LLVM version with opaque pointers enabled by - * default, turn them off for the context we build our code in. We don't - * need to do so for other contexts (e.g. llvm_ts_context). Once the IR is + * When targeting an LLVM version with opaque pointers enabled by default, + * turn them off for the context we build our code in. We don't need to + * do so for other contexts (e.g. llvm_ts_context). Once the IR is * generated, it carries the necessary information. 
*/ #if LLVM_VERSION_MAJOR > 14 @@ -1175,7 +1175,7 @@ static LLVMOrcObjectLayerRef llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple) { LLVMOrcObjectLayerRef objlayer = - LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES); + LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES); #if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER if (jit_debugging_support) diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c index 6b15588da6..4fbc8a0cbc 100644 --- a/src/backend/jit/llvm/llvmjit_deform.c +++ b/src/backend/jit/llvm/llvmjit_deform.c @@ -31,7 +31,7 @@ * Create a function that deforms a tuple of type desc up to natts columns. */ LLVMValueRef -slot_compile_deform(LLVMJitContext *context, TupleDesc desc, +slot_compile_deform(LLVMJitContext * context, TupleDesc desc, const TupleTableSlotOps *ops, int natts) { char *funcname; @@ -650,7 +650,7 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, { LLVMValueRef v_tmp_loaddata; LLVMTypeRef vartypep = - LLVMPointerType(LLVMIntType(att->attlen * 8), 0); + LLVMPointerType(LLVMIntType(att->attlen * 8), 0); v_tmp_loaddata = LLVMBuildPointerCast(b, v_attdatap, vartypep, ""); diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c index daefe66f40..8a515849c8 100644 --- a/src/backend/jit/llvm/llvmjit_expr.c +++ b/src/backend/jit/llvm/llvmjit_expr.c @@ -49,19 +49,19 @@ typedef struct CompiledExprState { LLVMJitContext *context; const char *funcname; -} CompiledExprState; +} CompiledExprState; static Datum ExecRunCompiledExpr(ExprState *state, ExprContext *econtext, bool *isNull); -static LLVMValueRef BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b, +static LLVMValueRef BuildV1Call(LLVMJitContext * context, LLVMBuilderRef b, LLVMModuleRef mod, FunctionCallInfo fcinfo, - LLVMValueRef *v_fcinfo_isnull); + LLVMValueRef * v_fcinfo_isnull); static LLVMValueRef build_EvalXFuncInt(LLVMBuilderRef b, LLVMModuleRef mod, const char *funcname, LLVMValueRef v_state, ExprEvalStep *op, - int natts, LLVMValueRef *v_args); + int natts, LLVMValueRef * v_args); static LLVMValueRef create_LifetimeEnd(LLVMModuleRef mod); /* macro making it easier to call ExecEval* functions */ @@ -1047,7 +1047,7 @@ llvm_compile_expr(ExprState *state) else { LLVMValueRef v_value = - LLVMBuildLoad(b, v_resvaluep, ""); + LLVMBuildLoad(b, v_resvaluep, ""); v_value = LLVMBuildZExt(b, LLVMBuildICmp(b, LLVMIntEQ, @@ -2464,9 +2464,9 @@ ExecRunCompiledExpr(ExprState *state, ExprContext *econtext, bool *isNull) } static LLVMValueRef -BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b, +BuildV1Call(LLVMJitContext * context, LLVMBuilderRef b, LLVMModuleRef mod, FunctionCallInfo fcinfo, - LLVMValueRef *v_fcinfo_isnull) + LLVMValueRef * v_fcinfo_isnull) { LLVMValueRef v_fn; LLVMValueRef v_fcinfo_isnullp; @@ -2512,7 +2512,7 @@ BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b, static LLVMValueRef build_EvalXFuncInt(LLVMBuilderRef b, LLVMModuleRef mod, const char *funcname, LLVMValueRef v_state, ExprEvalStep *op, - int nargs, LLVMValueRef *v_args) + int nargs, LLVMValueRef * v_args) { LLVMValueRef v_fn = llvm_pg_func(mod, funcname); LLVMValueRef *params; diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c index 7f52e1ee23..43d45810cd 100644 --- a/src/backend/libpq/be-secure-gssapi.c +++ b/src/backend/libpq/be-secure-gssapi.c @@ -527,8 +527,8 @@ secure_open_gssapi(Port 
*port) /* * Use the configured keytab, if there is one. As we now require MIT - * Kerberos, we might consider using the credential store extensions in the - * future instead of the environment variable. + * Kerberos, we might consider using the credential store extensions in + * the future instead of the environment variable. */ if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0') { diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index 685aa2ed69..69662099cd 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -1104,8 +1104,8 @@ prepare_cert_name(char *name) if (namelen > MAXLEN) { /* - * Keep the end of the name, not the beginning, since the most specific - * field is likely to give users the most information. + * Keep the end of the name, not the beginning, since the most + * specific field is likely to give users the most information. */ truncated = name + namelen - MAXLEN; truncated[0] = truncated[1] = truncated[2] = '.'; @@ -1165,8 +1165,8 @@ verify_cb(int ok, X509_STORE_CTX *ctx) /* * Get the Subject and Issuer for logging, but don't let maliciously - * huge certs flood the logs, and don't reflect non-ASCII bytes into it - * either. + * huge certs flood the logs, and don't reflect non-ASCII bytes into + * it either. */ subject = X509_NAME_to_cstring(X509_get_subject_name(cert)); sub_prepared = prepare_cert_name(subject); diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index d786a01835..1ef113649f 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -2693,8 +2693,9 @@ load_hba(void) if (!ok) { /* - * File contained one or more errors, so bail out. MemoryContextDelete - * is enough to clean up everything, including regexes. + * File contained one or more errors, so bail out. + * MemoryContextDelete is enough to clean up everything, including + * regexes. */ MemoryContextDelete(hbacxt); return false; @@ -3056,8 +3057,9 @@ load_ident(void) if (!ok) { /* - * File contained one or more errors, so bail out. MemoryContextDelete - * is enough to clean up everything, including regexes. + * File contained one or more errors, so bail out. + * MemoryContextDelete is enough to clean up everything, including + * regexes. 
*/ MemoryContextDelete(ident_context); return false; diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 0a2562c149..320c9db432 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -2011,7 +2011,7 @@ cost_incremental_sort(Path *path, { PathKey *key = (PathKey *) lfirst(l); EquivalenceMember *member = (EquivalenceMember *) - linitial(key->pk_eclass->ec_members); + linitial(key->pk_eclass->ec_members); /* * Check if the expression contains Var with "varno 0" so that we diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 1812db7f2f..9148fe3054 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -1738,7 +1738,7 @@ set_customscan_references(PlannerInfo *root, static int register_partpruneinfo(PlannerInfo *root, int part_prune_index) { - PlannerGlobal *glob = root->glob; + PlannerGlobal *glob = root->glob; PartitionPruneInfo *pruneinfo; Assert(part_prune_index >= 0 && diff --git a/src/backend/optimizer/util/appendinfo.c b/src/backend/optimizer/util/appendinfo.c index c1b1557570..f456b3b0a4 100644 --- a/src/backend/optimizer/util/appendinfo.c +++ b/src/backend/optimizer/util/appendinfo.c @@ -370,7 +370,7 @@ adjust_appendrel_attrs_mutator(Node *node, if (leaf_relid) { RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *) - list_nth(context->root->row_identity_vars, var->varattno - 1); + list_nth(context->root->row_identity_vars, var->varattno - 1); if (bms_is_member(leaf_relid, ridinfo->rowidrels)) { diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 68fd033595..a1026139d5 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -1133,7 +1133,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, { /* UPDATE/DELETE/MERGE row identity vars are always needed */ RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *) - list_nth(root->row_identity_vars, var->varattno - 1); + list_nth(root->row_identity_vars, var->varattno - 1); /* Update reltarget width estimate from RowIdentityVarInfo */ joinrel->reltarget->width += ridinfo->rowidwidth; diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index 64356436ef..a2f4d66ddb 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -3297,7 +3297,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format, if (format->format_type == JS_FORMAT_JSON) { JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ? - format->encoding : JS_ENC_UTF8; + format->encoding : JS_ENC_UTF8; if (targettype != BYTEAOID && format->encoding != JS_ENC_DEFAULT) diff --git a/src/backend/parser/parse_merge.c b/src/backend/parser/parse_merge.c index d8866373b8..91b1156d99 100644 --- a/src/backend/parser/parse_merge.c +++ b/src/backend/parser/parse_merge.c @@ -165,8 +165,8 @@ transformMergeStmt(ParseState *pstate, MergeStmt *stmt) /* * Set up the MERGE target table. The target table is added to the - * namespace below and to joinlist in transform_MERGE_to_join, so don't - * do it here. + * namespace below and to joinlist in transform_MERGE_to_join, so don't do + * it here. 
*/ qry->resultRelation = setTargetTable(pstate, stmt->relation, stmt->relation->inh, diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index b0f6fe4fa6..c8d9728362 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -995,7 +995,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) { aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TYPE, RelationGetRelationName(relation)); @@ -2357,7 +2357,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) * mentioned above. */ Datum attoptions = - get_attoptions(RelationGetRelid(index_rel), i + 1); + get_attoptions(RelationGetRelid(index_rel), i + 1); defopclass = GetDefaultOpClass(attform->atttypid, index_rel->rd_rel->relam); diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c index cf1156b842..6158938d05 100644 --- a/src/backend/partitioning/partbounds.c +++ b/src/backend/partitioning/partbounds.c @@ -3193,7 +3193,7 @@ check_new_partition_bound(char *relname, Relation parent, * datums list. */ PartitionRangeDatum *datum = - list_nth(spec->upperdatums, abs(cmpval) - 1); + list_nth(spec->upperdatums, abs(cmpval) - 1); /* * The new partition overlaps with the diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c index 509587636e..6f9c2765d6 100644 --- a/src/backend/postmaster/fork_process.c +++ b/src/backend/postmaster/fork_process.c @@ -58,8 +58,8 @@ fork_process(void) /* * We start postmaster children with signals blocked. This allows them to * install their own handlers before unblocking, to avoid races where they - * might run the postmaster's handler and miss an important control signal. - * With more analysis this could potentially be relaxed. + * might run the postmaster's handler and miss an important control + * signal. With more analysis this could potentially be relaxed. */ sigprocmask(SIG_SETMASK, &BlockSig, &save_mask); result = fork(); diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c index 38c09b1123..9087ef95af 100644 --- a/src/backend/regex/regc_lex.c +++ b/src/backend/regex/regc_lex.c @@ -759,6 +759,7 @@ lexescape(struct vars *v) RETV(PLAIN, c); break; default: + /* * Throw an error for unrecognized ASCII alpha escape sequences, * which reserves them for future use if needed. 
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 052505e46f..dc9c5c82d9 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -259,7 +259,7 @@ libpqrcv_check_conninfo(const char *conninfo, bool must_use_password) if (must_use_password) { - bool uses_password = false; + bool uses_password = false; for (opt = opts; opt->keyword != NULL; ++opt) { diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index beef399b42..d91055a440 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -155,7 +155,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_PARAMETER_CHANGE: { xl_parameter_change *xlrec = - (xl_parameter_change *) XLogRecGetData(buf->record); + (xl_parameter_change *) XLogRecGetData(buf->record); /* * If wal_level on the primary is reduced to less than @@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) * invalidated when this WAL record is replayed; and further, * slot creation fails when wal_level is not sufficient; but * all these operations are not synchronized, so a logical - * slot may creep in while the wal_level is being - * reduced. Hence this extra check. + * slot may creep in while the wal_level is being reduced. + * Hence this extra check. */ if (xlrec->wal_level < WAL_LEVEL_LOGICAL) { @@ -752,7 +752,7 @@ DecodePrepare(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, SnapBuild *builder = ctx->snapshot_builder; XLogRecPtr origin_lsn = parsed->origin_lsn; TimestampTz prepare_time = parsed->xact_time; - RepOriginId origin_id = XLogRecGetOrigin(buf->record); + RepOriginId origin_id = XLogRecGetOrigin(buf->record); int i; TransactionId xid = parsed->twophase_xid; @@ -828,7 +828,7 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, int i; XLogRecPtr origin_lsn = InvalidXLogRecPtr; TimestampTz abort_time = parsed->xact_time; - RepOriginId origin_id = XLogRecGetOrigin(buf->record); + RepOriginId origin_id = XLogRecGetOrigin(buf->record); bool skip_xact; if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN) diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 7e1f677f7a..41243d0187 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin, MemoryContext old_context; /* - * On a standby, this check is also required while creating the - * slot. Check the comments in the function. + * On a standby, this check is also required while creating the slot. + * Check the comments in the function. 
*/ CheckLogicalDecodingRequirements(); diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index 2c04c8707d..b0255ffd25 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -833,7 +833,7 @@ replorigin_redo(XLogReaderState *record) case XLOG_REPLORIGIN_SET: { xl_replorigin_set *xlrec = - (xl_replorigin_set *) XLogRecGetData(record); + (xl_replorigin_set *) XLogRecGetData(record); replorigin_advance(xlrec->node_id, xlrec->remote_lsn, record->EndRecPtr, diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 9f44974473..828613d325 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -1408,7 +1408,7 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state) { dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node); ReorderBufferChange *next_change = - dlist_container(ReorderBufferChange, node, next); + dlist_container(ReorderBufferChange, node, next); /* txn stays the same */ state->entries[off].lsn = next_change->lsn; @@ -1439,8 +1439,8 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state) { /* successfully restored changes from disk */ ReorderBufferChange *next_change = - dlist_head_element(ReorderBufferChange, node, - &entry->txn->changes); + dlist_head_element(ReorderBufferChange, node, + &entry->txn->changes); elog(DEBUG2, "restored %u/%u changes from disk", (uint32) entry->txn->nentries_mem, @@ -1582,7 +1582,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) dclist_delete_from(&rb->catchange_txns, &txn->catchange_node); /* now remove reference from buffer */ - hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found); + hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found); Assert(found); /* remove entries spilled to disk */ @@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb) ReorderBufferTXN *txn; /* - * Bail out if logical_replication_mode is buffered and we haven't exceeded - * the memory limit. + * Bail out if logical_replication_mode is buffered and we haven't + * exceeded the memory limit. 
*/ if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED && rb->size < logical_decoding_work_mem * 1024L) @@ -3841,7 +3841,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, { char *data; Size inval_size = sizeof(SharedInvalidationMessage) * - change->data.inval.ninvalidations; + change->data.inval.ninvalidations; sz += inval_size; @@ -4206,7 +4206,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, dlist_foreach_modify(cleanup_iter, &txn->changes) { ReorderBufferChange *cleanup = - dlist_container(ReorderBufferChange, node, cleanup_iter.cur); + dlist_container(ReorderBufferChange, node, cleanup_iter.cur); dlist_delete(&cleanup->node); ReorderBufferReturnChange(rb, cleanup, true); @@ -4431,7 +4431,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, case REORDER_BUFFER_CHANGE_INVALIDATION: { Size inval_size = sizeof(SharedInvalidationMessage) * - change->data.inval.ninvalidations; + change->data.inval.ninvalidations; change->data.inval.invalidations = MemoryContextAlloc(rb->context, inval_size); @@ -4936,7 +4936,7 @@ ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn) dlist_foreach_modify(it, &ent->chunks) { ReorderBufferChange *change = - dlist_container(ReorderBufferChange, node, it.cur); + dlist_container(ReorderBufferChange, node, it.cur); dlist_delete(&change->node); ReorderBufferReturnChange(rb, change, true); diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 62542827e4..0786bb0ab7 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -574,7 +574,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder) Assert(builder->building_full_snapshot); /* don't allow older snapshots */ - InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */ + InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */ if (HaveRegisteredOrActiveSnapshot()) elog(ERROR, "cannot build an initial slot snapshot when snapshots exist"); Assert(!HistoricSnapshotActive()); @@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn */ /* - * xl_running_xacts record is older than what we can use, we might not have - * all necessary catalog rows anymore. + * xl_running_xacts record is older than what we can use, we might not + * have all necessary catalog rows anymore. */ if (TransactionIdIsNormal(builder->initial_xmin_horizon) && NormalTransactionIdPrecedes(running->oldestRunningXid, diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 0c71ae9ba7..c56d42dcd2 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -563,7 +563,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) * the lock. 
*/ int nsyncworkers = - logicalrep_sync_worker_count(MyLogicalRepWorker->subid); + logicalrep_sync_worker_count(MyLogicalRepWorker->subid); /* Now safe to release the LWLock */ LWLockRelease(LogicalRepWorkerLock); diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 37bb884127..b655c24d0b 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -2396,7 +2396,7 @@ apply_handle_insert(StringInfo s) LogicalRepRelMapEntry *rel; LogicalRepTupleData newtup; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; TupleTableSlot *remoteslot; @@ -2544,7 +2544,7 @@ apply_handle_update(StringInfo s) { LogicalRepRelMapEntry *rel; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; LogicalRepTupleData oldtup; @@ -2729,7 +2729,7 @@ apply_handle_delete(StringInfo s) LogicalRepRelMapEntry *rel; LogicalRepTupleData oldtup; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; TupleTableSlot *remoteslot; @@ -3076,8 +3076,8 @@ apply_handle_tuple_routing(ApplyExecutionData *edata, if (map) { TupleConversionMap *PartitionToRootMap = - convert_tuples_by_name(RelationGetDescr(partrel), - RelationGetDescr(parentrel)); + convert_tuples_by_name(RelationGetDescr(partrel), + RelationGetDescr(parentrel)); remoteslot = execute_attr_map_slot(PartitionToRootMap->attrMap, @@ -3411,7 +3411,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush, dlist_foreach_modify(iter, &lsn_mapping) { FlushPosition *pos = - dlist_container(FlushPosition, node, iter.cur); + dlist_container(FlushPosition, node, iter.cur); *write = pos->remote_end; @@ -4695,11 +4695,11 @@ ApplyWorkerMain(Datum main_arg) ereport(DEBUG1, (errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s", - MySubscription->name, - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" : - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" : - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" : - "?"))); + MySubscription->name, + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" : + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" : + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" : + "?"))); } else { @@ -5073,10 +5073,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo) } /* - * If we are processing this transaction using a parallel apply worker then - * either we send the changes to the parallel worker or if the worker is busy - * then serialize the changes to the file which will later be processed by - * the parallel worker. + * If we are processing this transaction using a parallel apply worker + * then either we send the changes to the parallel worker or if the worker + * is busy then serialize the changes to the file which will later be + * processed by the parallel worker. */ *winfo = pa_find_worker(xid); @@ -5090,9 +5090,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo) } /* - * If there is no parallel worker involved to process this transaction then - * we either directly apply the change or serialize it to a file which will - * later be applied when the transaction finish message is processed. 
+ * If there is no parallel worker involved to process this transaction + * then we either directly apply the change or serialize it to a file + * which will later be applied when the transaction finish message is + * processed. */ else if (in_streamed_transaction) { diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index f88389de84..b08ca55041 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications, * are multiple lists (one for each operation) to which row filters will * be appended. * - * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row - * filter expression" so it takes precedence. + * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter + * expression" so it takes precedence. */ foreach(lc, publications) { diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index 889e20b5dd..a8a2f8f1b9 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -330,7 +330,7 @@ static void SyncRepQueueInsert(int mode) { dlist_head *queue; - dlist_iter iter; + dlist_iter iter; Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE); queue = &WalSndCtl->SyncRepQueue[mode]; @@ -879,7 +879,7 @@ SyncRepWakeQueue(bool all, int mode) dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode]) { - PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur); + PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur); /* * Assume the queue is ordered by LSN diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index 980dc1816f..0e4f76efa8 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -3548,7 +3548,7 @@ rewriteTargetView(Query *parsetree, Relation view) if (parsetree->withCheckOptions != NIL) { WithCheckOption *parent_wco = - (WithCheckOption *) linitial(parsetree->withCheckOptions); + (WithCheckOption *) linitial(parsetree->withCheckOptions); if (parent_wco->cascaded) { diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c index 569c1c9467..5c3fe4eda2 100644 --- a/src/backend/rewrite/rowsecurity.c +++ b/src/backend/rewrite/rowsecurity.c @@ -581,7 +581,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_restrictive) { List *hook_policies = - (*row_security_policy_hook_restrictive) (cmd, relation); + (*row_security_policy_hook_restrictive) (cmd, relation); /* * As with built-in restrictive policies, we sort any hook-provided @@ -603,7 +603,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_permissive) { List *hook_policies = - (*row_security_policy_hook_permissive) (cmd, relation); + (*row_security_policy_hook_permissive) (cmd, relation); foreach(item, hook_policies) { diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index 54e3bb4aa2..28b52d8aa1 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -2237,8 +2237,8 @@ compute_expr_stats(Relation onerel, double totalrows, if (tcnt > 0) { AttributeOpts *aopt = - get_attribute_options(stats->attr->attrelid, - stats->attr->attnum); + get_attribute_options(stats->attr->attrelid, + stats->attr->attnum); stats->exprvals = exprvals; stats->exprnulls = exprnulls; diff --git 
a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 1fa689052e..e7a63e295b 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -2667,7 +2667,7 @@ BufferSync(int flags) { BufferDesc *bufHdr = NULL; CkptTsStatus *ts_stat = (CkptTsStatus *) - DatumGetPointer(binaryheap_first(ts_heap)); + DatumGetPointer(binaryheap_first(ts_heap)); buf_id = CkptBufferIds[ts_stat->index].buf_id; Assert(buf_id != -1); diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c index 84ead85942..41ab64100e 100644 --- a/src/backend/storage/file/buffile.c +++ b/src/backend/storage/file/buffile.c @@ -98,8 +98,7 @@ struct BufFile /* * XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid - * wasting per-file alignment padding when some users create many - * files. + * wasting per-file alignment padding when some users create many files. */ PGAlignedBlock buffer; }; diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c index f0965c3481..6399fa2ad5 100644 --- a/src/backend/storage/ipc/dsm_impl.c +++ b/src/backend/storage/ipc/dsm_impl.c @@ -357,14 +357,15 @@ dsm_impl_posix_resize(int fd, off_t size) /* * Block all blockable signals, except SIGQUIT. posix_fallocate() can run * for quite a long time, and is an all-or-nothing operation. If we - * allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery - * conflicts), the retry loop might never succeed. + * allowed SIGUSR1 to interrupt us repeatedly (for example, due to + * recovery conflicts), the retry loop might never succeed. */ if (IsUnderPostmaster) sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask); pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE); #if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__) + /* * On Linux, a shm_open fd is backed by a tmpfs file. If we were to use * ftruncate, the file would contain a hole. Accessing memory backed by a @@ -374,8 +375,8 @@ dsm_impl_posix_resize(int fd, off_t size) * SIGBUS later. * * We still use a traditional EINTR retry loop to handle SIGCONT. - * posix_fallocate() doesn't restart automatically, and we don't want - * this to fail if you attach a debugger. + * posix_fallocate() doesn't restart automatically, and we don't want this + * to fail if you attach a debugger. */ do { @@ -383,9 +384,9 @@ dsm_impl_posix_resize(int fd, off_t size) } while (rc == EINTR); /* - * The caller expects errno to be set, but posix_fallocate() doesn't - * set it. Instead it returns error numbers directly. So set errno, - * even though we'll also return rc to indicate success or failure. + * The caller expects errno to be set, but posix_fallocate() doesn't set + * it. Instead it returns error numbers directly. So set errno, even + * though we'll also return rc to indicate success or failure. 
*/ errno = rc; #else diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 42595b38b2..193f50fc0f 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -3936,6 +3936,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data) dclist_foreach(proc_iter, waitQueue) { PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur); + if (queued_proc == blocked_proc) break; data->waiter_pids[data->npids++] = queued_proc->pid; diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index d2ec396045..4fb4280f05 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -1118,9 +1118,9 @@ LWLockDequeueSelf(LWLock *lock) LWLockWaitListLock(lock); /* - * Remove ourselves from the waitlist, unless we've already been - * removed. The removal happens with the wait list lock held, so there's - * no race in this check. + * Remove ourselves from the waitlist, unless we've already been removed. + * The removal happens with the wait list lock held, so there's no race in + * this check. */ on_waitlist = MyProc->lwWaiting == LW_WS_WAITING; if (on_waitlist) diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index 203b189559..533f616541 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -625,7 +625,7 @@ RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer) dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (conflict->sxactIn == writer) return true; @@ -708,7 +708,7 @@ FlagSxactUnsafe(SERIALIZABLEXACT *sxact) dlist_foreach_modify(iter, &sxact->possibleUnsafeConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); Assert(!SxactIsReadOnly(conflict->sxactOut)); Assert(sxact == conflict->sxactIn); @@ -1587,7 +1587,7 @@ GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size) dlist_foreach(iter, &blocking_sxact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); output[num_written++] = possibleUnsafeConflict->sxactOut->pid; @@ -1825,8 +1825,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot, /* * If we didn't find any possibly unsafe conflicts because every * uncommitted writable transaction turned out to be doomed, then we - * can "opt out" immediately. See comments above the earlier check for - * PredXact->WritableSxactCount == 0. + * can "opt out" immediately. See comments above the earlier check + * for PredXact->WritableSxactCount == 0. 
*/ if (dlist_is_empty(&sxact->possibleUnsafeConflicts)) { @@ -2613,7 +2613,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash) dlist_foreach_modify(iter, &target->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); bool found; dlist_delete(&(predlock->xactLink)); @@ -2754,7 +2754,7 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag, dlist_foreach_modify(iter, &oldtarget->predicateLocks) { PREDICATELOCK *oldpredlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); PREDICATELOCK *newpredlock; SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo; @@ -2976,7 +2976,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer) dlist_foreach_modify(iter, &oldtarget->predicateLocks) { PREDICATELOCK *oldpredlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); PREDICATELOCK *newpredlock; SerCommitSeqNo oldCommitSeqNo; SERIALIZABLEXACT *oldXact; @@ -3194,7 +3194,7 @@ SetNewSxactGlobalXmin(void) dlist_foreach(iter, &PredXact->activeList) { SERIALIZABLEXACT *sxact = - dlist_container(SERIALIZABLEXACT, xactLink, iter.cur); + dlist_container(SERIALIZABLEXACT, xactLink, iter.cur); if (!SxactIsRolledBack(sxact) && !SxactIsCommitted(sxact) @@ -3440,7 +3440,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut)); Assert(MySerializableXact == possibleUnsafeConflict->sxactIn); @@ -3471,7 +3471,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (isCommit && !SxactIsReadOnly(MySerializableXact) @@ -3496,7 +3496,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->inConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); if (!isCommit || SxactIsCommitted(conflict->sxactOut) @@ -3515,7 +3515,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); roXact = possibleUnsafeConflict->sxactIn; Assert(MySerializableXact == possibleUnsafeConflict->sxactOut); @@ -3564,8 +3564,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) * xmin and purge any transactions which finished before this transaction * was launched. * - * For parallel queries in read-only transactions, it might run twice. - * We only release the reference on the first call. + * For parallel queries in read-only transactions, it might run twice. We + * only release the reference on the first call. 
*/ needToClear = false; if ((partiallyReleasing || @@ -3641,7 +3641,7 @@ ClearOldPredicateLocks(void) dlist_foreach_modify(iter, FinishedSerializableTransactions) { SERIALIZABLEXACT *finishedSxact = - dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur); + dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur); if (!TransactionIdIsValid(PredXact->SxactGlobalXmin) || TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore, @@ -3700,7 +3700,7 @@ ClearOldPredicateLocks(void) dlist_foreach_modify(iter, &OldCommittedSxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); bool canDoPartialCleanup; LWLockAcquire(SerializableXactHashLock, LW_SHARED); @@ -3787,7 +3787,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); PREDICATELOCKTAG tag; PREDICATELOCKTARGET *target; PREDICATELOCKTARGETTAG targettag; @@ -3864,7 +3864,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (summarize) conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN; @@ -3876,7 +3876,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->inConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); if (summarize) conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT; @@ -4134,7 +4134,7 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag) dlist_foreach_modify(iter, &target->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); SERIALIZABLEXACT *sxact = predlock->tag.myXact; if (sxact == MySerializableXact) @@ -4407,7 +4407,7 @@ CheckTableForSerializableConflictIn(Relation relation) dlist_foreach_modify(iter, &target->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); if (predlock->tag.myXact != MySerializableXact && !RWConflictExists(predlock->tag.myXact, MySerializableXact)) @@ -4519,7 +4519,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, dlist_foreach(iter, &writer->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); SERIALIZABLEXACT *t2 = conflict->sxactIn; if (SxactIsPrepared(t2) @@ -4566,7 +4566,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->inConflicts) { const RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); const SERIALIZABLEXACT *t0 = conflict->sxactOut; if (!SxactIsDoomed(t0) @@ -4664,7 +4664,7 @@ PreCommit_CheckForSerializationFailure(void) dlist_foreach(near_iter, &MySerializableXact->inConflicts) { RWConflict nearConflict = - dlist_container(RWConflictData, inLink, near_iter.cur); + dlist_container(RWConflictData, inLink, near_iter.cur); if 
(!SxactIsCommitted(nearConflict->sxactOut) && !SxactIsDoomed(nearConflict->sxactOut)) @@ -4674,7 +4674,7 @@ PreCommit_CheckForSerializationFailure(void) dlist_foreach(far_iter, &nearConflict->sxactOut->inConflicts) { RWConflict farConflict = - dlist_container(RWConflictData, inLink, far_iter.cur); + dlist_container(RWConflictData, inLink, far_iter.cur); if (farConflict->sxactOut == MySerializableXact || (!SxactIsCommitted(farConflict->sxactOut) @@ -4770,7 +4770,7 @@ AtPrepare_PredicateLocks(void) dlist_foreach(iter, &sxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); record.type = TWOPHASEPREDICATERECORD_LOCK; lockRecord->target = predlock->tag.myTarget->tag; diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 22b4278610..dac921219f 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -101,7 +101,7 @@ ProcGlobalShmemSize(void) { Size size = 0; Size TotalProcs = - add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts)); + add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts)); /* ProcGlobal */ size = add_size(size, sizeof(PROC_HDR)); @@ -331,7 +331,7 @@ InitProcess(void) if (!dlist_is_empty(procgloballist)) { - MyProc = (PGPROC*) dlist_pop_head_node(procgloballist); + MyProc = (PGPROC *) dlist_pop_head_node(procgloballist); SpinLockRelease(ProcStructLock); } else @@ -1009,7 +1009,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) uint32 hashcode = locallock->hashcode; LWLock *partitionLock = LockHashPartitionLock(hashcode); dclist_head *waitQueue = &lock->waitProcs; - PGPROC *insert_before = NULL; + PGPROC *insert_before = NULL; LOCKMASK myHeldLocks = MyProc->heldLocks; TimestampTz standbyWaitStart = 0; bool early_deadlock = false; @@ -1244,7 +1244,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) if (InHotStandby) { bool maybe_log_conflict = - (standbyWaitStart != 0 && !logged_recovery_conflict); + (standbyWaitStart != 0 && !logged_recovery_conflict); /* Set a timer and wait for that or for the lock to be granted */ ResolveRecoveryConflictWithLock(locallock->tag.lock, diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index e982a8dd7f..acd75b40d2 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -549,7 +549,7 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, while (remblocks > 0) { - BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE); + BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE); off_t seekpos = (off_t) BLCKSZ * segstartblock; int numblocks; @@ -597,9 +597,9 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, /* * Even if we don't want to use fallocate, we can still extend a * bit more efficiently than writing each 8kB block individually. - * pg_pwrite_zeros() (via FileZero()) uses - * pg_pwritev_with_retry() to avoid multiple writes or needing a - * zeroed buffer for the whole length of the extension. + * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry() + * to avoid multiple writes or needing a zeroed buffer for the + * whole length of the extension. 
*/ ret = FileZero(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * numblocks, diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c index fe4fd3a929..8a2cb55876 100644 --- a/src/backend/tsearch/spell.c +++ b/src/backend/tsearch/spell.c @@ -2256,7 +2256,7 @@ NormalizeSubWord(IspellDict *Conf, char *word, int flag) { /* prefix success */ char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ? - VoidString : prefix->aff[j]->flag; + VoidString : prefix->aff[j]->flag; if (FindWord(Conf, pnewword, ff, flag)) cur += addToResult(forms, cur, pnewword); diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c index b125802b21..2f6fccc567 100644 --- a/src/backend/utils/activity/pgstat.c +++ b/src/backend/utils/activity/pgstat.c @@ -1169,7 +1169,7 @@ pgstat_flush_pending_entries(bool nowait) while (cur) { PgStat_EntryRef *entry_ref = - dlist_container(PgStat_EntryRef, pending_node, cur); + dlist_container(PgStat_EntryRef, pending_node, cur); PgStat_HashKey key = entry_ref->shared_entry->key; PgStat_Kind kind = key.kind; const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind); diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c index 09fffd0e82..d1149adf70 100644 --- a/src/backend/utils/activity/pgstat_shmem.c +++ b/src/backend/utils/activity/pgstat_shmem.c @@ -865,7 +865,7 @@ pgstat_drop_entry(PgStat_Kind kind, Oid dboid, Oid objoid) if (pgStatEntryRefHash) { PgStat_EntryRefHashEntry *lohashent = - pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key); + pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key); if (lohashent) pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref, diff --git a/src/backend/utils/activity/pgstat_xact.c b/src/backend/utils/activity/pgstat_xact.c index 91cdd9222e..369239d501 100644 --- a/src/backend/utils/activity/pgstat_xact.c +++ b/src/backend/utils/activity/pgstat_xact.c @@ -76,7 +76,7 @@ AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit) dclist_foreach_modify(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); xl_xact_stats_item *it = &pending->item; if (isCommit && !pending->is_create) @@ -148,7 +148,7 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, dclist_foreach_modify(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); xl_xact_stats_item *it = &pending->item; dclist_delete_from(&xact_state->pending_drops, &pending->node); @@ -290,7 +290,7 @@ pgstat_get_transactional_drops(bool isCommit, xl_xact_stats_item **items) dclist_foreach(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); if (isCommit && pending->is_create) continue; @@ -335,7 +335,7 @@ create_drop_transactional_internal(PgStat_Kind kind, Oid dboid, Oid objoid, bool int nest_level = GetCurrentTransactionNestLevel(); PgStat_SubXactStatus *xact_state; PgStat_PendingDroppedStatsItem *drop = (PgStat_PendingDroppedStatsItem *) - MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem)); + MemoryContextAlloc(TopTransactionContext, 
sizeof(PgStat_PendingDroppedStatsItem)); xact_state = pgstat_get_xact_stack_level(nest_level); diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index be2e55bb29..5d8d583ddc 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -4482,17 +4482,17 @@ EncodeInterval(struct pg_itm *itm, int style, char *str) case INTSTYLE_SQL_STANDARD: { bool has_negative = year < 0 || mon < 0 || - mday < 0 || hour < 0 || - min < 0 || sec < 0 || fsec < 0; + mday < 0 || hour < 0 || + min < 0 || sec < 0 || fsec < 0; bool has_positive = year > 0 || mon > 0 || - mday > 0 || hour > 0 || - min > 0 || sec > 0 || fsec > 0; + mday > 0 || hour > 0 || + min > 0 || sec > 0 || fsec > 0; bool has_year_month = year != 0 || mon != 0; bool has_day_time = mday != 0 || hour != 0 || - min != 0 || sec != 0 || fsec != 0; + min != 0 || sec != 0 || fsec != 0; bool has_day = mday != 0; bool sql_standard_value = !(has_negative && has_positive) && - !(has_year_month && has_day_time); + !(has_year_month && has_day_time); /* * SQL Standard wants only 1 "<sign>" preceding the whole diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c index 9b51da2382..dfa90a04fb 100644 --- a/src/backend/utils/adt/float.c +++ b/src/backend/utils/adt/float.c @@ -189,8 +189,7 @@ float4in_internal(char *num, char **endptr_p, /* * endptr points to the first character _after_ the sequence we recognized * as a valid floating point number. orig_string points to the original - * input - * string. + * input string. */ /* skip leading whitespace */ diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 4c5abaff25..70cb922e6b 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -3219,9 +3219,9 @@ static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns) { RecordIOData *data = (RecordIOData *) - MemoryContextAlloc(mcxt, - offsetof(RecordIOData, columns) + - ncolumns * sizeof(ColumnIOData)); + MemoryContextAlloc(mcxt, + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); data->record_type = InvalidOid; data->record_typmod = 0; diff --git a/src/backend/utils/adt/jsonpath.c b/src/backend/utils/adt/jsonpath.c index 0021b01830..7891fde310 100644 --- a/src/backend/utils/adt/jsonpath.c +++ b/src/backend/utils/adt/jsonpath.c @@ -76,7 +76,7 @@ static Datum jsonPathFromCstring(char *in, int len, struct Node *escontext); static char *jsonPathToCstring(StringInfo out, JsonPath *in, int estimated_len); -static bool flattenJsonPathParseItem(StringInfo buf, int *result, +static bool flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, JsonPathParseItem *item, int nestingLevel, bool insideArraySubscript); @@ -234,7 +234,7 @@ jsonPathToCstring(StringInfo out, JsonPath *in, int estimated_len) * children into a binary representation. */ static bool -flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, +flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, JsonPathParseItem *item, int nestingLevel, bool insideArraySubscript) { @@ -306,19 +306,19 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (!item->value.args.left) chld = pos; - else if (! 
flattenJsonPathParseItem(buf, &chld, escontext, - item->value.args.left, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.args.left, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + left) = chld - pos; if (!item->value.args.right) chld = pos; - else if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.args.right, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.args.right, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + right) = chld - pos; } @@ -338,10 +338,10 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, item->value.like_regex.patternlen); appendStringInfoChar(buf, '\0'); - if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.like_regex.expr, - nestingLevel, - insideArraySubscript)) + if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.like_regex.expr, + nestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + offs) = chld - pos; } @@ -360,10 +360,10 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (!item->value.arg) chld = pos; - else if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.arg, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.arg, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + arg) = chld - pos; } @@ -405,17 +405,17 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, int32 topos; int32 frompos; - if (! flattenJsonPathParseItem(buf, &frompos, escontext, - item->value.array.elems[i].from, - nestingLevel, true)) + if (!flattenJsonPathParseItem(buf, &frompos, escontext, + item->value.array.elems[i].from, + nestingLevel, true)) return false; frompos -= pos; if (item->value.array.elems[i].to) { - if (! flattenJsonPathParseItem(buf, &topos, escontext, - item->value.array.elems[i].to, - nestingLevel, true)) + if (!flattenJsonPathParseItem(buf, &topos, escontext, + item->value.array.elems[i].to, + nestingLevel, true)) return false; topos -= pos; } @@ -451,9 +451,9 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (item->next) { - if (! 
flattenJsonPathParseItem(buf, &chld, escontext, - item->next, nestingLevel, - insideArraySubscript)) + if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->next, nestingLevel, + insideArraySubscript)) return false; chld -= pos; *(int32 *) (buf->data + next) = chld; diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c index b561f0e7e8..41430bab7e 100644 --- a/src/backend/utils/adt/jsonpath_exec.c +++ b/src/backend/utils/adt/jsonpath_exec.c @@ -1326,8 +1326,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp, */ JsonValueList vals = {0}; JsonPathExecResult res = - executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, - false, &vals); + executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, + false, &vals); if (jperIsError(res)) return jpbUnknown; @@ -1337,8 +1337,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp, else { JsonPathExecResult res = - executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, - false, NULL); + executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, + false, NULL); if (jperIsError(res)) return jpbUnknown; @@ -1869,7 +1869,7 @@ executeDateTimeMethod(JsonPathExecContext *cxt, JsonPathItem *jsp, if (!fmt_txt[i]) { MemoryContext oldcxt = - MemoryContextSwitchTo(TopMemoryContext); + MemoryContextSwitchTo(TopMemoryContext); fmt_txt[i] = cstring_to_text(fmt_str[i]); MemoryContextSwitchTo(oldcxt); diff --git a/src/backend/utils/adt/jsonpath_internal.h b/src/backend/utils/adt/jsonpath_internal.h index 2e12de038c..90eea6e961 100644 --- a/src/backend/utils/adt/jsonpath_internal.h +++ b/src/backend/utils/adt/jsonpath_internal.h @@ -20,7 +20,7 @@ typedef struct JsonPathString char *val; int len; int total; -} JsonPathString; +} JsonPathString; #include "utils/jsonpath.h" #include "jsonpath_gram.h" @@ -29,8 +29,8 @@ typedef struct JsonPathString JsonPathParseResult **result, \ struct Node *escontext) YY_DECL; -extern int jsonpath_yyparse(JsonPathParseResult **result, - struct Node *escontext); +extern int jsonpath_yyparse(JsonPathParseResult **result, + struct Node *escontext); extern void jsonpath_yyerror(JsonPathParseResult **result, struct Node *escontext, const char *message); diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index 51df570ce9..bdb285aa3b 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -1794,8 +1794,7 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2, else #endif result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p); - if (result == 2147483647) /* _NLSCMPERROR; missing from mingw - * headers */ + if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */ ereport(ERROR, (errmsg("could not compare Unicode strings: %m"))); @@ -1818,14 +1817,15 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2, static int pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale) { - int result; + int result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); #ifdef WIN32 if (GetDatabaseEncoding() == PG_UTF8) { - size_t len1 = strlen(arg1); - size_t len2 = strlen(arg2); + size_t len1 = strlen(arg1); + size_t len2 = strlen(arg2); + result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale); } else @@ -1854,13 +1854,13 @@ static int pg_strncoll_libc(const char *arg1, size_t len1, const char *arg2, size_t len2, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - size_t bufsize1 = len1 + 1; - size_t bufsize2 = len2 + 1; - char *arg1n; - char *arg2n; - 
int result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + size_t bufsize1 = len1 + 1; + size_t bufsize2 = len2 + 1; + char *arg1n; + char *arg2n; + int result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); @@ -1906,15 +1906,15 @@ static int pg_strncoll_icu_no_utf8(const char *arg1, int32_t len1, const char *arg2, int32_t len2, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - int32_t ulen1; - int32_t ulen2; - size_t bufsize1; - size_t bufsize2; - UChar *uchar1, - *uchar2; - int result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + int32_t ulen1; + int32_t ulen2; + size_t bufsize1; + size_t bufsize2; + UChar *uchar1, + *uchar2; + int result; Assert(locale->provider == COLLPROVIDER_ICU); #ifdef HAVE_UCOL_STRCOLLUTF8 @@ -1961,7 +1961,7 @@ static int pg_strncoll_icu(const char *arg1, int32_t len1, const char *arg2, int32_t len2, pg_locale_t locale) { - int result; + int result; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2042,7 +2042,7 @@ int pg_strncoll(const char *arg1, size_t len1, const char *arg2, size_t len2, pg_locale_t locale) { - int result; + int result; if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strncoll_libc(arg1, len1, arg2, len2, locale); @@ -2074,7 +2074,7 @@ pg_strxfrm_libc(char *dest, const char *src, size_t destsize, #else /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return 0; /* keep compiler quiet */ + return 0; /* keep compiler quiet */ #endif } @@ -2082,10 +2082,10 @@ static size_t pg_strnxfrm_libc(char *dest, const char *src, size_t srclen, size_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - size_t bufsize = srclen + 1; - size_t result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + size_t bufsize = srclen + 1; + size_t result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); @@ -2114,12 +2114,12 @@ static size_t pg_strnxfrm_icu(char *dest, const char *src, int32_t srclen, int32_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - UChar *uchar; - int32_t ulen; - size_t uchar_bsize; - Size result_bsize; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + UChar *uchar; + int32_t ulen; + size_t uchar_bsize; + Size result_bsize; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2161,15 +2161,15 @@ static size_t pg_strnxfrm_prefix_icu_no_utf8(char *dest, const char *src, int32_t srclen, int32_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - UCharIterator iter; - uint32_t state[2]; - UErrorCode status; - int32_t ulen = -1; - UChar *uchar = NULL; - size_t uchar_bsize; - Size result_bsize; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + UCharIterator iter; + uint32_t state[2]; + UErrorCode status; + int32_t ulen = -1; + UChar *uchar = NULL; + size_t uchar_bsize; + Size result_bsize; Assert(locale->provider == COLLPROVIDER_ICU); Assert(GetDatabaseEncoding() != PG_UTF8); @@ -2209,7 +2209,7 @@ static size_t pg_strnxfrm_prefix_icu(char *dest, const char *src, int32_t srclen, int32_t destsize, pg_locale_t locale) { - size_t result; + size_t result; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2271,7 +2271,7 @@ pg_strxfrm_enabled(pg_locale_t locale) /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } /* @@ -2291,7 +2291,7 @@ pg_strxfrm_enabled(pg_locale_t locale) size_t pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale) { - size_t result 
= 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strxfrm_libc(dest, src, destsize, locale); @@ -2328,7 +2328,7 @@ size_t pg_strnxfrm(char *dest, size_t destsize, const char *src, size_t srclen, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strnxfrm_libc(dest, src, srclen, destsize, locale); @@ -2358,7 +2358,7 @@ pg_strxfrm_prefix_enabled(pg_locale_t locale) /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } /* @@ -2378,7 +2378,7 @@ size_t pg_strxfrm_prefix(char *dest, const char *src, size_t destsize, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) elog(ERROR, "collprovider '%c' does not support pg_strxfrm_prefix()", @@ -2415,7 +2415,7 @@ size_t pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src, size_t srclen, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) elog(ERROR, "collprovider '%c' does not support pg_strnxfrm_prefix()", @@ -2491,7 +2491,7 @@ pg_ucol_open(const char *loc_str) collator = ucol_open(loc_str, &status); if (U_FAILURE(status)) ereport(ERROR, - /* use original string for error report */ + /* use original string for error report */ (errmsg("could not open collator for locale \"%s\": %s", orig_str, u_errorName(status)))); @@ -2554,6 +2554,7 @@ uchar_length(UConverter *converter, const char *str, int32_t len) { UErrorCode status = U_ZERO_ERROR; int32_t ulen; + ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status); if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR) ereport(ERROR, @@ -2571,6 +2572,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen, { UErrorCode status = U_ZERO_ERROR; int32_t ulen; + status = U_ZERO_ERROR; ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status); if (U_FAILURE(status)) @@ -2594,7 +2596,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen, int32_t icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes) { - int32_t len_uchar; + int32_t len_uchar; init_icu_converter(); @@ -2781,11 +2783,11 @@ char * icu_language_tag(const char *loc_str, int elevel) { #ifdef USE_ICU - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - char *langtag; - size_t buflen = 32; /* arbitrary starting buffer size */ - const bool strict = true; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + char *langtag; + size_t buflen = 32; /* arbitrary starting buffer size */ + const bool strict = true; status = U_ZERO_ERROR; uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status); @@ -2803,8 +2805,8 @@ icu_language_tag(const char *loc_str, int elevel) return pstrdup("en-US-u-va-posix"); /* - * A BCP47 language tag doesn't have a clearly-defined upper limit - * (cf. RFC5646 section 4.4). Additionally, in older ICU versions, + * A BCP47 language tag doesn't have a clearly-defined upper limit (cf. + * RFC5646 section 4.4). Additionally, in older ICU versions, * uloc_toLanguageTag() doesn't always return the ultimate length on the * first call, necessitating a loop. 
*/ @@ -2850,7 +2852,7 @@ icu_language_tag(const char *loc_str, int elevel) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ICU is not supported in this build"))); - return NULL; /* keep compiler quiet */ + return NULL; /* keep compiler quiet */ #endif /* not USE_ICU */ } @@ -2861,11 +2863,11 @@ void icu_validate_locale(const char *loc_str) { #ifdef USE_ICU - UCollator *collator; - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - bool found = false; - int elevel = icu_validation_level; + UCollator *collator; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + bool found = false; + int elevel = icu_validation_level; /* no validation */ if (elevel < 0) @@ -2896,8 +2898,8 @@ icu_validate_locale(const char *loc_str) /* search for matching language within ICU */ for (int32_t i = 0; !found && i < uloc_countAvailable(); i++) { - const char *otherloc = uloc_getAvailable(i); - char otherlang[ULOC_LANG_CAPACITY]; + const char *otherloc = uloc_getAvailable(i); + char otherlang[ULOC_LANG_CAPACITY]; status = U_ZERO_ERROR; uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status); diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 461735e84f..bcedfcfad2 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -12445,7 +12445,7 @@ get_range_partbound_string(List *bound_datums) foreach(cell, bound_datums) { PartitionRangeDatum *datum = - lfirst_node(PartitionRangeDatum, cell); + lfirst_node(PartitionRangeDatum, cell); appendStringInfoString(buf, sep); if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE) diff --git a/src/backend/utils/adt/tsquery_op.c b/src/backend/utils/adt/tsquery_op.c index 7e3bd51c1f..2bc4ec904f 100644 --- a/src/backend/utils/adt/tsquery_op.c +++ b/src/backend/utils/adt/tsquery_op.c @@ -150,9 +150,9 @@ Datum tsquery_phrase(PG_FUNCTION_ARGS) { PG_RETURN_DATUM(DirectFunctionCall3(tsquery_phrase_distance, - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1), - Int32GetDatum(1))); + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1), + Int32GetDatum(1))); } Datum diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c index a38db4697d..4457c5d4f9 100644 --- a/src/backend/utils/adt/tsvector_op.c +++ b/src/backend/utils/adt/tsvector_op.c @@ -525,7 +525,7 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete, if (arrin[i].haspos) { int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos) - + sizeof(uint16); + + sizeof(uint16); curoff = SHORTALIGN(curoff); memcpy(dataout + curoff, diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 592afc18ec..b92ff4d266 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -1021,7 +1021,8 @@ hashbpchar(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale); @@ -1033,8 +1034,8 @@ hashbpchar(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. 
*/ result = hash_any((uint8_t *) buf, bsize + 1); @@ -1076,7 +1077,8 @@ hashbpcharextended(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale); @@ -1088,8 +1090,8 @@ hashbpcharextended(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. */ result = hash_any_extended((uint8_t *) buf, bsize + 1, PG_GETARG_INT64(1)); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index b571876468..884bfbc8ce 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -2312,8 +2312,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) memcpy(sss->buf1, authoritative_data, len); /* - * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated - * strings. + * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings. */ sss->buf1[len] = '\0'; sss->last_len1 = len; @@ -4523,7 +4522,7 @@ text_to_array(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID)); PG_RETURN_DATUM(makeArrayResult(tstate.astate, - CurrentMemoryContext)); + CurrentMemoryContext)); } /* diff --git a/src/backend/utils/adt/xid8funcs.c b/src/backend/utils/adt/xid8funcs.c index 24271dfff7..06ae940df6 100644 --- a/src/backend/utils/adt/xid8funcs.c +++ b/src/backend/utils/adt/xid8funcs.c @@ -519,7 +519,7 @@ pg_snapshot_recv(PG_FUNCTION_ARGS) for (i = 0; i < nxip; i++) { FullTransactionId cur = - FullTransactionIdFromU64((uint64) pq_getmsgint64(buf)); + FullTransactionIdFromU64((uint64) pq_getmsgint64(buf)); if (FullTransactionIdPrecedes(cur, last) || FullTransactionIdPrecedes(cur, xmin) || diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 15adbd6a01..866d0d649a 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -630,7 +630,7 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent) XmlOptionType parsed_xmloptiontype; xmlNodePtr content_nodes; volatile xmlBufferPtr buf = NULL; - volatile xmlSaveCtxtPtr ctxt = NULL; + volatile xmlSaveCtxtPtr ctxt = NULL; ErrorSaveContext escontext = {T_ErrorSaveContext}; PgXmlErrorContext *xmlerrcxt; #endif diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index c7607895cd..60978f9415 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -3603,7 +3603,7 @@ char * get_publication_name(Oid pubid, bool missing_ok) { HeapTuple tup; - char *pubname; + char *pubname; Form_pg_publication pubform; tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid)); @@ -3630,16 +3630,16 @@ get_publication_name(Oid pubid, bool missing_ok) * return InvalidOid. 
*/ Oid -get_subscription_oid(const char* subname, bool missing_ok) +get_subscription_oid(const char *subname, bool missing_ok) { Oid oid; oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid, - MyDatabaseId, CStringGetDatum(subname)); + MyDatabaseId, CStringGetDatum(subname)); if (!OidIsValid(oid) && !missing_ok) ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("subscription \"%s\" does not exist", subname))); + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("subscription \"%s\" does not exist", subname))); return oid; } @@ -3653,7 +3653,7 @@ char * get_subscription_name(Oid subid, bool missing_ok) { HeapTuple tup; - char* subname; + char *subname; Form_pg_subscription subform; tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid)); diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 40140de958..8a08463c2b 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -3084,10 +3084,10 @@ static void AssertPendingSyncConsistency(Relation relation) { bool relcache_verdict = - RelationIsPermanent(relation) && - ((relation->rd_createSubid != InvalidSubTransactionId && - RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) || - relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId); + RelationIsPermanent(relation) && + ((relation->rd_createSubid != InvalidSubTransactionId && + RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) || + relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId); Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator)); @@ -3765,12 +3765,12 @@ RelationSetNewRelfilenumber(Relation relation, char persistence) */ if (IsBinaryUpgrade) { - SMgrRelation srel; + SMgrRelation srel; /* * During a binary upgrade, we use this code path to ensure that - * pg_largeobject and its index have the same relfilenumbers as in - * the old cluster. This is necessary because pg_upgrade treats + * pg_largeobject and its index have the same relfilenumbers as in the + * old cluster. This is necessary because pg_upgrade treats * pg_largeobject like a user table, not a system table. It is however * possible that a table or index may need to end up with the same * relfilenumber in the new cluster as what it had in the old cluster. @@ -5171,8 +5171,8 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind) Bitmapset *uindexattrs; /* columns in unique indexes */ Bitmapset *pkindexattrs; /* columns in the primary index */ Bitmapset *idindexattrs; /* columns in the replica identity */ - Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */ - Bitmapset *summarizedattrs; /* columns with summarizing indexes */ + Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */ + Bitmapset *summarizedattrs; /* columns with summarizing indexes */ List *indexoidlist; List *newindexoidlist; Oid relpkindex; @@ -5314,8 +5314,8 @@ restart: * when the column value changes, thus require a separate * attribute bitmapset. * - * Obviously, non-key columns couldn't be referenced by - * foreign key or identity key. Hence we do not include them into + * Obviously, non-key columns couldn't be referenced by foreign + * key or identity key. Hence we do not include them into * uindexattrs, pkindexattrs and idindexattrs bitmaps. 
*/ if (attrnum != 0) diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index 4c21129707..26575cae6c 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -801,11 +801,11 @@ read_relmap_file(RelMapFile *map, char *dbpath, bool lock_held, int elevel) /* * Open the target file. * - * Because Windows isn't happy about the idea of renaming over a file - * that someone has open, we only open this file after acquiring the lock, - * and for the same reason, we close it before releasing the lock. That - * way, by the time write_relmap_file() acquires an exclusive lock, no - * one else will have it open. + * Because Windows isn't happy about the idea of renaming over a file that + * someone has open, we only open this file after acquiring the lock, and + * for the same reason, we close it before releasing the lock. That way, + * by the time write_relmap_file() acquires an exclusive lock, no one else + * will have it open. */ snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath, RELMAPPER_FILENAME); diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index f72dd25efa..b4b01dc5e0 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -2151,7 +2151,7 @@ CheckFunctionValidatorAccess(Oid validatorOid, Oid functionOid) /* first validate that we have permissions to use the language */ aclresult = object_aclcheck(LanguageRelationId, procStruct->prolang, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_LANGUAGE, NameStr(langStruct->lanname)); diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 53420f4974..88434c3e5d 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -362,7 +362,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect */ if (!am_superuser && object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CONNECT) != ACLCHECK_OK) + ACL_CONNECT) != ACLCHECK_OK) ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied for database \"%s\"", name), @@ -933,10 +933,10 @@ InitPostgres(const char *in_dbname, Oid dboid, } /* - * The last few connection slots are reserved for superusers and roles with - * privileges of pg_use_reserved_connections. Replication connections are - * drawn from slots reserved with max_wal_senders and are not limited by - * max_connections, superuser_reserved_connections, or + * The last few connection slots are reserved for superusers and roles + * with privileges of pg_use_reserved_connections. Replication + * connections are drawn from slots reserved with max_wal_senders and are + * not limited by max_connections, superuser_reserved_connections, or * reserved_connections. * * Note: At this point, the new backend has already claimed a proc struct, diff --git a/src/backend/utils/init/usercontext.c b/src/backend/utils/init/usercontext.c index 38bcfa60df..dd9a0dd6a8 100644 --- a/src/backend/utils/init/usercontext.c +++ b/src/backend/utils/init/usercontext.c @@ -61,15 +61,15 @@ SwitchToUntrustedUser(Oid userid, UserContext *context) } else { - int sec_context = context->save_sec_context; + int sec_context = context->save_sec_context; /* * This user can SET ROLE to the target user, but not the other way * around, so protect ourselves against the target user by setting * SECURITY_RESTRICTED_OPERATION to prevent certain changes to the - * session state. 
Also set up a new GUC nest level, so that we can roll - * back any GUC changes that may be made by code running as the target - * user, inasmuch as they could be malicious. + * session state. Also set up a new GUC nest level, so that we can + * roll back any GUC changes that may be made by code running as the + * target user, inasmuch as they could be malicious. */ sec_context |= SECURITY_RESTRICTED_OPERATION; SetUserIdAndSecContext(userid, sec_context); diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 9dd624b3ae..53047104f5 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -1469,8 +1469,8 @@ check_GUC_init(struct config_generic *gconf) /* Flag combinations */ /* - * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part - * of SHOW ALL should not be hidden in postgresql.conf.sample. + * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of + * SHOW ALL should not be hidden in postgresql.conf.sample. */ if ((gconf->flags & GUC_NO_SHOW_ALL) && !(gconf->flags & GUC_NOT_IN_SAMPLE)) diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index cab3ddbe11..ca8558d662 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -4694,8 +4694,8 @@ struct config_enum ConfigureNamesEnum[] = { {"icu_validation_level", PGC_USERSET, CLIENT_CONN_LOCALE, - gettext_noop("Log level for reporting invalid ICU locale strings."), - NULL + gettext_noop("Log level for reporting invalid ICU locale strings."), + NULL }, &icu_validation_level, ERROR, icu_validation_level_options, diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c index f5a62061a3..7a3781466e 100644 --- a/src/backend/utils/mmgr/dsa.c +++ b/src/backend/utils/mmgr/dsa.c @@ -1369,7 +1369,7 @@ init_span(dsa_area *area, if (DsaPointerIsValid(pool->spans[1])) { dsa_area_span *head = (dsa_area_span *) - dsa_get_address(area, pool->spans[1]); + dsa_get_address(area, pool->spans[1]); head->prevspan = span_pointer; } @@ -2215,7 +2215,7 @@ make_new_segment(dsa_area *area, size_t requested_pages) if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE) { dsa_segment_map *next = - get_segment_by_index(area, segment_map->header->next); + get_segment_by_index(area, segment_map->header->next); Assert(next->header->bin == segment_map->header->bin); next->header->prev = new_index; diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c index 722a2e34db..8f9ea090fa 100644 --- a/src/backend/utils/mmgr/freepage.c +++ b/src/backend/utils/mmgr/freepage.c @@ -285,7 +285,7 @@ sum_free_pages(FreePageManager *fpm) if (!relptr_is_null(fpm->freelist[list])) { FreePageSpanLeader *candidate = - relptr_access(base, fpm->freelist[list]); + relptr_access(base, fpm->freelist[list]); do { diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index 42b90e4d4f..9fc83f11f6 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -734,9 +734,9 @@ MemoryContextStatsDetail(MemoryContext context, int max_children, * * We don't buffer the information about all memory contexts in a * backend into StringInfo and log it as one message. That would - * require the buffer to be enlarged, risking an OOM as there could - * be a large number of memory contexts in a backend. Instead, we - * log one message per memory context. + * require the buffer to be enlarged, risking an OOM as there could be + * a large number of memory contexts in a backend. 
Instead, we log + * one message per memory context. */ ereport(LOG_SERVER_ONLY, (errhidestmt(true), diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c index 7dec652106..f926f1faad 100644 --- a/src/backend/utils/resowner/resowner.c +++ b/src/backend/utils/resowner/resowner.c @@ -587,7 +587,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner, while (ResourceArrayGetAny(&(owner->cryptohasharr), &foundres)) { pg_cryptohash_ctx *context = - (pg_cryptohash_ctx *) DatumGetPointer(foundres); + (pg_cryptohash_ctx *) DatumGetPointer(foundres); if (isCommit) PrintCryptoHashLeakWarning(foundres); diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 95c3970437..e5a4e5b371 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -1438,8 +1438,8 @@ tuplesort_performsort(Tuplesortstate *state) /* * We were able to accumulate all the tuples required for output * in memory, using a heap to eliminate excess tuples. Now we - * have to transform the heap to a properly-sorted array. - * Note that sort_bounded_heap sets the correct state->status. + * have to transform the heap to a properly-sorted array. Note + * that sort_bounded_heap sets the correct state->status. */ sort_bounded_heap(state); state->current = 0; diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index c9ca44d8b7..3a419e348f 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -1990,7 +1990,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) int bucket = (oldSnapshotControl->head_offset + ((ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE)) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin)) oldSnapshotControl->xid_by_minute[bucket] = xmin; @@ -2057,7 +2057,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) /* Extend map to unused entry. */ int new_tail = (oldSnapshotControl->head_offset + oldSnapshotControl->count_used) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; oldSnapshotControl->count_used++; oldSnapshotControl->xid_by_minute[new_tail] = xmin; @@ -2188,7 +2188,7 @@ SerializeSnapshot(Snapshot snapshot, char *start_address) if (serialized_snapshot.subxcnt > 0) { Size subxipoff = sizeof(SerializedSnapshotData) + - snapshot->xcnt * sizeof(TransactionId); + snapshot->xcnt * sizeof(TransactionId); memcpy((TransactionId *) (start_address + subxipoff), snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId)); diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 2c208ead01..08f0a5c469 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1565,8 +1565,8 @@ static void setup_auth(FILE *cmdfd) { /* - * The authid table shouldn't be readable except through views, to - * ensure passwords are not publicly visible. + * The authid table shouldn't be readable except through views, to ensure + * passwords are not publicly visible. */ PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n"); @@ -1950,9 +1950,9 @@ make_template0(FILE *cmdfd) " STRATEGY = file_copy;\n\n"); /* - * template0 shouldn't have any collation-dependent objects, so unset - * the collation version. This disables collation version checks when - * making a new database from it. + * template0 shouldn't have any collation-dependent objects, so unset the + * collation version. 
This disables collation version checks when making + * a new database from it. */ PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n"); @@ -1962,9 +1962,8 @@ make_template0(FILE *cmdfd) PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n"); /* - * Explicitly revoke public create-schema and create-temp-table - * privileges in template1 and template0; else the latter would be on - * by default + * Explicitly revoke public create-schema and create-temp-table privileges + * in template1 and template0; else the latter would be on by default */ PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n"); PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n"); @@ -2237,11 +2236,11 @@ static char * icu_language_tag(const char *loc_str) { #ifdef USE_ICU - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - char *langtag; - size_t buflen = 32; /* arbitrary starting buffer size */ - const bool strict = true; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + char *langtag; + size_t buflen = 32; /* arbitrary starting buffer size */ + const bool strict = true; status = U_ZERO_ERROR; uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status); @@ -2257,8 +2256,8 @@ icu_language_tag(const char *loc_str) return pstrdup("en-US-u-va-posix"); /* - * A BCP47 language tag doesn't have a clearly-defined upper limit - * (cf. RFC5646 section 4.4). Additionally, in older ICU versions, + * A BCP47 language tag doesn't have a clearly-defined upper limit (cf. + * RFC5646 section 4.4). Additionally, in older ICU versions, * uloc_toLanguageTag() doesn't always return the ultimate length on the * first call, necessitating a loop. 
*/ @@ -2298,7 +2297,7 @@ icu_language_tag(const char *loc_str) return langtag; #else pg_fatal("ICU is not supported in this build"); - return NULL; /* keep compiler quiet */ + return NULL; /* keep compiler quiet */ #endif } @@ -2311,9 +2310,9 @@ static void icu_validate_locale(const char *loc_str) { #ifdef USE_ICU - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - bool found = false; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + bool found = false; /* validate that we can extract the language */ status = U_ZERO_ERROR; @@ -2334,8 +2333,8 @@ icu_validate_locale(const char *loc_str) /* search for matching language within ICU */ for (int32_t i = 0; !found && i < uloc_countAvailable(); i++) { - const char *otherloc = uloc_getAvailable(i); - char otherlang[ULOC_LANG_CAPACITY]; + const char *otherloc = uloc_getAvailable(i); + char otherlang[ULOC_LANG_CAPACITY]; status = U_ZERO_ERROR; uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status); @@ -2366,10 +2365,10 @@ static char * default_icu_locale(void) { #ifdef USE_ICU - UCollator *collator; - UErrorCode status; - const char *valid_locale; - char *default_locale; + UCollator *collator; + UErrorCode status; + const char *valid_locale; + char *default_locale; status = U_ZERO_ERROR; collator = ucol_open(NULL, &status); @@ -2449,7 +2448,7 @@ setlocales(void) if (locale_provider == COLLPROVIDER_ICU) { - char *langtag; + char *langtag; /* acquire default locale from the environment, if not specified */ if (icu_locale == NULL) diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index ba471f898c..1dc8efe0cb 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -341,18 +341,18 @@ tablespace_list_append(const char *arg) /* * All tablespaces are created with absolute directories, so specifying a - * non-absolute path here would just never match, possibly confusing users. - * Since we don't know whether the remote side is Windows or not, and it - * might be different than the local side, permit any path that could be - * absolute under either set of rules. + * non-absolute path here would just never match, possibly confusing + * users. Since we don't know whether the remote side is Windows or not, + * and it might be different than the local side, permit any path that + * could be absolute under either set of rules. * * (There is little practical risk of confusion here, because someone * running entirely on Linux isn't likely to have a relative path that * begins with a backslash or something that looks like a drive - * specification. If they do, and they also incorrectly believe that - * a relative path is acceptable here, we'll silently fail to warn them - * of their mistake, and the -T option will just not get applied, same - * as if they'd specified -T for a nonexistent tablespace.) + * specification. If they do, and they also incorrectly believe that a + * relative path is acceptable here, we'll silently fail to warn them of + * their mistake, and the -T option will just not get applied, same as if + * they'd specified -T for a nonexistent tablespace.) 
*/ if (!is_nonwindows_absolute_path(cell->old_dir) && !is_windows_absolute_path(cell->old_dir)) diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c index fb9e29682b..d0a4079d50 100644 --- a/src/bin/pg_basebackup/pg_receivewal.c +++ b/src/bin/pg_basebackup/pg_receivewal.c @@ -43,7 +43,7 @@ static char *basedir = NULL; static int verbose = 0; static int compresslevel = 0; -static bool noloop = false; +static bool noloop = false; static int standby_message_timeout = 10 * 1000; /* 10 sec = default */ static volatile sig_atomic_t time_to_stop = false; static bool do_create_slot = false; diff --git a/src/bin/pg_basebackup/walmethods.c b/src/bin/pg_basebackup/walmethods.c index 6d14b988cb..e74b69ffba 100644 --- a/src/bin/pg_basebackup/walmethods.c +++ b/src/bin/pg_basebackup/walmethods.c @@ -44,14 +44,14 @@ static Walfile *dir_open_for_write(WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size); -static int dir_close(Walfile *f, WalCloseMethod method); +static int dir_close(Walfile *f, WalCloseMethod method); static bool dir_existsfile(WalWriteMethod *wwmethod, const char *pathname); static ssize_t dir_get_file_size(WalWriteMethod *wwmethod, const char *pathname); static char *dir_get_file_name(WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix); static ssize_t dir_write(Walfile *f, const void *buf, size_t count); -static int dir_sync(Walfile *f); +static int dir_sync(Walfile *f); static bool dir_finish(WalWriteMethod *wwmethod); static void dir_free(WalWriteMethod *wwmethod); @@ -72,7 +72,7 @@ const WalWriteMethodOps WalDirectoryMethodOps = { */ typedef struct DirectoryMethodData { - WalWriteMethod base; + WalWriteMethod base; char *basedir; } DirectoryMethodData; @@ -660,14 +660,14 @@ static Walfile *tar_open_for_write(WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size); -static int tar_close(Walfile *f, WalCloseMethod method); +static int tar_close(Walfile *f, WalCloseMethod method); static bool tar_existsfile(WalWriteMethod *wwmethod, const char *pathname); static ssize_t tar_get_file_size(WalWriteMethod *wwmethod, const char *pathname); static char *tar_get_file_name(WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix); static ssize_t tar_write(Walfile *f, const void *buf, size_t count); -static int tar_sync(Walfile *f); +static int tar_sync(Walfile *f); static bool tar_finish(WalWriteMethod *wwmethod); static void tar_free(WalWriteMethod *wwmethod); @@ -693,7 +693,7 @@ typedef struct TarMethodFile typedef struct TarMethodData { - WalWriteMethod base; + WalWriteMethod base; char *tarfilename; int fd; TarMethodFile *currentfile; @@ -1353,7 +1353,7 @@ CreateWalTarMethod(const char *tarbase, { TarMethodData *wwmethod; const char *suffix = (compression_algorithm == PG_COMPRESSION_GZIP) ? - ".tar.gz" : ".tar"; + ".tar.gz" : ".tar"; wwmethod = pg_malloc0(sizeof(TarMethodData)); *((const WalWriteMethodOps **) &wwmethod->base.ops) = diff --git a/src/bin/pg_basebackup/walmethods.h b/src/bin/pg_basebackup/walmethods.h index d7284c08ce..54a22fe607 100644 --- a/src/bin/pg_basebackup/walmethods.h +++ b/src/bin/pg_basebackup/walmethods.h @@ -19,11 +19,12 @@ typedef struct WalWriteMethod *wwmethod; off_t currpos; char *pathname; + /* * MORE DATA FOLLOWS AT END OF STRUCT * - * Each WalWriteMethod is expected to embed this as the first member of - * a larger struct with method-specific fields following. 
+ * Each WalWriteMethod is expected to embed this as the first member of a + * larger struct with method-specific fields following. */ } Walfile; @@ -45,7 +46,7 @@ typedef struct WalWriteMethodOps * automatically renamed in close(). If pad_to_size is specified, the file * will be padded with NUL up to that size, if supported by the Walmethod. */ - Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size); + Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size); /* * Close an open Walfile, using one or more methods for handling automatic @@ -107,11 +108,12 @@ struct WalWriteMethod bool sync; const char *lasterrstring; /* if set, takes precedence over lasterrno */ int lasterrno; + /* * MORE DATA FOLLOWS AT END OF STRUCT * - * Each WalWriteMethod is expected to embed this as the first member of - * a larger struct with method-specific fields following. + * Each WalWriteMethod is expected to embed this as the first member of a + * larger struct with method-specific fields following. */ }; diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c index db19058354..38018b1691 100644 --- a/src/bin/pg_dump/compress_io.c +++ b/src/bin/pg_dump/compress_io.c @@ -87,8 +87,8 @@ char * supports_compression(const pg_compress_specification compression_spec) { - const pg_compress_algorithm algorithm = compression_spec.algorithm; - bool supported = false; + const pg_compress_algorithm algorithm = compression_spec.algorithm; + bool supported = false; if (algorithm == PG_COMPRESSION_NONE) supported = true; diff --git a/src/bin/pg_dump/compress_lz4.c b/src/bin/pg_dump/compress_lz4.c index 423e1b7976..8b6cc06745 100644 --- a/src/bin/pg_dump/compress_lz4.c +++ b/src/bin/pg_dump/compress_lz4.c @@ -44,8 +44,8 @@ typedef struct LZ4State LZ4F_preferences_t prefs; - LZ4F_compressionContext_t ctx; - LZ4F_decompressionContext_t dtx; + LZ4F_compressionContext_t ctx; + LZ4F_decompressionContext_t dtx; /* * Used by the Stream API's lazy initialization. 
@@ -148,8 +148,8 @@ ReadDataFromArchiveLZ4(ArchiveHandle *AH, CompressorState *cs) char *outbuf; char *readbuf; LZ4F_decompressionContext_t ctx = NULL; - LZ4F_decompressOptions_t dec_opt; - LZ4F_errorCode_t status; + LZ4F_decompressOptions_t dec_opt; + LZ4F_errorCode_t status; memset(&dec_opt, 0, sizeof(dec_opt)); status = LZ4F_createDecompressionContext(&ctx, LZ4F_VERSION); diff --git a/src/bin/pg_dump/compress_zstd.c b/src/bin/pg_dump/compress_zstd.c index 001b4f1513..23789c6193 100644 --- a/src/bin/pg_dump/compress_zstd.c +++ b/src/bin/pg_dump/compress_zstd.c @@ -82,8 +82,8 @@ _ZstdCStreamParams(pg_compress_specification compress) if (compress.options & PG_COMPRESSION_OPTION_LONG_DISTANCE) _Zstd_CCtx_setParam_or_die(cstream, - ZSTD_c_enableLongDistanceMatching, - compress.long_distance, "long"); + ZSTD_c_enableLongDistanceMatching, + compress.long_distance, "long"); return cstream; } diff --git a/src/bin/pg_dump/compress_zstd.h b/src/bin/pg_dump/compress_zstd.h index 2aaa6b100b..d0ab1351fd 100644 --- a/src/bin/pg_dump/compress_zstd.h +++ b/src/bin/pg_dump/compress_zstd.h @@ -18,8 +18,8 @@ #include "compress_io.h" extern void InitCompressorZstd(CompressorState *cs, - const pg_compress_specification compression_spec); + const pg_compress_specification compression_spec); extern void InitCompressFileHandleZstd(CompressFileHandle *CFH, - const pg_compress_specification compression_spec); + const pg_compress_specification compression_spec); -#endif /* COMPRESS_ZSTD_H */ +#endif /* COMPRESS_ZSTD_H */ diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index d518349e10..39ebcfec32 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -386,10 +386,11 @@ RestoreArchive(Archive *AHX) { if (te->hadDumper && (te->reqs & REQ_DATA) != 0) { - char *errmsg = supports_compression(AH->compression_spec); + char *errmsg = supports_compression(AH->compression_spec); + if (errmsg) pg_fatal("cannot restore from compressed archive (%s)", - errmsg); + errmsg); else break; } @@ -2985,11 +2986,11 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH) if (!te->hadDumper) { /* - * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then - * it is considered a data entry. We don't need to check for the - * BLOBS entry or old-style BLOB COMMENTS, because they will have - * hadDumper = true ... but we do need to check new-style BLOB ACLs, - * comments, etc. + * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it + * is considered a data entry. We don't need to check for the BLOBS + * entry or old-style BLOB COMMENTS, because they will have hadDumper + * = true ... but we do need to check new-style BLOB ACLs, comments, + * etc. */ if (strcmp(te->desc, "SEQUENCE SET") == 0 || strcmp(te->desc, "BLOB") == 0 || @@ -3480,6 +3481,7 @@ _getObjectDescription(PQExpBuffer buf, const TocEntry *te) { appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag); } + /* * These object types require additional decoration. Fortunately, the * information needed is exactly what's in the DROP command. @@ -3639,6 +3641,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData) initPQExpBuffer(&temp); _getObjectDescription(&temp, te); + /* * If _getObjectDescription() didn't fill the buffer, then there is no * owner. 
@@ -3802,7 +3805,7 @@ ReadHead(ArchiveHandle *AH) if (errmsg) { pg_log_warning("archive is compressed, but this installation does not support compression (%s) -- no data will be available", - errmsg); + errmsg); pg_free(errmsg); } diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index babd23b4eb..db5fb43bae 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -684,10 +684,10 @@ _LoadLOs(ArchiveHandle *AH) tarClose(AH, th); /* - * Once we have found the first LO, stop at the first non-LO - * entry (which will be 'blobs.toc'). This coding would eat all - * the rest of the archive if there are no LOs ... but this - * function shouldn't be called at all in that case. + * Once we have found the first LO, stop at the first non-LO entry + * (which will be 'blobs.toc'). This coding would eat all the + * rest of the archive if there are no LOs ... but this function + * shouldn't be called at all in that case. */ if (foundLO) break; diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 058244cd17..cd003c4448 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -741,9 +741,9 @@ main(int argc, char **argv) pg_fatal("%s", error_detail); /* - * Disable support for zstd workers for now - these are based on threading, - * and it's unclear how it interacts with parallel dumps on platforms where - * that relies on threads too (e.g. Windows). + * Disable support for zstd workers for now - these are based on + * threading, and it's unclear how it interacts with parallel dumps on + * platforms where that relies on threads too (e.g. Windows). */ if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS) pg_log_warning("compression option \"%s\" is not currently supported by pg_dump", @@ -879,8 +879,8 @@ main(int argc, char **argv) /* * Dumping LOs is the default for dumps where an inclusion switch is not * used (an "include everything" dump). -B can be used to exclude LOs - * from those dumps. -b can be used to include LOs even when an - * inclusion switch is used. + * from those dumps. -b can be used to include LOs even when an inclusion + * switch is used. * * -s means "schema only" and LOs are data, not schema, so we never * include LOs when -s is used. @@ -915,8 +915,8 @@ main(int argc, char **argv) * data or the associated metadata that resides in the pg_largeobject and * pg_largeobject_metadata tables, respectively. * - * However, we do need to collect LO information as there may be - * comments or other information on LOs that we do need to dump out. + * However, we do need to collect LO information as there may be comments + * or other information on LOs that we do need to dump out. 
*/ if (dopt.outputLOs || dopt.binary_upgrade) getLOs(fout); @@ -3323,8 +3323,8 @@ dumpDatabase(Archive *fout) appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n"); for (int i = 0; i < PQntuples(lo_res); ++i) { - Oid oid; - RelFileNumber relfilenumber; + Oid oid; + RelFileNumber relfilenumber; appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n" "SET relfrozenxid = '%u', relminmxid = '%u'\n" @@ -3607,8 +3607,8 @@ getLOs(Archive *fout) loinfo[i].dobj.components |= DUMP_COMPONENT_ACL; /* - * In binary-upgrade mode for LOs, we do *not* dump out the LO - * data, as it will be copied by pg_upgrade, which simply copies the + * In binary-upgrade mode for LOs, we do *not* dump out the LO data, + * as it will be copied by pg_upgrade, which simply copies the * pg_largeobject table. We *do* however dump out anything but the * data, as pg_upgrade copies just pg_largeobject, but not * pg_largeobject_metadata, after the dump is restored. @@ -14845,7 +14845,10 @@ dumpSecLabel(Archive *fout, const char *type, const char *name, if (dopt->no_security_labels) return; - /* Security labels are schema not data ... except large object labels are data */ + /* + * Security labels are schema not data ... except large object labels are + * data + */ if (strcmp(type, "LARGE OBJECT") != 0) { if (dopt->dataOnly) @@ -15178,7 +15181,7 @@ dumpTable(Archive *fout, const TableInfo *tbinfo) if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL) { const char *objtype = - (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE"; + (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE"; tableAclDumpId = dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId, @@ -16649,10 +16652,12 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo) { appendPQExpBufferStr(q, coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE"); + /* * PRIMARY KEY constraints should not be using NULLS NOT DISTINCT * indexes. Being able to create this was fixed, but we need to - * make the index distinct in order to be able to restore the dump. + * make the index distinct in order to be able to restore the + * dump. */ if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p') appendPQExpBufferStr(q, " NULLS NOT DISTINCT"); @@ -17874,7 +17879,7 @@ processExtensionTables(Archive *fout, ExtensionInfo extinfo[], TableInfo *configtbl; Oid configtbloid = atooid(extconfigarray[j]); bool dumpobj = - curext->dobj.dump & DUMP_COMPONENT_DEFINITION; + curext->dobj.dump & DUMP_COMPONENT_DEFINITION; configtbl = findTableByOid(configtbloid); if (configtbl == NULL) diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 71a1319865..393c3e4987 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -949,7 +949,7 @@ static void dumpRoleMembership(PGconn *conn) { PQExpBuffer buf = createPQExpBuffer(); - PQExpBuffer optbuf = createPQExpBuffer(); + PQExpBuffer optbuf = createPQExpBuffer(); PGresult *res; int start = 0, end, @@ -996,8 +996,8 @@ dumpRoleMembership(PGconn *conn) /* * We can't dump these GRANT commands in arbitrary order, because a role - * that is named as a grantor must already have ADMIN OPTION on the - * role for which it is granting permissions, except for the bootstrap + * that is named as a grantor must already have ADMIN OPTION on the role + * for which it is granting permissions, except for the bootstrap * superuser, who can always be named as the grantor. * * We handle this by considering these grants role by role. 
For each role, @@ -1005,8 +1005,8 @@ dumpRoleMembership(PGconn *conn) * superuser. Every time we grant ADMIN OPTION on the role to some user, * that user also becomes an allowable grantor. We make repeated passes * over the grants for the role, each time dumping those whose grantors - * are allowable and which we haven't done yet. Eventually this should - * let us dump all the grants. + * are allowable and which we haven't done yet. Eventually this should let + * us dump all the grants. */ total = PQntuples(res); while (start < total) @@ -1021,7 +1021,7 @@ dumpRoleMembership(PGconn *conn) /* All memberships for a single role should be adjacent. */ for (end = start; end < total; ++end) { - char *otherrole; + char *otherrole; otherrole = PQgetvalue(res, end, 0); if (strcmp(role, otherrole) != 0) @@ -1104,7 +1104,7 @@ dumpRoleMembership(PGconn *conn) appendPQExpBufferStr(optbuf, "ADMIN OPTION"); if (dump_grant_options) { - char *inherit_option; + char *inherit_option; if (optbuf->data[0] != '\0') appendPQExpBufferStr(optbuf, ", "); @@ -1401,7 +1401,7 @@ dumpUserConfig(PGconn *conn, const char *username) for (int i = 0; i < PQntuples(res); i++) { - char *userset = NULL; + char *userset = NULL; if (server_version >= 160000) userset = PQgetvalue(res, i, 1); diff --git a/src/bin/pg_test_fsync/pg_test_fsync.c b/src/bin/pg_test_fsync/pg_test_fsync.c index 3d5e8f30ab..435df8d808 100644 --- a/src/bin/pg_test_fsync/pg_test_fsync.c +++ b/src/bin/pg_test_fsync/pg_test_fsync.c @@ -623,7 +623,7 @@ static void print_elapse(struct timeval start_t, struct timeval stop_t, int ops) { double total_time = (stop_t.tv_sec - start_t.tv_sec) + - (stop_t.tv_usec - start_t.tv_usec) * 0.000001; + (stop_t.tv_usec - start_t.tv_usec) * 0.000001; double per_second = ops / total_time; double avg_op_time_us = (total_time / ops) * USECS_SEC; diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index fea159689e..f36ef85702 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -105,8 +105,8 @@ check_and_dump_old_cluster(bool live_check) check_for_isn_and_int8_passing_mismatch(&old_cluster); /* - * PG 16 increased the size of the 'aclitem' type, which breaks the on-disk - * format for existing data. + * PG 16 increased the size of the 'aclitem' type, which breaks the + * on-disk format for existing data. */ if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1500) check_for_aclitem_data_type_usage(&old_cluster); diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c index 85ed15ae4a..a9988abfe1 100644 --- a/src/bin/pg_upgrade/info.c +++ b/src/bin/pg_upgrade/info.c @@ -61,9 +61,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, new_relnum < new_db->rel_arr.nrels) { RelInfo *old_rel = (old_relnum < old_db->rel_arr.nrels) ? - &old_db->rel_arr.rels[old_relnum] : NULL; + &old_db->rel_arr.rels[old_relnum] : NULL; RelInfo *new_rel = (new_relnum < new_db->rel_arr.nrels) ? 
- &new_db->rel_arr.rels[new_relnum] : NULL; + &new_db->rel_arr.rels[new_relnum] : NULL; /* handle running off one array before the other */ if (!new_rel) @@ -302,14 +302,14 @@ get_db_and_rel_infos(ClusterInfo *cluster) static void get_template0_info(ClusterInfo *cluster) { - PGconn *conn = connectToServer(cluster, "template1"); - DbLocaleInfo *locale; - PGresult *dbres; - int i_datencoding; - int i_datlocprovider; - int i_datcollate; - int i_datctype; - int i_daticulocale; + PGconn *conn = connectToServer(cluster, "template1"); + DbLocaleInfo *locale; + PGresult *dbres; + int i_datencoding; + int i_datlocprovider; + int i_datcollate; + int i_datctype; + int i_daticulocale; if (GET_MAJOR_VERSION(cluster->major_version) >= 1500) dbres = executeQueryOrDie(conn, diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c index 75bab0a04c..4562dafcff 100644 --- a/src/bin/pg_upgrade/pg_upgrade.c +++ b/src/bin/pg_upgrade/pg_upgrade.c @@ -379,10 +379,10 @@ setup(char *argv0, bool *live_check) static void set_locale_and_encoding(void) { - PGconn *conn_new_template1; - char *datcollate_literal; - char *datctype_literal; - char *daticulocale_literal = NULL; + PGconn *conn_new_template1; + char *datcollate_literal; + char *datctype_literal; + char *daticulocale_literal = NULL; DbLocaleInfo *locale = old_cluster.template0; prep_status("Setting locale and encoding for new cluster"); diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index 70ed034e70..7dbb2ed6a7 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -4621,7 +4621,7 @@ processXactStats(TState *thread, CState *st, pg_time_usec_t *now, double latency = 0.0, lag = 0.0; bool detailed = progress || throttle_delay || latency_limit || - use_log || per_script_stats; + use_log || per_script_stats; if (detailed && !skipped && st->estatus == ESTATUS_NO_ERROR) { @@ -6400,7 +6400,7 @@ printResults(StatsData *total, StatsData *sstats = &sql_script[i].stats; int64 script_failures = getFailures(sstats); int64 script_total_cnt = - sstats->cnt + sstats->skipped + script_failures; + sstats->cnt + sstats->skipped + script_failures; printf("SQL script %d: %s\n" " - weight: %d (targets %.1f%% of total)\n" diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 97f7d97220..5818a2e79e 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -4511,7 +4511,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet) /* header line width in expanded mode */ else if (strcmp(param, "xheader_width") == 0) { - if (! 
value) + if (!value) ; else if (pg_strcasecmp(value, "full") == 0) popt->topt.expanded_header_width_type = PRINT_XHEADER_FULL; @@ -5059,15 +5059,16 @@ pset_value_string(const char *param, printQueryOpt *popt) else if (strcmp(param, "xheader_width") == 0) { if (popt->topt.expanded_header_width_type == PRINT_XHEADER_FULL) - return(pstrdup("full")); + return (pstrdup("full")); else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_COLUMN) - return(pstrdup("column")); + return (pstrdup("column")); else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_PAGE) - return(pstrdup("page")); + return (pstrdup("page")); else { /* must be PRINT_XHEADER_EXACT_WIDTH */ - char wbuff[32]; + char wbuff[32]; + snprintf(wbuff, sizeof(wbuff), "%d", popt->topt.expanded_header_exact_width); return pstrdup(wbuff); diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c index c0e6e8e6ed..5973df2e39 100644 --- a/src/bin/psql/common.c +++ b/src/bin/psql/common.c @@ -1432,7 +1432,7 @@ ExecQueryAndProcessResults(const char *query, INSTR_TIME_SET_ZERO(before); if (pset.bind_flag) - success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char * const *) pset.bind_params, NULL, NULL, 0); + success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char *const *) pset.bind_params, NULL, NULL, 0); else success = PQsendQuery(pset.db, query); diff --git a/src/bin/psql/crosstabview.c b/src/bin/psql/crosstabview.c index 67fcdb49dd..e1ad0e61d9 100644 --- a/src/bin/psql/crosstabview.c +++ b/src/bin/psql/crosstabview.c @@ -532,7 +532,7 @@ avlInsertNode(avl_tree *tree, avl_node **node, pivot_field field) if (current == tree->end) { avl_node *new_node = (avl_node *) - pg_malloc(sizeof(avl_node)); + pg_malloc(sizeof(avl_node)); new_node->height = 1; new_node->field = field; diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 83a37ee601..1d836cac3f 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -1160,8 +1160,8 @@ permissionsList(const char *pattern, bool showSystem) return true; error_return: - termPQExpBuffer(&buf); - return false; + termPQExpBuffer(&buf); + return false; } diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h index 73d4b393bc..1106954236 100644 --- a/src/bin/psql/settings.h +++ b/src/bin/psql/settings.h @@ -96,7 +96,8 @@ typedef struct _psqlSettings char *gset_prefix; /* one-shot prefix argument for \gset */ bool gdesc_flag; /* one-shot request to describe query result */ bool gexec_flag; /* one-shot request to execute query result */ - bool bind_flag; /* one-shot request to use extended query protocol */ + bool bind_flag; /* one-shot request to use extended query + * protocol */ int bind_nparams; /* number of parameters */ char **bind_params; /* parameters for extended query protocol call */ bool crosstab_flag; /* one-shot request to crosstab result */ diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c index 687af9c1f3..4b17a07089 100644 --- a/src/bin/scripts/vacuumdb.c +++ b/src/bin/scripts/vacuumdb.c @@ -52,12 +52,12 @@ typedef struct vacuumingOptions /* object filter options */ typedef enum { - OBJFILTER_NONE = 0, /* no filter used */ - OBJFILTER_ALL_DBS = (1 << 0), /* -a | --all */ - OBJFILTER_DATABASE = (1 << 1), /* -d | --dbname */ - OBJFILTER_TABLE = (1 << 2), /* -t | --table */ - OBJFILTER_SCHEMA = (1 << 3), /* -n | --schema */ - OBJFILTER_SCHEMA_EXCLUDE = (1 << 4) /* -N | --exclude-schema */ + OBJFILTER_NONE = 0, /* no filter used */ + OBJFILTER_ALL_DBS = (1 << 0), /* -a | --all */ + 
OBJFILTER_DATABASE = (1 << 1), /* -d | --dbname */ + OBJFILTER_TABLE = (1 << 2), /* -t | --table */ + OBJFILTER_SCHEMA = (1 << 3), /* -n | --schema */ + OBJFILTER_SCHEMA_EXCLUDE = (1 << 4) /* -N | --exclude-schema */ } VacObjFilter; VacObjFilter objfilter = OBJFILTER_NONE; @@ -83,7 +83,7 @@ static void run_vacuum_command(PGconn *conn, const char *sql, bool echo, static void help(const char *progname); -void check_objfilter(void); +void check_objfilter(void); /* For analyze-in-stages mode */ #define ANALYZE_NO_STAGE -1 diff --git a/src/fe_utils/print.c b/src/fe_utils/print.c index 3396f9b462..7af1ccb6b5 100644 --- a/src/fe_utils/print.c +++ b/src/fe_utils/print.c @@ -1295,10 +1295,11 @@ print_aligned_vertical_line(const printTableOpt *topt, dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth))); if (opt_border == 1) dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 3))); + /* - * Handling the xheader width for border=2 doesn't make - * much sense because this format has an additional - * right border, but keep this for consistency. + * Handling the xheader width for border=2 doesn't make much + * sense because this format has an additional right border, + * but keep this for consistency. */ if (opt_border == 2) dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 7))); diff --git a/src/include/access/amapi.h b/src/include/access/amapi.h index 281039ef67..4476ff7fba 100644 --- a/src/include/access/amapi.h +++ b/src/include/access/amapi.h @@ -245,7 +245,7 @@ typedef struct IndexAmRoutine /* does AM use maintenance_work_mem? */ bool amusemaintenanceworkmem; /* does AM store tuple information only at block granularity? */ - bool amsummarizing; + bool amsummarizing; /* OR of parallel vacuum flags. See vacuum.h for flags. */ uint8 amparallelvacuumoptions; /* type of data stored in index, or InvalidOid if variable */ diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index ee275650bd..3edc740a3f 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -550,6 +550,7 @@ extern void gistSplitByKey(Relation r, Page page, IndexTuple *itup, /* gistbuild.c */ extern IndexBuildResult *gistbuild(Relation heap, Relation index, struct IndexInfo *indexInfo); + /* gistbuildbuffers.c */ extern GISTBuildBuffers *gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel); diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index bb6d4f0315..0a19a233db 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -902,7 +902,7 @@ table_beginscan(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key) { uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags); } diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h index 30d20c323e..da32c7db77 100644 --- a/src/include/access/xlogreader.h +++ b/src/include/access/xlogreader.h @@ -332,6 +332,7 @@ extern XLogReaderState *XLogReaderAllocate(int wal_segment_size, const char *waldir, XLogReaderRoutine *routine, void *private_data); + /* Free an XLogReader */ extern void XLogReaderFree(XLogReaderState *state); diff --git a/src/include/catalog/pg_auth_members.h b/src/include/catalog/pg_auth_members.h index b8c751b9d1..3a313001fa 100644 --- a/src/include/catalog/pg_auth_members.h +++ b/src/include/catalog/pg_auth_members.h @@ -34,7 +34,7 @@ 
CATALOG(pg_auth_members,1261,AuthMemRelationId) BKI_SHARED_RELATION BKI_ROWTYPE_ Oid member BKI_LOOKUP(pg_authid); /* ID of a member of that role */ Oid grantor BKI_LOOKUP(pg_authid); /* who granted the membership */ bool admin_option; /* granted with admin option? */ - bool inherit_option; /* exercise privileges without SET ROLE? */ + bool inherit_option; /* exercise privileges without SET ROLE? */ bool set_option; /* use SET ROLE to the target role? */ } FormData_pg_auth_members; diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h index 91d729d62d..1d40eebc78 100644 --- a/src/include/catalog/pg_subscription.h +++ b/src/include/catalog/pg_subscription.h @@ -88,10 +88,10 @@ CATALOG(pg_subscription,6100,SubscriptionRelationId) BKI_SHARED_RELATION BKI_ROW bool subdisableonerr; /* True if a worker error should cause the * subscription to be disabled */ - bool subpasswordrequired; /* Must connection use a password? */ + bool subpasswordrequired; /* Must connection use a password? */ - bool subrunasowner; /* True if replication should execute as - * the subscription owner */ + bool subrunasowner; /* True if replication should execute as the + * subscription owner */ #ifdef CATALOG_VARLEN /* variable-length fields start here */ /* Connection string to the publisher */ diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h index 8ee59d2c71..0a1c8ff7c4 100644 --- a/src/include/executor/hashjoin.h +++ b/src/include/executor/hashjoin.h @@ -360,6 +360,6 @@ typedef struct HashJoinTableData ParallelHashJoinState *parallel_state; ParallelHashJoinBatchAccessor *batches; dsa_pointer current_chunk_shared; -} HashJoinTableData; +} HashJoinTableData; #endif /* HASHJOIN_H */ diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index ff64b7cb98..3d34575a22 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -409,7 +409,7 @@ slot_getattr(TupleTableSlot *slot, int attnum, static inline Datum slot_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull) { - Assert(attnum < 0); /* caller error */ + Assert(attnum < 0); /* caller error */ if (attnum == TableOidAttributeNumber) { diff --git a/src/include/fe_utils/print.h b/src/include/fe_utils/print.h index 54f783c907..cc6652def9 100644 --- a/src/include/fe_utils/print.h +++ b/src/include/fe_utils/print.h @@ -69,10 +69,13 @@ typedef enum printTextLineWrap typedef enum printXheaderWidthType { /* Expanded header line width variants */ - PRINT_XHEADER_FULL, /* do not truncate header line (this is the default) */ - PRINT_XHEADER_COLUMN, /* only print header line above the first column */ - PRINT_XHEADER_PAGE, /* header line must not be longer than terminal width */ - PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */ + PRINT_XHEADER_FULL, /* do not truncate header line (this is the + * default) */ + PRINT_XHEADER_COLUMN, /* only print header line above the first + * column */ + PRINT_XHEADER_PAGE, /* header line must not be longer than + * terminal width */ + PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */ } printXheaderWidthType; typedef struct printTextFormat @@ -110,8 +113,10 @@ typedef struct printTableOpt enum printFormat format; /* see enum above */ unsigned short int expanded; /* expanded/vertical output (if supported * by output format); 0=no, 1=yes, 2=auto */ - printXheaderWidthType expanded_header_width_type; /* width type for header line in expanded mode */ - int expanded_header_exact_width; /* explicit width for 
header line in expanded mode */ + printXheaderWidthType expanded_header_width_type; /* width type for header + * line in expanded mode */ + int expanded_header_exact_width; /* explicit width for header + * line in expanded mode */ unsigned short int border; /* Print a border around the table. 0=none, * 1=dividing lines, 2=full */ unsigned short int pager; /* use pager for output (if to stdout and diff --git a/src/include/funcapi.h b/src/include/funcapi.h index 11febb138b..cc0cca3272 100644 --- a/src/include/funcapi.h +++ b/src/include/funcapi.h @@ -231,6 +231,7 @@ HeapTupleGetDatum(const HeapTupleData *tuple) { return HeapTupleHeaderGetDatum(tuple->t_data); } + /* obsolete version of above */ #define TupleGetDatum(_slot, _tuple) HeapTupleGetDatum(_tuple) diff --git a/src/include/partitioning/partprune.h b/src/include/partitioning/partprune.h index c0d6889d47..d4b7a1c910 100644 --- a/src/include/partitioning/partprune.h +++ b/src/include/partitioning/partprune.h @@ -70,10 +70,10 @@ typedef struct PartitionPruneContext #define PruneCxtStateIdx(partnatts, step_id, keyno) \ ((partnatts) * (step_id) + (keyno)) -extern int make_partition_pruneinfo(struct PlannerInfo *root, - struct RelOptInfo *parentrel, - List *subpaths, - List *prunequal); +extern int make_partition_pruneinfo(struct PlannerInfo *root, + struct RelOptInfo *parentrel, + List *subpaths, + List *prunequal); extern Bitmapset *prune_append_rel_partitions(struct RelOptInfo *rel); extern Bitmapset *get_matching_partitions(PartitionPruneContext *context, List *pruning_steps); diff --git a/src/include/port/win32ntdll.h b/src/include/port/win32ntdll.h index 18ff6f4b41..1ce9360ec1 100644 --- a/src/include/port/win32ntdll.h +++ b/src/include/port/win32ntdll.h @@ -21,9 +21,9 @@ #define FLUSH_FLAGS_FILE_DATA_SYNC_ONLY 0x4 #endif -typedef NTSTATUS (__stdcall *RtlGetLastNtStatus_t) (void); -typedef ULONG (__stdcall *RtlNtStatusToDosError_t) (NTSTATUS); -typedef NTSTATUS (__stdcall *NtFlushBuffersFileEx_t) (HANDLE, ULONG, PVOID, ULONG, PIO_STATUS_BLOCK); +typedef NTSTATUS (__stdcall * RtlGetLastNtStatus_t) (void); +typedef ULONG (__stdcall * RtlNtStatusToDosError_t) (NTSTATUS); +typedef NTSTATUS (__stdcall * NtFlushBuffersFileEx_t) (HANDLE, ULONG, PVOID, ULONG, PIO_STATUS_BLOCK); extern PGDLLIMPORT RtlGetLastNtStatus_t pg_RtlGetLastNtStatus; extern PGDLLIMPORT RtlNtStatusToDosError_t pg_RtlNtStatusToDosError; diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index e37f5120eb..1b9db22acb 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -26,7 +26,7 @@ typedef enum { LOGICAL_REP_MODE_BUFFERED, LOGICAL_REP_MODE_IMMEDIATE -} LogicalRepMode; +} LogicalRepMode; /* an individual tuple, stored in one chunk of memory */ typedef struct ReorderBufferTupleBuf diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h index 6ab00daa2e..0f5fb6be00 100644 --- a/src/include/storage/bufmgr.h +++ b/src/include/storage/bufmgr.h @@ -89,7 +89,7 @@ typedef enum ExtendBufferedFlags /* internal flags follow */ EB_LOCK_TARGET = (1 << 5), -} ExtendBufferedFlags; +} ExtendBufferedFlags; /* * To identify the relation - either relation or smgr + relpersistence has to diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h index 6ae434596a..8575bea25c 100644 --- a/src/include/storage/lock.h +++ b/src/include/storage/lock.h @@ -314,7 +314,7 @@ typedef struct LOCK LOCKMASK grantMask; /* bitmask for lock types already granted */ LOCKMASK waitMask; /* 
bitmask for lock types awaited */ dlist_head procLocks; /* list of PROCLOCK objects assoc. with lock */ - dclist_head waitProcs; /* list of PGPROC objects waiting on lock */ + dclist_head waitProcs; /* list of PGPROC objects waiting on lock */ int requested[MAX_LOCKMODES]; /* counts of requested locks */ int nRequested; /* total of requested[] array */ int granted[MAX_LOCKMODES]; /* counts of granted locks */ diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index d2c7afb8f4..34169e5889 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -26,10 +26,11 @@ struct PGPROC; /* what state of the wait process is a backend in */ typedef enum LWLockWaitState { - LW_WS_NOT_WAITING, /* not currently waiting / woken up */ - LW_WS_WAITING, /* currently waiting */ - LW_WS_PENDING_WAKEUP, /* removed from waitlist, but not yet signalled */ -} LWLockWaitState; + LW_WS_NOT_WAITING, /* not currently waiting / woken up */ + LW_WS_WAITING, /* currently waiting */ + LW_WS_PENDING_WAKEUP, /* removed from waitlist, but not yet + * signalled */ +} LWLockWaitState; /* * Code outside of lwlock.c should not manipulate the contents of this diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h index 142a195d0e..93f84500bf 100644 --- a/src/include/storage/predicate_internals.h +++ b/src/include/storage/predicate_internals.h @@ -196,7 +196,7 @@ typedef struct RWConflictData dlist_node inLink; /* link for list of conflicts in to a sxact */ SERIALIZABLEXACT *sxactOut; SERIALIZABLEXACT *sxactIn; -} RWConflictData; +} RWConflictData; typedef struct RWConflictData *RWConflict; diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 4258cd92c9..ef74f32693 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -387,11 +387,11 @@ typedef struct PROC_HDR /* Head of list of free PGPROC structures */ dlist_head freeProcs; /* Head of list of autovacuum's free PGPROC structures */ - dlist_head autovacFreeProcs; + dlist_head autovacFreeProcs; /* Head of list of bgworker free PGPROC structures */ - dlist_head bgworkerFreeProcs; + dlist_head bgworkerFreeProcs; /* Head of list of walsender free PGPROC structures */ - dlist_head walsenderFreeProcs; + dlist_head walsenderFreeProcs; /* First pgproc waiting for group XID clear */ pg_atomic_uint32 procArrayGroupFirst; /* First pgproc waiting for group transaction status update */ diff --git a/src/include/utils/backend_status.h b/src/include/utils/backend_status.h index 9651cb1d0c..cfb26d2bcc 100644 --- a/src/include/utils/backend_status.h +++ b/src/include/utils/backend_status.h @@ -271,13 +271,13 @@ typedef struct LocalPgBackendStatus /* * Number of cached subtransactions in the current session. */ - int backend_subxact_count; + int backend_subxact_count; /* * The number of subtransactions in the current session which exceeded the * cached subtransaction limit. 
*/ - bool backend_subxact_overflowed; + bool backend_subxact_overflowed; } LocalPgBackendStatus; diff --git a/src/include/utils/pg_locale.h b/src/include/utils/pg_locale.h index 03ab598215..e2a7243542 100644 --- a/src/include/utils/pg_locale.h +++ b/src/include/utils/pg_locale.h @@ -40,7 +40,7 @@ extern PGDLLIMPORT char *locale_messages; extern PGDLLIMPORT char *locale_monetary; extern PGDLLIMPORT char *locale_numeric; extern PGDLLIMPORT char *locale_time; -extern PGDLLIMPORT int icu_validation_level; +extern PGDLLIMPORT int icu_validation_level; /* lc_time localization cache */ extern PGDLLIMPORT char *localized_abbrev_days[]; @@ -49,7 +49,7 @@ extern PGDLLIMPORT char *localized_abbrev_months[]; extern PGDLLIMPORT char *localized_full_months[]; /* is the databases's LC_CTYPE the C locale? */ -extern PGDLLIMPORT bool database_ctype_is_c; +extern PGDLLIMPORT bool database_ctype_is_c; extern bool check_locale(int category, const char *locale, char **canonname); extern char *pg_perm_setlocale(int category, const char *locale); @@ -104,9 +104,9 @@ extern bool pg_locale_deterministic(pg_locale_t locale); extern pg_locale_t pg_newlocale_from_collation(Oid collid); extern char *get_collation_actual_version(char collprovider, const char *collcollate); -extern int pg_strcoll(const char *arg1, const char *arg2, pg_locale_t locale); -extern int pg_strncoll(const char *arg1, size_t len1, - const char *arg2, size_t len2, pg_locale_t locale); +extern int pg_strcoll(const char *arg1, const char *arg2, pg_locale_t locale); +extern int pg_strncoll(const char *arg1, size_t len1, + const char *arg2, size_t len2, pg_locale_t locale); extern bool pg_strxfrm_enabled(pg_locale_t locale); extern size_t pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale); diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 31f84e90eb..1426a353cd 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -161,7 +161,7 @@ typedef struct RelationData Bitmapset *rd_keyattr; /* cols that can be ref'd by foreign keys */ Bitmapset *rd_pkattr; /* cols included in primary key */ Bitmapset *rd_idattr; /* included in replica identity index */ - Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */ + Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */ Bitmapset *rd_summarizedattr; /* cols indexed by summarizing indexes */ PublicationDesc *rd_pubdesc; /* publication descriptor, or NULL */ diff --git a/src/include/utils/varlena.h b/src/include/utils/varlena.h index e72ebaddbf..77f5b24735 100644 --- a/src/include/utils/varlena.h +++ b/src/include/utils/varlena.h @@ -44,7 +44,7 @@ typedef struct ClosestMatchState int min_d; int max_d; const char *match; -} ClosestMatchState; +} ClosestMatchState; extern void initClosestMatch(ClosestMatchState *state, const char *source, int max_d); extern void updateClosestMatch(ClosestMatchState *state, const char *candidate); diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c index 7036e7c48d..fa56276758 100644 --- a/src/interfaces/ecpg/ecpglib/data.c +++ b/src/interfaces/ecpg/ecpglib/data.c @@ -521,7 +521,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, case ECPGt_bytea: { struct ECPGgeneric_bytea *variable = - (struct ECPGgeneric_bytea *) (var + offset * act_tuple); + (struct ECPGgeneric_bytea *) (var + offset * act_tuple); long dst_size, src_size, dec_size; @@ -690,7 +690,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, case 
ECPGt_varchar: { struct ECPGgeneric_varchar *variable = - (struct ECPGgeneric_varchar *) (var + offset * act_tuple); + (struct ECPGgeneric_varchar *) (var + offset * act_tuple); variable->len = size; if (varcharsize == 0) diff --git a/src/interfaces/ecpg/ecpglib/descriptor.c b/src/interfaces/ecpg/ecpglib/descriptor.c index 649a71c286..883a210a81 100644 --- a/src/interfaces/ecpg/ecpglib/descriptor.c +++ b/src/interfaces/ecpg/ecpglib/descriptor.c @@ -210,7 +210,7 @@ get_char_item(int lineno, void *var, enum ECPGttype vartype, char *value, int va case ECPGt_varchar: { struct ECPGgeneric_varchar *variable = - (struct ECPGgeneric_varchar *) var; + (struct ECPGgeneric_varchar *) var; if (varcharsize == 0) memcpy(variable->arr, value, strlen(value)); @@ -597,7 +597,7 @@ set_desc_attr(struct descriptor_item *desc_item, struct variable *var, else { struct ECPGgeneric_bytea *variable = - (struct ECPGgeneric_bytea *) (var->value); + (struct ECPGgeneric_bytea *) (var->value); desc_item->is_binary = true; desc_item->data_len = variable->len; diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c index 641851983d..93926fd4fb 100644 --- a/src/interfaces/ecpg/ecpglib/execute.c +++ b/src/interfaces/ecpg/ecpglib/execute.c @@ -820,7 +820,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari case ECPGt_bytea: { struct ECPGgeneric_bytea *variable = - (struct ECPGgeneric_bytea *) (var->value); + (struct ECPGgeneric_bytea *) (var->value); if (!(mallocedval = (char *) ecpg_alloc(variable->len, lineno))) return false; @@ -833,7 +833,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari case ECPGt_varchar: { struct ECPGgeneric_varchar *variable = - (struct ECPGgeneric_varchar *) (var->value); + (struct ECPGgeneric_varchar *) (var->value); if (!(newcopy = (char *) ecpg_alloc(variable->len + 1, lineno))) return false; diff --git a/src/interfaces/ecpg/include/pgtypes_interval.h b/src/interfaces/ecpg/include/pgtypes_interval.h index 8471b609db..2809b356f7 100644 --- a/src/interfaces/ecpg/include/pgtypes_interval.h +++ b/src/interfaces/ecpg/include/pgtypes_interval.h @@ -36,10 +36,10 @@ extern "C" #endif extern interval * PGTYPESinterval_new(void); -extern void PGTYPESinterval_free(interval *intvl); +extern void PGTYPESinterval_free(interval * intvl); extern interval * PGTYPESinterval_from_asc(char *str, char **endptr); -extern char *PGTYPESinterval_to_asc(interval *span); -extern int PGTYPESinterval_copy(interval *intvlsrc, interval *intvldest); +extern char *PGTYPESinterval_to_asc(interval * span); +extern int PGTYPESinterval_copy(interval * intvlsrc, interval * intvldest); #ifdef __cplusplus } diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h index 1ec38791f8..00a45799d5 100644 --- a/src/interfaces/ecpg/pgtypeslib/dt.h +++ b/src/interfaces/ecpg/pgtypeslib/dt.h @@ -315,7 +315,7 @@ int DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct tm *tm int DecodeTime(char *str, int *tmask, struct tm *tm, fsec_t *fsec); void EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates); void EncodeInterval(struct tm *tm, fsec_t fsec, int style, char *str); -int tm2timestamp(struct tm *tm, fsec_t fsec, int *tzp, timestamp *result); +int tm2timestamp(struct tm *tm, fsec_t fsec, int *tzp, timestamp * result); int DecodeUnits(int field, char *lowtoken, int *val); bool CheckDateTokenTables(void); void EncodeDateOnly(struct tm 
*tm, int style, char *str, bool EuroDates); diff --git a/src/interfaces/ecpg/pgtypeslib/interval.c b/src/interfaces/ecpg/pgtypeslib/interval.c index dc083c1327..936a688381 100644 --- a/src/interfaces/ecpg/pgtypeslib/interval.c +++ b/src/interfaces/ecpg/pgtypeslib/interval.c @@ -780,17 +780,17 @@ EncodeInterval(struct /* pg_ */ tm *tm, fsec_t fsec, int style, char *str) case INTSTYLE_SQL_STANDARD: { bool has_negative = year < 0 || mon < 0 || - mday < 0 || hour < 0 || - min < 0 || sec < 0 || fsec < 0; + mday < 0 || hour < 0 || + min < 0 || sec < 0 || fsec < 0; bool has_positive = year > 0 || mon > 0 || - mday > 0 || hour > 0 || - min > 0 || sec > 0 || fsec > 0; + mday > 0 || hour > 0 || + min > 0 || sec > 0 || fsec > 0; bool has_year_month = year != 0 || mon != 0; bool has_day_time = mday != 0 || hour != 0 || - min != 0 || sec != 0 || fsec != 0; + min != 0 || sec != 0 || fsec != 0; bool has_day = mday != 0; bool sql_standard_value = !(has_negative && has_positive) && - !(has_year_month && has_day_time); + !(has_year_month && has_day_time); /* * SQL Standard wants only 1 "<sign>" preceding the whole diff --git a/src/interfaces/ecpg/preproc/type.c b/src/interfaces/ecpg/preproc/type.c index 58119d1102..91adb89de9 100644 --- a/src/interfaces/ecpg/preproc/type.c +++ b/src/interfaces/ecpg/preproc/type.c @@ -78,7 +78,7 @@ ECPGmake_struct_member(const char *name, struct ECPGtype *type, struct ECPGstruc { struct ECPGstruct_member *ptr, *ne = - (struct ECPGstruct_member *) mm_alloc(sizeof(struct ECPGstruct_member)); + (struct ECPGstruct_member *) mm_alloc(sizeof(struct ECPGstruct_member)); ne->name = mm_strdup(name); ne->type = type; diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index c713d11d30..8f7ee472af 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -1051,9 +1051,9 @@ libpq_prng_init(PGconn *conn) gettimeofday(&tval, NULL); rseed = ((uintptr_t) conn) ^ - ((uint64) getpid()) ^ - ((uint64) tval.tv_usec) ^ - ((uint64) tval.tv_sec); + ((uint64) getpid()) ^ + ((uint64) tval.tv_usec) ^ + ((uint64) tval.tv_sec); pg_prng_seed(&conn->prng_state, rseed); } diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index a16bbf32ef..14d706efd5 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -1448,7 +1448,7 @@ PQsendQueryInternal(PGconn *conn, const char *query, bool newQuery) if (conn->pipelineStatus != PQ_PIPELINE_OFF) { libpq_append_conn_error(conn, "%s not allowed in pipeline mode", - "PQsendQuery"); + "PQsendQuery"); return 0; } @@ -1516,7 +1516,7 @@ PQsendQueryParams(PGconn *conn, if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT) { libpq_append_conn_error(conn, "number of parameters must be between 0 and %d", - PQ_QUERY_PARAM_MAX_LIMIT); + PQ_QUERY_PARAM_MAX_LIMIT); return 0; } @@ -1562,7 +1562,7 @@ PQsendPrepare(PGconn *conn, if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT) { libpq_append_conn_error(conn, "number of parameters must be between 0 and %d", - PQ_QUERY_PARAM_MAX_LIMIT); + PQ_QUERY_PARAM_MAX_LIMIT); return 0; } @@ -1656,7 +1656,7 @@ PQsendQueryPrepared(PGconn *conn, if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT) { libpq_append_conn_error(conn, "number of parameters must be between 0 and %d", - PQ_QUERY_PARAM_MAX_LIMIT); + PQ_QUERY_PARAM_MAX_LIMIT); return 0; } @@ -2103,10 +2103,9 @@ PQgetResult(PGconn *conn) /* * We're about to return the NULL that terminates the round of - * results from the current query; prepare to send the results - * of 
the next query, if any, when we're called next. If there's - * no next element in the command queue, this gets us in IDLE - * state. + * results from the current query; prepare to send the results of + * the next query, if any, when we're called next. If there's no + * next element in the command queue, this gets us in IDLE state. */ pqPipelineProcessQueue(conn); res = NULL; /* query is complete */ @@ -3051,6 +3050,7 @@ pqPipelineProcessQueue(PGconn *conn) return; case PGASYNC_IDLE: + /* * If we're in IDLE mode and there's some command in the queue, * get us into PIPELINE_IDLE mode and process normally. Otherwise diff --git a/src/interfaces/libpq/fe-lobj.c b/src/interfaces/libpq/fe-lobj.c index 4cb6a46859..206266fd04 100644 --- a/src/interfaces/libpq/fe-lobj.c +++ b/src/interfaces/libpq/fe-lobj.c @@ -142,7 +142,7 @@ lo_truncate(PGconn *conn, int fd, size_t len) if (conn->lobjfuncs->fn_lo_truncate == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_truncate"); + "lo_truncate"); return -1; } @@ -205,7 +205,7 @@ lo_truncate64(PGconn *conn, int fd, pg_int64 len) if (conn->lobjfuncs->fn_lo_truncate64 == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_truncate64"); + "lo_truncate64"); return -1; } @@ -395,7 +395,7 @@ lo_lseek64(PGconn *conn, int fd, pg_int64 offset, int whence) if (conn->lobjfuncs->fn_lo_lseek64 == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_lseek64"); + "lo_lseek64"); return -1; } @@ -485,7 +485,7 @@ lo_create(PGconn *conn, Oid lobjId) if (conn->lobjfuncs->fn_lo_create == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_create"); + "lo_create"); return InvalidOid; } @@ -558,7 +558,7 @@ lo_tell64(PGconn *conn, int fd) if (conn->lobjfuncs->fn_lo_tell64 == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_tell64"); + "lo_tell64"); return -1; } @@ -667,7 +667,7 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) if (fd < 0) { /* error */ libpq_append_conn_error(conn, "could not open file \"%s\": %s", - filename, strerror_r(errno, sebuf, sizeof(sebuf))); + filename, strerror_r(errno, sebuf, sizeof(sebuf))); return InvalidOid; } @@ -723,8 +723,8 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) /* deliberately overwrite any error from lo_close */ pqClearConnErrorState(conn); libpq_append_conn_error(conn, "could not read from file \"%s\": %s", - filename, - strerror_r(save_errno, sebuf, sizeof(sebuf))); + filename, + strerror_r(save_errno, sebuf, sizeof(sebuf))); return InvalidOid; } @@ -778,8 +778,8 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) /* deliberately overwrite any error from lo_close */ pqClearConnErrorState(conn); libpq_append_conn_error(conn, "could not open file \"%s\": %s", - filename, - strerror_r(save_errno, sebuf, sizeof(sebuf))); + filename, + strerror_r(save_errno, sebuf, sizeof(sebuf))); return -1; } @@ -799,8 +799,8 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) /* deliberately overwrite any error from lo_close */ pqClearConnErrorState(conn); libpq_append_conn_error(conn, "could not write to file \"%s\": %s", - filename, - strerror_r(save_errno, sebuf, sizeof(sebuf))); + filename, + strerror_r(save_errno, sebuf, sizeof(sebuf))); return -1; } } @@ -822,7 +822,7 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) if (close(fd) != 0 && result >= 0) { libpq_append_conn_error(conn, "could not write to file \"%s\": %s", - filename, 
strerror_r(errno, sebuf, sizeof(sebuf))); + filename, strerror_r(errno, sebuf, sizeof(sebuf))); result = -1; } @@ -954,56 +954,56 @@ lo_initialize(PGconn *conn) if (lobjfuncs->fn_lo_open == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_open"); + "lo_open"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_close == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_close"); + "lo_close"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_creat == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_creat"); + "lo_creat"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_unlink == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_unlink"); + "lo_unlink"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_lseek == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_lseek"); + "lo_lseek"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_tell == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_tell"); + "lo_tell"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_read == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "loread"); + "loread"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_write == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lowrite"); + "lowrite"); free(lobjfuncs); return -1; } diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c index 3653a1a8a6..660cdec93c 100644 --- a/src/interfaces/libpq/fe-misc.c +++ b/src/interfaces/libpq/fe-misc.c @@ -749,8 +749,8 @@ retry4: */ definitelyEOF: libpq_append_conn_error(conn, "server closed the connection unexpectedly\n" - "\tThis probably means the server terminated abnormally\n" - "\tbefore or while processing the request."); + "\tThis probably means the server terminated abnormally\n" + "\tbefore or while processing the request."); /* Come here if lower-level code already set a suitable errorMessage */ definitelyFailed: @@ -1067,7 +1067,7 @@ pqSocketCheck(PGconn *conn, int forRead, int forWrite, time_t end_time) char sebuf[PG_STRERROR_R_BUFLEN]; libpq_append_conn_error(conn, "%s() failed: %s", "select", - SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); + SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); } return result; @@ -1280,7 +1280,7 @@ libpq_ngettext(const char *msgid, const char *msgid_plural, unsigned long n) * newline. */ void -libpq_append_error(PQExpBuffer errorMessage, const char *fmt, ...) +libpq_append_error(PQExpBuffer errorMessage, const char *fmt,...) { int save_errno = errno; bool done; @@ -1309,7 +1309,7 @@ libpq_append_error(PQExpBuffer errorMessage, const char *fmt, ...) * format should not end with a newline. */ void -libpq_append_conn_error(PGconn *conn, const char *fmt, ...) +libpq_append_conn_error(PGconn *conn, const char *fmt,...) { int save_errno = errno; bool done; diff --git a/src/interfaces/libpq/fe-print.c b/src/interfaces/libpq/fe-print.c index bd60543c03..40620b47e9 100644 --- a/src/interfaces/libpq/fe-print.c +++ b/src/interfaces/libpq/fe-print.c @@ -124,7 +124,7 @@ PQprint(FILE *fout, const PGresult *res, const PQprintOpt *po) { int len; const char *s = (j < numFieldName && po->fieldName[j][0]) ? - po->fieldName[j] : PQfname(res, j); + po->fieldName[j] : PQfname(res, j); fieldNames[j] = s; len = s ? 
strlen(s) : 0; diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index 8ab6a88416..b79d74f748 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -466,7 +466,7 @@ static void handleSyncLoss(PGconn *conn, char id, int msgLength) { libpq_append_conn_error(conn, "lost synchronization with server: got message type \"%c\", length %d", - id, msgLength); + id, msgLength); /* build an error result holding the error message */ pqSaveErrorResult(conn); conn->asyncStatus = PGASYNC_READY; /* drop out of PQgetResult wait loop */ diff --git a/src/interfaces/libpq/fe-secure-common.c b/src/interfaces/libpq/fe-secure-common.c index de115b3764..3ecc7bf615 100644 --- a/src/interfaces/libpq/fe-secure-common.c +++ b/src/interfaces/libpq/fe-secure-common.c @@ -226,7 +226,7 @@ pq_verify_peer_name_matches_certificate_ip(PGconn *conn, * wrong given the subject matter. */ libpq_append_conn_error(conn, "certificate contains IP address with invalid length %zu", - iplen); + iplen); return -1; } @@ -235,7 +235,7 @@ pq_verify_peer_name_matches_certificate_ip(PGconn *conn, if (!addrstr) { libpq_append_conn_error(conn, "could not convert certificate's IP address to string: %s", - strerror_r(errno, sebuf, sizeof(sebuf))); + strerror_r(errno, sebuf, sizeof(sebuf))); return -1; } @@ -292,7 +292,7 @@ pq_verify_peer_name_matches_certificate(PGconn *conn) else if (names_examined == 1) { libpq_append_conn_error(conn, "server certificate for \"%s\" does not match host name \"%s\"", - first_name, host); + first_name, host); } else { diff --git a/src/interfaces/libpq/fe-secure-gssapi.c b/src/interfaces/libpq/fe-secure-gssapi.c index 95ded9eeaa..3b2d0fd140 100644 --- a/src/interfaces/libpq/fe-secure-gssapi.c +++ b/src/interfaces/libpq/fe-secure-gssapi.c @@ -213,8 +213,8 @@ pg_GSS_write(PGconn *conn, const void *ptr, size_t len) if (output.length > PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32)) { libpq_append_conn_error(conn, "client tried to send oversize GSSAPI packet (%zu > %zu)", - (size_t) output.length, - PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32)); + (size_t) output.length, + PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32)); errno = EIO; /* for lack of a better idea */ goto cleanup; } @@ -349,8 +349,8 @@ pg_GSS_read(PGconn *conn, void *ptr, size_t len) if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)) { libpq_append_conn_error(conn, "oversize GSSAPI packet sent by the server (%zu > %zu)", - (size_t) input.length, - PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)); + (size_t) input.length, + PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)); errno = EIO; /* for lack of a better idea */ return -1; } @@ -591,8 +591,8 @@ pqsecure_open_gss(PGconn *conn) if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)) { libpq_append_conn_error(conn, "oversize GSSAPI packet sent by the server (%zu > %zu)", - (size_t) input.length, - PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)); + (size_t) input.length, + PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)); return PGRES_POLLING_FAILED; } diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c index 470e926540..390c888c96 100644 --- a/src/interfaces/libpq/fe-secure-openssl.c +++ b/src/interfaces/libpq/fe-secure-openssl.c @@ -213,12 +213,12 @@ rloop: if (result_errno == EPIPE || result_errno == ECONNRESET) libpq_append_conn_error(conn, "server closed the connection unexpectedly\n" - "\tThis probably means the server terminated abnormally\n" - "\tbefore or while processing the request."); + "\tThis 
probably means the server terminated abnormally\n" + "\tbefore or while processing the request."); else libpq_append_conn_error(conn, "SSL SYSCALL error: %s", - SOCK_STRERROR(result_errno, - sebuf, sizeof(sebuf))); + SOCK_STRERROR(result_errno, + sebuf, sizeof(sebuf))); } else { @@ -313,12 +313,12 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len) result_errno = SOCK_ERRNO; if (result_errno == EPIPE || result_errno == ECONNRESET) libpq_append_conn_error(conn, "server closed the connection unexpectedly\n" - "\tThis probably means the server terminated abnormally\n" - "\tbefore or while processing the request."); + "\tThis probably means the server terminated abnormally\n" + "\tbefore or while processing the request."); else libpq_append_conn_error(conn, "SSL SYSCALL error: %s", - SOCK_STRERROR(result_errno, - sebuf, sizeof(sebuf))); + SOCK_STRERROR(result_errno, + sebuf, sizeof(sebuf))); } else { @@ -415,7 +415,7 @@ pgtls_get_peer_certificate_hash(PGconn *conn, size_t *len) if (algo_type == NULL) { libpq_append_conn_error(conn, "could not find digest for NID %s", - OBJ_nid2sn(algo_nid)); + OBJ_nid2sn(algo_nid)); return NULL; } break; @@ -1000,7 +1000,7 @@ initialize_SSL(PGconn *conn) if (ssl_min_ver == -1) { libpq_append_conn_error(conn, "invalid value \"%s\" for minimum SSL protocol version", - conn->ssl_min_protocol_version); + conn->ssl_min_protocol_version); SSL_CTX_free(SSL_context); return -1; } @@ -1026,7 +1026,7 @@ initialize_SSL(PGconn *conn) if (ssl_max_ver == -1) { libpq_append_conn_error(conn, "invalid value \"%s\" for maximum SSL protocol version", - conn->ssl_max_protocol_version); + conn->ssl_max_protocol_version); SSL_CTX_free(SSL_context); return -1; } @@ -1091,7 +1091,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not read root certificate file \"%s\": %s", - fnbuf, err); + fnbuf, err); SSLerrfree(err); SSL_CTX_free(SSL_context); return -1; @@ -1161,7 +1161,7 @@ initialize_SSL(PGconn *conn) else fnbuf[0] = '\0'; - if (conn->sslcertmode[0] == 'd') /* disable */ + if (conn->sslcertmode[0] == 'd') /* disable */ { /* don't send a client cert even if we have one */ have_cert = false; @@ -1181,7 +1181,7 @@ initialize_SSL(PGconn *conn) if (errno != ENOENT && errno != ENOTDIR) { libpq_append_conn_error(conn, "could not open certificate file \"%s\": %s", - fnbuf, strerror_r(errno, sebuf, sizeof(sebuf))); + fnbuf, strerror_r(errno, sebuf, sizeof(sebuf))); SSL_CTX_free(SSL_context); return -1; } @@ -1199,7 +1199,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not read certificate file \"%s\": %s", - fnbuf, err); + fnbuf, err); SSLerrfree(err); SSL_CTX_free(SSL_context); return -1; @@ -1298,7 +1298,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not load SSL engine \"%s\": %s", - engine_str, err); + engine_str, err); SSLerrfree(err); free(engine_str); return -1; @@ -1309,7 +1309,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not initialize SSL engine \"%s\": %s", - engine_str, err); + engine_str, err); SSLerrfree(err); ENGINE_free(conn->engine); conn->engine = NULL; @@ -1324,7 +1324,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not read private SSL key \"%s\" from engine \"%s\": %s", - engine_colon, engine_str, err); + engine_colon, engine_str, 
err); SSLerrfree(err); ENGINE_finish(conn->engine); ENGINE_free(conn->engine); @@ -1337,7 +1337,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not load private SSL key \"%s\" from engine \"%s\": %s", - engine_colon, engine_str, err); + engine_colon, engine_str, err); SSLerrfree(err); ENGINE_finish(conn->engine); ENGINE_free(conn->engine); @@ -1374,10 +1374,10 @@ initialize_SSL(PGconn *conn) { if (errno == ENOENT) libpq_append_conn_error(conn, "certificate present, but not private key file \"%s\"", - fnbuf); + fnbuf); else libpq_append_conn_error(conn, "could not stat private key file \"%s\": %m", - fnbuf); + fnbuf); return -1; } @@ -1385,7 +1385,7 @@ initialize_SSL(PGconn *conn) if (!S_ISREG(buf.st_mode)) { libpq_append_conn_error(conn, "private key file \"%s\" is not a regular file", - fnbuf); + fnbuf); return -1; } @@ -1442,7 +1442,7 @@ initialize_SSL(PGconn *conn) if (SSL_use_PrivateKey_file(conn->ssl, fnbuf, SSL_FILETYPE_ASN1) != 1) { libpq_append_conn_error(conn, "could not load private key file \"%s\": %s", - fnbuf, err); + fnbuf, err); SSLerrfree(err); return -1; } @@ -1458,7 +1458,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "certificate does not match private key file \"%s\": %s", - fnbuf, err); + fnbuf, err); SSLerrfree(err); return -1; } @@ -1520,8 +1520,8 @@ open_client_SSL(PGconn *conn) * it means that verification failed due to a missing * system CA pool without it being a protocol error. We * inspect the sslrootcert setting to ensure that the user - * was using the system CA pool. For other errors, log them - * using the normal SYSCALL logging. + * was using the system CA pool. For other errors, log + * them using the normal SYSCALL logging. */ if (!save_errno && vcode == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY && strcmp(conn->sslrootcert, "system") == 0) @@ -1529,7 +1529,7 @@ open_client_SSL(PGconn *conn) X509_verify_cert_error_string(vcode)); else if (r == -1) libpq_append_conn_error(conn, "SSL SYSCALL error: %s", - SOCK_STRERROR(save_errno, sebuf, sizeof(sebuf))); + SOCK_STRERROR(save_errno, sebuf, sizeof(sebuf))); else libpq_append_conn_error(conn, "SSL SYSCALL error: EOF detected"); pgtls_close(conn); @@ -1571,12 +1571,12 @@ open_client_SSL(PGconn *conn) case SSL_R_VERSION_TOO_LOW: #endif libpq_append_conn_error(conn, "This may indicate that the server does not support any SSL protocol version between %s and %s.", - conn->ssl_min_protocol_version ? - conn->ssl_min_protocol_version : - MIN_OPENSSL_TLS_VERSION, - conn->ssl_max_protocol_version ? - conn->ssl_max_protocol_version : - MAX_OPENSSL_TLS_VERSION); + conn->ssl_min_protocol_version ? + conn->ssl_min_protocol_version : + MIN_OPENSSL_TLS_VERSION, + conn->ssl_max_protocol_version ? 
+ conn->ssl_max_protocol_version : + MAX_OPENSSL_TLS_VERSION); break; default: break; diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c index 66e401bf3d..8069e38142 100644 --- a/src/interfaces/libpq/fe-secure.c +++ b/src/interfaces/libpq/fe-secure.c @@ -255,14 +255,14 @@ pqsecure_raw_read(PGconn *conn, void *ptr, size_t len) case EPIPE: case ECONNRESET: libpq_append_conn_error(conn, "server closed the connection unexpectedly\n" - "\tThis probably means the server terminated abnormally\n" - "\tbefore or while processing the request."); + "\tThis probably means the server terminated abnormally\n" + "\tbefore or while processing the request."); break; default: libpq_append_conn_error(conn, "could not receive data from server: %s", - SOCK_STRERROR(result_errno, - sebuf, sizeof(sebuf))); + SOCK_STRERROR(result_errno, + sebuf, sizeof(sebuf))); break; } } diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index ce0167c1b6..16321aed25 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -919,8 +919,8 @@ extern char *libpq_ngettext(const char *msgid, const char *msgid_plural, unsigne */ #undef _ -extern void libpq_append_error(PQExpBuffer errorMessage, const char *fmt, ...) pg_attribute_printf(2, 3); -extern void libpq_append_conn_error(PGconn *conn, const char *fmt, ...) pg_attribute_printf(2, 3); +extern void libpq_append_error(PQExpBuffer errorMessage, const char *fmt,...) pg_attribute_printf(2, 3); +extern void libpq_append_conn_error(PGconn *conn, const char *fmt,...) pg_attribute_printf(2, 3); /* * These macros are needed to let error-handling code be portable between diff --git a/src/port/dirmod.c b/src/port/dirmod.c index 6557cf8785..07dd190cbc 100644 --- a/src/port/dirmod.c +++ b/src/port/dirmod.c @@ -145,10 +145,10 @@ pgunlink(const char *path) * the retry loop, but that seems like over-engineering for now. * * In the special case of a STATUS_DELETE_PENDING error (file already - * unlinked, but someone still has it open), we don't want to report ENOENT - * to the caller immediately, because rmdir(parent) would probably fail. - * We want to wait until the file truly goes away so that simple recursive - * directory unlink algorithms work. + * unlinked, but someone still has it open), we don't want to report + * ENOENT to the caller immediately, because rmdir(parent) would probably + * fail. We want to wait until the file truly goes away so that simple + * recursive directory unlink algorithms work. 
*/ if (lstat(path, &st) < 0) { diff --git a/src/test/modules/libpq_pipeline/libpq_pipeline.c b/src/test/modules/libpq_pipeline/libpq_pipeline.c index f48da7d963..f5b4d4d1ff 100644 --- a/src/test/modules/libpq_pipeline/libpq_pipeline.c +++ b/src/test/modules/libpq_pipeline/libpq_pipeline.c @@ -985,7 +985,7 @@ test_prepared(PGconn *conn) static void notice_processor(void *arg, const char *message) { - int *n_notices = (int *) arg; + int *n_notices = (int *) arg; (*n_notices)++; fprintf(stderr, "NOTICE %d: %s", *n_notices, message); diff --git a/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c b/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c index 1727910ce7..a304ba54bb 100644 --- a/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c +++ b/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c @@ -31,7 +31,7 @@ PG_MODULE_MAGIC; */ typedef struct xl_testcustomrmgrs_message { - Size message_size; /* size of the message */ + Size message_size; /* size of the message */ char message[FLEXIBLE_ARRAY_MEMBER]; /* payload */ } xl_testcustomrmgrs_message; diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c index b7c6f98577..82f937fca4 100644 --- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c +++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c @@ -318,6 +318,7 @@ get_altertable_subcmdinfo(PG_FUNCTION_ARGS) if (OidIsValid(sub->address.objectId)) { char *objdesc; + objdesc = getObjectDescription((const ObjectAddress *) &sub->address, false); values[1] = CStringGetTextDatum(objdesc); } diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index 48008fa8c3..6ee3f77053 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -85,14 +85,14 @@ typedef enum TAPtype TEST_STATUS, PLAN, NONE -} TAPtype; +} TAPtype; /* options settable from command line */ _stringlist *dblist = NULL; bool debug = false; char *inputdir = "."; char *outputdir = "."; -char *expecteddir = "."; +char *expecteddir = "."; char *bindir = PGBINDIR; char *launcher = NULL; static _stringlist *loadextension = NULL; diff --git a/src/timezone/zic.c b/src/timezone/zic.c index d6c5141923..d605c721ec 100644 --- a/src/timezone/zic.c +++ b/src/timezone/zic.c @@ -906,16 +906,16 @@ namecheck(const char *name) /* Benign characters in a portable file name. */ static char const benign[] = - "-/_" - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + "-/_" + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; /* * Non-control chars in the POSIX portable character set, excluding the * benign characters. 
*/ static char const printable_and_not_benign[] = - " !\"#$%&'()*+,.0123456789:;<=>?@[\\]^`{|}~"; + " !\"#$%&'()*+,.0123456789:;<=>?@[\\]^`{|}~"; char const *component = name; @@ -3203,7 +3203,7 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) else if (jtime == ktime) { char const *dup_rules_msg = - _("two rules for same instant"); + _("two rules for same instant"); eats(zp->z_filename, zp->z_linenum, rp->r_filename, rp->r_linenum); diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index b4058b88c3..1aa98c6484 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -132,6 +132,7 @@ ArchiveModuleInit ArchiveModuleState ArchiveOpts ArchiveShutdownCB +ArchiveStartupCB ArchiveStreamState ArchiverOutput ArchiverStage @@ -240,6 +241,7 @@ Barrier BaseBackupCmd BaseBackupTargetHandle BaseBackupTargetType +BasicArchiveData BeginDirectModify_function BeginForeignInsert_function BeginForeignModify_function @@ -264,7 +266,6 @@ BitmapOr BitmapOrPath BitmapOrState Bitmapset -BlobInfo Block BlockId BlockIdData @@ -381,6 +382,7 @@ ClientData ClonePtrType ClosePortalStmt ClosePtrType +ClosestMatchState Clump ClusterInfo ClusterParams @@ -396,6 +398,7 @@ CoercionForm CoercionPathType CollAliasData CollInfo +CollParam CollateClause CollateExpr CollateStrength @@ -426,7 +429,6 @@ CommitTimestampShared CommonEntry CommonTableExpr CompareScalarsContext -CompiledExprState CompositeIOData CompositeTypeStmt CompoundAffixFlag @@ -540,16 +542,17 @@ DR_intorel DR_printtup DR_sqlfunction DR_transientrel -DSA DWORD DataDumperPtr DataPageDeleteStack DatabaseInfo DateADT +DateTimeErrorExtra Datum DatumTupleFields DbInfo DbInfoArr +DbLocaleInfo DeClonePtrType DeadLockState DeallocateStmt @@ -565,6 +568,7 @@ DeleteStmt DependencyGenerator DependencyGeneratorData DependencyType +DeserialIOData DestReceiver DictISpell DictInt @@ -626,13 +630,13 @@ Edge EditableObjectType ElementsState EnableTimeoutParams -EndBlobPtrType -EndBlobsPtrType EndDataPtrType EndDirectModify_function EndForeignInsert_function EndForeignModify_function EndForeignScan_function +EndLOPtrType +EndLOsPtrType EndOfWalRecoveryInfo EndSampleScan_function EnumItem @@ -660,7 +664,6 @@ ExceptionLabelMap ExceptionMap ExecAuxRowMark ExecEvalBoolSubroutine -ExecEvalJsonExprContext ExecEvalSubroutine ExecForeignBatchInsert_function ExecForeignDelete_function @@ -706,9 +709,9 @@ ExprEvalOp ExprEvalOpLookup ExprEvalRowtypeCache ExprEvalStep +ExprSetupInfo ExprState ExprStateEvalFunc -ExtendBufferedFlags ExtendBufferedWhat ExtensibleNode ExtensibleNodeEntry @@ -720,7 +723,6 @@ FDWCollateState FD_SET FILE FILETIME -FPI FSMAddress FSMPage FSMPageData @@ -943,6 +945,7 @@ GISTTYPE GIST_SPLITVEC GMReaderTupleBuffer GROUP +GUCHashEntry GV Gather GatherMerge @@ -1008,6 +1011,7 @@ GistVacState GlobalTransaction GlobalVisHorizonKind GlobalVisState +GrantRoleOptions GrantRoleStmt GrantStmt GrantTargetType @@ -1075,6 +1079,7 @@ HashInstrumentation HashJoin HashJoinState HashJoinTable +HashJoinTableData HashJoinTuple HashMemoryChunk HashMetaPage @@ -1110,14 +1115,16 @@ HistControl HotStandbyState I32 ICU_Convert_Func -ID INFIX +INT INT128 INTERFACE_INFO +IO IOContext IOFuncSelector IOObject IOOp +IO_STATUS_BLOCK IPCompareMethod ITEM IV @@ -1216,7 +1223,6 @@ IterateForeignScan_function IterateJsonStringValuesState JEntry JHashState -JOBOBJECTINFOCLASS JOBOBJECT_BASIC_LIMIT_INFORMATION JOBOBJECT_BASIC_UI_RESTRICTIONS JOBOBJECT_SECURITY_LIMIT_INFORMATION @@ -1229,38 +1235,30 @@ JitProviderReleaseContextCB 
JitProviderResetAfterErrorCB Join JoinCostWorkspace +JoinDomain JoinExpr JoinHashEntry JoinPath JoinPathExtraData JoinState +JoinTreeItem JoinType JsObject JsValue JsonAggConstructor JsonAggState -JsonArgument JsonArrayAgg JsonArrayConstructor JsonArrayQueryConstructor JsonBaseObjectInfo -JsonBehavior -JsonBehaviorType -JsonCoercion -JsonCommon JsonConstructorExpr JsonConstructorExprState JsonConstructorType JsonEncoding -JsonExpr -JsonExprOp JsonFormat JsonFormatType -JsonFunc -JsonFuncExpr JsonHashEntry JsonIsPredicate -JsonItemCoercions JsonIterateStringValuesAction JsonKeyValue JsonLexContext @@ -1275,10 +1273,8 @@ JsonObjectConstructor JsonOutput JsonParseContext JsonParseErrorType -JsonParseExpr JsonPath JsonPathBool -JsonPathDatatypeStatus JsonPathExecContext JsonPathExecResult JsonPathGinAddPathItemFunc @@ -1291,16 +1287,11 @@ JsonPathGinPathItem JsonPathItem JsonPathItemType JsonPathKeyword -JsonPathMutableContext JsonPathParseItem JsonPathParseResult JsonPathPredicateCallback JsonPathString -JsonPathVarCallback -JsonPathVariableEvalContext -JsonQuotes JsonReturning -JsonScalarExpr JsonSemAction JsonTokenType JsonTransformStringValuesAction @@ -1314,7 +1305,6 @@ JsonValueExpr JsonValueList JsonValueListIterator JsonValueType -JsonWrapper Jsonb JsonbAggState JsonbContainer @@ -1329,6 +1319,7 @@ JsonbTypeCategory JsonbValue JumbleState JunkFilter +KAXCompressReason KeyAction KeyActions KeyArray @@ -1340,24 +1331,6 @@ LDAPMessage LDAPURLDesc LDAP_TIMEVAL LINE -LLVMAttributeRef -LLVMBasicBlockRef -LLVMBuilderRef -LLVMIntPredicate -LLVMJitContext -LLVMJitHandle -LLVMMemoryBufferRef -LLVMModuleRef -LLVMOrcJITStackRef -LLVMOrcModuleHandle -LLVMOrcTargetAddress -LLVMPassManagerBuilderRef -LLVMPassManagerRef -LLVMSharedModuleRef -LLVMTargetMachineRef -LLVMTargetRef -LLVMTypeRef -LLVMValueRef LOCALLOCK LOCALLOCKOWNER LOCALLOCKTAG @@ -1370,12 +1343,9 @@ LOCKTAG LONG LONG_PTR LOOP +LPARAM LPBYTE -LPCTSTR LPCWSTR -LPDWORD -LPFILETIME -LPSECURITY_ATTRIBUTES LPSERVICE_STATUS LPSTR LPTHREAD_START_ROUTINE @@ -1391,18 +1361,17 @@ LWLock LWLockHandle LWLockMode LWLockPadded -LZ4CompressorState LZ4F_compressionContext_t LZ4F_decompressOptions_t LZ4F_decompressionContext_t LZ4F_errorCode_t LZ4F_preferences_t -LZ4File +LZ4State LabelProvider LagTracker LargeObjectDesc -LastAttnumInfo Latch +LauncherLastStartTimesEntry LerpFunc LexDescr LexemeEntry @@ -1423,6 +1392,7 @@ ListParsedLex ListenAction ListenActionKind ListenStmt +LoInfo LoadStmt LocalBufferLookupEnt LocalPgBackendStatus @@ -1479,7 +1449,6 @@ LogicalRepBeginData LogicalRepCommitData LogicalRepCommitPreparedTxnData LogicalRepCtxStruct -LogicalRepMode LogicalRepMsgType LogicalRepPartMapEntry LogicalRepPreparedTxnData @@ -1575,6 +1544,7 @@ MultirangeIOData MultirangeParseState MultirangeType NDBOX +NLSVERSIONINFOEX NODE NTSTATUS NUMCacheEntry @@ -1608,10 +1578,12 @@ NotificationList NotifyStmt Nsrt NtDllRoutine +NtFlushBuffersFileEx_t NullIfExpr NullTest NullTestType NullableDatum +NullingRelsMatch Numeric NumericAggState NumericDigit @@ -1670,7 +1642,7 @@ OprCacheKey OprInfo OprProofCacheEntry OprProofCacheKey -OutputContext +OuterJoinClauseInfo OutputPluginCallbacks OutputPluginOptions OutputPluginOutputType @@ -1680,7 +1652,6 @@ OverridingKind PACE_HEADER PACL PATH -PBOOL PCtxtHandle PERL_CONTEXT PERL_SI @@ -1743,10 +1714,9 @@ PGresAttValue PGresParamDesc PGresult PGresult_data -PHANDLE +PIO_STATUS_BLOCK PLAINTREE PLAssignStmt -PLUID_AND_ATTRIBUTES PLcword PLpgSQL_case_when PLpgSQL_condition @@ -1863,7 +1833,6 @@ PROCLOCK PROCLOCKTAG PROC_HDR 
PSID -PSID_AND_ATTRIBUTES PSQL_COMP_CASE PSQL_ECHO PSQL_ECHO_HIDDEN @@ -1872,7 +1841,6 @@ PTEntryArray PTIterationArray PTOKEN_PRIVILEGES PTOKEN_USER -PULONG PUTENVPROC PVIndStats PVIndVacStatus @@ -1972,6 +1940,7 @@ PartitionRangeDatum PartitionRangeDatumKind PartitionScheme PartitionSpec +PartitionStrategy PartitionTupleRouting PartitionedRelPruneInfo PartitionedRelPruningData @@ -1982,11 +1951,8 @@ PathClauseUsage PathCostComparison PathHashStack PathKey -PathKeyInfo PathKeysComparison PathTarget -PathkeyMutatorState -PathkeySortCost PatternInfo PatternInfoArray Pattern_Prefix_Status @@ -2022,6 +1988,7 @@ PgFdwModifyState PgFdwOption PgFdwPathExtraData PgFdwRelationInfo +PgFdwSamplingMethod PgFdwScanState PgIfAddrCallback PgStatShared_Archiver @@ -2105,13 +2072,11 @@ PortalStrategy PostParseColumnRefHook PostgresPollingStatusType PostingItem -PostponedQual PreParseColumnRefHook PredClass PredIterInfo PredIterInfoData PredXactList -PredXactListElement PredicateLockData PredicateLockTargetType PrefetchBufferResult @@ -2183,7 +2148,6 @@ QPRS_STATE QTN2QTState QTNode QUERYTYPE -QUERY_SECURITY_CONTEXT_TOKEN_FN QualCost QualItem Query @@ -2216,6 +2180,7 @@ RI_QueryKey RTEKind RTEPermissionInfo RWConflict +RWConflictData RWConflictPoolHeader Range RangeBound @@ -2251,7 +2216,8 @@ RecheckForeignScan_function RecordCacheEntry RecordCompareData RecordIOData -RecoveryLockListsEntry +RecoveryLockEntry +RecoveryLockXidEntry RecoveryPauseState RecoveryState RecoveryTargetTimeLineGoal @@ -2275,6 +2241,7 @@ ReindexStmt ReindexType RelFileLocator RelFileLocatorBackend +RelFileNumber RelIdCacheEnt RelInfo RelInfoArr @@ -2363,6 +2330,7 @@ ResultState ReturnSetInfo ReturnStmt RevmapContents +RevokeRoleGrantAction RewriteMappingDataEntry RewriteMappingFile RewriteRule @@ -2370,6 +2338,7 @@ RewriteState RmgrData RmgrDescData RmgrId +RoleNameEntry RoleNameItem RoleSpec RoleSpecType @@ -2384,6 +2353,7 @@ RowMarkType RowSecurityDesc RowSecurityPolicy RtlGetLastNtStatus_t +RtlNtStatusToDosError_t RuleInfo RuleLock RuleStmt @@ -2468,6 +2438,7 @@ SeqTable SeqTableData SerCommitSeqNo SerialControl +SerialIOData SerializableXactHandle SerializedActiveRelMaps SerializedClientConnectionInfo @@ -2608,9 +2579,9 @@ SplitTextOutputData SplitVar SplitedPageLayout StackElem -StartBlobPtrType -StartBlobsPtrType StartDataPtrType +StartLOPtrType +StartLOsPtrType StartReplicationCmd StartupStatusEnum StatEntry @@ -2658,6 +2629,7 @@ SubscriptionInfo SubscriptionRelState SupportRequestCost SupportRequestIndexCondition +SupportRequestOptimizeWindowClause SupportRequestRows SupportRequestSelectivity SupportRequestSimplify @@ -2674,6 +2646,7 @@ SyscacheCallbackFunction SystemRowsSamplerData SystemSamplerData SystemTimeSamplerData +TAPtype TAR_MEMBER TBMIterateResult TBMIteratingState @@ -2726,6 +2699,7 @@ TSVectorStat TState TStatus TStoreState +TU_UpdateIndexes TXNEntryFile TYPCATEGORY T_Action @@ -2878,12 +2852,10 @@ TypeCat TypeFuncClass TypeInfo TypeName -U U32 U8 UChar UCharIterator -UColAttribute UColAttributeValue UCollator UConverter @@ -2908,16 +2880,19 @@ UpdateStmt UpperRelationKind UpperUniquePath UserAuth +UserContext UserMapping UserOpts VacAttrStats VacAttrStatsP VacDeadItems VacErrPhase +VacObjFilter VacOptValue VacuumParams VacuumRelation VacuumStmt +ValidIOData ValidateIndexState ValuesScan ValuesScanState @@ -2938,6 +2913,8 @@ VariableSpace VariableStatData VariableSubstituteHook Variables +Vector32 +Vector8 VersionedQuery Vfd ViewCheckOption @@ -2954,7 +2931,6 @@ WALInsertLock WALInsertLockPadded WALOpenSegment 
WALReadError -WalRcvWakeupReason WALSegmentCloseCB WALSegmentContext WALSegmentOpenCB @@ -2984,6 +2960,7 @@ WalRcvExecResult WalRcvExecStatus WalRcvState WalRcvStreamOptions +WalRcvWakeupReason WalReceiverConn WalReceiverFunctionsType WalSnd @@ -2993,6 +2970,7 @@ WalSndState WalTimeSample WalUsage WalWriteMethod +WalWriteMethodOps Walfile WindowAgg WindowAggPath @@ -3091,17 +3069,16 @@ YYLTYPE YYSTYPE YY_BUFFER_STATE ZSTD_CCtx +ZSTD_CStream ZSTD_DCtx +ZSTD_DStream +ZSTD_cParameter ZSTD_inBuffer ZSTD_outBuffer +ZstdCompressorState _SPI_connection _SPI_plan -__AssignProcessToJobObject -__CreateJobObject -__CreateRestrictedToken -__IsProcessInJob -__QueryInformationJobObject -__SetInformationJobObject +__m128i __time64_t _dev_t _ino_t @@ -3109,8 +3086,8 @@ _locale_t _resultmap _stringlist acquireLocksOnSubLinks_context +add_nulling_relids_context adjust_appendrel_attrs_context -aff_regex_struct allocfunc amadjustmembers_function ambeginscan_function @@ -3137,6 +3114,7 @@ amvalidate_function array_iter array_unnest_fctx assign_collations_context +auth_password_hook_typ autovac_table av_relation avl_dbase @@ -3187,7 +3165,6 @@ cached_re_str canonicalize_state cashKEY catalogid_hash -cfp check_agg_arguments_context check_function_callback check_network_data @@ -3195,7 +3172,6 @@ check_object_relabel_type check_password_hook_type check_ungrouped_columns_context chr -clock_t cmpEntriesArg codes_t collation_cache_entry @@ -3204,6 +3180,7 @@ colormaprange compare_context config_var_value contain_aggs_of_level_context +contain_placeholder_references_context convert_testexpr_context copy_data_dest_cb copy_data_source_cb @@ -3236,6 +3213,10 @@ dlist_head dlist_iter dlist_mutable_iter dlist_node +dm_code +dm_codes +dm_letter +dm_node ds_state dsa_area dsa_area_control @@ -3308,7 +3289,6 @@ fmStringInfo fmgr_hook_type foreign_glob_cxt foreign_loc_cxt -freeaddrinfo_ptr_t freefunc fsec_t gbt_vsrt_arg @@ -3323,8 +3303,6 @@ get_attavgwidth_hook_type get_index_stats_hook_type get_relation_info_hook_type get_relation_stats_hook_type -getaddrinfo_ptr_t -getnameinfo_ptr_t gid_t gin_leafpage_items_state ginxlogCreatePostingTree @@ -3346,9 +3324,13 @@ gistxlogPageSplit gistxlogPageUpdate grouping_sets_data gseg_picksplit_item +gss_OID_set gss_buffer_desc gss_cred_id_t +gss_cred_usage_t gss_ctx_id_t +gss_key_value_element_desc +gss_key_value_set_desc gss_name_t gtrgm_consistent_cache gzFile @@ -3364,7 +3346,6 @@ hstoreUniquePairs_t hstoreUpgrade_t hyperLogLogState ifState -ilist import_error_callback_arg indexed_tlist inet @@ -3394,7 +3375,6 @@ intvKEY io_stat_col itemIdCompact itemIdCompactData -iterator jmp_buf join_search_hook_type json_aelem_action @@ -3473,13 +3453,13 @@ on_dsm_detach_callback on_exit_nicely_callback openssl_tls_init_hook_typ ossl_EVP_cipher_func -other output_type pagetable_hash pagetable_iterator pairingheap pairingheap_comparator pairingheap_node +pam_handle_t parallel_worker_main_type parse_error_callback_arg parser_context @@ -3562,6 +3542,7 @@ pgthreadlock_t pid_t pivot_field planner_hook_type +planstate_tree_walker_callback plperl_array_info plperl_call_data plperl_interp_desc @@ -3579,7 +3560,6 @@ pltcl_proc_desc pltcl_proc_key pltcl_proc_ptr pltcl_query_desc -pointer polymorphic_actuals pos_trgm post_parse_analyze_hook_type @@ -3616,6 +3596,7 @@ pull_varattnos_context pull_varnos_context pull_vars_context pullup_replace_vars_context +pushdown_safe_type pushdown_safety_info qc_hash_func qsort_arg_comparator @@ -3629,8 +3610,9 @@ rbt_allocfunc rbt_combiner rbt_comparator rbt_freefunc 
-reduce_outer_joins_state -reference +reduce_outer_joins_partial_state +reduce_outer_joins_pass1_state +reduce_outer_joins_pass2_state regex_arc_t regex_t regexp @@ -3654,15 +3636,16 @@ relopts_validator remoteConn remoteConnHashEnt remoteDep +remove_nulling_relids_context rendezvousHashEntry replace_rte_variables_callback replace_rte_variables_context -ret_type rewind_source rewrite_event rf_context rm_detail_t role_auth_extra +rolename_hash row_security_policy_hook_type rsv_callback saophash_hash @@ -3732,7 +3715,6 @@ stmtCacheEntry storeInfo storeRes_func stream_stop_callback -string substitute_actual_parameters_context substitute_actual_srf_parameters_context substitute_phv_relids_context @@ -3756,6 +3738,8 @@ toast_compress_header tokenize_error_callback_arg transferMode transfer_thread_arg +tree_mutator_callback +tree_walker_callback trgm trgm_mb_char trivalue @@ -3766,10 +3750,11 @@ ts_tokentype tsearch_readline_state tuplehash_hash tuplehash_iterator -type tzEntry u_char u_int +ua_page_items +ua_page_stats uchr uid_t uint128 @@ -3777,10 +3762,12 @@ uint16 uint16_t uint32 uint32_t +uint32x4_t uint64 uint64_t uint8 uint8_t +uint8x16_t uintptr_t unicodeStyleBorderFormat unicodeStyleColumnFormat @@ -3824,7 +3811,6 @@ win32_deadchild_waitinfo wint_t worker_state worktable -wrap xl_brin_createidx xl_brin_desummarize xl_brin_insert @@ -3863,7 +3849,6 @@ xl_heap_confirm xl_heap_delete xl_heap_freeze_page xl_heap_freeze_plan -xl_heap_freeze_tuple xl_heap_header xl_heap_inplace xl_heap_insert @@ -3898,6 +3883,7 @@ xl_standby_lock xl_standby_locks xl_tblspc_create_rec xl_tblspc_drop_rec +xl_testcustomrmgrs_message xl_xact_abort xl_xact_assignment xl_xact_commit @@ -3925,6 +3911,8 @@ xmlNodePtr xmlNodeSetPtr xmlParserCtxtPtr xmlParserInputPtr +xmlSaveCtxt +xmlSaveCtxtPtr xmlStructuredErrorFunc xmlTextWriter xmlTextWriterPtr @@ -3942,4 +3930,3 @@ yyscan_t z_stream z_streamp zic_t -ZSTD_CStream diff --git a/src/tutorial/funcs.c b/src/tutorial/funcs.c index ceffb56835..f597777a1f 100644 --- a/src/tutorial/funcs.c +++ b/src/tutorial/funcs.c @@ -78,8 +78,8 @@ copytext(PG_FUNCTION_ARGS) * VARDATA is a pointer to the data region of the new struct. The source * could be a short datum, so retrieve its data through VARDATA_ANY. */ - memcpy(VARDATA(new_t), /* destination */ - VARDATA_ANY(t), /* source */ + memcpy(VARDATA(new_t), /* destination */ + VARDATA_ANY(t), /* source */ VARSIZE_ANY_EXHDR(t)); /* how many bytes */ PG_RETURN_TEXT_P(new_t); } |
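Note: the final hunk above only realigns comments inside the tutorial's copytext() function, but the comment it touches explains a pattern worth seeing whole: the destination uses VARDATA while the source is read through VARDATA_ANY, because the input text may arrive as a packed (short-header) datum while the freshly palloc'd copy always carries a full 4-byte header. The following is a minimal sketch of that surrounding function, reconstructed from the standard varlena macros (PG_GETARG_TEXT_PP, SET_VARSIZE, VARDATA_ANY, VARSIZE_ANY_EXHDR) rather than quoted from the tree, so treat it as illustrative context only.

    #include "postgres.h"
    #include "fmgr.h"

    PG_FUNCTION_INFO_V1(copytext);

    Datum
    copytext(PG_FUNCTION_ARGS)
    {
        /* PG_GETARG_TEXT_PP may hand back a packed (1-byte header) datum */
        text   *t = PG_GETARG_TEXT_PP(0);

        /* allocate a copy with a full 4-byte varlena header */
        text   *new_t = (text *) palloc(VARSIZE_ANY_EXHDR(t) + VARHDRSZ);

        SET_VARSIZE(new_t, VARSIZE_ANY_EXHDR(t) + VARHDRSZ);

        /*
         * The new struct always has a regular header, so VARDATA is fine for
         * the destination; the source could be short, so go through the
         * _ANY variants for both its data pointer and its length.
         */
        memcpy(VARDATA(new_t),          /* destination */
               VARDATA_ANY(t),          /* source */
               VARSIZE_ANY_EXHDR(t));   /* how many bytes */

        PG_RETURN_TEXT_P(new_t);
    }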