Diffstat (limited to 'src/backend')
128 files changed, 561 insertions, 549 deletions
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index 41bf950a4a..21d7c2d4e9 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -689,8 +689,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) } /* - * If we found a scan key eliminating the range, no need to - * check additional ones. + * If we found a scan key eliminating the range, no need + * to check additional ones. */ if (!addrange) break; @@ -1212,7 +1212,7 @@ brin_build_desc(Relation rel) * Obtain BrinOpcInfo for each indexed column. While at it, accumulate * the number of columns stored, since the number is opclass-defined. */ - opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts); + opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts); for (keyno = 0; keyno < tupdesc->natts; keyno++) { FmgrInfo *opcInfoFn; diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 90cb3951fc..11cc431677 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -1717,7 +1717,7 @@ allocateReloptStruct(Size base, relopt_value *options, int numoptions) if (optstr->fill_cb) { const char *val = optval->isset ? optval->values.string_val : - optstr->default_isnull ? NULL : optstr->default_val; + optstr->default_isnull ? NULL : optstr->default_val; size += optstr->fill_cb(val, NULL); } @@ -1796,8 +1796,8 @@ fillRelOptions(void *rdopts, Size basesize, if (optstring->fill_cb) { Size size = - optstring->fill_cb(string_val, - (char *) rdopts + offset); + optstring->fill_cb(string_val, + (char *) rdopts + offset); if (size) { diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index b5c1754e78..516465f8b7 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1117,7 +1117,7 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate, for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset)) { IndexTuple ituple = (IndexTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (downlink == NULL) downlink = CopyIndexTuple(ituple); diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c index 95cbed4337..1423b4b047 100644 --- a/src/backend/access/gist/gistbuildbuffers.c +++ b/src/backend/access/gist/gistbuildbuffers.c @@ -598,7 +598,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate, { GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc); GISTNodeBuffer *newNodeBuffer; - int i = foreach_current_index(lc); + int i = foreach_current_index(lc); /* Decompress parent index tuple of node buffer page. 
*/ gistDeCompressAtt(giststate, r, diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 7382b0921d..e2c9b5f069 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -657,7 +657,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage @@ -694,7 +694,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index 9a86fb3fef..dcd302d3de 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -125,7 +125,7 @@ gistRedoPageUpdateRecord(XLogReaderState *record) if (data - begin < datalen) { OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber : - OffsetNumberNext(PageGetMaxOffsetNumber(page)); + OffsetNumberNext(PageGetMaxOffsetNumber(page)); while (data - begin < datalen) { diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index d850edd1d5..37646cc9a1 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -289,7 +289,8 @@ hashtext(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; const char *keydata = VARDATA_ANY(key); size_t keylen = VARSIZE_ANY_EXHDR(key); @@ -304,8 +305,8 @@ hashtext(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. */ result = hash_any((uint8_t *) buf, bsize + 1); @@ -343,7 +344,8 @@ hashtextextended(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; const char *keydata = VARDATA_ANY(key); size_t keylen = VARSIZE_ANY_EXHDR(key); @@ -357,8 +359,8 @@ hashtextextended(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. 
*/ result = hash_any_extended((uint8_t *) buf, bsize + 1, PG_GETARG_INT64(1)); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index b300a4675e..8e60fb74a0 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2491,7 +2491,7 @@ static inline bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) { const uint16 interesting = - HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; + HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; if ((new_infomask & interesting) != (old_infomask & interesting)) return true; diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index e2e35b71ea..e76fb1dbdd 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -334,8 +334,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot, * Note: heap_update returns the tid (location) of the new tuple in the * t_self field. * - * If the update is not HOT, we must update all indexes. If the update - * is HOT, it could be that we updated summarized columns, so we either + * If the update is not HOT, we must update all indexes. If the update is + * HOT, it could be that we updated summarized columns, so we either * update only summarized indexes, or none at all. */ if (result != TM_Ok) diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index fb95c19e90..c275b08494 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -376,7 +376,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, if (use_fsm && i >= not_in_fsm_pages) { Size freespace = BufferGetPageSize(victim_buffers[i]) - - SizeOfPageHeaderData; + SizeOfPageHeaderData; RecordPageWithFreeSpace(relation, curBlock, freespace); } diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 3f0342351f..ea75c5399b 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -533,7 +533,7 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer) if (!TransactionIdIsValid(prstate->old_snap_xmin)) { TransactionId horizon = - GlobalVisTestNonRemovableHorizon(prstate->vistest); + GlobalVisTestNonRemovableHorizon(prstate->vistest); TransactionIdLimitedForOldSnapshots(horizon, prstate->rel, &prstate->old_snap_xmin, diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 0a9ebd22bd..f232cad592 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -1809,12 +1809,12 @@ retry: { /* * We have no freeze plans to execute, so there's no added cost - * from following the freeze path. That's why it was chosen. - * This is important in the case where the page only contains - * totally frozen tuples at this point (perhaps only following - * pruning). Such pages can be marked all-frozen in the VM by our - * caller, even though none of its tuples were newly frozen here - * (note that the "no freeze" path never sets pages all-frozen). + * from following the freeze path. That's why it was chosen. This + * is important in the case where the page only contains totally + * frozen tuples at this point (perhaps only following pruning). + * Such pages can be marked all-frozen in the VM by our caller, + * even though none of its tuples were newly frozen here (note + * that the "no freeze" path never sets pages all-frozen). 
* * We never increment the frozen_pages instrumentation counter * here, since it only counts pages with newly frozen tuples @@ -3113,8 +3113,8 @@ dead_items_max_items(LVRelState *vacrel) { int64 max_items; int vac_work_mem = IsAutoVacuumWorkerProcess() && - autovacuum_work_mem != -1 ? - autovacuum_work_mem : maintenance_work_mem; + autovacuum_work_mem != -1 ? + autovacuum_work_mem : maintenance_work_mem; if (vacrel->nindexes > 0) { diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index ac91d1a14d..7d54ec9c0f 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -626,7 +626,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend) static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks) { - Buffer buf; + Buffer buf; buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL, EB_CREATE_FORK_IF_NEEDED | diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 41aa1c4ccd..6be8915229 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -2947,7 +2947,7 @@ void _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate) { IndexBulkDeleteResult *stats = vstate->stats; - Relation heaprel = vstate->info->heaprel; + Relation heaprel = vstate->info->heaprel; Assert(stats->pages_newly_deleted >= vstate->npendingpages); @@ -3027,7 +3027,7 @@ _bt_pendingfsm_add(BTVacState *vstate, if (vstate->npendingpages > 0) { FullTransactionId lastsafexid = - vstate->pendingpages[vstate->npendingpages - 1].safexid; + vstate->pendingpages[vstate->npendingpages - 1].safexid; Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid)); } diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c index 7d12e0ef91..3922120d64 100644 --- a/src/backend/access/rmgrdesc/dbasedesc.c +++ b/src/backend/access/rmgrdesc/dbasedesc.c @@ -27,7 +27,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record) if (info == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) rec; + (xl_dbase_create_file_copy_rec *) rec; appendStringInfo(buf, "copy dir %u/%u to %u/%u", xlrec->src_tablespace_id, xlrec->src_db_id, @@ -36,7 +36,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record) else if (info == XLOG_DBASE_CREATE_WAL_LOG) { xl_dbase_create_wal_log_rec *xlrec = - (xl_dbase_create_wal_log_rec *) rec; + (xl_dbase_create_wal_log_rec *) rec; appendStringInfo(buf, "create dir %u/%u", xlrec->tablespace_id, xlrec->db_id); diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index 9ef4981ad1..246a6a6b85 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -120,7 +120,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) else { ginxlogInsertDataInternal *insertData = - (ginxlogInsertDataInternal *) payload; + (ginxlogInsertDataInternal *) payload; appendStringInfo(buf, " pitem: %u-%u/%u", PostingItemGetBlockNumber(&insertData->newitem), @@ -156,7 +156,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) else { ginxlogVacuumDataLeafPage *xlrec = - (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL); + (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL); desc_recompress_leaf(buf, &xlrec->data); } diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index f323699165..cbfaf0c00a 100644 --- a/src/backend/access/spgist/spgscan.c +++ 
b/src/backend/access/spgist/spgscan.c @@ -115,7 +115,7 @@ spgAllocSearchItem(SpGistScanOpaque so, bool isnull, double *distances) { /* allocate distance array only for non-NULL items */ SpGistSearchItem *item = - palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys)); + palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys)); item->isNull = isnull; @@ -130,7 +130,7 @@ static void spgAddStartItem(SpGistScanOpaque so, bool isnull) { SpGistSearchItem *startEntry = - spgAllocSearchItem(so, isnull, so->zeroDistances); + spgAllocSearchItem(so, isnull, so->zeroDistances); ItemPointerSet(&startEntry->heapPtr, isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO, @@ -768,7 +768,7 @@ spgTestLeafTuple(SpGistScanOpaque so, storeRes_func storeRes) { SpGistLeafTuple leafTuple = (SpGistLeafTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (leafTuple->tupstate != SPGIST_LIVE) { @@ -896,7 +896,7 @@ redirect: else /* page is inner */ { SpGistInnerTuple innerTuple = (SpGistInnerTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (innerTuple->tupstate != SPGIST_LIVE) { @@ -974,7 +974,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr, else { IndexOrderByDistance *distances = - palloc(sizeof(distances[0]) * so->numberOfOrderBys); + palloc(sizeof(distances[0]) * so->numberOfOrderBys); int i; for (i = 0; i < so->numberOfOrderBys; i++) diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c index a5e6c92f35..771438c8ce 100644 --- a/src/backend/access/table/tableam.c +++ b/src/backend/access/table/tableam.c @@ -112,7 +112,7 @@ TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key) { uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; Oid relid = RelationGetRelid(relation); Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); @@ -176,7 +176,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan) { Snapshot snapshot; uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; Assert(RelationGetRelid(relation) == pscan->phs_relid); diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index fe6698d5ff..abb022e067 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -3270,7 +3270,7 @@ multixact_redo(XLogReaderState *record) else if (info == XLOG_MULTIXACT_CREATE_ID) { xl_multixact_create *xlrec = - (xl_multixact_create *) XLogRecGetData(record); + (xl_multixact_create *) XLogRecGetData(record); TransactionId max_xid; int i; diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 7133ec0b22..2b8bc2f58d 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -375,8 +375,8 @@ InitializeParallelDSM(ParallelContext *pcxt) shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace); /* - * Serialize the transaction snapshot if the transaction - * isolation level uses a transaction snapshot. + * Serialize the transaction snapshot if the transaction isolation + * level uses a transaction snapshot. 
*/ if (IsolationUsesXactSnapshot()) { @@ -1497,8 +1497,8 @@ ParallelWorkerMain(Datum main_arg) RestoreClientConnectionInfo(clientconninfospace); /* - * Initialize SystemUser now that MyClientConnectionInfo is restored. - * Also ensure that auth_method is actually valid, aka authn_id is not NULL. + * Initialize SystemUser now that MyClientConnectionInfo is restored. Also + * ensure that auth_method is actually valid, aka authn_id is not NULL. */ if (MyClientConnectionInfo.authn_id) InitializeSystemUser(MyClientConnectionInfo.authn_id, diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 6a837e1539..8daaa535ed 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -3152,10 +3152,9 @@ CommitTransactionCommand(void) break; /* - * The user issued a SAVEPOINT inside a transaction block. - * Start a subtransaction. (DefineSavepoint already did - * PushTransaction, so as to have someplace to put the SUBBEGIN - * state.) + * The user issued a SAVEPOINT inside a transaction block. Start a + * subtransaction. (DefineSavepoint already did PushTransaction, + * so as to have someplace to put the SUBBEGIN state.) */ case TBLOCK_SUBBEGIN: StartSubTransaction(); @@ -4696,9 +4695,9 @@ RollbackAndReleaseCurrentSubTransaction(void) s = CurrentTransactionState; /* changed by pop */ Assert(s->blockState == TBLOCK_SUBINPROGRESS || - s->blockState == TBLOCK_INPROGRESS || - s->blockState == TBLOCK_IMPLICIT_INPROGRESS || - s->blockState == TBLOCK_STARTED); + s->blockState == TBLOCK_INPROGRESS || + s->blockState == TBLOCK_IMPLICIT_INPROGRESS || + s->blockState == TBLOCK_STARTED); } /* diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 63481d826f..408467df12 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5460,8 +5460,8 @@ StartupXLOG(void) missingContrecPtr = endOfRecoveryInfo->missingContrecPtr; /* - * Reset ps status display, so as no information related to recovery - * shows up. + * Reset ps status display, so as no information related to recovery shows + * up. */ set_ps_display(""); @@ -5596,9 +5596,9 @@ StartupXLOG(void) if (!XLogRecPtrIsInvalid(missingContrecPtr)) { /* - * We should only have a missingContrecPtr if we're not switching to - * a new timeline. When a timeline switch occurs, WAL is copied from - * the old timeline to the new only up to the end of the last complete + * We should only have a missingContrecPtr if we're not switching to a + * new timeline. When a timeline switch occurs, WAL is copied from the + * old timeline to the new only up to the end of the last complete * record, so there can't be an incomplete WAL record that we need to * disregard. 
*/ @@ -8494,7 +8494,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, */ if (rllen > datadirpathlen && strncmp(linkpath, DataDir, datadirpathlen) == 0 && - IS_DIR_SEP(linkpath[datadirpathlen])) + IS_DIR_SEP(linkpath[datadirpathlen])) relpath = pstrdup(linkpath + datadirpathlen + 1); /* diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index ea7e2f67af..54247e1d81 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -897,8 +897,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, * * XLogReader machinery is only able to handle records up to a certain * size (ignoring machine resource limitations), so make sure that we will - * not emit records larger than the sizes advertised to be supported. - * This cap is based on DecodeXLogRecordRequiredSpace(). + * not emit records larger than the sizes advertised to be supported. This + * cap is based on DecodeXLogRecordRequiredSpace(). */ if (total_len >= XLogRecordMaxSize) ereport(ERROR, diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c index 906e3d9469..539928cb85 100644 --- a/src/backend/access/transam/xlogprefetcher.c +++ b/src/backend/access/transam/xlogprefetcher.c @@ -569,7 +569,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) if (record_type == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) record->main_data; + (xl_dbase_create_file_copy_rec *) record->main_data; RelFileLocator rlocator = {InvalidOid, xlrec->db_id, InvalidRelFileNumber}; @@ -596,7 +596,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) if (record_type == XLOG_SMGR_CREATE) { xl_smgr_create *xlrec = (xl_smgr_create *) - record->main_data; + record->main_data; if (xlrec->forkNum == MAIN_FORKNUM) { @@ -624,7 +624,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) else if (record_type == XLOG_SMGR_TRUNCATE) { xl_smgr_truncate *xlrec = (xl_smgr_truncate *) - record->main_data; + record->main_data; /* * Don't consider prefetching anything in the truncated diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index cadea21b37..6eee1abccd 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -282,7 +282,7 @@ XLogRecPtr XLogReleasePreviousRecord(XLogReaderState *state) { DecodedXLogRecord *record; - XLogRecPtr next_lsn; + XLogRecPtr next_lsn; if (!state->record) return InvalidXLogRecPtr; diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index 188f6d6f85..4883fcb512 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -3215,7 +3215,7 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *readBuf) { XLogPageReadPrivate *private = - (XLogPageReadPrivate *) xlogreader->private_data; + (XLogPageReadPrivate *) xlogreader->private_data; int emode = private->emode; uint32 targetPageOff; XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY; diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c index 5baea7535b..45be21131c 100644 --- a/src/backend/backup/basebackup.c +++ b/src/backend/backup/basebackup.c @@ -1609,10 +1609,10 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename, * * There's no guarantee that this will 
actually * happen, though: the torn write could take an - * arbitrarily long time to complete. Retrying multiple - * times wouldn't fix this problem, either, though - * it would reduce the chances of it happening in - * practice. The only real fix here seems to be to + * arbitrarily long time to complete. Retrying + * multiple times wouldn't fix this problem, either, + * though it would reduce the chances of it happening + * in practice. The only real fix here seems to be to * have some kind of interlock that allows us to wait * until we can be certain that no write to the block * is in progress. Since we don't have any such thing diff --git a/src/backend/backup/basebackup_copy.c b/src/backend/backup/basebackup_copy.c index 73a3f4a970..1db80cde1b 100644 --- a/src/backend/backup/basebackup_copy.c +++ b/src/backend/backup/basebackup_copy.c @@ -350,6 +350,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli) tupdesc = CreateTemplateTupleDesc(2); TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0); + /* * int8 may seem like a surprising data type for this, but in theory int4 * would not be wide enough for this, as TimeLineID is unsigned. @@ -360,7 +361,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli) tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual); /* Data row */ - values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr))); + values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr))); values[1] = Int64GetDatum(tli); do_tup_output(tstate, values, nulls); diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 45cdcd3dc6..bc2ad773c9 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -3389,8 +3389,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask, result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)); /* - * Check if ACL_MAINTAIN is being checked and, if so, and not already set as - * part of the result, then check if the user is a member of the + * Check if ACL_MAINTAIN is being checked and, if so, and not already set + * as part of the result, then check if the user is a member of the * pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH * MATERIALIZED VIEW, and REINDEX on all relations. */ diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c index feddff654e..522da0ac85 100644 --- a/src/backend/catalog/indexing.c +++ b/src/backend/catalog/indexing.c @@ -148,8 +148,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple, #endif /* USE_ASSERT_CHECKING */ /* - * Skip insertions into non-summarizing indexes if we only need - * to update summarizing indexes. + * Skip insertions into non-summarizing indexes if we only need to + * update summarizing indexes. 
*/ if (onlySummarized && !indexInfo->ii_Summarizing) continue; diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 14e57adee2..51d5ba669d 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -3838,7 +3838,7 @@ recomputeNamespacePath(void) if (OidIsValid(namespaceId) && !list_member_oid(oidlist, namespaceId) && object_aclcheck(NamespaceRelationId, namespaceId, roleid, - ACL_USAGE) == ACLCHECK_OK && + ACL_USAGE) == ACLCHECK_OK && InvokeNamespaceSearchHook(namespaceId, false)) oidlist = lappend_oid(oidlist, namespaceId); } @@ -3866,7 +3866,7 @@ recomputeNamespacePath(void) if (OidIsValid(namespaceId) && !list_member_oid(oidlist, namespaceId) && object_aclcheck(NamespaceRelationId, namespaceId, roleid, - ACL_USAGE) == ACLCHECK_OK && + ACL_USAGE) == ACLCHECK_OK && InvokeNamespaceSearchHook(namespaceId, false)) oidlist = lappend_oid(oidlist, namespaceId); } @@ -4002,7 +4002,7 @@ InitTempTableNamespace(void) * temp table creation request is made by someone with appropriate rights. */ if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CREATE_TEMP) != ACLCHECK_OK) + ACL_CREATE_TEMP) != ACLCHECK_OK) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to create temporary tables in database \"%s\"", diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index 792b0ef414..95918a77a1 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -625,7 +625,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId, /* not in catalogs, different from operator, so make shell */ aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(otherNamespace)); diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 64d326f073..91c7f3426f 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -1414,6 +1414,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior) /* FALLTHROUGH */ case SHARED_DEPENDENCY_OWNER: + /* * Save it for deletion below, if it's a local object or a * role grant. 
Other shared objects, such as databases, diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 10f28f94bc..e95dc31bde 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -231,7 +231,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) if (OidIsValid(namespaceId)) { aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -1035,7 +1035,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) AclResult aclresult; aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c index c91fe66d9b..2969a2bb21 100644 --- a/src/backend/commands/collationcmds.c +++ b/src/backend/commands/collationcmds.c @@ -270,8 +270,8 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e */ if (!IsBinaryUpgrade) { - char *langtag = icu_language_tag(colliculocale, - icu_validation_level); + char *langtag = icu_language_tag(colliculocale, + icu_validation_level); if (langtag && strcmp(colliculocale, langtag) != 0) { @@ -476,17 +476,18 @@ AlterCollation(AlterCollationStmt *stmt) Datum pg_collation_actual_version(PG_FUNCTION_ARGS) { - Oid collid = PG_GETARG_OID(0); - char provider; - char *locale; - char *version; - Datum datum; + Oid collid = PG_GETARG_OID(0); + char provider; + char *locale; + char *version; + Datum datum; if (collid == DEFAULT_COLLATION_OID) { /* retrieve from pg_database */ HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId)); + if (!HeapTupleIsValid(dbtup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -506,7 +507,8 @@ pg_collation_actual_version(PG_FUNCTION_ARGS) { /* retrieve from pg_collation */ - HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); + HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); + if (!HeapTupleIsValid(colltp)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -657,11 +659,10 @@ create_collation_from_locale(const char *locale, int nspid, Oid collid; /* - * Some systems have locale names that don't consist entirely of - * ASCII letters (such as "bokmål" or "français"). - * This is pretty silly, since we need the locale itself to - * interpret the non-ASCII characters. We can't do much with - * those, so we filter them out. + * Some systems have locale names that don't consist entirely of ASCII + * letters (such as "bokmål" or "français"). This is pretty + * silly, since we need the locale itself to interpret the non-ASCII + * characters. We can't do much with those, so we filter them out. */ if (!pg_is_ascii(locale)) { @@ -681,19 +682,18 @@ create_collation_from_locale(const char *locale, int nspid, return -1; } if (enc == PG_SQL_ASCII) - return -1; /* C/POSIX are already in the catalog */ + return -1; /* C/POSIX are already in the catalog */ /* count valid locales found in operating system */ (*nvalidp)++; /* - * Create a collation named the same as the locale, but quietly - * doing nothing if it already exists. This is the behavior we - * need even at initdb time, because some versions of "locale -a" - * can report the same locale name more than once. 
And it's - * convenient for later import runs, too, since you just about - * always want to add on new locales without a lot of chatter - * about existing ones. + * Create a collation named the same as the locale, but quietly doing + * nothing if it already exists. This is the behavior we need even at + * initdb time, because some versions of "locale -a" can report the same + * locale name more than once. And it's convenient for later import runs, + * too, since you just about always want to add on new locales without a + * lot of chatter about existing ones. */ collid = CollationCreate(locale, nspid, GetUserId(), COLLPROVIDER_LIBC, true, enc, @@ -995,8 +995,8 @@ pg_import_system_collations(PG_FUNCTION_ARGS) param.nvalidp = &nvalid; /* - * Enumerate the locales that are either installed on or supported - * by the OS. + * Enumerate the locales that are either installed on or supported by + * the OS. */ if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL, (LPARAM) &param, NULL)) diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 2e242eeff2..99d4080ea9 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -259,7 +259,7 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath) List *rlocatorlist = NIL; LockRelId relid; Snapshot snapshot; - SMgrRelation smgr; + SMgrRelation smgr; BufferAccessStrategy bstrategy; /* Get pg_class relfilenumber. */ @@ -1065,8 +1065,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) */ if (!IsBinaryUpgrade && dbiculocale != src_iculocale) { - char *langtag = icu_language_tag(dbiculocale, - icu_validation_level); + char *langtag = icu_language_tag(dbiculocale, + icu_validation_level); if (langtag && strcmp(dbiculocale, langtag) != 0) { @@ -1219,7 +1219,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) dst_deftablespace = get_tablespace_oid(tablespacename, false); /* check permissions */ aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, tablespacename); @@ -1406,8 +1406,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) * If we're going to be reading data for the to-be-created database into * shared_buffers, take a lock on it. Nobody should know that this * database exists yet, but it's good to maintain the invariant that an - * AccessExclusiveLock on the database is sufficient to drop all - * of its buffers without worrying about more being read later. + * AccessExclusiveLock on the database is sufficient to drop all of its + * buffers without worrying about more being read later. 
* * Note that we need to do this before entering the * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback @@ -1933,7 +1933,7 @@ movedb(const char *dbname, const char *tblspcname) * Permission checks */ aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, tblspcname); @@ -3110,7 +3110,7 @@ dbase_redo(XLogReaderState *record) if (info == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) XLogRecGetData(record); + (xl_dbase_create_file_copy_rec *) XLogRecGetData(record); char *src_path; char *dst_path; char *parent_path; @@ -3182,7 +3182,7 @@ dbase_redo(XLogReaderState *record) else if (info == XLOG_DBASE_CREATE_WAL_LOG) { xl_dbase_create_wal_log_rec *xlrec = - (xl_dbase_create_wal_log_rec *) XLogRecGetData(record); + (xl_dbase_create_wal_log_rec *) XLogRecGetData(record); char *dbpath; char *parent_path; diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c index 82bda15889..469a6c2ee9 100644 --- a/src/backend/commands/dropcmds.c +++ b/src/backend/commands/dropcmds.c @@ -493,6 +493,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) case OBJECT_TABLE: case OBJECT_TABLESPACE: case OBJECT_VIEW: + /* * These are handled elsewhere, so if someone gets here the code * is probably wrong or should be revisited. diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 5334c503e1..15f9bddcdf 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -1523,7 +1523,7 @@ ExplainNode(PlanState *planstate, List *ancestors, { BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan; const char *indexname = - explain_get_index_name(bitmapindexscan->indexid); + explain_get_index_name(bitmapindexscan->indexid); if (es->format == EXPLAIN_FORMAT_TEXT) appendStringInfo(es->str, " on %s", @@ -3008,7 +3008,7 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate, for (n = 0; n < incrsortstate->shared_info->num_workers; n++) { IncrementalSortInfo *incsort_info = - &incrsortstate->shared_info->sinfo[n]; + &incrsortstate->shared_info->sinfo[n]; /* * If a worker hasn't processed any sort groups at all, then @@ -4212,7 +4212,7 @@ ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es) { ListCell *cell; const char *label = - (list_length(css->custom_ps) != 1 ? "children" : "child"); + (list_length(css->custom_ps) != 1 ? 
"children" : "child"); foreach(cell, css->custom_ps) ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es); diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 69f66dfe7d..127a3a590c 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -151,7 +151,7 @@ compute_return_type(TypeName *returnType, Oid languageOid, namespaceId = QualifiedNameGetCreationNamespace(returnType->names, &typname); aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -2117,7 +2117,7 @@ ExecuteDoStmt(ParseState *pstate, DoStmt *stmt, bool atomic) AclResult aclresult; aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_LANGUAGE, NameStr(languageStruct->lanname)); diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index e6ee99e51f..a5168c9f09 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -748,7 +748,7 @@ DefineIndex(Oid relationId, AclResult aclresult; aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -780,7 +780,7 @@ DefineIndex(Oid relationId, AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(tablespaceId)); @@ -2708,7 +2708,7 @@ ExecReindex(ParseState *pstate, ReindexStmt *stmt, bool isTopLevel) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(params.tablespaceOid)); @@ -3066,11 +3066,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, /* * The table can be reindexed if the user has been granted MAINTAIN on * the table or one of its partition ancestors or the user is a - * superuser, the table owner, or the database/schema owner (but in the - * latter case, only if it's not a shared relation). pg_class_aclcheck - * includes the superuser case, and depending on objectKind we already - * know that the user has permission to run REINDEX on this database or - * schema per the permission checks at the beginning of this routine. + * superuser, the table owner, or the database/schema owner (but in + * the latter case, only if it's not a shared relation). + * pg_class_aclcheck includes the superuser case, and depending on + * objectKind we already know that the user has permission to run + * REINDEX on this database or schema per the permission checks at the + * beginning of this routine. 
*/ if (classtuple->relisshared && pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK && @@ -3312,7 +3313,7 @@ ReindexMultipleInternal(List *relids, ReindexParams *params) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(params->tablespaceOid)); diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index 90de935267..db347a64cf 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -382,7 +382,7 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId) * no special case for them. */ aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index 56eafbff10..bc7e0cbddf 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -604,9 +604,9 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)"); /* - * We don't want to allow unprivileged users to be able to trigger attempts - * to access arbitrary network destinations, so require the user to have - * been specifically authorized to create subscriptions. + * We don't want to allow unprivileged users to be able to trigger + * attempts to access arbitrary network destinations, so require the user + * to have been specifically authorized to create subscriptions. */ if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION)) ereport(ERROR, @@ -629,10 +629,10 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, * exempt a subscription from this requirement. */ if (!opts.passwordrequired && !superuser_arg(owner)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* * If built with appropriate switch, whine when regression-testing @@ -1111,8 +1111,8 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, if (!sub->passwordrequired && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* Lock the subscription so nobody else can do anything with it. 
*/ LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock); @@ -1825,8 +1825,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) if (!form->subpasswordrequired && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* Must be able to become new owner */ check_can_set_role(GetUserId(), newOwnerId); @@ -1835,8 +1835,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) * current owner must have CREATE on database * * This is consistent with how ALTER SCHEMA ... OWNER TO works, but some - * other object types behave differently (e.g. you can't give a table to - * a user who lacks CREATE privileges on a schema). + * other object types behave differently (e.g. you can't give a table to a + * user who lacks CREATE privileges on a schema). */ aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), ACL_CREATE); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 343fe61115..750b0332da 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -806,7 +806,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(tablespaceId)); @@ -1931,7 +1931,7 @@ ExecuteTruncateGuts(List *explicit_rels, resultRelInfo = resultRelInfos; foreach(cell, rels) { - UserContext ucxt; + UserContext ucxt; if (run_as_table_owner) SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner, @@ -2143,7 +2143,7 @@ ExecuteTruncateGuts(List *explicit_rels, resultRelInfo = resultRelInfos; foreach(cell, rels) { - UserContext ucxt; + UserContext ucxt; if (run_as_table_owner) SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner, @@ -2635,7 +2635,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, if (CompressionMethodIsValid(attribute->attcompression)) { const char *compression = - GetCompressionMethodName(attribute->attcompression); + GetCompressionMethodName(attribute->attcompression); if (def->compression == NULL) def->compression = pstrdup(compression); @@ -13947,7 +13947,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock /* New owner must have CREATE privilege on namespace */ aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceOid)); @@ -14377,7 +14377,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(view_query, true); + view_query_is_auto_updatable(view_query, true); if (view_updatable_error) ereport(ERROR, @@ -14656,7 +14656,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != 
ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(new_tablespaceoid)); @@ -17134,7 +17134,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, if (IsA(stmt, RenameStmt)) { aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(classform->relnamespace)); diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 3dfbf6a917..13b0dee146 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -1278,7 +1278,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source) /* Check permissions, similarly complaining only if interactive */ aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) { if (source >= PGC_S_INTERACTIVE) @@ -1408,7 +1408,7 @@ PrepareTempTablespaces(void) /* Check permissions similarly */ aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) continue; diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 3440dbc440..216482095d 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -734,7 +734,7 @@ DefineDomain(CreateDomainStmt *stmt) /* Check we have creation rights in target namespace */ aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(domainNamespace)); @@ -3743,8 +3743,8 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype) /* New owner must have CREATE privilege on namespace */ aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace, - newOwnerId, - ACL_CREATE); + newOwnerId, + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(typTup->typnamespace)); diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 707114bdd0..d63d3c58ca 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -86,7 +86,7 @@ typedef struct int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256; char *createrole_self_grant = ""; bool createrole_self_grant_enabled = false; -GrantRoleOptions createrole_self_grant_options; +GrantRoleOptions createrole_self_grant_options; /* Hook to check passwords in CreateRole() and AlterRole() */ check_password_hook_type check_password_hook = NULL; @@ -169,7 +169,7 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) DefElem *dadminmembers = NULL; DefElem *dvalidUntil = NULL; DefElem *dbypassRLS = NULL; - GrantRoleOptions popt; + GrantRoleOptions popt; /* The defaults can vary depending on the original statement type */ switch (stmt->stmt_type) @@ -535,8 +535,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) * * The grantor of record for this implicit grant is the bootstrap * superuser, which means that the CREATEROLE user cannot revoke the - * grant. They can however grant the created role back to themselves - * with different options, since they enjoy ADMIN OPTION on it. + * grant. They can however grant the created role back to themselves with + * different options, since they enjoy ADMIN OPTION on it. 
*/ if (!superuser()) { @@ -561,8 +561,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) BOOTSTRAP_SUPERUSERID, &poptself); /* - * We must make the implicit grant visible to the code below, else - * the additional grants will fail. + * We must make the implicit grant visible to the code below, else the + * additional grants will fail. */ CommandCounterIncrement(); @@ -585,8 +585,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) * Add the specified members to this new role. adminmembers get the admin * option, rolemembers don't. * - * NB: No permissions check is required here. If you have enough rights - * to create a role, you can add any members you like. + * NB: No permissions check is required here. If you have enough rights to + * create a role, you can add any members you like. */ AddRoleMems(currentUserId, stmt->role, roleid, rolemembers, roleSpecsToIds(rolemembers), @@ -647,7 +647,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt) DefElem *dbypassRLS = NULL; Oid roleid; Oid currentUserId = GetUserId(); - GrantRoleOptions popt; + GrantRoleOptions popt; check_rolespec_name(stmt->role, _("Cannot alter reserved roles.")); @@ -862,7 +862,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt) */ if (dissuper) { - bool should_be_super = boolVal(dissuper->arg); + bool should_be_super = boolVal(dissuper->arg); if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID) ereport(ERROR, @@ -1021,9 +1021,9 @@ AlterRoleSet(AlterRoleSetStmt *stmt) shdepLockAndCheckObject(AuthIdRelationId, roleid); /* - * To mess with a superuser you gotta be superuser; otherwise you - * need CREATEROLE plus admin option on the target role; unless you're - * just trying to change your own settings + * To mess with a superuser you gotta be superuser; otherwise you need + * CREATEROLE plus admin option on the target role; unless you're just + * trying to change your own settings */ if (roleform->rolsuper) { @@ -1037,7 +1037,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt) else { if ((!have_createrole_privilege() || - !is_admin_of_role(GetUserId(), roleid)) + !is_admin_of_role(GetUserId(), roleid)) && roleid != GetUserId()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), @@ -1490,14 +1490,14 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt) Oid grantor; List *grantee_ids; ListCell *item; - GrantRoleOptions popt; + GrantRoleOptions popt; Oid currentUserId = GetUserId(); /* Parse options list. */ InitGrantRoleOptions(&popt); foreach(item, stmt->opt) { - DefElem *opt = (DefElem *) lfirst(item); + DefElem *opt = (DefElem *) lfirst(item); char *optval = defGetString(opt); if (strcmp(opt->defname, "admin") == 0) @@ -1546,8 +1546,8 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt) /* * Step through all of the granted roles and add, update, or remove * entries in pg_auth_members as appropriate. If stmt->is_grant is true, - * we are adding new grants or, if they already exist, updating options - * on those grants. If stmt->is_grant is false, we are revoking grants or + * we are adding new grants or, if they already exist, updating options on + * those grants. If stmt->is_grant is false, we are revoking grants or * removing options from them. */ foreach(item, stmt->granted_roles) @@ -1848,8 +1848,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, ObjectIdGetDatum(grantorId)); /* - * If we found a tuple, update it with new option values, unless - * there are no changes, in which case issue a WARNING. 
+ * If we found a tuple, update it with new option values, unless there + * are no changes, in which case issue a WARNING. * * If we didn't find a tuple, just insert one. */ @@ -1932,8 +1932,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, popt->inherit; else { - HeapTuple mrtup; - Form_pg_authid mrform; + HeapTuple mrtup; + Form_pg_authid mrform; mrtup = SearchSysCache1(AUTHOID, memberid); if (!HeapTupleIsValid(mrtup)) @@ -2332,8 +2332,8 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, /* * If popt.specified == 0, we're revoking the grant entirely; otherwise, * we expect just one bit to be set, and we're revoking the corresponding - * option. As of this writing, there's no syntax that would allow for - * an attempt to revoke multiple options at once, and the logic below + * option. As of this writing, there's no syntax that would allow for an + * attempt to revoke multiple options at once, and the logic below * wouldn't work properly if such syntax were added, so assert that our * caller isn't trying to do that. */ @@ -2365,7 +2365,7 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, } else { - bool revoke_admin_option_only; + bool revoke_admin_option_only; /* * Revoking the grant entirely, or ADMIN option on a grant, @@ -2572,7 +2572,7 @@ check_createrole_self_grant(char **newval, void **extra, GucSource source) void assign_createrole_self_grant(const char *newval, void *extra) { - unsigned options = * (unsigned *) extra; + unsigned options = *(unsigned *) extra; createrole_self_grant_enabled = (options != 0); createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index ff98c773f5..9bd77546b9 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -437,7 +437,7 @@ DefineView(ViewStmt *stmt, const char *queryString, if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(viewParse, true); + view_query_is_auto_updatable(viewParse, true); if (view_updatable_error) ereport(ERROR, diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c index dcf56446c7..5e52d5ece2 100644 --- a/src/backend/executor/execExpr.c +++ b/src/backend/executor/execExpr.c @@ -1214,8 +1214,8 @@ ExecInitExprRec(Expr *node, ExprState *state, /* Check permission to call function */ aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid, - GetUserId(), - ACL_EXECUTE); + GetUserId(), + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(cmpfuncid)); @@ -1224,8 +1224,8 @@ ExecInitExprRec(Expr *node, ExprState *state, if (OidIsValid(opexpr->hashfuncid)) { aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid, - GetUserId(), - ACL_EXECUTE); + GetUserId(), + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(opexpr->hashfuncid)); @@ -3602,7 +3602,7 @@ ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase, * column sorted on. 
*/ TargetEntry *source_tle = - (TargetEntry *) linitial(pertrans->aggref->args); + (TargetEntry *) linitial(pertrans->aggref->args); Assert(list_length(pertrans->aggref->args) == 1); diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index 4cd46f1717..7561e64dfc 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1647,7 +1647,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerGroup pergroup_allaggs = - aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff]; + aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff]; if (pergroup_allaggs == NULL) EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull); @@ -1672,7 +1672,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1700,7 +1700,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1718,7 +1718,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1735,7 +1735,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -1756,7 +1756,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -1773,7 +1773,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c index da28e5e40c..1d82b64b89 100644 --- a/src/backend/executor/execIndexing.c +++ b/src/backend/executor/execIndexing.c @@ -354,8 +354,8 @@ 
ExecInsertIndexTuples(ResultRelInfo *resultRelInfo, continue; /* - * Skip processing of non-summarizing indexes if we only - * update summarizing indexes + * Skip processing of non-summarizing indexes if we only update + * summarizing indexes */ if (onlySummarizing && !indexInfo->ii_Summarizing) continue; diff --git a/src/backend/executor/execSRF.c b/src/backend/executor/execSRF.c index d09a7758dc..73bf9152a4 100644 --- a/src/backend/executor/execSRF.c +++ b/src/backend/executor/execSRF.c @@ -260,7 +260,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, if (first_time) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); rsinfo.setResult = tupstore; @@ -290,7 +290,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, if (tupdesc == NULL) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); /* * This is the first non-NULL result from the @@ -395,7 +395,7 @@ no_function_result: if (rsinfo.setResult == NULL) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); rsinfo.setResult = tupstore; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 3aab5a0e80..f3a522571a 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -3692,7 +3692,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) /* Check permission to call aggregate function */ aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(), - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_AGGREGATE, get_func_name(aggref->aggfnoid)); @@ -3759,7 +3759,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(finalfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(finalfn_oid)); @@ -3768,7 +3768,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(serialfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(serialfn_oid)); @@ -3777,7 +3777,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(deserialfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(deserialfn_oid)); diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 5fd1c5553b..ac3eb32d97 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -1327,7 +1327,7 @@ ExecParallelHashRepartitionFirst(HashJoinTable hashtable) else { size_t tuple_size = - MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); + MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); /* It belongs in a later batch. 
*/ hashtable->batches[batchno].estimated_size += tuple_size; @@ -1369,7 +1369,7 @@ ExecParallelHashRepartitionRest(HashJoinTable hashtable) for (i = 1; i < old_nbatch; ++i) { ParallelHashJoinBatch *shared = - NthParallelHashJoinBatch(old_batches, i); + NthParallelHashJoinBatch(old_batches, i); old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared), ParallelWorkerNumber + 1, @@ -3317,7 +3317,7 @@ ExecHashTableDetachBatch(HashJoinTable hashtable) while (DsaPointerIsValid(batch->chunks)) { HashMemoryChunk chunk = - dsa_get_address(hashtable->area, batch->chunks); + dsa_get_address(hashtable->area, batch->chunks); dsa_pointer next = chunk->next.shared; dsa_free(hashtable->area, batch->chunks); diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index 0a3f32f731..b29a8ff48b 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -1170,7 +1170,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate) { SharedTuplestoreAccessor *inner_tuples; Barrier *batch_barrier = - &hashtable->batches[batchno].shared->batch_barrier; + &hashtable->batches[batchno].shared->batch_barrier; switch (BarrierAttach(batch_barrier)) { @@ -1558,7 +1558,7 @@ ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt) { int plan_node_id = state->js.ps.plan->plan_node_id; ParallelHashJoinState *pstate = - shm_toc_lookup(pcxt->toc, plan_node_id, false); + shm_toc_lookup(pcxt->toc, plan_node_id, false); /* * It would be possible to reuse the shared hash table in single-batch @@ -1593,7 +1593,7 @@ ExecHashJoinInitializeWorker(HashJoinState *state, HashState *hashNode; int plan_node_id = state->js.ps.plan->plan_node_id; ParallelHashJoinState *pstate = - shm_toc_lookup(pwcxt->toc, plan_node_id, false); + shm_toc_lookup(pwcxt->toc, plan_node_id, false); /* Attach to the space for shared temporary files. */ SharedFileSetAttach(&pstate->fileset, pwcxt->seg); diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c index 12bc22f33c..0994b2c113 100644 --- a/src/backend/executor/nodeIncrementalSort.c +++ b/src/backend/executor/nodeIncrementalSort.c @@ -1007,9 +1007,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags) if (incrsortstate->ss.ps.instrument != NULL) { IncrementalSortGroupInfo *fullsortGroupInfo = - &incrsortstate->incsort_info.fullsortGroupInfo; + &incrsortstate->incsort_info.fullsortGroupInfo; IncrementalSortGroupInfo *prefixsortGroupInfo = - &incrsortstate->incsort_info.prefixsortGroupInfo; + &incrsortstate->incsort_info.prefixsortGroupInfo; fullsortGroupInfo->groupCount = 0; fullsortGroupInfo->maxDiskSpaceUsed = 0; diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 6aa8c03def..a4b53b0474 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -111,7 +111,7 @@ typedef struct UpdateContext { bool updated; /* did UPDATE actually occur? */ bool crossPartUpdate; /* was it a cross-partition update? */ - TU_UpdateIndexes updateIndexes; /* Which index updates are required? */ + TU_UpdateIndexes updateIndexes; /* Which index updates are required? 
*/ /* * Lock mode to acquire on the latest tuple version before performing @@ -882,7 +882,7 @@ ExecInsert(ModifyTableContext *context, { TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor); TupleDesc plan_tdesc = - CreateTupleDescCopy(planSlot->tts_tupleDescriptor); + CreateTupleDescCopy(planSlot->tts_tupleDescriptor); resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] = MakeSingleTupleTableSlot(tdesc, slot->tts_ops); diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c index 0c6c912778..791cbd2372 100644 --- a/src/backend/executor/nodeTableFuncscan.c +++ b/src/backend/executor/nodeTableFuncscan.c @@ -352,7 +352,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc) int colno; Datum value; int ordinalitycol = - ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; + ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; /* * Install the document as a possibly-toasted Datum into the tablefunc diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 3ac581a711..8bf15e7236 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -2580,7 +2580,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) /* Check permission to call window function */ aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(), - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(wfunc->winfnoid)); @@ -2819,7 +2819,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (!OidIsValid(aggform->aggminvtransfn)) use_ma_code = false; /* sine qua non */ else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY && - aggform->aggfinalmodify != AGGMODIFY_READ_ONLY) + aggform->aggfinalmodify != AGGMODIFY_READ_ONLY) use_ma_code = true; /* decision forced by safety */ else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) use_ma_code = false; /* non-moving frame head */ @@ -2869,7 +2869,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, ReleaseSysCache(procTuple); aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(transfn_oid)); @@ -2878,7 +2878,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (OidIsValid(invtransfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(invtransfn_oid)); @@ -2888,7 +2888,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (OidIsValid(finalfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(finalfn_oid)); diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 256632c985..33975687b3 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -3345,7 +3345,7 @@ SPI_register_trigger_data(TriggerData *tdata) if (tdata->tg_newtable) { EphemeralNamedRelation enr = - palloc(sizeof(EphemeralNamedRelationData)); + palloc(sizeof(EphemeralNamedRelationData)); int rc; enr->md.name = tdata->tg_trigger->tgnewtable; @@ -3362,7 +3362,7 @@ SPI_register_trigger_data(TriggerData 
*tdata) if (tdata->tg_oldtable) { EphemeralNamedRelation enr = - palloc(sizeof(EphemeralNamedRelationData)); + palloc(sizeof(EphemeralNamedRelationData)); int rc; enr->md.name = tdata->tg_trigger->tgoldtable; diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c index a8b73a9cf1..812a86d62c 100644 --- a/src/backend/jit/llvm/llvmjit.c +++ b/src/backend/jit/llvm/llvmjit.c @@ -52,7 +52,7 @@ typedef struct LLVMJitHandle LLVMOrcJITStackRef stack; LLVMOrcModuleHandle orc_handle; #endif -} LLVMJitHandle; +} LLVMJitHandle; /* types & functions commonly needed for JITing */ @@ -110,8 +110,8 @@ static LLVMOrcJITStackRef llvm_opt3_orc; static void llvm_release_context(JitContext *context); static void llvm_session_initialize(void); static void llvm_shutdown(int code, Datum arg); -static void llvm_compile_module(LLVMJitContext *context); -static void llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module); +static void llvm_compile_module(LLVMJitContext * context); +static void llvm_optimize_module(LLVMJitContext * context, LLVMModuleRef module); static void llvm_create_types(void); static uint64_t llvm_resolve_symbol(const char *name, void *ctx); @@ -227,7 +227,7 @@ llvm_release_context(JitContext *context) * Return module which may be modified, e.g. by creating new functions. */ LLVMModuleRef -llvm_mutable_module(LLVMJitContext *context) +llvm_mutable_module(LLVMJitContext * context) { llvm_assert_in_fatal_section(); @@ -273,7 +273,7 @@ llvm_expand_funcname(struct LLVMJitContext *context, const char *basename) * code to be optimized and emitted, do so first. */ void * -llvm_get_function(LLVMJitContext *context, const char *funcname) +llvm_get_function(LLVMJitContext * context, const char *funcname) { #if LLVM_VERSION_MAJOR > 11 || \ defined(HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN) && HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN @@ -493,7 +493,7 @@ llvm_copy_attributes(LLVMValueRef v_from, LLVMValueRef v_to) * Return a callable LLVMValueRef for fcinfo. */ LLVMValueRef -llvm_function_reference(LLVMJitContext *context, +llvm_function_reference(LLVMJitContext * context, LLVMBuilderRef builder, LLVMModuleRef mod, FunctionCallInfo fcinfo) @@ -556,7 +556,7 @@ llvm_function_reference(LLVMJitContext *context, * Optimize code in module using the flags set in context. */ static void -llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module) +llvm_optimize_module(LLVMJitContext * context, LLVMModuleRef module) { LLVMPassManagerBuilderRef llvm_pmb; LLVMPassManagerRef llvm_mpm; @@ -627,7 +627,7 @@ llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module) * Emit code for the currently pending module. */ static void -llvm_compile_module(LLVMJitContext *context) +llvm_compile_module(LLVMJitContext * context) { LLVMJitHandle *handle; MemoryContext oldcontext; @@ -799,9 +799,9 @@ llvm_session_initialize(void) LLVMInitializeNativeAsmParser(); /* - * When targeting an LLVM version with opaque pointers enabled by - * default, turn them off for the context we build our code in. We don't - * need to do so for other contexts (e.g. llvm_ts_context). Once the IR is + * When targeting an LLVM version with opaque pointers enabled by default, + * turn them off for the context we build our code in. We don't need to + * do so for other contexts (e.g. llvm_ts_context). Once the IR is * generated, it carries the necessary information. 
*/ #if LLVM_VERSION_MAJOR > 14 @@ -1175,7 +1175,7 @@ static LLVMOrcObjectLayerRef llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple) { LLVMOrcObjectLayerRef objlayer = - LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES); + LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES); #if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER if (jit_debugging_support) diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c index 6b15588da6..4fbc8a0cbc 100644 --- a/src/backend/jit/llvm/llvmjit_deform.c +++ b/src/backend/jit/llvm/llvmjit_deform.c @@ -31,7 +31,7 @@ * Create a function that deforms a tuple of type desc up to natts columns. */ LLVMValueRef -slot_compile_deform(LLVMJitContext *context, TupleDesc desc, +slot_compile_deform(LLVMJitContext * context, TupleDesc desc, const TupleTableSlotOps *ops, int natts) { char *funcname; @@ -650,7 +650,7 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, { LLVMValueRef v_tmp_loaddata; LLVMTypeRef vartypep = - LLVMPointerType(LLVMIntType(att->attlen * 8), 0); + LLVMPointerType(LLVMIntType(att->attlen * 8), 0); v_tmp_loaddata = LLVMBuildPointerCast(b, v_attdatap, vartypep, ""); diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c index daefe66f40..8a515849c8 100644 --- a/src/backend/jit/llvm/llvmjit_expr.c +++ b/src/backend/jit/llvm/llvmjit_expr.c @@ -49,19 +49,19 @@ typedef struct CompiledExprState { LLVMJitContext *context; const char *funcname; -} CompiledExprState; +} CompiledExprState; static Datum ExecRunCompiledExpr(ExprState *state, ExprContext *econtext, bool *isNull); -static LLVMValueRef BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b, +static LLVMValueRef BuildV1Call(LLVMJitContext * context, LLVMBuilderRef b, LLVMModuleRef mod, FunctionCallInfo fcinfo, - LLVMValueRef *v_fcinfo_isnull); + LLVMValueRef * v_fcinfo_isnull); static LLVMValueRef build_EvalXFuncInt(LLVMBuilderRef b, LLVMModuleRef mod, const char *funcname, LLVMValueRef v_state, ExprEvalStep *op, - int natts, LLVMValueRef *v_args); + int natts, LLVMValueRef * v_args); static LLVMValueRef create_LifetimeEnd(LLVMModuleRef mod); /* macro making it easier to call ExecEval* functions */ @@ -1047,7 +1047,7 @@ llvm_compile_expr(ExprState *state) else { LLVMValueRef v_value = - LLVMBuildLoad(b, v_resvaluep, ""); + LLVMBuildLoad(b, v_resvaluep, ""); v_value = LLVMBuildZExt(b, LLVMBuildICmp(b, LLVMIntEQ, @@ -2464,9 +2464,9 @@ ExecRunCompiledExpr(ExprState *state, ExprContext *econtext, bool *isNull) } static LLVMValueRef -BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b, +BuildV1Call(LLVMJitContext * context, LLVMBuilderRef b, LLVMModuleRef mod, FunctionCallInfo fcinfo, - LLVMValueRef *v_fcinfo_isnull) + LLVMValueRef * v_fcinfo_isnull) { LLVMValueRef v_fn; LLVMValueRef v_fcinfo_isnullp; @@ -2512,7 +2512,7 @@ BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b, static LLVMValueRef build_EvalXFuncInt(LLVMBuilderRef b, LLVMModuleRef mod, const char *funcname, LLVMValueRef v_state, ExprEvalStep *op, - int nargs, LLVMValueRef *v_args) + int nargs, LLVMValueRef * v_args) { LLVMValueRef v_fn = llvm_pg_func(mod, funcname); LLVMValueRef *params; diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c index 7f52e1ee23..43d45810cd 100644 --- a/src/backend/libpq/be-secure-gssapi.c +++ b/src/backend/libpq/be-secure-gssapi.c @@ -527,8 +527,8 @@ secure_open_gssapi(Port 
*port) /* * Use the configured keytab, if there is one. As we now require MIT - * Kerberos, we might consider using the credential store extensions in the - * future instead of the environment variable. + * Kerberos, we might consider using the credential store extensions in + * the future instead of the environment variable. */ if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0') { diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index 685aa2ed69..69662099cd 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -1104,8 +1104,8 @@ prepare_cert_name(char *name) if (namelen > MAXLEN) { /* - * Keep the end of the name, not the beginning, since the most specific - * field is likely to give users the most information. + * Keep the end of the name, not the beginning, since the most + * specific field is likely to give users the most information. */ truncated = name + namelen - MAXLEN; truncated[0] = truncated[1] = truncated[2] = '.'; @@ -1165,8 +1165,8 @@ verify_cb(int ok, X509_STORE_CTX *ctx) /* * Get the Subject and Issuer for logging, but don't let maliciously - * huge certs flood the logs, and don't reflect non-ASCII bytes into it - * either. + * huge certs flood the logs, and don't reflect non-ASCII bytes into + * it either. */ subject = X509_NAME_to_cstring(X509_get_subject_name(cert)); sub_prepared = prepare_cert_name(subject); diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index d786a01835..1ef113649f 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -2693,8 +2693,9 @@ load_hba(void) if (!ok) { /* - * File contained one or more errors, so bail out. MemoryContextDelete - * is enough to clean up everything, including regexes. + * File contained one or more errors, so bail out. + * MemoryContextDelete is enough to clean up everything, including + * regexes. */ MemoryContextDelete(hbacxt); return false; @@ -3056,8 +3057,9 @@ load_ident(void) if (!ok) { /* - * File contained one or more errors, so bail out. MemoryContextDelete - * is enough to clean up everything, including regexes. + * File contained one or more errors, so bail out. + * MemoryContextDelete is enough to clean up everything, including + * regexes. 
*/ MemoryContextDelete(ident_context); return false; diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 0a2562c149..320c9db432 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -2011,7 +2011,7 @@ cost_incremental_sort(Path *path, { PathKey *key = (PathKey *) lfirst(l); EquivalenceMember *member = (EquivalenceMember *) - linitial(key->pk_eclass->ec_members); + linitial(key->pk_eclass->ec_members); /* * Check if the expression contains Var with "varno 0" so that we diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 1812db7f2f..9148fe3054 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -1738,7 +1738,7 @@ set_customscan_references(PlannerInfo *root, static int register_partpruneinfo(PlannerInfo *root, int part_prune_index) { - PlannerGlobal *glob = root->glob; + PlannerGlobal *glob = root->glob; PartitionPruneInfo *pruneinfo; Assert(part_prune_index >= 0 && diff --git a/src/backend/optimizer/util/appendinfo.c b/src/backend/optimizer/util/appendinfo.c index c1b1557570..f456b3b0a4 100644 --- a/src/backend/optimizer/util/appendinfo.c +++ b/src/backend/optimizer/util/appendinfo.c @@ -370,7 +370,7 @@ adjust_appendrel_attrs_mutator(Node *node, if (leaf_relid) { RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *) - list_nth(context->root->row_identity_vars, var->varattno - 1); + list_nth(context->root->row_identity_vars, var->varattno - 1); if (bms_is_member(leaf_relid, ridinfo->rowidrels)) { diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 68fd033595..a1026139d5 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -1133,7 +1133,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, { /* UPDATE/DELETE/MERGE row identity vars are always needed */ RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *) - list_nth(root->row_identity_vars, var->varattno - 1); + list_nth(root->row_identity_vars, var->varattno - 1); /* Update reltarget width estimate from RowIdentityVarInfo */ joinrel->reltarget->width += ridinfo->rowidwidth; diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index 64356436ef..a2f4d66ddb 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -3297,7 +3297,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format, if (format->format_type == JS_FORMAT_JSON) { JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ? - format->encoding : JS_ENC_UTF8; + format->encoding : JS_ENC_UTF8; if (targettype != BYTEAOID && format->encoding != JS_ENC_DEFAULT) diff --git a/src/backend/parser/parse_merge.c b/src/backend/parser/parse_merge.c index d8866373b8..91b1156d99 100644 --- a/src/backend/parser/parse_merge.c +++ b/src/backend/parser/parse_merge.c @@ -165,8 +165,8 @@ transformMergeStmt(ParseState *pstate, MergeStmt *stmt) /* * Set up the MERGE target table. The target table is added to the - * namespace below and to joinlist in transform_MERGE_to_join, so don't - * do it here. + * namespace below and to joinlist in transform_MERGE_to_join, so don't do + * it here. 
*/ qry->resultRelation = setTargetTable(pstate, stmt->relation, stmt->relation->inh, diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index b0f6fe4fa6..c8d9728362 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -995,7 +995,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) { aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TYPE, RelationGetRelationName(relation)); @@ -2357,7 +2357,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) * mentioned above. */ Datum attoptions = - get_attoptions(RelationGetRelid(index_rel), i + 1); + get_attoptions(RelationGetRelid(index_rel), i + 1); defopclass = GetDefaultOpClass(attform->atttypid, index_rel->rd_rel->relam); diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c index cf1156b842..6158938d05 100644 --- a/src/backend/partitioning/partbounds.c +++ b/src/backend/partitioning/partbounds.c @@ -3193,7 +3193,7 @@ check_new_partition_bound(char *relname, Relation parent, * datums list. */ PartitionRangeDatum *datum = - list_nth(spec->upperdatums, abs(cmpval) - 1); + list_nth(spec->upperdatums, abs(cmpval) - 1); /* * The new partition overlaps with the diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c index 509587636e..6f9c2765d6 100644 --- a/src/backend/postmaster/fork_process.c +++ b/src/backend/postmaster/fork_process.c @@ -58,8 +58,8 @@ fork_process(void) /* * We start postmaster children with signals blocked. This allows them to * install their own handlers before unblocking, to avoid races where they - * might run the postmaster's handler and miss an important control signal. - * With more analysis this could potentially be relaxed. + * might run the postmaster's handler and miss an important control + * signal. With more analysis this could potentially be relaxed. */ sigprocmask(SIG_SETMASK, &BlockSig, &save_mask); result = fork(); diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c index 38c09b1123..9087ef95af 100644 --- a/src/backend/regex/regc_lex.c +++ b/src/backend/regex/regc_lex.c @@ -759,6 +759,7 @@ lexescape(struct vars *v) RETV(PLAIN, c); break; default: + /* * Throw an error for unrecognized ASCII alpha escape sequences, * which reserves them for future use if needed. 
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 052505e46f..dc9c5c82d9 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -259,7 +259,7 @@ libpqrcv_check_conninfo(const char *conninfo, bool must_use_password) if (must_use_password) { - bool uses_password = false; + bool uses_password = false; for (opt = opts; opt->keyword != NULL; ++opt) { diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index beef399b42..d91055a440 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -155,7 +155,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_PARAMETER_CHANGE: { xl_parameter_change *xlrec = - (xl_parameter_change *) XLogRecGetData(buf->record); + (xl_parameter_change *) XLogRecGetData(buf->record); /* * If wal_level on the primary is reduced to less than @@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) * invalidated when this WAL record is replayed; and further, * slot creation fails when wal_level is not sufficient; but * all these operations are not synchronized, so a logical - * slot may creep in while the wal_level is being - * reduced. Hence this extra check. + * slot may creep in while the wal_level is being reduced. + * Hence this extra check. */ if (xlrec->wal_level < WAL_LEVEL_LOGICAL) { @@ -752,7 +752,7 @@ DecodePrepare(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, SnapBuild *builder = ctx->snapshot_builder; XLogRecPtr origin_lsn = parsed->origin_lsn; TimestampTz prepare_time = parsed->xact_time; - RepOriginId origin_id = XLogRecGetOrigin(buf->record); + RepOriginId origin_id = XLogRecGetOrigin(buf->record); int i; TransactionId xid = parsed->twophase_xid; @@ -828,7 +828,7 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, int i; XLogRecPtr origin_lsn = InvalidXLogRecPtr; TimestampTz abort_time = parsed->xact_time; - RepOriginId origin_id = XLogRecGetOrigin(buf->record); + RepOriginId origin_id = XLogRecGetOrigin(buf->record); bool skip_xact; if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN) diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 7e1f677f7a..41243d0187 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin, MemoryContext old_context; /* - * On a standby, this check is also required while creating the - * slot. Check the comments in the function. + * On a standby, this check is also required while creating the slot. + * Check the comments in the function. 
*/ CheckLogicalDecodingRequirements(); diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index 2c04c8707d..b0255ffd25 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -833,7 +833,7 @@ replorigin_redo(XLogReaderState *record) case XLOG_REPLORIGIN_SET: { xl_replorigin_set *xlrec = - (xl_replorigin_set *) XLogRecGetData(record); + (xl_replorigin_set *) XLogRecGetData(record); replorigin_advance(xlrec->node_id, xlrec->remote_lsn, record->EndRecPtr, diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 9f44974473..828613d325 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -1408,7 +1408,7 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state) { dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node); ReorderBufferChange *next_change = - dlist_container(ReorderBufferChange, node, next); + dlist_container(ReorderBufferChange, node, next); /* txn stays the same */ state->entries[off].lsn = next_change->lsn; @@ -1439,8 +1439,8 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state) { /* successfully restored changes from disk */ ReorderBufferChange *next_change = - dlist_head_element(ReorderBufferChange, node, - &entry->txn->changes); + dlist_head_element(ReorderBufferChange, node, + &entry->txn->changes); elog(DEBUG2, "restored %u/%u changes from disk", (uint32) entry->txn->nentries_mem, @@ -1582,7 +1582,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) dclist_delete_from(&rb->catchange_txns, &txn->catchange_node); /* now remove reference from buffer */ - hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found); + hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found); Assert(found); /* remove entries spilled to disk */ @@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb) ReorderBufferTXN *txn; /* - * Bail out if logical_replication_mode is buffered and we haven't exceeded - * the memory limit. + * Bail out if logical_replication_mode is buffered and we haven't + * exceeded the memory limit. 
*/ if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED && rb->size < logical_decoding_work_mem * 1024L) @@ -3841,7 +3841,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, { char *data; Size inval_size = sizeof(SharedInvalidationMessage) * - change->data.inval.ninvalidations; + change->data.inval.ninvalidations; sz += inval_size; @@ -4206,7 +4206,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, dlist_foreach_modify(cleanup_iter, &txn->changes) { ReorderBufferChange *cleanup = - dlist_container(ReorderBufferChange, node, cleanup_iter.cur); + dlist_container(ReorderBufferChange, node, cleanup_iter.cur); dlist_delete(&cleanup->node); ReorderBufferReturnChange(rb, cleanup, true); @@ -4431,7 +4431,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, case REORDER_BUFFER_CHANGE_INVALIDATION: { Size inval_size = sizeof(SharedInvalidationMessage) * - change->data.inval.ninvalidations; + change->data.inval.ninvalidations; change->data.inval.invalidations = MemoryContextAlloc(rb->context, inval_size); @@ -4936,7 +4936,7 @@ ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn) dlist_foreach_modify(it, &ent->chunks) { ReorderBufferChange *change = - dlist_container(ReorderBufferChange, node, it.cur); + dlist_container(ReorderBufferChange, node, it.cur); dlist_delete(&change->node); ReorderBufferReturnChange(rb, change, true); diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 62542827e4..0786bb0ab7 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -574,7 +574,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder) Assert(builder->building_full_snapshot); /* don't allow older snapshots */ - InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */ + InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */ if (HaveRegisteredOrActiveSnapshot()) elog(ERROR, "cannot build an initial slot snapshot when snapshots exist"); Assert(!HistoricSnapshotActive()); @@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn */ /* - * xl_running_xacts record is older than what we can use, we might not have - * all necessary catalog rows anymore. + * xl_running_xacts record is older than what we can use, we might not + * have all necessary catalog rows anymore. */ if (TransactionIdIsNormal(builder->initial_xmin_horizon) && NormalTransactionIdPrecedes(running->oldestRunningXid, diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 0c71ae9ba7..c56d42dcd2 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -563,7 +563,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) * the lock. 
*/ int nsyncworkers = - logicalrep_sync_worker_count(MyLogicalRepWorker->subid); + logicalrep_sync_worker_count(MyLogicalRepWorker->subid); /* Now safe to release the LWLock */ LWLockRelease(LogicalRepWorkerLock); diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 37bb884127..b655c24d0b 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -2396,7 +2396,7 @@ apply_handle_insert(StringInfo s) LogicalRepRelMapEntry *rel; LogicalRepTupleData newtup; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; TupleTableSlot *remoteslot; @@ -2544,7 +2544,7 @@ apply_handle_update(StringInfo s) { LogicalRepRelMapEntry *rel; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; LogicalRepTupleData oldtup; @@ -2729,7 +2729,7 @@ apply_handle_delete(StringInfo s) LogicalRepRelMapEntry *rel; LogicalRepTupleData oldtup; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; TupleTableSlot *remoteslot; @@ -3076,8 +3076,8 @@ apply_handle_tuple_routing(ApplyExecutionData *edata, if (map) { TupleConversionMap *PartitionToRootMap = - convert_tuples_by_name(RelationGetDescr(partrel), - RelationGetDescr(parentrel)); + convert_tuples_by_name(RelationGetDescr(partrel), + RelationGetDescr(parentrel)); remoteslot = execute_attr_map_slot(PartitionToRootMap->attrMap, @@ -3411,7 +3411,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush, dlist_foreach_modify(iter, &lsn_mapping) { FlushPosition *pos = - dlist_container(FlushPosition, node, iter.cur); + dlist_container(FlushPosition, node, iter.cur); *write = pos->remote_end; @@ -4695,11 +4695,11 @@ ApplyWorkerMain(Datum main_arg) ereport(DEBUG1, (errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s", - MySubscription->name, - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" : - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" : - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" : - "?"))); + MySubscription->name, + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" : + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" : + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" : + "?"))); } else { @@ -5073,10 +5073,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo) } /* - * If we are processing this transaction using a parallel apply worker then - * either we send the changes to the parallel worker or if the worker is busy - * then serialize the changes to the file which will later be processed by - * the parallel worker. + * If we are processing this transaction using a parallel apply worker + * then either we send the changes to the parallel worker or if the worker + * is busy then serialize the changes to the file which will later be + * processed by the parallel worker. */ *winfo = pa_find_worker(xid); @@ -5090,9 +5090,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo) } /* - * If there is no parallel worker involved to process this transaction then - * we either directly apply the change or serialize it to a file which will - * later be applied when the transaction finish message is processed. 
+ * If there is no parallel worker involved to process this transaction + * then we either directly apply the change or serialize it to a file + * which will later be applied when the transaction finish message is + * processed. */ else if (in_streamed_transaction) { diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index f88389de84..b08ca55041 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications, * are multiple lists (one for each operation) to which row filters will * be appended. * - * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row - * filter expression" so it takes precedence. + * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter + * expression" so it takes precedence. */ foreach(lc, publications) { diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index 889e20b5dd..a8a2f8f1b9 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -330,7 +330,7 @@ static void SyncRepQueueInsert(int mode) { dlist_head *queue; - dlist_iter iter; + dlist_iter iter; Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE); queue = &WalSndCtl->SyncRepQueue[mode]; @@ -879,7 +879,7 @@ SyncRepWakeQueue(bool all, int mode) dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode]) { - PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur); + PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur); /* * Assume the queue is ordered by LSN diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index 980dc1816f..0e4f76efa8 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -3548,7 +3548,7 @@ rewriteTargetView(Query *parsetree, Relation view) if (parsetree->withCheckOptions != NIL) { WithCheckOption *parent_wco = - (WithCheckOption *) linitial(parsetree->withCheckOptions); + (WithCheckOption *) linitial(parsetree->withCheckOptions); if (parent_wco->cascaded) { diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c index 569c1c9467..5c3fe4eda2 100644 --- a/src/backend/rewrite/rowsecurity.c +++ b/src/backend/rewrite/rowsecurity.c @@ -581,7 +581,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_restrictive) { List *hook_policies = - (*row_security_policy_hook_restrictive) (cmd, relation); + (*row_security_policy_hook_restrictive) (cmd, relation); /* * As with built-in restrictive policies, we sort any hook-provided @@ -603,7 +603,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_permissive) { List *hook_policies = - (*row_security_policy_hook_permissive) (cmd, relation); + (*row_security_policy_hook_permissive) (cmd, relation); foreach(item, hook_policies) { diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index 54e3bb4aa2..28b52d8aa1 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -2237,8 +2237,8 @@ compute_expr_stats(Relation onerel, double totalrows, if (tcnt > 0) { AttributeOpts *aopt = - get_attribute_options(stats->attr->attrelid, - stats->attr->attnum); + get_attribute_options(stats->attr->attrelid, + stats->attr->attnum); stats->exprvals = exprvals; stats->exprnulls = exprnulls; diff --git 
a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 1fa689052e..e7a63e295b 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -2667,7 +2667,7 @@ BufferSync(int flags) { BufferDesc *bufHdr = NULL; CkptTsStatus *ts_stat = (CkptTsStatus *) - DatumGetPointer(binaryheap_first(ts_heap)); + DatumGetPointer(binaryheap_first(ts_heap)); buf_id = CkptBufferIds[ts_stat->index].buf_id; Assert(buf_id != -1); diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c index 84ead85942..41ab64100e 100644 --- a/src/backend/storage/file/buffile.c +++ b/src/backend/storage/file/buffile.c @@ -98,8 +98,7 @@ struct BufFile /* * XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid - * wasting per-file alignment padding when some users create many - * files. + * wasting per-file alignment padding when some users create many files. */ PGAlignedBlock buffer; }; diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c index f0965c3481..6399fa2ad5 100644 --- a/src/backend/storage/ipc/dsm_impl.c +++ b/src/backend/storage/ipc/dsm_impl.c @@ -357,14 +357,15 @@ dsm_impl_posix_resize(int fd, off_t size) /* * Block all blockable signals, except SIGQUIT. posix_fallocate() can run * for quite a long time, and is an all-or-nothing operation. If we - * allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery - * conflicts), the retry loop might never succeed. + * allowed SIGUSR1 to interrupt us repeatedly (for example, due to + * recovery conflicts), the retry loop might never succeed. */ if (IsUnderPostmaster) sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask); pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE); #if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__) + /* * On Linux, a shm_open fd is backed by a tmpfs file. If we were to use * ftruncate, the file would contain a hole. Accessing memory backed by a @@ -374,8 +375,8 @@ dsm_impl_posix_resize(int fd, off_t size) * SIGBUS later. * * We still use a traditional EINTR retry loop to handle SIGCONT. - * posix_fallocate() doesn't restart automatically, and we don't want - * this to fail if you attach a debugger. + * posix_fallocate() doesn't restart automatically, and we don't want this + * to fail if you attach a debugger. */ do { @@ -383,9 +384,9 @@ dsm_impl_posix_resize(int fd, off_t size) } while (rc == EINTR); /* - * The caller expects errno to be set, but posix_fallocate() doesn't - * set it. Instead it returns error numbers directly. So set errno, - * even though we'll also return rc to indicate success or failure. + * The caller expects errno to be set, but posix_fallocate() doesn't set + * it. Instead it returns error numbers directly. So set errno, even + * though we'll also return rc to indicate success or failure. 
*/ errno = rc; #else diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 42595b38b2..193f50fc0f 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -3936,6 +3936,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data) dclist_foreach(proc_iter, waitQueue) { PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur); + if (queued_proc == blocked_proc) break; data->waiter_pids[data->npids++] = queued_proc->pid; diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index d2ec396045..4fb4280f05 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -1118,9 +1118,9 @@ LWLockDequeueSelf(LWLock *lock) LWLockWaitListLock(lock); /* - * Remove ourselves from the waitlist, unless we've already been - * removed. The removal happens with the wait list lock held, so there's - * no race in this check. + * Remove ourselves from the waitlist, unless we've already been removed. + * The removal happens with the wait list lock held, so there's no race in + * this check. */ on_waitlist = MyProc->lwWaiting == LW_WS_WAITING; if (on_waitlist) diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index 203b189559..533f616541 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -625,7 +625,7 @@ RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer) dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (conflict->sxactIn == writer) return true; @@ -708,7 +708,7 @@ FlagSxactUnsafe(SERIALIZABLEXACT *sxact) dlist_foreach_modify(iter, &sxact->possibleUnsafeConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); Assert(!SxactIsReadOnly(conflict->sxactOut)); Assert(sxact == conflict->sxactIn); @@ -1587,7 +1587,7 @@ GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size) dlist_foreach(iter, &blocking_sxact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); output[num_written++] = possibleUnsafeConflict->sxactOut->pid; @@ -1825,8 +1825,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot, /* * If we didn't find any possibly unsafe conflicts because every * uncommitted writable transaction turned out to be doomed, then we - * can "opt out" immediately. See comments above the earlier check for - * PredXact->WritableSxactCount == 0. + * can "opt out" immediately. See comments above the earlier check + * for PredXact->WritableSxactCount == 0. 
*/ if (dlist_is_empty(&sxact->possibleUnsafeConflicts)) { @@ -2613,7 +2613,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash) dlist_foreach_modify(iter, &target->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); bool found; dlist_delete(&(predlock->xactLink)); @@ -2754,7 +2754,7 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag, dlist_foreach_modify(iter, &oldtarget->predicateLocks) { PREDICATELOCK *oldpredlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); PREDICATELOCK *newpredlock; SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo; @@ -2976,7 +2976,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer) dlist_foreach_modify(iter, &oldtarget->predicateLocks) { PREDICATELOCK *oldpredlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); PREDICATELOCK *newpredlock; SerCommitSeqNo oldCommitSeqNo; SERIALIZABLEXACT *oldXact; @@ -3194,7 +3194,7 @@ SetNewSxactGlobalXmin(void) dlist_foreach(iter, &PredXact->activeList) { SERIALIZABLEXACT *sxact = - dlist_container(SERIALIZABLEXACT, xactLink, iter.cur); + dlist_container(SERIALIZABLEXACT, xactLink, iter.cur); if (!SxactIsRolledBack(sxact) && !SxactIsCommitted(sxact) @@ -3440,7 +3440,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut)); Assert(MySerializableXact == possibleUnsafeConflict->sxactIn); @@ -3471,7 +3471,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (isCommit && !SxactIsReadOnly(MySerializableXact) @@ -3496,7 +3496,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->inConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); if (!isCommit || SxactIsCommitted(conflict->sxactOut) @@ -3515,7 +3515,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); roXact = possibleUnsafeConflict->sxactIn; Assert(MySerializableXact == possibleUnsafeConflict->sxactOut); @@ -3564,8 +3564,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) * xmin and purge any transactions which finished before this transaction * was launched. * - * For parallel queries in read-only transactions, it might run twice. - * We only release the reference on the first call. + * For parallel queries in read-only transactions, it might run twice. We + * only release the reference on the first call. 
*/ needToClear = false; if ((partiallyReleasing || @@ -3641,7 +3641,7 @@ ClearOldPredicateLocks(void) dlist_foreach_modify(iter, FinishedSerializableTransactions) { SERIALIZABLEXACT *finishedSxact = - dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur); + dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur); if (!TransactionIdIsValid(PredXact->SxactGlobalXmin) || TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore, @@ -3700,7 +3700,7 @@ ClearOldPredicateLocks(void) dlist_foreach_modify(iter, &OldCommittedSxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); bool canDoPartialCleanup; LWLockAcquire(SerializableXactHashLock, LW_SHARED); @@ -3787,7 +3787,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); PREDICATELOCKTAG tag; PREDICATELOCKTARGET *target; PREDICATELOCKTARGETTAG targettag; @@ -3864,7 +3864,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (summarize) conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN; @@ -3876,7 +3876,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->inConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); if (summarize) conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT; @@ -4134,7 +4134,7 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag) dlist_foreach_modify(iter, &target->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); SERIALIZABLEXACT *sxact = predlock->tag.myXact; if (sxact == MySerializableXact) @@ -4407,7 +4407,7 @@ CheckTableForSerializableConflictIn(Relation relation) dlist_foreach_modify(iter, &target->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); if (predlock->tag.myXact != MySerializableXact && !RWConflictExists(predlock->tag.myXact, MySerializableXact)) @@ -4519,7 +4519,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, dlist_foreach(iter, &writer->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); SERIALIZABLEXACT *t2 = conflict->sxactIn; if (SxactIsPrepared(t2) @@ -4566,7 +4566,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->inConflicts) { const RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); const SERIALIZABLEXACT *t0 = conflict->sxactOut; if (!SxactIsDoomed(t0) @@ -4664,7 +4664,7 @@ PreCommit_CheckForSerializationFailure(void) dlist_foreach(near_iter, &MySerializableXact->inConflicts) { RWConflict nearConflict = - dlist_container(RWConflictData, inLink, near_iter.cur); + dlist_container(RWConflictData, inLink, near_iter.cur); if 
(!SxactIsCommitted(nearConflict->sxactOut) && !SxactIsDoomed(nearConflict->sxactOut)) @@ -4674,7 +4674,7 @@ PreCommit_CheckForSerializationFailure(void) dlist_foreach(far_iter, &nearConflict->sxactOut->inConflicts) { RWConflict farConflict = - dlist_container(RWConflictData, inLink, far_iter.cur); + dlist_container(RWConflictData, inLink, far_iter.cur); if (farConflict->sxactOut == MySerializableXact || (!SxactIsCommitted(farConflict->sxactOut) @@ -4770,7 +4770,7 @@ AtPrepare_PredicateLocks(void) dlist_foreach(iter, &sxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); record.type = TWOPHASEPREDICATERECORD_LOCK; lockRecord->target = predlock->tag.myTarget->tag; diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 22b4278610..dac921219f 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -101,7 +101,7 @@ ProcGlobalShmemSize(void) { Size size = 0; Size TotalProcs = - add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts)); + add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts)); /* ProcGlobal */ size = add_size(size, sizeof(PROC_HDR)); @@ -331,7 +331,7 @@ InitProcess(void) if (!dlist_is_empty(procgloballist)) { - MyProc = (PGPROC*) dlist_pop_head_node(procgloballist); + MyProc = (PGPROC *) dlist_pop_head_node(procgloballist); SpinLockRelease(ProcStructLock); } else @@ -1009,7 +1009,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) uint32 hashcode = locallock->hashcode; LWLock *partitionLock = LockHashPartitionLock(hashcode); dclist_head *waitQueue = &lock->waitProcs; - PGPROC *insert_before = NULL; + PGPROC *insert_before = NULL; LOCKMASK myHeldLocks = MyProc->heldLocks; TimestampTz standbyWaitStart = 0; bool early_deadlock = false; @@ -1244,7 +1244,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) if (InHotStandby) { bool maybe_log_conflict = - (standbyWaitStart != 0 && !logged_recovery_conflict); + (standbyWaitStart != 0 && !logged_recovery_conflict); /* Set a timer and wait for that or for the lock to be granted */ ResolveRecoveryConflictWithLock(locallock->tag.lock, diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index e982a8dd7f..acd75b40d2 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -549,7 +549,7 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, while (remblocks > 0) { - BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE); + BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE); off_t seekpos = (off_t) BLCKSZ * segstartblock; int numblocks; @@ -597,9 +597,9 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, /* * Even if we don't want to use fallocate, we can still extend a * bit more efficiently than writing each 8kB block individually. - * pg_pwrite_zeros() (via FileZero()) uses - * pg_pwritev_with_retry() to avoid multiple writes or needing a - * zeroed buffer for the whole length of the extension. + * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry() + * to avoid multiple writes or needing a zeroed buffer for the + * whole length of the extension. 
*/ ret = FileZero(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * numblocks, diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c index fe4fd3a929..8a2cb55876 100644 --- a/src/backend/tsearch/spell.c +++ b/src/backend/tsearch/spell.c @@ -2256,7 +2256,7 @@ NormalizeSubWord(IspellDict *Conf, char *word, int flag) { /* prefix success */ char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ? - VoidString : prefix->aff[j]->flag; + VoidString : prefix->aff[j]->flag; if (FindWord(Conf, pnewword, ff, flag)) cur += addToResult(forms, cur, pnewword); diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c index b125802b21..2f6fccc567 100644 --- a/src/backend/utils/activity/pgstat.c +++ b/src/backend/utils/activity/pgstat.c @@ -1169,7 +1169,7 @@ pgstat_flush_pending_entries(bool nowait) while (cur) { PgStat_EntryRef *entry_ref = - dlist_container(PgStat_EntryRef, pending_node, cur); + dlist_container(PgStat_EntryRef, pending_node, cur); PgStat_HashKey key = entry_ref->shared_entry->key; PgStat_Kind kind = key.kind; const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind); diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c index 09fffd0e82..d1149adf70 100644 --- a/src/backend/utils/activity/pgstat_shmem.c +++ b/src/backend/utils/activity/pgstat_shmem.c @@ -865,7 +865,7 @@ pgstat_drop_entry(PgStat_Kind kind, Oid dboid, Oid objoid) if (pgStatEntryRefHash) { PgStat_EntryRefHashEntry *lohashent = - pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key); + pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key); if (lohashent) pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref, diff --git a/src/backend/utils/activity/pgstat_xact.c b/src/backend/utils/activity/pgstat_xact.c index 91cdd9222e..369239d501 100644 --- a/src/backend/utils/activity/pgstat_xact.c +++ b/src/backend/utils/activity/pgstat_xact.c @@ -76,7 +76,7 @@ AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit) dclist_foreach_modify(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); xl_xact_stats_item *it = &pending->item; if (isCommit && !pending->is_create) @@ -148,7 +148,7 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, dclist_foreach_modify(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); xl_xact_stats_item *it = &pending->item; dclist_delete_from(&xact_state->pending_drops, &pending->node); @@ -290,7 +290,7 @@ pgstat_get_transactional_drops(bool isCommit, xl_xact_stats_item **items) dclist_foreach(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); if (isCommit && pending->is_create) continue; @@ -335,7 +335,7 @@ create_drop_transactional_internal(PgStat_Kind kind, Oid dboid, Oid objoid, bool int nest_level = GetCurrentTransactionNestLevel(); PgStat_SubXactStatus *xact_state; PgStat_PendingDroppedStatsItem *drop = (PgStat_PendingDroppedStatsItem *) - MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem)); + MemoryContextAlloc(TopTransactionContext, 
sizeof(PgStat_PendingDroppedStatsItem)); xact_state = pgstat_get_xact_stack_level(nest_level); diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index be2e55bb29..5d8d583ddc 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -4482,17 +4482,17 @@ EncodeInterval(struct pg_itm *itm, int style, char *str) case INTSTYLE_SQL_STANDARD: { bool has_negative = year < 0 || mon < 0 || - mday < 0 || hour < 0 || - min < 0 || sec < 0 || fsec < 0; + mday < 0 || hour < 0 || + min < 0 || sec < 0 || fsec < 0; bool has_positive = year > 0 || mon > 0 || - mday > 0 || hour > 0 || - min > 0 || sec > 0 || fsec > 0; + mday > 0 || hour > 0 || + min > 0 || sec > 0 || fsec > 0; bool has_year_month = year != 0 || mon != 0; bool has_day_time = mday != 0 || hour != 0 || - min != 0 || sec != 0 || fsec != 0; + min != 0 || sec != 0 || fsec != 0; bool has_day = mday != 0; bool sql_standard_value = !(has_negative && has_positive) && - !(has_year_month && has_day_time); + !(has_year_month && has_day_time); /* * SQL Standard wants only 1 "<sign>" preceding the whole diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c index 9b51da2382..dfa90a04fb 100644 --- a/src/backend/utils/adt/float.c +++ b/src/backend/utils/adt/float.c @@ -189,8 +189,7 @@ float4in_internal(char *num, char **endptr_p, /* * endptr points to the first character _after_ the sequence we recognized * as a valid floating point number. orig_string points to the original - * input - * string. + * input string. */ /* skip leading whitespace */ diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 4c5abaff25..70cb922e6b 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -3219,9 +3219,9 @@ static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns) { RecordIOData *data = (RecordIOData *) - MemoryContextAlloc(mcxt, - offsetof(RecordIOData, columns) + - ncolumns * sizeof(ColumnIOData)); + MemoryContextAlloc(mcxt, + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); data->record_type = InvalidOid; data->record_typmod = 0; diff --git a/src/backend/utils/adt/jsonpath.c b/src/backend/utils/adt/jsonpath.c index 0021b01830..7891fde310 100644 --- a/src/backend/utils/adt/jsonpath.c +++ b/src/backend/utils/adt/jsonpath.c @@ -76,7 +76,7 @@ static Datum jsonPathFromCstring(char *in, int len, struct Node *escontext); static char *jsonPathToCstring(StringInfo out, JsonPath *in, int estimated_len); -static bool flattenJsonPathParseItem(StringInfo buf, int *result, +static bool flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, JsonPathParseItem *item, int nestingLevel, bool insideArraySubscript); @@ -234,7 +234,7 @@ jsonPathToCstring(StringInfo out, JsonPath *in, int estimated_len) * children into a binary representation. */ static bool -flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, +flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, JsonPathParseItem *item, int nestingLevel, bool insideArraySubscript) { @@ -306,19 +306,19 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (!item->value.args.left) chld = pos; - else if (! 
flattenJsonPathParseItem(buf, &chld, escontext, - item->value.args.left, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.args.left, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + left) = chld - pos; if (!item->value.args.right) chld = pos; - else if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.args.right, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.args.right, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + right) = chld - pos; } @@ -338,10 +338,10 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, item->value.like_regex.patternlen); appendStringInfoChar(buf, '\0'); - if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.like_regex.expr, - nestingLevel, - insideArraySubscript)) + if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.like_regex.expr, + nestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + offs) = chld - pos; } @@ -360,10 +360,10 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (!item->value.arg) chld = pos; - else if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.arg, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.arg, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + arg) = chld - pos; } @@ -405,17 +405,17 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, int32 topos; int32 frompos; - if (! flattenJsonPathParseItem(buf, &frompos, escontext, - item->value.array.elems[i].from, - nestingLevel, true)) + if (!flattenJsonPathParseItem(buf, &frompos, escontext, + item->value.array.elems[i].from, + nestingLevel, true)) return false; frompos -= pos; if (item->value.array.elems[i].to) { - if (! flattenJsonPathParseItem(buf, &topos, escontext, - item->value.array.elems[i].to, - nestingLevel, true)) + if (!flattenJsonPathParseItem(buf, &topos, escontext, + item->value.array.elems[i].to, + nestingLevel, true)) return false; topos -= pos; } @@ -451,9 +451,9 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (item->next) { - if (! 
flattenJsonPathParseItem(buf, &chld, escontext, - item->next, nestingLevel, - insideArraySubscript)) + if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->next, nestingLevel, + insideArraySubscript)) return false; chld -= pos; *(int32 *) (buf->data + next) = chld; diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c index b561f0e7e8..41430bab7e 100644 --- a/src/backend/utils/adt/jsonpath_exec.c +++ b/src/backend/utils/adt/jsonpath_exec.c @@ -1326,8 +1326,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp, */ JsonValueList vals = {0}; JsonPathExecResult res = - executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, - false, &vals); + executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, + false, &vals); if (jperIsError(res)) return jpbUnknown; @@ -1337,8 +1337,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp, else { JsonPathExecResult res = - executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, - false, NULL); + executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, + false, NULL); if (jperIsError(res)) return jpbUnknown; @@ -1869,7 +1869,7 @@ executeDateTimeMethod(JsonPathExecContext *cxt, JsonPathItem *jsp, if (!fmt_txt[i]) { MemoryContext oldcxt = - MemoryContextSwitchTo(TopMemoryContext); + MemoryContextSwitchTo(TopMemoryContext); fmt_txt[i] = cstring_to_text(fmt_str[i]); MemoryContextSwitchTo(oldcxt); diff --git a/src/backend/utils/adt/jsonpath_internal.h b/src/backend/utils/adt/jsonpath_internal.h index 2e12de038c..90eea6e961 100644 --- a/src/backend/utils/adt/jsonpath_internal.h +++ b/src/backend/utils/adt/jsonpath_internal.h @@ -20,7 +20,7 @@ typedef struct JsonPathString char *val; int len; int total; -} JsonPathString; +} JsonPathString; #include "utils/jsonpath.h" #include "jsonpath_gram.h" @@ -29,8 +29,8 @@ typedef struct JsonPathString JsonPathParseResult **result, \ struct Node *escontext) YY_DECL; -extern int jsonpath_yyparse(JsonPathParseResult **result, - struct Node *escontext); +extern int jsonpath_yyparse(JsonPathParseResult **result, + struct Node *escontext); extern void jsonpath_yyerror(JsonPathParseResult **result, struct Node *escontext, const char *message); diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index 51df570ce9..bdb285aa3b 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -1794,8 +1794,7 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2, else #endif result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p); - if (result == 2147483647) /* _NLSCMPERROR; missing from mingw - * headers */ + if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */ ereport(ERROR, (errmsg("could not compare Unicode strings: %m"))); @@ -1818,14 +1817,15 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2, static int pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale) { - int result; + int result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); #ifdef WIN32 if (GetDatabaseEncoding() == PG_UTF8) { - size_t len1 = strlen(arg1); - size_t len2 = strlen(arg2); + size_t len1 = strlen(arg1); + size_t len2 = strlen(arg2); + result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale); } else @@ -1854,13 +1854,13 @@ static int pg_strncoll_libc(const char *arg1, size_t len1, const char *arg2, size_t len2, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - size_t bufsize1 = len1 + 1; - size_t bufsize2 = len2 + 1; - char *arg1n; - char *arg2n; - 
int result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + size_t bufsize1 = len1 + 1; + size_t bufsize2 = len2 + 1; + char *arg1n; + char *arg2n; + int result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); @@ -1906,15 +1906,15 @@ static int pg_strncoll_icu_no_utf8(const char *arg1, int32_t len1, const char *arg2, int32_t len2, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - int32_t ulen1; - int32_t ulen2; - size_t bufsize1; - size_t bufsize2; - UChar *uchar1, - *uchar2; - int result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + int32_t ulen1; + int32_t ulen2; + size_t bufsize1; + size_t bufsize2; + UChar *uchar1, + *uchar2; + int result; Assert(locale->provider == COLLPROVIDER_ICU); #ifdef HAVE_UCOL_STRCOLLUTF8 @@ -1961,7 +1961,7 @@ static int pg_strncoll_icu(const char *arg1, int32_t len1, const char *arg2, int32_t len2, pg_locale_t locale) { - int result; + int result; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2042,7 +2042,7 @@ int pg_strncoll(const char *arg1, size_t len1, const char *arg2, size_t len2, pg_locale_t locale) { - int result; + int result; if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strncoll_libc(arg1, len1, arg2, len2, locale); @@ -2074,7 +2074,7 @@ pg_strxfrm_libc(char *dest, const char *src, size_t destsize, #else /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return 0; /* keep compiler quiet */ + return 0; /* keep compiler quiet */ #endif } @@ -2082,10 +2082,10 @@ static size_t pg_strnxfrm_libc(char *dest, const char *src, size_t srclen, size_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - size_t bufsize = srclen + 1; - size_t result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + size_t bufsize = srclen + 1; + size_t result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); @@ -2114,12 +2114,12 @@ static size_t pg_strnxfrm_icu(char *dest, const char *src, int32_t srclen, int32_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - UChar *uchar; - int32_t ulen; - size_t uchar_bsize; - Size result_bsize; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + UChar *uchar; + int32_t ulen; + size_t uchar_bsize; + Size result_bsize; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2161,15 +2161,15 @@ static size_t pg_strnxfrm_prefix_icu_no_utf8(char *dest, const char *src, int32_t srclen, int32_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - UCharIterator iter; - uint32_t state[2]; - UErrorCode status; - int32_t ulen = -1; - UChar *uchar = NULL; - size_t uchar_bsize; - Size result_bsize; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + UCharIterator iter; + uint32_t state[2]; + UErrorCode status; + int32_t ulen = -1; + UChar *uchar = NULL; + size_t uchar_bsize; + Size result_bsize; Assert(locale->provider == COLLPROVIDER_ICU); Assert(GetDatabaseEncoding() != PG_UTF8); @@ -2209,7 +2209,7 @@ static size_t pg_strnxfrm_prefix_icu(char *dest, const char *src, int32_t srclen, int32_t destsize, pg_locale_t locale) { - size_t result; + size_t result; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2271,7 +2271,7 @@ pg_strxfrm_enabled(pg_locale_t locale) /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } /* @@ -2291,7 +2291,7 @@ pg_strxfrm_enabled(pg_locale_t locale) size_t pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale) { - size_t result 
= 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strxfrm_libc(dest, src, destsize, locale); @@ -2328,7 +2328,7 @@ size_t pg_strnxfrm(char *dest, size_t destsize, const char *src, size_t srclen, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strnxfrm_libc(dest, src, srclen, destsize, locale); @@ -2358,7 +2358,7 @@ pg_strxfrm_prefix_enabled(pg_locale_t locale) /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } /* @@ -2378,7 +2378,7 @@ size_t pg_strxfrm_prefix(char *dest, const char *src, size_t destsize, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) elog(ERROR, "collprovider '%c' does not support pg_strxfrm_prefix()", @@ -2415,7 +2415,7 @@ size_t pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src, size_t srclen, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) elog(ERROR, "collprovider '%c' does not support pg_strnxfrm_prefix()", @@ -2491,7 +2491,7 @@ pg_ucol_open(const char *loc_str) collator = ucol_open(loc_str, &status); if (U_FAILURE(status)) ereport(ERROR, - /* use original string for error report */ + /* use original string for error report */ (errmsg("could not open collator for locale \"%s\": %s", orig_str, u_errorName(status)))); @@ -2554,6 +2554,7 @@ uchar_length(UConverter *converter, const char *str, int32_t len) { UErrorCode status = U_ZERO_ERROR; int32_t ulen; + ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status); if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR) ereport(ERROR, @@ -2571,6 +2572,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen, { UErrorCode status = U_ZERO_ERROR; int32_t ulen; + status = U_ZERO_ERROR; ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status); if (U_FAILURE(status)) @@ -2594,7 +2596,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen, int32_t icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes) { - int32_t len_uchar; + int32_t len_uchar; init_icu_converter(); @@ -2781,11 +2783,11 @@ char * icu_language_tag(const char *loc_str, int elevel) { #ifdef USE_ICU - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - char *langtag; - size_t buflen = 32; /* arbitrary starting buffer size */ - const bool strict = true; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + char *langtag; + size_t buflen = 32; /* arbitrary starting buffer size */ + const bool strict = true; status = U_ZERO_ERROR; uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status); @@ -2803,8 +2805,8 @@ icu_language_tag(const char *loc_str, int elevel) return pstrdup("en-US-u-va-posix"); /* - * A BCP47 language tag doesn't have a clearly-defined upper limit - * (cf. RFC5646 section 4.4). Additionally, in older ICU versions, + * A BCP47 language tag doesn't have a clearly-defined upper limit (cf. + * RFC5646 section 4.4). Additionally, in older ICU versions, * uloc_toLanguageTag() doesn't always return the ultimate length on the * first call, necessitating a loop. 
*/ @@ -2850,7 +2852,7 @@ icu_language_tag(const char *loc_str, int elevel) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ICU is not supported in this build"))); - return NULL; /* keep compiler quiet */ + return NULL; /* keep compiler quiet */ #endif /* not USE_ICU */ } @@ -2861,11 +2863,11 @@ void icu_validate_locale(const char *loc_str) { #ifdef USE_ICU - UCollator *collator; - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - bool found = false; - int elevel = icu_validation_level; + UCollator *collator; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + bool found = false; + int elevel = icu_validation_level; /* no validation */ if (elevel < 0) @@ -2896,8 +2898,8 @@ icu_validate_locale(const char *loc_str) /* search for matching language within ICU */ for (int32_t i = 0; !found && i < uloc_countAvailable(); i++) { - const char *otherloc = uloc_getAvailable(i); - char otherlang[ULOC_LANG_CAPACITY]; + const char *otherloc = uloc_getAvailable(i); + char otherlang[ULOC_LANG_CAPACITY]; status = U_ZERO_ERROR; uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status); diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 461735e84f..bcedfcfad2 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -12445,7 +12445,7 @@ get_range_partbound_string(List *bound_datums) foreach(cell, bound_datums) { PartitionRangeDatum *datum = - lfirst_node(PartitionRangeDatum, cell); + lfirst_node(PartitionRangeDatum, cell); appendStringInfoString(buf, sep); if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE) diff --git a/src/backend/utils/adt/tsquery_op.c b/src/backend/utils/adt/tsquery_op.c index 7e3bd51c1f..2bc4ec904f 100644 --- a/src/backend/utils/adt/tsquery_op.c +++ b/src/backend/utils/adt/tsquery_op.c @@ -150,9 +150,9 @@ Datum tsquery_phrase(PG_FUNCTION_ARGS) { PG_RETURN_DATUM(DirectFunctionCall3(tsquery_phrase_distance, - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1), - Int32GetDatum(1))); + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1), + Int32GetDatum(1))); } Datum diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c index a38db4697d..4457c5d4f9 100644 --- a/src/backend/utils/adt/tsvector_op.c +++ b/src/backend/utils/adt/tsvector_op.c @@ -525,7 +525,7 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete, if (arrin[i].haspos) { int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos) - + sizeof(uint16); + + sizeof(uint16); curoff = SHORTALIGN(curoff); memcpy(dataout + curoff, diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 592afc18ec..b92ff4d266 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -1021,7 +1021,8 @@ hashbpchar(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale); @@ -1033,8 +1034,8 @@ hashbpchar(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. 
*/ result = hash_any((uint8_t *) buf, bsize + 1); @@ -1076,7 +1077,8 @@ hashbpcharextended(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale); @@ -1088,8 +1090,8 @@ hashbpcharextended(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. */ result = hash_any_extended((uint8_t *) buf, bsize + 1, PG_GETARG_INT64(1)); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index b571876468..884bfbc8ce 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -2312,8 +2312,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) memcpy(sss->buf1, authoritative_data, len); /* - * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated - * strings. + * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings. */ sss->buf1[len] = '\0'; sss->last_len1 = len; @@ -4523,7 +4522,7 @@ text_to_array(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID)); PG_RETURN_DATUM(makeArrayResult(tstate.astate, - CurrentMemoryContext)); + CurrentMemoryContext)); } /* diff --git a/src/backend/utils/adt/xid8funcs.c b/src/backend/utils/adt/xid8funcs.c index 24271dfff7..06ae940df6 100644 --- a/src/backend/utils/adt/xid8funcs.c +++ b/src/backend/utils/adt/xid8funcs.c @@ -519,7 +519,7 @@ pg_snapshot_recv(PG_FUNCTION_ARGS) for (i = 0; i < nxip; i++) { FullTransactionId cur = - FullTransactionIdFromU64((uint64) pq_getmsgint64(buf)); + FullTransactionIdFromU64((uint64) pq_getmsgint64(buf)); if (FullTransactionIdPrecedes(cur, last) || FullTransactionIdPrecedes(cur, xmin) || diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 15adbd6a01..866d0d649a 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -630,7 +630,7 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent) XmlOptionType parsed_xmloptiontype; xmlNodePtr content_nodes; volatile xmlBufferPtr buf = NULL; - volatile xmlSaveCtxtPtr ctxt = NULL; + volatile xmlSaveCtxtPtr ctxt = NULL; ErrorSaveContext escontext = {T_ErrorSaveContext}; PgXmlErrorContext *xmlerrcxt; #endif diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index c7607895cd..60978f9415 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -3603,7 +3603,7 @@ char * get_publication_name(Oid pubid, bool missing_ok) { HeapTuple tup; - char *pubname; + char *pubname; Form_pg_publication pubform; tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid)); @@ -3630,16 +3630,16 @@ get_publication_name(Oid pubid, bool missing_ok) * return InvalidOid. 
*/ Oid -get_subscription_oid(const char* subname, bool missing_ok) +get_subscription_oid(const char *subname, bool missing_ok) { Oid oid; oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid, - MyDatabaseId, CStringGetDatum(subname)); + MyDatabaseId, CStringGetDatum(subname)); if (!OidIsValid(oid) && !missing_ok) ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("subscription \"%s\" does not exist", subname))); + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("subscription \"%s\" does not exist", subname))); return oid; } @@ -3653,7 +3653,7 @@ char * get_subscription_name(Oid subid, bool missing_ok) { HeapTuple tup; - char* subname; + char *subname; Form_pg_subscription subform; tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid)); diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 40140de958..8a08463c2b 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -3084,10 +3084,10 @@ static void AssertPendingSyncConsistency(Relation relation) { bool relcache_verdict = - RelationIsPermanent(relation) && - ((relation->rd_createSubid != InvalidSubTransactionId && - RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) || - relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId); + RelationIsPermanent(relation) && + ((relation->rd_createSubid != InvalidSubTransactionId && + RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) || + relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId); Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator)); @@ -3765,12 +3765,12 @@ RelationSetNewRelfilenumber(Relation relation, char persistence) */ if (IsBinaryUpgrade) { - SMgrRelation srel; + SMgrRelation srel; /* * During a binary upgrade, we use this code path to ensure that - * pg_largeobject and its index have the same relfilenumbers as in - * the old cluster. This is necessary because pg_upgrade treats + * pg_largeobject and its index have the same relfilenumbers as in the + * old cluster. This is necessary because pg_upgrade treats * pg_largeobject like a user table, not a system table. It is however * possible that a table or index may need to end up with the same * relfilenumber in the new cluster as what it had in the old cluster. @@ -5171,8 +5171,8 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind) Bitmapset *uindexattrs; /* columns in unique indexes */ Bitmapset *pkindexattrs; /* columns in the primary index */ Bitmapset *idindexattrs; /* columns in the replica identity */ - Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */ - Bitmapset *summarizedattrs; /* columns with summarizing indexes */ + Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */ + Bitmapset *summarizedattrs; /* columns with summarizing indexes */ List *indexoidlist; List *newindexoidlist; Oid relpkindex; @@ -5314,8 +5314,8 @@ restart: * when the column value changes, thus require a separate * attribute bitmapset. * - * Obviously, non-key columns couldn't be referenced by - * foreign key or identity key. Hence we do not include them into + * Obviously, non-key columns couldn't be referenced by foreign + * key or identity key. Hence we do not include them into * uindexattrs, pkindexattrs and idindexattrs bitmaps. 
*/ if (attrnum != 0) diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index 4c21129707..26575cae6c 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -801,11 +801,11 @@ read_relmap_file(RelMapFile *map, char *dbpath, bool lock_held, int elevel) /* * Open the target file. * - * Because Windows isn't happy about the idea of renaming over a file - * that someone has open, we only open this file after acquiring the lock, - * and for the same reason, we close it before releasing the lock. That - * way, by the time write_relmap_file() acquires an exclusive lock, no - * one else will have it open. + * Because Windows isn't happy about the idea of renaming over a file that + * someone has open, we only open this file after acquiring the lock, and + * for the same reason, we close it before releasing the lock. That way, + * by the time write_relmap_file() acquires an exclusive lock, no one else + * will have it open. */ snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath, RELMAPPER_FILENAME); diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index f72dd25efa..b4b01dc5e0 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -2151,7 +2151,7 @@ CheckFunctionValidatorAccess(Oid validatorOid, Oid functionOid) /* first validate that we have permissions to use the language */ aclresult = object_aclcheck(LanguageRelationId, procStruct->prolang, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_LANGUAGE, NameStr(langStruct->lanname)); diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 53420f4974..88434c3e5d 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -362,7 +362,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect */ if (!am_superuser && object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CONNECT) != ACLCHECK_OK) + ACL_CONNECT) != ACLCHECK_OK) ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied for database \"%s\"", name), @@ -933,10 +933,10 @@ InitPostgres(const char *in_dbname, Oid dboid, } /* - * The last few connection slots are reserved for superusers and roles with - * privileges of pg_use_reserved_connections. Replication connections are - * drawn from slots reserved with max_wal_senders and are not limited by - * max_connections, superuser_reserved_connections, or + * The last few connection slots are reserved for superusers and roles + * with privileges of pg_use_reserved_connections. Replication + * connections are drawn from slots reserved with max_wal_senders and are + * not limited by max_connections, superuser_reserved_connections, or * reserved_connections. * * Note: At this point, the new backend has already claimed a proc struct, diff --git a/src/backend/utils/init/usercontext.c b/src/backend/utils/init/usercontext.c index 38bcfa60df..dd9a0dd6a8 100644 --- a/src/backend/utils/init/usercontext.c +++ b/src/backend/utils/init/usercontext.c @@ -61,15 +61,15 @@ SwitchToUntrustedUser(Oid userid, UserContext *context) } else { - int sec_context = context->save_sec_context; + int sec_context = context->save_sec_context; /* * This user can SET ROLE to the target user, but not the other way * around, so protect ourselves against the target user by setting * SECURITY_RESTRICTED_OPERATION to prevent certain changes to the - * session state. 
Also set up a new GUC nest level, so that we can roll - * back any GUC changes that may be made by code running as the target - * user, inasmuch as they could be malicious. + * session state. Also set up a new GUC nest level, so that we can + * roll back any GUC changes that may be made by code running as the + * target user, inasmuch as they could be malicious. */ sec_context |= SECURITY_RESTRICTED_OPERATION; SetUserIdAndSecContext(userid, sec_context); diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 9dd624b3ae..53047104f5 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -1469,8 +1469,8 @@ check_GUC_init(struct config_generic *gconf) /* Flag combinations */ /* - * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part - * of SHOW ALL should not be hidden in postgresql.conf.sample. + * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of + * SHOW ALL should not be hidden in postgresql.conf.sample. */ if ((gconf->flags & GUC_NO_SHOW_ALL) && !(gconf->flags & GUC_NOT_IN_SAMPLE)) diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index cab3ddbe11..ca8558d662 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -4694,8 +4694,8 @@ struct config_enum ConfigureNamesEnum[] = { {"icu_validation_level", PGC_USERSET, CLIENT_CONN_LOCALE, - gettext_noop("Log level for reporting invalid ICU locale strings."), - NULL + gettext_noop("Log level for reporting invalid ICU locale strings."), + NULL }, &icu_validation_level, ERROR, icu_validation_level_options, diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c index f5a62061a3..7a3781466e 100644 --- a/src/backend/utils/mmgr/dsa.c +++ b/src/backend/utils/mmgr/dsa.c @@ -1369,7 +1369,7 @@ init_span(dsa_area *area, if (DsaPointerIsValid(pool->spans[1])) { dsa_area_span *head = (dsa_area_span *) - dsa_get_address(area, pool->spans[1]); + dsa_get_address(area, pool->spans[1]); head->prevspan = span_pointer; } @@ -2215,7 +2215,7 @@ make_new_segment(dsa_area *area, size_t requested_pages) if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE) { dsa_segment_map *next = - get_segment_by_index(area, segment_map->header->next); + get_segment_by_index(area, segment_map->header->next); Assert(next->header->bin == segment_map->header->bin); next->header->prev = new_index; diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c index 722a2e34db..8f9ea090fa 100644 --- a/src/backend/utils/mmgr/freepage.c +++ b/src/backend/utils/mmgr/freepage.c @@ -285,7 +285,7 @@ sum_free_pages(FreePageManager *fpm) if (!relptr_is_null(fpm->freelist[list])) { FreePageSpanLeader *candidate = - relptr_access(base, fpm->freelist[list]); + relptr_access(base, fpm->freelist[list]); do { diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index 42b90e4d4f..9fc83f11f6 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -734,9 +734,9 @@ MemoryContextStatsDetail(MemoryContext context, int max_children, * * We don't buffer the information about all memory contexts in a * backend into StringInfo and log it as one message. That would - * require the buffer to be enlarged, risking an OOM as there could - * be a large number of memory contexts in a backend. Instead, we - * log one message per memory context. + * require the buffer to be enlarged, risking an OOM as there could be + * a large number of memory contexts in a backend. 
Instead, we log + * one message per memory context. */ ereport(LOG_SERVER_ONLY, (errhidestmt(true), diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c index 7dec652106..f926f1faad 100644 --- a/src/backend/utils/resowner/resowner.c +++ b/src/backend/utils/resowner/resowner.c @@ -587,7 +587,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner, while (ResourceArrayGetAny(&(owner->cryptohasharr), &foundres)) { pg_cryptohash_ctx *context = - (pg_cryptohash_ctx *) DatumGetPointer(foundres); + (pg_cryptohash_ctx *) DatumGetPointer(foundres); if (isCommit) PrintCryptoHashLeakWarning(foundres); diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 95c3970437..e5a4e5b371 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -1438,8 +1438,8 @@ tuplesort_performsort(Tuplesortstate *state) /* * We were able to accumulate all the tuples required for output * in memory, using a heap to eliminate excess tuples. Now we - * have to transform the heap to a properly-sorted array. - * Note that sort_bounded_heap sets the correct state->status. + * have to transform the heap to a properly-sorted array. Note + * that sort_bounded_heap sets the correct state->status. */ sort_bounded_heap(state); state->current = 0; diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index c9ca44d8b7..3a419e348f 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -1990,7 +1990,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) int bucket = (oldSnapshotControl->head_offset + ((ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE)) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin)) oldSnapshotControl->xid_by_minute[bucket] = xmin; @@ -2057,7 +2057,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) /* Extend map to unused entry. */ int new_tail = (oldSnapshotControl->head_offset + oldSnapshotControl->count_used) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; oldSnapshotControl->count_used++; oldSnapshotControl->xid_by_minute[new_tail] = xmin; @@ -2188,7 +2188,7 @@ SerializeSnapshot(Snapshot snapshot, char *start_address) if (serialized_snapshot.subxcnt > 0) { Size subxipoff = sizeof(SerializedSnapshotData) + - snapshot->xcnt * sizeof(TransactionId); + snapshot->xcnt * sizeof(TransactionId); memcpy((TransactionId *) (start_address + subxipoff), snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId)); |