Diffstat (limited to 'src/backend/replication')
-rw-r--r--  src/backend/replication/libpqwalreceiver/libpqwalreceiver.c   2
-rw-r--r--  src/backend/replication/logical/decode.c                      10
-rw-r--r--  src/backend/replication/logical/logical.c                     4
-rw-r--r--  src/backend/replication/logical/origin.c                      2
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c               20
-rw-r--r--  src/backend/replication/logical/snapbuild.c                   6
-rw-r--r--  src/backend/replication/logical/tablesync.c                   2
-rw-r--r--  src/backend/replication/logical/worker.c                      37
-rw-r--r--  src/backend/replication/pgoutput/pgoutput.c                   4
-rw-r--r--  src/backend/replication/syncrep.c                             4
10 files changed, 46 insertions, 45 deletions
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 052505e46f..dc9c5c82d9 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -259,7 +259,7 @@ libpqrcv_check_conninfo(const char *conninfo, bool must_use_password)
if (must_use_password)
{
- bool uses_password = false;
+ bool uses_password = false;
for (opt = opts; opt->keyword != NULL; ++opt)
{
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index beef399b42..d91055a440 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -155,7 +155,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
case XLOG_PARAMETER_CHANGE:
{
xl_parameter_change *xlrec =
- (xl_parameter_change *) XLogRecGetData(buf->record);
+ (xl_parameter_change *) XLogRecGetData(buf->record);
/*
* If wal_level on the primary is reduced to less than
@@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
* invalidated when this WAL record is replayed; and further,
* slot creation fails when wal_level is not sufficient; but
* all these operations are not synchronized, so a logical
- * slot may creep in while the wal_level is being
- * reduced. Hence this extra check.
+ * slot may creep in while the wal_level is being reduced.
+ * Hence this extra check.
*/
if (xlrec->wal_level < WAL_LEVEL_LOGICAL)
{
@@ -752,7 +752,7 @@ DecodePrepare(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
SnapBuild *builder = ctx->snapshot_builder;
XLogRecPtr origin_lsn = parsed->origin_lsn;
TimestampTz prepare_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
int i;
TransactionId xid = parsed->twophase_xid;
@@ -828,7 +828,7 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
int i;
XLogRecPtr origin_lsn = InvalidXLogRecPtr;
TimestampTz abort_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
bool skip_xact;
if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN)
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index 7e1f677f7a..41243d0187 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin,
MemoryContext old_context;
/*
- * On a standby, this check is also required while creating the
- * slot. Check the comments in the function.
+ * On a standby, this check is also required while creating the slot.
+ * Check the comments in the function.
*/
CheckLogicalDecodingRequirements();
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index 2c04c8707d..b0255ffd25 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -833,7 +833,7 @@ replorigin_redo(XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec =
- (xl_replorigin_set *) XLogRecGetData(record);
+ (xl_replorigin_set *) XLogRecGetData(record);
replorigin_advance(xlrec->node_id,
xlrec->remote_lsn, record->EndRecPtr,
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 9f44974473..828613d325 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -1408,7 +1408,7 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
{
dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
ReorderBufferChange *next_change =
- dlist_container(ReorderBufferChange, node, next);
+ dlist_container(ReorderBufferChange, node, next);
/* txn stays the same */
state->entries[off].lsn = next_change->lsn;
@@ -1439,8 +1439,8 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
{
/* successfully restored changes from disk */
ReorderBufferChange *next_change =
- dlist_head_element(ReorderBufferChange, node,
- &entry->txn->changes);
+ dlist_head_element(ReorderBufferChange, node,
+ &entry->txn->changes);
elog(DEBUG2, "restored %u/%u changes from disk",
(uint32) entry->txn->nentries_mem,
@@ -1582,7 +1582,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
dclist_delete_from(&rb->catchange_txns, &txn->catchange_node);
/* now remove reference from buffer */
- hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
+ hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
Assert(found);
/* remove entries spilled to disk */
@@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
ReorderBufferTXN *txn;
/*
- * Bail out if logical_replication_mode is buffered and we haven't exceeded
- * the memory limit.
+ * Bail out if logical_replication_mode is buffered and we haven't
+ * exceeded the memory limit.
*/
if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED &&
rb->size < logical_decoding_work_mem * 1024L)
@@ -3841,7 +3841,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
{
char *data;
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
sz += inval_size;
@@ -4206,7 +4206,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
dlist_foreach_modify(cleanup_iter, &txn->changes)
{
ReorderBufferChange *cleanup =
- dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
+ dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
dlist_delete(&cleanup->node);
ReorderBufferReturnChange(rb, cleanup, true);
@@ -4431,7 +4431,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
case REORDER_BUFFER_CHANGE_INVALIDATION:
{
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
change->data.inval.invalidations =
MemoryContextAlloc(rb->context, inval_size);
@@ -4936,7 +4936,7 @@ ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
dlist_foreach_modify(it, &ent->chunks)
{
ReorderBufferChange *change =
- dlist_container(ReorderBufferChange, node, it.cur);
+ dlist_container(ReorderBufferChange, node, it.cur);
dlist_delete(&change->node);
ReorderBufferReturnChange(rb, change, true);
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 62542827e4..0786bb0ab7 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -574,7 +574,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder)
Assert(builder->building_full_snapshot);
/* don't allow older snapshots */
- InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
+ InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
if (HaveRegisteredOrActiveSnapshot())
elog(ERROR, "cannot build an initial slot snapshot when snapshots exist");
Assert(!HistoricSnapshotActive());
@@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
*/
/*
- * xl_running_xacts record is older than what we can use, we might not have
- * all necessary catalog rows anymore.
+ * xl_running_xacts record is older than what we can use, we might not
+ * have all necessary catalog rows anymore.
*/
if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
NormalTransactionIdPrecedes(running->oldestRunningXid,
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 0c71ae9ba7..c56d42dcd2 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -563,7 +563,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
* the lock.
*/
int nsyncworkers =
- logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
+ logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
/* Now safe to release the LWLock */
LWLockRelease(LogicalRepWorkerLock);
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 37bb884127..b655c24d0b 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -2396,7 +2396,7 @@ apply_handle_insert(StringInfo s)
LogicalRepRelMapEntry *rel;
LogicalRepTupleData newtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
@@ -2544,7 +2544,7 @@ apply_handle_update(StringInfo s)
{
LogicalRepRelMapEntry *rel;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
LogicalRepTupleData oldtup;
@@ -2729,7 +2729,7 @@ apply_handle_delete(StringInfo s)
LogicalRepRelMapEntry *rel;
LogicalRepTupleData oldtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
@@ -3076,8 +3076,8 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
if (map)
{
TupleConversionMap *PartitionToRootMap =
- convert_tuples_by_name(RelationGetDescr(partrel),
- RelationGetDescr(parentrel));
+ convert_tuples_by_name(RelationGetDescr(partrel),
+ RelationGetDescr(parentrel));
remoteslot =
execute_attr_map_slot(PartitionToRootMap->attrMap,
@@ -3411,7 +3411,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush,
dlist_foreach_modify(iter, &lsn_mapping)
{
FlushPosition *pos =
- dlist_container(FlushPosition, node, iter.cur);
+ dlist_container(FlushPosition, node, iter.cur);
*write = pos->remote_end;
@@ -4695,11 +4695,11 @@ ApplyWorkerMain(Datum main_arg)
ereport(DEBUG1,
(errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s",
- MySubscription->name,
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
- "?")));
+ MySubscription->name,
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
+ "?")));
}
else
{
@@ -5073,10 +5073,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
}
/*
- * If we are processing this transaction using a parallel apply worker then
- * either we send the changes to the parallel worker or if the worker is busy
- * then serialize the changes to the file which will later be processed by
- * the parallel worker.
+ * If we are processing this transaction using a parallel apply worker
+ * then either we send the changes to the parallel worker or if the worker
+ * is busy then serialize the changes to the file which will later be
+ * processed by the parallel worker.
*/
*winfo = pa_find_worker(xid);
@@ -5090,9 +5090,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
}
/*
- * If there is no parallel worker involved to process this transaction then
- * we either directly apply the change or serialize it to a file which will
- * later be applied when the transaction finish message is processed.
+ * If there is no parallel worker involved to process this transaction
+ * then we either directly apply the change or serialize it to a file
+ * which will later be applied when the transaction finish message is
+ * processed.
*/
else if (in_streamed_transaction)
{
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index f88389de84..b08ca55041 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications,
* are multiple lists (one for each operation) to which row filters will
* be appended.
*
- * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row
- * filter expression" so it takes precedence.
+ * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter
+ * expression" so it takes precedence.
*/
foreach(lc, publications)
{
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 889e20b5dd..a8a2f8f1b9 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -330,7 +330,7 @@ static void
SyncRepQueueInsert(int mode)
{
dlist_head *queue;
- dlist_iter iter;
+ dlist_iter iter;
Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
queue = &WalSndCtl->SyncRepQueue[mode];
@@ -879,7 +879,7 @@ SyncRepWakeQueue(bool all, int mode)
dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode])
{
- PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
+ PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
/*
* Assume the queue is ordered by LSN