Diffstat (limited to 'src/backend')
-rw-r--r-- | src/backend/access/transam/xlog.c | 64
-rw-r--r-- | src/backend/commands/dbcommands.c | 14
-rw-r--r-- | src/backend/commands/tablespace.c | 2
-rw-r--r-- | src/backend/parser/gram.y | 7
-rw-r--r-- | src/backend/postmaster/checkpointer.c | 77
-rw-r--r-- | src/backend/postmaster/postmaster.c | 28
-rw-r--r-- | src/backend/replication/slot.c | 53
-rw-r--r-- | src/backend/storage/aio/method_worker.c | 62
-rw-r--r-- | src/backend/storage/buffer/bufmgr.c | 10
-rw-r--r-- | src/backend/storage/ipc/procarray.c | 52
-rw-r--r-- | src/backend/tcop/utility.c | 12
-rw-r--r-- | src/backend/utils/adt/acl.c | 33
-rw-r--r-- | src/backend/utils/adt/timestamp.c | 20
-rw-r--r-- | src/backend/utils/misc/guc_tables.c | 10
-rw-r--r-- | src/backend/utils/misc/postgresql.conf.sample | 2
15 files changed, 212 insertions, 234 deletions
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index a8cc6402d62..88fb9b45b2a 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -2347,25 +2347,6 @@ check_wal_segment_size(int *newval, void **extra, GucSource source) } /* - * GUC check_hook for max_slot_wal_keep_size - * - * We don't allow the value of max_slot_wal_keep_size other than -1 during the - * binary upgrade. See start_postmaster() in pg_upgrade for more details. - */ -bool -check_max_slot_wal_keep_size(int *newval, void **extra, GucSource source) -{ - if (IsBinaryUpgrade && *newval != -1) - { - GUC_check_errdetail("\"%s\" must be set to -1 during binary upgrade mode.", - "max_slot_wal_keep_size"); - return false; - } - - return true; -} - -/* * At a checkpoint, how many WAL segments to recycle as preallocated future * XLOG segments? Returns the highest segment that should be preallocated. */ @@ -6505,7 +6486,7 @@ PerformRecoveryXLogAction(void) else { RequestCheckpoint(CHECKPOINT_END_OF_RECOVERY | - CHECKPOINT_IMMEDIATE | + CHECKPOINT_FAST | CHECKPOINT_WAIT); } @@ -6814,7 +6795,7 @@ ShutdownXLOG(int code, Datum arg) WalSndWaitStopping(); if (RecoveryInProgress()) - CreateRestartPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE); + CreateRestartPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_FAST); else { /* @@ -6826,7 +6807,7 @@ ShutdownXLOG(int code, Datum arg) if (XLogArchivingActive()) RequestXLogSwitch(false); - CreateCheckPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE); + CreateCheckPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_FAST); } } @@ -6842,24 +6823,24 @@ LogCheckpointStart(int flags, bool restartpoint) (errmsg("restartpoint starting:%s%s%s%s%s%s%s%s", (flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "", (flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "", - (flags & CHECKPOINT_IMMEDIATE) ? " immediate" : "", + (flags & CHECKPOINT_FAST) ? " fast" : "", (flags & CHECKPOINT_FORCE) ? " force" : "", (flags & CHECKPOINT_WAIT) ? " wait" : "", (flags & CHECKPOINT_CAUSE_XLOG) ? " wal" : "", (flags & CHECKPOINT_CAUSE_TIME) ? " time" : "", - (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : ""))); + (flags & CHECKPOINT_FLUSH_UNLOGGED) ? " flush-unlogged" : ""))); else ereport(LOG, /* translator: the placeholders show checkpoint options */ (errmsg("checkpoint starting:%s%s%s%s%s%s%s%s", (flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "", (flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "", - (flags & CHECKPOINT_IMMEDIATE) ? " immediate" : "", + (flags & CHECKPOINT_FAST) ? " fast" : "", (flags & CHECKPOINT_FORCE) ? " force" : "", (flags & CHECKPOINT_WAIT) ? " wait" : "", (flags & CHECKPOINT_CAUSE_XLOG) ? " wal" : "", (flags & CHECKPOINT_CAUSE_TIME) ? " time" : "", - (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : ""))); + (flags & CHECKPOINT_FLUSH_UNLOGGED) ? " flush-unlogged" : ""))); } /* @@ -7042,12 +7023,12 @@ update_checkpoint_display(int flags, bool restartpoint, bool reset) * flags is a bitwise OR of the following: * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown. * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery. - * CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP, - * ignoring checkpoint_completion_target parameter. + * CHECKPOINT_FAST: finish the checkpoint ASAP, ignoring + * checkpoint_completion_target parameter. * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or * CHECKPOINT_END_OF_RECOVERY). 
- * CHECKPOINT_FLUSH_ALL: also flush buffers of unlogged tables. + * CHECKPOINT_FLUSH_UNLOGGED: also flush buffers of unlogged tables. * * Note: flags contains other bits, of interest here only for logging purposes. * In particular note that this routine is synchronous and does not pay @@ -8150,17 +8131,19 @@ KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo) XLByteToSeg(recptr, currSegNo, wal_segment_size); segno = currSegNo; - /* - * Calculate how many segments are kept by slots first, adjusting for - * max_slot_wal_keep_size. - */ + /* Calculate how many segments are kept by slots. */ keep = XLogGetReplicationSlotMinimumLSN(); if (keep != InvalidXLogRecPtr && keep < recptr) { XLByteToSeg(keep, segno, wal_segment_size); - /* Cap by max_slot_wal_keep_size ... */ - if (max_slot_wal_keep_size_mb >= 0) + /* + * Account for max_slot_wal_keep_size to avoid keeping more than + * configured. However, don't do that during a binary upgrade: if + * slots were to be invalidated because of this, it would not be + * possible to preserve logical ones during the upgrade. + */ + if (max_slot_wal_keep_size_mb >= 0 && !IsBinaryUpgrade) { uint64 slot_keep_segs; @@ -8946,9 +8929,8 @@ issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli) * backup state and tablespace map. * * Input parameters are "state" (the backup state), "fast" (if true, we do - * the checkpoint in immediate mode to make it faster), and "tablespaces" - * (if non-NULL, indicates a list of tablespaceinfo structs describing the - * cluster's tablespaces.). + * the checkpoint in fast mode), and "tablespaces" (if non-NULL, indicates a + * list of tablespaceinfo structs describing the cluster's tablespaces.). * * The tablespace map contents are appended to passed-in parameter * tablespace_map and the caller is responsible for including it in the backup @@ -9076,11 +9058,11 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, * during recovery means that checkpointer is running, we can use * RequestCheckpoint() to establish a restartpoint. * - * We use CHECKPOINT_IMMEDIATE only if requested by user (via - * passing fast = true). Otherwise this can take awhile. + * We use CHECKPOINT_FAST only if requested by user (via passing + * fast = true). Otherwise this can take awhile. */ RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT | - (fast ? CHECKPOINT_IMMEDIATE : 0)); + (fast ? CHECKPOINT_FAST : 0)); /* * Now we need to fetch the checkpoint record location, and also diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index c95eb945016..502a45163c8 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -570,8 +570,8 @@ CreateDatabaseUsingFileCopy(Oid src_dboid, Oid dst_dboid, Oid src_tsid, * any CREATE DATABASE commands. */ if (!IsBinaryUpgrade) - RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | - CHECKPOINT_WAIT | CHECKPOINT_FLUSH_ALL); + RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | + CHECKPOINT_WAIT | CHECKPOINT_FLUSH_UNLOGGED); /* * Iterate through all tablespaces of the template database, and copy each @@ -673,7 +673,7 @@ CreateDatabaseUsingFileCopy(Oid src_dboid, Oid dst_dboid, Oid src_tsid, * strategy that avoids these problems. 
*/ if (!IsBinaryUpgrade) - RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | + RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT); } @@ -1870,7 +1870,7 @@ dropdb(const char *dbname, bool missing_ok, bool force) * Force a checkpoint to make sure the checkpointer has received the * message sent by ForgetDatabaseSyncRequests. */ - RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT); + RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT); /* Close all smgr fds in all backends. */ WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE)); @@ -2120,8 +2120,8 @@ movedb(const char *dbname, const char *tblspcname) * On Windows, this also ensures that background procs don't hold any open * files, which would cause rmdir() to fail. */ - RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT - | CHECKPOINT_FLUSH_ALL); + RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT + | CHECKPOINT_FLUSH_UNLOGGED); /* Close all smgr fds in all backends. */ WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE)); @@ -2252,7 +2252,7 @@ movedb(const char *dbname, const char *tblspcname) * any unlogged operations done in the new DB tablespace before the * next checkpoint. */ - RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT); + RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT); /* * Force synchronous commit, thus minimizing the window between diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index a9005cc7212..df31eace47a 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -500,7 +500,7 @@ DropTableSpace(DropTableSpaceStmt *stmt) * mustn't delete. So instead, we force a checkpoint which will clean * out any lingering files, and try again. 
*/ - RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT); + RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT); /* * On Windows, an unlinked file persists in the directory listing diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index 70a0d832a11..73345bb3c70 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -2034,6 +2034,13 @@ CheckPointStmt: $$ = (Node *) n; } + | CHECKPOINT '(' utility_option_list ')' + { + CheckPointStmt *n = makeNode(CheckPointStmt); + + $$ = (Node *) n; + n->options = $3; + } ; diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index fda91ffd1ce..2809e298a44 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -42,6 +42,8 @@ #include "access/xlog.h" #include "access/xlog_internal.h" #include "access/xlogrecovery.h" +#include "catalog/pg_authid.h" +#include "commands/defrem.h" #include "libpq/pqsignal.h" #include "miscadmin.h" #include "pgstat.h" @@ -61,6 +63,7 @@ #include "storage/shmem.h" #include "storage/smgr.h" #include "storage/spin.h" +#include "utils/acl.h" #include "utils/guc.h" #include "utils/memutils.h" #include "utils/resowner.h" @@ -161,7 +164,7 @@ static pg_time_t last_xlog_switch_time; static void ProcessCheckpointerInterrupts(void); static void CheckArchiveTimeout(void); static bool IsCheckpointOnSchedule(double progress); -static bool ImmediateCheckpointRequested(void); +static bool FastCheckpointRequested(void); static bool CompactCheckpointerRequestQueue(void); static void UpdateSharedMemoryConfig(void); @@ -734,12 +737,12 @@ CheckArchiveTimeout(void) } /* - * Returns true if an immediate checkpoint request is pending. (Note that - * this does not check the *current* checkpoint's IMMEDIATE flag, but whether - * there is one pending behind it.) + * Returns true if a fast checkpoint request is pending. (Note that this does + * not check the *current* checkpoint's FAST flag, but whether there is one + * pending behind it.) */ static bool -ImmediateCheckpointRequested(void) +FastCheckpointRequested(void) { volatile CheckpointerShmemStruct *cps = CheckpointerShmem; @@ -747,7 +750,7 @@ ImmediateCheckpointRequested(void) * We don't need to acquire the ckpt_lck in this case because we're only * looking at a single flag bit. */ - if (cps->ckpt_flags & CHECKPOINT_IMMEDIATE) + if (cps->ckpt_flags & CHECKPOINT_FAST) return true; return false; } @@ -760,7 +763,7 @@ ImmediateCheckpointRequested(void) * checkpoint_completion_target. * * The checkpoint request flags should be passed in; currently the only one - * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes. + * examined is CHECKPOINT_FAST, which disables delays between writes. * * 'progress' is an estimate of how much of the work has been done, as a * fraction between 0.0 meaning none, and 1.0 meaning all done. @@ -778,10 +781,10 @@ CheckpointWriteDelay(int flags, double progress) * Perform the usual duties and take a nap, unless we're behind schedule, * in which case we just try to catch up as quickly as possible. 
*/ - if (!(flags & CHECKPOINT_IMMEDIATE) && + if (!(flags & CHECKPOINT_FAST) && !ShutdownXLOGPending && !ShutdownRequestPending && - !ImmediateCheckpointRequested() && + !FastCheckpointRequested() && IsCheckpointOnSchedule(progress)) { if (ConfigReloadPending) @@ -977,17 +980,67 @@ CheckpointerShmemInit(void) } /* + * ExecCheckpoint + * Primary entry point for manual CHECKPOINT commands + * + * This is mainly a wrapper for RequestCheckpoint(). + */ +void +ExecCheckpoint(ParseState *pstate, CheckPointStmt *stmt) +{ + bool fast = true; + bool unlogged = false; + + foreach_ptr(DefElem, opt, stmt->options) + { + if (strcmp(opt->defname, "mode") == 0) + { + char *mode = defGetString(opt); + + if (strcmp(mode, "spread") == 0) + fast = false; + else if (strcmp(mode, "fast") != 0) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("unrecognized MODE option \"%s\"", mode), + parser_errposition(pstate, opt->location))); + } + else if (strcmp(opt->defname, "flush_unlogged") == 0) + unlogged = defGetBoolean(opt); + else + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("unrecognized CHECKPOINT option \"%s\"", opt->defname), + parser_errposition(pstate, opt->location))); + } + + if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + /* translator: %s is name of an SQL command (e.g., CHECKPOINT) */ + errmsg("permission denied to execute %s command", + "CHECKPOINT"), + errdetail("Only roles with privileges of the \"%s\" role may execute this command.", + "pg_checkpoint"))); + + RequestCheckpoint(CHECKPOINT_WAIT | + (fast ? CHECKPOINT_FAST : 0) | + (unlogged ? CHECKPOINT_FLUSH_UNLOGGED : 0) | + (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE)); +} + +/* * RequestCheckpoint * Called in backend processes to request a checkpoint * * flags is a bitwise OR of the following: * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown. * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery. - * CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP, + * CHECKPOINT_FAST: finish the checkpoint ASAP, * ignoring checkpoint_completion_target parameter. * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or - * CHECKPOINT_END_OF_RECOVERY). + * CHECKPOINT_END_OF_RECOVERY, and the CHECKPOINT command). * CHECKPOINT_WAIT: wait for completion before returning (otherwise, * just signal checkpointer to do it, and return). * CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling. @@ -1009,7 +1062,7 @@ RequestCheckpoint(int flags) * There's no point in doing slow checkpoints in a standalone backend, * because there's no other backends the checkpoint could disrupt. */ - CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE); + CreateCheckPoint(flags | CHECKPOINT_FAST); /* Free all smgr objects, as CheckpointerMain() normally would. 
*/ smgrdestroyall(); diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 490f7ce3664..cca9b946e53 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -4337,15 +4337,15 @@ maybe_start_bgworkers(void) static bool maybe_reap_io_worker(int pid) { - for (int id = 0; id < MAX_IO_WORKERS; ++id) + for (int i = 0; i < MAX_IO_WORKERS; ++i) { - if (io_worker_children[id] && - io_worker_children[id]->pid == pid) + if (io_worker_children[i] && + io_worker_children[i]->pid == pid) { - ReleasePostmasterChildSlot(io_worker_children[id]); + ReleasePostmasterChildSlot(io_worker_children[i]); --io_worker_count; - io_worker_children[id] = NULL; + io_worker_children[i] = NULL; return true; } } @@ -4389,22 +4389,22 @@ maybe_adjust_io_workers(void) while (io_worker_count < io_workers) { PMChild *child; - int id; + int i; /* find unused entry in io_worker_children array */ - for (id = 0; id < MAX_IO_WORKERS; ++id) + for (i = 0; i < MAX_IO_WORKERS; ++i) { - if (io_worker_children[id] == NULL) + if (io_worker_children[i] == NULL) break; } - if (id == MAX_IO_WORKERS) - elog(ERROR, "could not find a free IO worker ID"); + if (i == MAX_IO_WORKERS) + elog(ERROR, "could not find a free IO worker slot"); /* Try to launch one. */ child = StartChildProcess(B_IO_WORKER); if (child != NULL) { - io_worker_children[id] = child; + io_worker_children[i] = child; ++io_worker_count; } else @@ -4415,11 +4415,11 @@ maybe_adjust_io_workers(void) if (io_worker_count > io_workers) { /* ask the IO worker in the highest slot to exit */ - for (int id = MAX_IO_WORKERS - 1; id >= 0; --id) + for (int i = MAX_IO_WORKERS - 1; i >= 0; --i) { - if (io_worker_children[id] != NULL) + if (io_worker_children[i] != NULL) { - kill(io_worker_children[id]->pid, SIGUSR2); + kill(io_worker_children[i]->pid, SIGUSR2); break; } } diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index f369fce2485..e44ad576bc7 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -154,7 +154,7 @@ int max_replication_slots = 10; /* the maximum number of replication * Invalidate replication slots that have remained idle longer than this * duration; '0' disables it. 
*/ -int idle_replication_slot_timeout_mins = 0; +int idle_replication_slot_timeout_secs = 0; /* * This GUC lists streaming replication standby server slot names that @@ -1612,13 +1612,10 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause, case RS_INVAL_IDLE_TIMEOUT: { - int minutes = slot_idle_seconds / SECS_PER_MINUTE; - int secs = slot_idle_seconds % SECS_PER_MINUTE; - /* translator: %s is a GUC variable name */ - appendStringInfo(&err_detail, _("The slot's idle time of %dmin %02ds exceeds the configured \"%s\" duration of %dmin."), - minutes, secs, "idle_replication_slot_timeout", - idle_replication_slot_timeout_mins); + appendStringInfo(&err_detail, _("The slot's idle time of %lds exceeds the configured \"%s\" duration of %ds."), + slot_idle_seconds, "idle_replication_slot_timeout", + idle_replication_slot_timeout_secs); /* translator: %s is a GUC variable name */ appendStringInfo(&err_hint, _("You might need to increase \"%s\"."), "idle_replication_slot_timeout"); @@ -1656,7 +1653,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause, static inline bool CanInvalidateIdleSlot(ReplicationSlot *s) { - return (idle_replication_slot_timeout_mins != 0 && + return (idle_replication_slot_timeout_secs != 0 && !XLogRecPtrIsInvalid(s->data.restart_lsn) && s->inactive_since > 0 && !(RecoveryInProgress() && s->data.synced)); @@ -1717,9 +1714,9 @@ DetermineSlotInvalidationCause(uint32 possible_causes, ReplicationSlot *s, if (CanInvalidateIdleSlot(s)) { /* - * We simulate the invalidation due to idle_timeout as the minimum - * time idle time is one minute which makes tests take a long - * time. + * Simulate the invalidation due to idle_timeout to test the + * timeout behavior promptly, without waiting for it to trigger + * naturally. */ #ifdef USE_INJECTION_POINTS if (IS_INJECTION_POINT_ATTACHED("slot-timeout-inval")) @@ -1734,7 +1731,7 @@ DetermineSlotInvalidationCause(uint32 possible_causes, ReplicationSlot *s, * idle_replication_slot_timeout GUC. */ if (TimestampDifferenceExceedsSeconds(s->inactive_since, now, - idle_replication_slot_timeout_mins * SECS_PER_MINUTE)) + idle_replication_slot_timeout_secs)) { *inactive_since = s->inactive_since; return RS_INVAL_IDLE_TIMEOUT; @@ -1891,15 +1888,6 @@ InvalidatePossiblyObsoleteSlot(uint32 possible_causes, SpinLockRelease(&s->mutex); /* - * The logical replication slots shouldn't be invalidated as GUC - * max_slot_wal_keep_size is set to -1 and - * idle_replication_slot_timeout is set to 0 during the binary - * upgrade. See check_old_cluster_for_valid_slots() where we ensure - * that no invalidated before the upgrade. - */ - Assert(!(*invalidated && SlotIsLogical(s) && IsBinaryUpgrade)); - - /* * Calculate the idle time duration of the slot if slot is marked * invalidated with RS_INVAL_IDLE_TIMEOUT. */ @@ -2045,6 +2033,10 @@ restart: if (!s->in_use) continue; + /* Prevent invalidation of logical slots during binary upgrade */ + if (SlotIsLogical(s) && IsBinaryUpgrade) + continue; + if (InvalidatePossiblyObsoleteSlot(possible_causes, s, oldestLSN, dboid, snapshotConflictHorizon, &invalidated)) @@ -3057,22 +3049,3 @@ WaitForStandbyConfirmation(XLogRecPtr wait_for_lsn) ConditionVariableCancelSleep(); } - -/* - * GUC check_hook for idle_replication_slot_timeout - * - * The value of idle_replication_slot_timeout must be set to 0 during - * a binary upgrade. See start_postmaster() in pg_upgrade for more details. 
- */ -bool -check_idle_replication_slot_timeout(int *newval, void **extra, GucSource source) -{ - if (IsBinaryUpgrade && *newval != 0) - { - GUC_check_errdetail("\"%s\" must be set to 0 during binary upgrade mode.", - "idle_replication_slot_timeout"); - return false; - } - - return true; -} diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c index 36be179678d..bf8f77e6ff6 100644 --- a/src/backend/storage/aio/method_worker.c +++ b/src/backend/storage/aio/method_worker.c @@ -52,26 +52,26 @@ #define IO_WORKER_WAKEUP_FANOUT 2 -typedef struct AioWorkerSubmissionQueue +typedef struct PgAioWorkerSubmissionQueue { uint32 size; uint32 mask; uint32 head; uint32 tail; - uint32 ios[FLEXIBLE_ARRAY_MEMBER]; -} AioWorkerSubmissionQueue; + uint32 sqes[FLEXIBLE_ARRAY_MEMBER]; +} PgAioWorkerSubmissionQueue; -typedef struct AioWorkerSlot +typedef struct PgAioWorkerSlot { Latch *latch; bool in_use; -} AioWorkerSlot; +} PgAioWorkerSlot; -typedef struct AioWorkerControl +typedef struct PgAioWorkerControl { uint64 idle_worker_mask; - AioWorkerSlot workers[FLEXIBLE_ARRAY_MEMBER]; -} AioWorkerControl; + PgAioWorkerSlot workers[FLEXIBLE_ARRAY_MEMBER]; +} PgAioWorkerControl; static size_t pgaio_worker_shmem_size(void); @@ -96,8 +96,8 @@ int io_workers = 3; static int io_worker_queue_size = 64; static int MyIoWorkerId; -static AioWorkerSubmissionQueue *io_worker_submission_queue; -static AioWorkerControl *io_worker_control; +static PgAioWorkerSubmissionQueue *io_worker_submission_queue; +static PgAioWorkerControl *io_worker_control; static size_t @@ -106,15 +106,15 @@ pgaio_worker_queue_shmem_size(int *queue_size) /* Round size up to next power of two so we can make a mask. */ *queue_size = pg_nextpower2_32(io_worker_queue_size); - return offsetof(AioWorkerSubmissionQueue, ios) + + return offsetof(PgAioWorkerSubmissionQueue, sqes) + sizeof(uint32) * *queue_size; } static size_t pgaio_worker_control_shmem_size(void) { - return offsetof(AioWorkerControl, workers) + - sizeof(AioWorkerSlot) * MAX_IO_WORKERS; + return offsetof(PgAioWorkerControl, workers) + + sizeof(PgAioWorkerSlot) * MAX_IO_WORKERS; } static size_t @@ -162,7 +162,7 @@ pgaio_worker_shmem_init(bool first_time) } static int -pgaio_choose_idle_worker(void) +pgaio_worker_choose_idle(void) { int worker; @@ -172,6 +172,7 @@ pgaio_choose_idle_worker(void) /* Find the lowest bit position, and clear it. 
*/ worker = pg_rightmost_one_pos64(io_worker_control->idle_worker_mask); io_worker_control->idle_worker_mask &= ~(UINT64_C(1) << worker); + Assert(io_worker_control->workers[worker].in_use); return worker; } @@ -179,7 +180,7 @@ pgaio_choose_idle_worker(void) static bool pgaio_worker_submission_queue_insert(PgAioHandle *ioh) { - AioWorkerSubmissionQueue *queue; + PgAioWorkerSubmissionQueue *queue; uint32 new_head; queue = io_worker_submission_queue; @@ -191,7 +192,7 @@ pgaio_worker_submission_queue_insert(PgAioHandle *ioh) return false; /* full */ } - queue->ios[queue->head] = pgaio_io_get_id(ioh); + queue->sqes[queue->head] = pgaio_io_get_id(ioh); queue->head = new_head; return true; @@ -200,14 +201,14 @@ pgaio_worker_submission_queue_insert(PgAioHandle *ioh) static uint32 pgaio_worker_submission_queue_consume(void) { - AioWorkerSubmissionQueue *queue; + PgAioWorkerSubmissionQueue *queue; uint32 result; queue = io_worker_submission_queue; if (queue->tail == queue->head) return UINT32_MAX; /* empty */ - result = queue->ios[queue->tail]; + result = queue->sqes[queue->tail]; queue->tail = (queue->tail + 1) & (queue->size - 1); return result; @@ -240,37 +241,37 @@ pgaio_worker_needs_synchronous_execution(PgAioHandle *ioh) } static void -pgaio_worker_submit_internal(int nios, PgAioHandle *ios[]) +pgaio_worker_submit_internal(int num_staged_ios, PgAioHandle **staged_ios) { PgAioHandle *synchronous_ios[PGAIO_SUBMIT_BATCH_SIZE]; int nsync = 0; Latch *wakeup = NULL; int worker; - Assert(nios <= PGAIO_SUBMIT_BATCH_SIZE); + Assert(num_staged_ios <= PGAIO_SUBMIT_BATCH_SIZE); LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE); - for (int i = 0; i < nios; ++i) + for (int i = 0; i < num_staged_ios; ++i) { - Assert(!pgaio_worker_needs_synchronous_execution(ios[i])); - if (!pgaio_worker_submission_queue_insert(ios[i])) + Assert(!pgaio_worker_needs_synchronous_execution(staged_ios[i])); + if (!pgaio_worker_submission_queue_insert(staged_ios[i])) { /* * We'll do it synchronously, but only after we've sent as many as * we can to workers, to maximize concurrency. */ - synchronous_ios[nsync++] = ios[i]; + synchronous_ios[nsync++] = staged_ios[i]; continue; } if (wakeup == NULL) { /* Choose an idle worker to wake up if we haven't already. 
*/ - worker = pgaio_choose_idle_worker(); + worker = pgaio_worker_choose_idle(); if (worker >= 0) wakeup = io_worker_control->workers[worker].latch; - pgaio_debug_io(DEBUG4, ios[i], + pgaio_debug_io(DEBUG4, staged_ios[i], "choosing worker %d", worker); } @@ -316,6 +317,7 @@ pgaio_worker_die(int code, Datum arg) Assert(io_worker_control->workers[MyIoWorkerId].in_use); Assert(io_worker_control->workers[MyIoWorkerId].latch == MyLatch); + io_worker_control->idle_worker_mask &= ~(UINT64_C(1) << MyIoWorkerId); io_worker_control->workers[MyIoWorkerId].in_use = false; io_worker_control->workers[MyIoWorkerId].latch = NULL; LWLockRelease(AioWorkerSubmissionQueueLock); @@ -488,7 +490,7 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len) IO_WORKER_WAKEUP_FANOUT); for (int i = 0; i < nwakeups; ++i) { - if ((worker = pgaio_choose_idle_worker()) < 0) + if ((worker = pgaio_worker_choose_idle()) < 0) break; latches[nlatches++] = io_worker_control->workers[worker].latch; } @@ -573,6 +575,12 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len) } CHECK_FOR_INTERRUPTS(); + + if (ConfigReloadPending) + { + ConfigReloadPending = false; + ProcessConfigFile(PGC_SIGHUP); + } } error_context_stack = errcallback.previous; diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index bd68d7e0ca9..6afdd28dba6 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -3339,10 +3339,10 @@ UnpinBufferNoOwner(BufferDesc *buf) * BufferSync -- Write out all dirty buffers in the pool. * * This is called at checkpoint time to write out all dirty shared buffers. - * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE - * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN, - * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even - * unlogged buffers, which are otherwise skipped. The remaining flags + * The checkpoint request flags should be passed in. If CHECKPOINT_FAST is + * set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN, + * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_UNLOGGED is set, we write + * even unlogged buffers, which are otherwise skipped. The remaining flags * currently have no effect here. */ static void @@ -3367,7 +3367,7 @@ BufferSync(int flags) * recovery, we write all dirty buffers. */ if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY | - CHECKPOINT_FLUSH_ALL)))) + CHECKPOINT_FLUSH_UNLOGGED)))) mask |= BM_PERMANENT; /* diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index e5b945a9ee3..2418967def6 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -1622,58 +1622,6 @@ TransactionIdIsInProgress(TransactionId xid) return false; } -/* - * TransactionIdIsActive -- is xid the top-level XID of an active backend? - * - * This differs from TransactionIdIsInProgress in that it ignores prepared - * transactions, as well as transactions running on the primary if we're in - * hot standby. Also, we ignore subtransactions since that's not needed - * for current uses. - */ -bool -TransactionIdIsActive(TransactionId xid) -{ - bool result = false; - ProcArrayStruct *arrayP = procArray; - TransactionId *other_xids = ProcGlobal->xids; - int i; - - /* - * Don't bother checking a transaction older than RecentXmin; it could not - * possibly still be running. 
- */ - if (TransactionIdPrecedes(xid, RecentXmin)) - return false; - - LWLockAcquire(ProcArrayLock, LW_SHARED); - - for (i = 0; i < arrayP->numProcs; i++) - { - int pgprocno = arrayP->pgprocnos[i]; - PGPROC *proc = &allProcs[pgprocno]; - TransactionId pxid; - - /* Fetch xid just once - see GetNewTransactionId */ - pxid = UINT32_ACCESS_ONCE(other_xids[i]); - - if (!TransactionIdIsValid(pxid)) - continue; - - if (proc->pid == 0) - continue; /* ignore prepared transactions */ - - if (TransactionIdEquals(pxid, xid)) - { - result = true; - break; - } - } - - LWLockRelease(ProcArrayLock); - - return result; -} - /* * Determine XID horizons. diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index aff8510755f..4c1faf5575c 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -943,17 +943,7 @@ standard_ProcessUtility(PlannedStmt *pstmt, break; case T_CheckPointStmt: - if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - /* translator: %s is name of a SQL command, eg CHECKPOINT */ - errmsg("permission denied to execute %s command", - "CHECKPOINT"), - errdetail("Only roles with privileges of the \"%s\" role may execute this command.", - "pg_checkpoint"))); - - RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT | - (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE)); + ExecCheckpoint(pstate, (CheckPointStmt *) parsetree); break; /* diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index ca3c5ee3df3..1213f9106d5 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -135,6 +135,22 @@ static void RoleMembershipCacheCallback(Datum arg, int cacheid, uint32 hashvalue /* + * Test whether an identifier char can be left unquoted in ACLs. + * + * Formerly, we used isalnum() even on non-ASCII characters, resulting in + * unportable behavior. To ensure dump compatibility with old versions, + * we now treat high-bit-set characters as always requiring quoting during + * putid(), but getid() will always accept them without quotes. + */ +static inline bool +is_safe_acl_char(unsigned char c, bool is_getid) +{ + if (IS_HIGHBIT_SET(c)) + return is_getid; + return isalnum(c) || c == '_'; +} + +/* * getid * Consumes the first alphanumeric string (identifier) found in string * 's', ignoring any leading white space. 
If it finds a double quote @@ -159,21 +175,22 @@ getid(const char *s, char *n, Node *escontext) while (isspace((unsigned char) *s)) s++; - /* This code had better match what putid() does, below */ for (; *s != '\0' && - (isalnum((unsigned char) *s) || - *s == '_' || - *s == '"' || - in_quotes); + (in_quotes || *s == '"' || is_safe_acl_char(*s, true)); s++) { if (*s == '"') { + if (!in_quotes) + { + in_quotes = true; + continue; + } /* safe to look at next char (could be '\0' though) */ if (*(s + 1) != '"') { - in_quotes = !in_quotes; + in_quotes = false; continue; } /* it's an escaped double quote; skip the escaping char */ @@ -207,10 +224,10 @@ putid(char *p, const char *s) const char *src; bool safe = true; + /* Detect whether we need to use double quotes */ for (src = s; *src; src++) { - /* This test had better match what getid() does, above */ - if (!isalnum((unsigned char) *src) && *src != '_') + if (!is_safe_acl_char(*src, false)) { safe = false; break; diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index 0a5848a4ab2..25cff56c3d0 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -5312,10 +5312,10 @@ isoweekdate2date(int isoweek, int wday, int *year, int *mon, int *mday) int date2isoweek(int year, int mon, int mday) { - float8 result; int day0, day4, - dayn; + dayn, + week; /* current day */ dayn = date2j(year, mon, mday); @@ -5338,13 +5338,13 @@ date2isoweek(int year, int mon, int mday) day0 = j2day(day4 - 1); } - result = (dayn - (day4 - day0)) / 7 + 1; + week = (dayn - (day4 - day0)) / 7 + 1; /* * Sometimes the last few days in a year will fall into the first week of * the next year, so check for this. */ - if (result >= 52) + if (week >= 52) { day4 = date2j(year + 1, 1, 4); @@ -5352,10 +5352,10 @@ date2isoweek(int year, int mon, int mday) day0 = j2day(day4 - 1); if (dayn >= day4 - day0) - result = (dayn - (day4 - day0)) / 7 + 1; + week = (dayn - (day4 - day0)) / 7 + 1; } - return (int) result; + return week; } @@ -5367,10 +5367,10 @@ date2isoweek(int year, int mon, int mday) int date2isoyear(int year, int mon, int mday) { - float8 result; int day0, day4, - dayn; + dayn, + week; /* current day */ dayn = date2j(year, mon, mday); @@ -5395,13 +5395,13 @@ date2isoyear(int year, int mon, int mday) year--; } - result = (dayn - (day4 - day0)) / 7 + 1; + week = (dayn - (day4 - day0)) / 7 + 1; /* * Sometimes the last few days in a year will fall into the first week of * the next year, so check for this. 
*/ - if (result >= 52) + if (week >= 52) { day4 = date2j(year + 1, 1, 4); diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index 511dc32d519..d14b1678e7f 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -3081,7 +3081,7 @@ struct config_int ConfigureNamesInt[] = }, &max_slot_wal_keep_size_mb, -1, -1, MAX_KILOBYTES, - check_max_slot_wal_keep_size, NULL, NULL + NULL, NULL, NULL }, { @@ -3100,11 +3100,11 @@ struct config_int ConfigureNamesInt[] = gettext_noop("Sets the duration a replication slot can remain idle before " "it is invalidated."), NULL, - GUC_UNIT_MIN + GUC_UNIT_S }, - &idle_replication_slot_timeout_mins, - 0, 0, INT_MAX / SECS_PER_MINUTE, - check_idle_replication_slot_timeout, NULL, NULL + &idle_replication_slot_timeout_secs, + 0, 0, INT_MAX, + NULL, NULL, NULL }, { diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index 341f88adc87..a9d8293474a 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -342,7 +342,7 @@ # (change requires restart) #wal_keep_size = 0 # in megabytes; 0 disables #max_slot_wal_keep_size = -1 # in megabytes; -1 disables -#idle_replication_slot_timeout = 0 # in minutes; 0 disables +#idle_replication_slot_timeout = 0 # in seconds; 0 disables #wal_sender_timeout = 60s # in milliseconds; 0 disables #track_commit_timestamp = off # collect timestamp of transaction commit # (change requires restart) |
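
Usage notes on the hunks above (examples only; they are not part of the commit).

The gram.y and checkpointer.c changes add a parenthesized option list to CHECKPOINT and move the pg_checkpoint privilege check from standard_ProcessUtility() into the new ExecCheckpoint(). Going by the options parsed there ("mode" with values fast/spread, and the boolean "flush_unlogged"), a hypothetical session would look like:

    -- plain CHECKPOINT keeps the old behavior: a fast (formerly "immediate") checkpoint
    CHECKPOINT;

    -- spread the writes out according to checkpoint_completion_target
    CHECKPOINT (MODE SPREAD);

    -- fast checkpoint that also flushes dirty buffers of unlogged relations
    CHECKPOINT (MODE FAST, FLUSH_UNLOGGED);

As before, the caller needs the privileges of the pg_checkpoint role; only the place where that is enforced has moved.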
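The slot.c, guc_tables.c and postgresql.conf.sample changes switch idle_replication_slot_timeout from minutes (GUC_UNIT_MIN) to seconds (GUC_UNIT_S) and drop its binary-upgrade check_hook. Assuming the usual GUC time-unit handling (not shown in this diff), the value can be given as a bare number of seconds or with an explicit unit:

    ALTER SYSTEM SET idle_replication_slot_timeout = '3600';  -- seconds
    ALTER SYSTEM SET idle_replication_slot_timeout = '1h';    -- equivalent
    SELECT pg_reload_conf();

Setting it to 0 still disables idle-slot invalidation. During binary upgrade, invalidation of logical slots is now skipped directly in slot.c (and max_slot_wal_keep_size is ignored in KeepLogSeg()) instead of being enforced through the removed GUC check_hooks.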
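The timestamp.c hunk only changes the local variable used by date2isoweek()/date2isoyear() from float8 to int; the computed result is unchanged. The edge case the comments describe, where the last days of a year fall into ISO week 1 of the following year, can be seen with a quick query (outside this patch):

    SELECT extract(week FROM date '2014-12-29')    AS isoweek,   -- 1
           extract(isoyear FROM date '2014-12-29') AS isoyear;   -- 2015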