diff --git a/Makefile b/Makefile
index 1431be4ef..5173aa38f 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@ OBJS = src/utils/configuration.o src/utils/json.o src/utils/logger.o \
OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \
src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \
src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \
- src/util.o src/validate.o src/datapagemap.o
+ src/util.o src/validate.o src/datapagemap.o src/catchup.o
# borrowed files
OBJS += src/pg_crc.o src/receivelog.o src/streamutil.o \
diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml
index 740517313..f7814c2d2 100644
--- a/doc/pgprobackup.xml
+++ b/doc/pgprobackup.xml
@@ -143,6 +143,14 @@ doc/src/sgml/pgprobackup.sgml
wal_file_name
option
+
+ pg_probackup
+
+ catchup_mode
+ =path_to_pgdata_on_remote_server
+ =path_to_local_dir
+ option
+
@@ -283,6 +291,11 @@ doc/src/sgml/pgprobackup.sgml
Partial restore: restoring only the specified databases.
+
+
 Catchup: cloning a PostgreSQL instance so that a standby
 server that has fallen behind can catch up with the master.
+
+
To manage backup data, pg_probackup creates a
@@ -1076,7 +1089,8 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup;
mode: ,
,
,
- ,
+ ,
+ , and
.
@@ -1162,7 +1176,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup;
- PTRACK versions lower than 2.0 are deprecated. Postgres Pro Standard and Postgres Pro Enterprise
+ PTRACK versions lower than 2.0 are deprecated and not supported. Postgres Pro Standard and Postgres Pro Enterprise
versions starting with 11.9.1 contain PTRACK 2.0. Upgrade your server to avoid issues in backups
that you will take in future and be sure to take fresh backups of your clusters with the upgraded
PTRACK since the backups taken with PTRACK 1.x might be corrupt.
@@ -1218,34 +1232,6 @@ CREATE EXTENSION ptrack;
-
- For older PostgreSQL versions,
- PTRACK required taking backups in the exclusive mode
- to provide exclusive access to bitmaps with changed blocks.
- To set up PTRACK backups for PostgreSQL 10
- or lower, do the following:
-
-
-
-
- Set the ptrack_enable parameter to
- on.
-
-
-
-
- Grant the right to execute PTRACK
- functions to the backup role
- in every database of the
- cluster:
-
-
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_clear() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_get_and_clear(oid, oid) TO backup;
-
-
-
-
@@ -1459,6 +1445,7 @@ pg_probackup backup -B backup_dir --instance
+
Performing Cluster Verification
@@ -1534,6 +1521,7 @@ pg_probackup checkdb --amcheck --skip-block-validation [connection_
higher cost of CPU, memory, and I/O consumption.
+
Validating a Backup
@@ -2101,6 +2089,7 @@ pg_probackup restore -B backup_dir --instance ,
,
,
+ ,
and
processes can be
executed on several parallel threads. This can significantly
@@ -3418,6 +3407,148 @@ pg_probackup delete -B backup_dir --instance
+
+
+ Cloning PostgreSQL Instance
+
+ pg_probackup can create a copy of a PostgreSQL
+ instance directly, without using the backup catalog. This allows you
+ to add a new standby server in parallel mode or to have a standby
+ server that has fallen behind catch up with the master.
+
+
+
+ Cloning a PostgreSQL instance is different from other pg_probackup
+ operations:
+
+
+
+ The backup catalog is not required.
+
+
+
+
 Only the STREAM WAL delivery mode is supported.
+
+
+
+
+ Copying external directories
+ is not supported.
+
+
+
+
+ No SQL commands involving tablespaces, such as
+ CREATE TABLESPACE/DROP TABLESPACE,
+ can be run simultaneously with catchup.
+
+
+
+
+ catchup takes configuration files, such as
+ postgresql.conf, postgresql.auto.conf,
+ or pg_hba.conf, from the source server and overwrites them
+ on the target server.
+
+
+
+
+
+
+ Before cloning a PostgreSQL instance, set up the source database server as follows:
+
+
+
+ Configure
+ the database cluster for the instance to copy.
+
+
+
+
+ To copy from a remote server, configure the remote mode.
+
+
+
+
+ To use the PTRACK catchup mode, set up PTRACK backups.
+
+
+
+
+
+
+ To clone a PostgreSQL instance, ensure that the source
+ database server is running and accepting connections. Then,
+ on the server with the destination database, run the following command:
+
+
+pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream [connection_options] [remote_options]
+
+
+ Where catchup_mode can take one of the
+ following values: FULL, DELTA, or PTRACK.
+
+
+
+
+ FULL — creates a full copy of the PostgreSQL instance.
+ The destination directory must be empty for this mode.
+
+
+
+
+ DELTA — reads all data files in the data directory and
+ creates an incremental copy for pages that have changed
+ since the destination database was shut down cleanly.
+ For this mode, the destination directory must contain a previous
+ copy of the database that was shut down cleanly.
+
+
+
+
+ PTRACK — tracks page changes on the fly and copies
+ only the pages that have changed since the point of divergence
+ of the source and destination databases.
+ For this mode, the destination directory must contain a previous
+ copy of the database that was shut down cleanly.
+
+
+
+
+ You can use connection_options to specify
+ the connection to the source database cluster. If it is located on a different server,
+ also specify remote_options.
+ If the source database contains tablespaces that must be located in
+ a different directory, additionally specify the --tablespace-mapping
+ option:
+
+pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --tablespace-mapping=OLDDIR=NEWDIR
+
+ To run the catchup command on parallel threads, specify the number
+ of threads with the --threads option:
+
+pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --threads=num_threads
+
+
+
+ For example, assume that a remote standby server with its PostgreSQL instance in the /replica-pgdata data directory has fallen behind. To sync this instance with the one in the /master-pgdata data directory, you can run
+ the catchup command in PTRACK mode on four parallel threads as follows:
+
+pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=PTRACK --remote-host=remote-hostname --remote-user=remote-unix-username -j 4
+
+
+
+ Another example shows how you can add a new remote standby server with the PostgreSQL data directory /replica-pgdata by running the catchup command in FULL mode
+ on four parallel threads:
+
+pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=FULL --remote-host=remote-hostname --remote-user=remote-unix-username -j 4
+
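+
+ To later re-sync that same standby after a clean shutdown, a DELTA catchup follows the same pattern; the sketch below simply swaps the mode, reusing the placeholders from the examples above:
+
+pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=DELTA --remote-host=remote-hostname --remote-user=remote-unix-username -j 4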
+
+
@@ -3576,7 +3707,7 @@ pg_probackup show-config -B backup_dir --instance show
pg_probackup show -B backup_dir
-[--help] [--instance instance_name [-i backup_id | --archive]] [--format=plain|json]
+[--help] [--instance instance_name [-i backup_id | --archive]] [--format=plain|json] [--no-color]
Shows the contents of the backup catalog. If
@@ -3591,6 +3722,8 @@ pg_probackup show -B backup_dir
plain text. You can specify the
--format=json option to get the result
in the JSON format.
+ If the --no-color flag is used,
+ the output is not colored.
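+
+ For example, a plain-text listing without coloring (backup_dir is a placeholder) can be requested as follows:
+
+pg_probackup show -B backup_dir --no-color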
For details on usage, see the sections
@@ -4288,6 +4421,121 @@ pg_probackup archive-get -B backup_dir --instance Archiving Options.
+
+
+ catchup
+
+pg_probackup catchup -b catchup_mode
+--source-pgdata=path_to_pgdata_on_remote_server
+--destination-pgdata=path_to_local_dir
+[--help] [--stream] [-j num_threads]
+[-T OLDDIR=NEWDIR]
+[connection_options] [remote_options]
+
+
+ Creates a copy of a PostgreSQL
+ instance without using the backup catalog.
+
+
+
+
+
+
+
+ Specifies the catchup mode to use. Possible values are:
+
+
+
+
+ FULL — creates a full copy of the PostgreSQL instance.
+
+
+
+
+ DELTA — reads all data files in the data directory and
+ creates an incremental copy for pages that have changed
+ since the destination database was shut down cleanly.
+
+
+
+
+ PTRACK — tracks page changes on the fly and copies
+ only the pages that have changed since the point of divergence
+ of the source and destination databases.
+
+
+
+
+
+
+
+
+
+
+
+ Specifies the path to the data directory of the instance to be copied. The path can be local or remote.
+
+
+
+
+
+
+
+
+ Specifies the path to the local data directory to copy to.
+
+
+
+
+
+
+
+
+ Makes a STREAM backup, which
+ includes all the necessary WAL files by streaming them from
+ the database server via the replication protocol.
+
+
+
+
+
+
+
+
+
+ Sets the number of parallel threads for the
+ catchup process.
+
+
+
+
+
+
+
+
+
+ Relocates the tablespace from the OLDDIR to the NEWDIR
+ directory at the time of recovery. Both OLDDIR and NEWDIR must
+ be absolute paths. If the path contains the equals sign (=),
+ escape it with a backslash. This option can be specified
+ multiple times for multiple tablespaces.
+
+
+
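+
+ For example, a hypothetical old tablespace path containing an equals sign could be escaped as follows (both paths are placeholders):
+
+pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --tablespace-mapping=/tbs\=old=/tbs_new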
+
+
+
+
+
+ Additionally, connection
+ options and remote
+ mode options can be used.
+
+
+ For details on usage, see the section
+ Cloning PostgreSQL Instance.
+
+
Options
@@ -4672,6 +4920,16 @@ pg_probackup archive-get -B backup_dir --instance
+
+
+
+
+
+ Disables coloring for console log messages of warning and error levels.
+
+
+
+
@@ -4820,7 +5078,8 @@ pg_probackup archive-get -B backup_dir --instance Connection Options
You can use these options together with
- and
+
+ , , and
commands.
@@ -5111,6 +5370,7 @@ pg_probackup archive-get -B backup_dir --instance ,
,
,
+ ,
,
, and
commands.
diff --git a/src/archive.c b/src/archive.c
index 6ac1062b8..7bb8c1c03 100644
--- a/src/archive.c
+++ b/src/archive.c
@@ -113,7 +113,7 @@ static parray *setup_push_filelist(const char *archive_status_dir,
* Where archlog_path is $BACKUP_PATH/wal/instance_name
*/
void
-do_archive_push(InstanceConfig *instance, char *wal_file_path,
+do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wal_file_path,
char *wal_file_name, int batch_size, bool overwrite,
bool no_sync, bool no_ready_rename)
{
@@ -148,7 +148,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path,
elog(ERROR, "getcwd() error");
/* verify that archive-push --instance parameter is valid */
- system_id = get_system_identifier(current_dir);
+ system_id = get_system_identifier(current_dir, FIO_DB_HOST);
if (instance->pgdata == NULL)
elog(ERROR, "Cannot read pg_probackup.conf for this instance");
@@ -156,7 +156,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path,
if (system_id != instance->system_identifier)
elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch."
"Instance '%s' should have SYSTEM_ID = " UINT64_FORMAT " instead of " UINT64_FORMAT,
- wal_file_name, instance->name, instance->system_identifier, system_id);
+ wal_file_name, instanceState->instance_name, instance->system_identifier, system_id);
if (instance->compress_alg == PGLZ_COMPRESS)
elog(ERROR, "Cannot use pglz for WAL compression");
@@ -165,7 +165,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path,
join_path_components(archive_status_dir, pg_xlog_dir, "archive_status");
/* Create 'archlog_path' directory. Do nothing if it already exists. */
- //fio_mkdir(instance->arclog_path, DIR_PERMISSION, FIO_BACKUP_HOST);
+ //fio_mkdir(instanceState->instance_wal_subdir_path, DIR_PERMISSION, FIO_BACKUP_HOST);
#ifdef HAVE_LIBZ
if (instance->compress_alg == ZLIB_COMPRESS)
@@ -206,7 +206,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path,
WALSegno *xlogfile = (WALSegno *) parray_get(batch_files, i);
rc = push_file(xlogfile, archive_status_dir,
- pg_xlog_dir, instance->arclog_path,
+ pg_xlog_dir, instanceState->instance_wal_subdir_path,
overwrite, no_sync,
instance->archive_timeout,
no_ready_rename || (strcmp(xlogfile->name, wal_file_name) == 0) ? true : false,
@@ -231,7 +231,7 @@ do_archive_push(InstanceConfig *instance, char *wal_file_path,
archive_push_arg *arg = &(threads_args[i]);
arg->first_filename = wal_file_name;
- arg->archive_dir = instance->arclog_path;
+ arg->archive_dir = instanceState->instance_wal_subdir_path;
arg->pg_xlog_dir = pg_xlog_dir;
arg->archive_status_dir = archive_status_dir;
arg->overwrite = overwrite;
@@ -1009,7 +1009,7 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file,
*/
void
-do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg,
+do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const char *prefetch_dir_arg,
char *wal_file_path, char *wal_file_name, int batch_size,
bool validate_wal)
{
@@ -1047,8 +1047,8 @@ do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg,
join_path_components(absolute_wal_file_path, current_dir, wal_file_path);
/* full filepath to WAL file in archive directory.
- * backup_path/wal/instance_name/000000010000000000000001 */
- join_path_components(backup_wal_file_path, instance->arclog_path, wal_file_name);
+ * $BACKUP_PATH/wal/instance_name/000000010000000000000001 */
+ join_path_components(backup_wal_file_path, instanceState->instance_wal_subdir_path, wal_file_name);
INSTR_TIME_SET_CURRENT(start_time);
if (num_threads > batch_size)
@@ -1099,7 +1099,7 @@ do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg,
* copy requested file directly from archive.
*/
if (!next_wal_segment_exists(tli, segno, prefetch_dir, instance->xlog_seg_size))
- n_fetched = run_wal_prefetch(prefetch_dir, instance->arclog_path,
+ n_fetched = run_wal_prefetch(prefetch_dir, instanceState->instance_wal_subdir_path,
tli, segno, num_threads, false, batch_size,
instance->xlog_seg_size);
@@ -1138,7 +1138,7 @@ do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg,
// rmtree(prefetch_dir, false);
/* prefetch files */
- n_fetched = run_wal_prefetch(prefetch_dir, instance->arclog_path,
+ n_fetched = run_wal_prefetch(prefetch_dir, instanceState->instance_wal_subdir_path,
tli, segno, num_threads, true, batch_size,
instance->xlog_seg_size);
diff --git a/src/backup.c b/src/backup.c
index 83785c1cb..2d834410a 100644
--- a/src/backup.c
+++ b/src/backup.c
@@ -27,18 +27,16 @@
//const char *progname = "pg_probackup";
/* list of files contained in backup */
-static parray *backup_files_list = NULL;
+parray *backup_files_list = NULL;
/* We need critical section for datapagemap_add() in case of using threads */
static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER;
-
+// TODO: move to PGnodeInfo
bool exclusive_backup = false;
/* Is pg_start_backup() was executed */
-static bool backup_in_progress = false;
-/* Is pg_stop_backup() was sent */
-static bool pg_stop_backup_is_sent = false;
+bool backup_in_progress = false;
/*
* Backup routines
@@ -47,16 +45,12 @@ static void backup_cleanup(bool fatal, void *userdata);
static void *backup_files(void *arg);
-static void do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs);
+static void do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
+ PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs);
-static void pg_start_backup(const char *label, bool smooth, pgBackup *backup,
- PGNodeInfo *nodeInfo, PGconn *conn);
static void pg_switch_wal(PGconn *conn);
-static void pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo);
-static XLogRecPtr wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli,
- bool in_prev_segment, bool segment_only,
- int timeout_elevel, bool in_stream_dir, pgBackup *backup);
+static void pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo);
static void check_external_for_tablespaces(parray *external_list,
PGconn *backup_conn);
@@ -66,37 +60,40 @@ static parray *get_database_map(PGconn *pg_startbackup_conn);
static bool pgpro_support(PGconn *conn);
/* Check functions */
-static bool pg_checksum_enable(PGconn *conn);
+static bool pg_is_checksum_enabled(PGconn *conn);
static bool pg_is_in_recovery(PGconn *conn);
static bool pg_is_superuser(PGconn *conn);
static void check_server_version(PGconn *conn, PGNodeInfo *nodeInfo);
static void confirm_block_size(PGconn *conn, const char *name, int blcksz);
static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i);
+static StopBackupCallbackParams stop_callback_params;
+
static void
backup_stopbackup_callback(bool fatal, void *userdata)
{
- PGconn *pg_startbackup_conn = (PGconn *) userdata;
+ StopBackupCallbackParams *st = (StopBackupCallbackParams *) userdata;
/*
* If backup is in progress, notify stop of backup to PostgreSQL
*/
if (backup_in_progress)
{
elog(WARNING, "backup in progress, stop backup");
- pg_stop_backup(NULL, pg_startbackup_conn, NULL); /* don't care about stop_lsn in case of error */
+ /* don't care about stop_lsn in case of error */
+ pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL);
}
}
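
/* StopBackupCallbackParams itself is not shown in this diff; judging by the
 * uses above and in pg_start_backup() below, it presumably holds just the
 * connection and the server version. A sketch:
 *
 * typedef struct
 * {
 *     PGconn *conn;
 *     int     server_version;
 * } StopBackupCallbackParams;
 */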
/*
* Take a backup of a single postgresql instance.
- * Move files from 'pgdata' to a subdirectory in 'backup_path'.
+ * Move files from 'pgdata' to a subdirectory in backup catalog.
*/
static void
-do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs)
+do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
+ PGNodeInfo *nodeInfo, bool no_sync, bool backup_logs)
{
int i;
char external_prefix[MAXPGPATH]; /* Temp value. Used as template */
- char dst_backup_path[MAXPGPATH];
char label[1024];
XLogRecPtr prev_backup_start_lsn = InvalidXLogRecPtr;
@@ -127,10 +124,6 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
check_external_for_tablespaces(external_dirs, backup_conn);
}
- /* Clear ptrack files for not PTRACK backups */
- if (current.backup_mode != BACKUP_MODE_DIFF_PTRACK && nodeInfo->is_ptrack_enable)
- pg_ptrack_clear(backup_conn, nodeInfo->ptrack_version_num);
-
/* notify start of backup to PostgreSQL server */
time2iso(label, lengthof(label), current.start_time, false);
strncat(label, " with pg_probackup", lengthof(label) -
@@ -143,7 +136,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
#if PG_VERSION_NUM >= 90600
current.tli = get_current_timeline(backup_conn);
#else
- current.tli = get_current_timeline_from_control(false);
+ current.tli = get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false);
#endif
/*
@@ -155,7 +148,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
current.backup_mode == BACKUP_MODE_DIFF_DELTA)
{
/* get list of backups already taken */
- backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID);
+ backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
prev_backup = catalog_get_last_data_backup(backup_list, current.tli, current.start_time);
if (prev_backup == NULL)
@@ -170,7 +163,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
{
elog(WARNING, "Failed to obtain current timeline history file via replication protocol");
/* fallback to using archive */
- tli_list = catalog_get_timelines(&instance_config);
+ tli_list = catalog_get_timelines(instanceState, &instance_config);
}
if (parray_num(tli_list) == 0)
@@ -193,11 +186,11 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
if (prev_backup)
{
- if (parse_program_version(prev_backup->program_version) > parse_program_version(PROGRAM_VERSION))
- elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. "
- "pg_probackup do not guarantee to be forward compatible. "
- "Please upgrade pg_probackup binary.",
- PROGRAM_VERSION, base36enc(prev_backup->start_time), prev_backup->program_version);
+ if (parse_program_version(prev_backup->program_version) > parse_program_version(PROGRAM_VERSION))
+ elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. "
+ "pg_probackup do not guarantee to be forward compatible. "
+ "Please upgrade pg_probackup binary.",
+ PROGRAM_VERSION, base36enc(prev_backup->start_time), prev_backup->program_version);
elog(INFO, "Parent backup: %s", base36enc(prev_backup->start_time));
@@ -219,29 +212,14 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
{
XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(backup_conn, nodeInfo);
- if (nodeInfo->ptrack_version_num < 200)
+ // new ptrack (>=2.0) is more robust and checks Start LSN
+ if (ptrack_lsn > prev_backup->start_lsn || ptrack_lsn == InvalidXLogRecPtr)
{
- // backward compatibility kludge: use Stop LSN for ptrack 1.x,
- if (ptrack_lsn > prev_backup->stop_lsn || ptrack_lsn == InvalidXLogRecPtr)
- {
- elog(ERROR, "LSN from ptrack_control %X/%X differs from Stop LSN of previous backup %X/%X.\n"
- "Create new full backup before an incremental one.",
- (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn),
- (uint32) (prev_backup->stop_lsn >> 32),
- (uint32) (prev_backup->stop_lsn));
- }
- }
- else
- {
- // new ptrack is more robust and checks Start LSN
- if (ptrack_lsn > prev_backup->start_lsn || ptrack_lsn == InvalidXLogRecPtr)
- {
- elog(ERROR, "LSN from ptrack_control %X/%X is greater than Start LSN of previous backup %X/%X.\n"
- "Create new full backup before an incremental one.",
- (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn),
- (uint32) (prev_backup->start_lsn >> 32),
- (uint32) (prev_backup->start_lsn));
- }
+ elog(ERROR, "LSN from ptrack_control %X/%X is greater than Start LSN of previous backup %X/%X.\n"
+ "Create new full backup before an incremental one.",
+ (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn),
+ (uint32) (prev_backup->start_lsn >> 32),
+ (uint32) (prev_backup->start_lsn));
}
}
@@ -261,35 +239,37 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
write_backup(¤t, true);
/* In PAGE mode or in ARCHIVE wal-mode wait for current segment */
- if (current.backup_mode == BACKUP_MODE_DIFF_PAGE || !stream_wal)
+ if (current.backup_mode == BACKUP_MODE_DIFF_PAGE || !current.stream)
{
/* Check that archive_dir can be reached */
- if (fio_access(arclog_path, F_OK, FIO_BACKUP_HOST) != 0)
+ if (fio_access(instanceState->instance_wal_subdir_path, F_OK, FIO_BACKUP_HOST) != 0)
elog(ERROR, "WAL archive directory is not accessible \"%s\": %s",
- arclog_path, strerror(errno));
+ instanceState->instance_wal_subdir_path, strerror(errno));
/*
* Do not wait start_lsn for stream backup.
* Because WAL streaming will start after pg_start_backup() in stream
* mode.
*/
- wait_wal_lsn(current.start_lsn, true, current.tli, false, true, ERROR, false, ¤t);
+ wait_wal_lsn(instanceState->instance_wal_subdir_path, current.start_lsn, true, current.tli, false, true, ERROR, false);
}
/* start stream replication */
- if (stream_wal)
+ if (current.stream)
{
- join_path_components(dst_backup_path, current.database_dir, PG_XLOG_DIR);
- fio_mkdir(dst_backup_path, DIR_PERMISSION, FIO_BACKUP_HOST);
+ char stream_xlog_path[MAXPGPATH];
+
+ join_path_components(stream_xlog_path, current.database_dir, PG_XLOG_DIR);
+ fio_mkdir(stream_xlog_path, DIR_PERMISSION, FIO_BACKUP_HOST);
- start_WAL_streaming(backup_conn, dst_backup_path, &instance_config.conn_opt,
+ start_WAL_streaming(backup_conn, stream_xlog_path, &instance_config.conn_opt,
current.start_lsn, current.tli);
/* Make sure that WAL streaming is working
* PAGE backup in stream mode is waited twice, first for
* segment in WAL archive and then for streamed segment
*/
- wait_wal_lsn(current.start_lsn, true, current.tli, false, true, ERROR, true, ¤t);
+ wait_wal_lsn(stream_xlog_path, current.start_lsn, true, current.tli, false, true, ERROR, true);
}
/* initialize backup's file list */
@@ -336,23 +316,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
elog(ERROR, "PGDATA is almost empty. Either it was concurrently deleted or "
"pg_probackup do not possess sufficient permissions to list PGDATA content");
- /* Calculate pgdata_bytes */
- for (i = 0; i < parray_num(backup_files_list); i++)
- {
- pgFile *file = (pgFile *) parray_get(backup_files_list, i);
-
- if (file->external_dir_num != 0)
- continue;
-
- if (S_ISDIR(file->mode))
- {
- current.pgdata_bytes += 4096;
- continue;
- }
-
- current.pgdata_bytes += file->size;
- }
-
+ current.pgdata_bytes += calculate_datasize_of_filelist(backup_files_list);
pretty_size(current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes));
elog(INFO, "PGDATA size: %s", pretty_bytes);
@@ -399,7 +363,8 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
* reading WAL segments present in archives up to the point
* where this backup has started.
*/
- pagemap_isok = extractPageMap(arclog_path, instance_config.xlog_seg_size,
+ pagemap_isok = extractPageMap(instanceState->instance_wal_subdir_path,
+ instance_config.xlog_seg_size,
prev_backup->start_lsn, prev_backup->tli,
current.start_lsn, current.tli, tli_list);
}
@@ -408,15 +373,10 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
/*
* Build the page map from ptrack information.
*/
- if (nodeInfo->ptrack_version_num >= 200)
- make_pagemap_from_ptrack_2(backup_files_list, backup_conn,
- nodeInfo->ptrack_schema,
- nodeInfo->ptrack_version_num,
- prev_backup_start_lsn);
- else if (nodeInfo->ptrack_version_num == 105 ||
- nodeInfo->ptrack_version_num == 106 ||
- nodeInfo->ptrack_version_num == 107)
- make_pagemap_from_ptrack_1(backup_files_list, backup_conn);
+ make_pagemap_from_ptrack_2(backup_files_list, backup_conn,
+ nodeInfo->ptrack_schema,
+ nodeInfo->ptrack_version_num,
+ prev_backup_start_lsn);
}
time(&end_time);
@@ -431,7 +391,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
}
/*
- * Make directories before backup and setup threads at the same time
+ * Make directories before backup
*/
for (i = 0; i < parray_num(backup_files_list); i++)
{
@@ -456,10 +416,11 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
fio_mkdir(dirpath, DIR_PERMISSION, FIO_BACKUP_HOST);
}
- /* setup threads */
- pg_atomic_clear_flag(&file->lock);
}
+ /* setup thread locks */
+ pfilearray_clear_locks(backup_files_list);
+
/* Sort by size for load balancing */
parray_qsort(backup_files_list, pgFileCompareSize);
/* Sort the array for binary search */
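
/* pfilearray_clear_locks() is new in this patch and not shown here; a minimal
 * sketch, assuming it simply wraps the per-file flag clearing removed above:
 *
 * void
 * pfilearray_clear_locks(parray *file_list)
 * {
 *     int i;
 *     for (i = 0; i < parray_num(file_list); i++)
 *     {
 *         pgFile *file = (pgFile *) parray_get(file_list, i);
 *         pg_atomic_clear_flag(&file->lock);
 *     }
 * }
 */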
@@ -490,8 +451,6 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
arg->files_list = backup_files_list;
arg->prev_filelist = prev_backup_filelist;
arg->prev_start_lsn = prev_backup_start_lsn;
- arg->conn_arg.conn = NULL;
- arg->conn_arg.cancel_conn = NULL;
arg->hdr_map = &(current.hdr_map);
arg->thread_num = i+1;
/* By default there are some error */
@@ -536,7 +495,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
}
/* Notify end of backup */
- pg_stop_backup(¤t, backup_conn, nodeInfo);
+ pg_stop_backup(instanceState, ¤t, backup_conn, nodeInfo);
/* In case of backup from replica >= 9.6 we must fix minRecPoint,
* First we must find pg_control in backup_files_list.
@@ -706,7 +665,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo)
/* Confirm that this server version is supported */
check_server_version(cur_conn, nodeInfo);
- if (pg_checksum_enable(cur_conn))
+ if (pg_is_checksum_enabled(cur_conn))
current.checksum_version = 1;
else
current.checksum_version = 0;
@@ -723,7 +682,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo)
if (nodeInfo->is_superuser)
elog(WARNING, "Current PostgreSQL role is superuser. "
- "It is not recommended to run backup or checkdb as superuser.");
+ "It is not recommended to run pg_probackup under superuser.");
strlcpy(current.server_version, nodeInfo->server_version_str,
sizeof(current.server_version));
@@ -735,7 +694,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo)
* Entry point of pg_probackup BACKUP subcommand.
*/
int
-do_backup(pgSetBackupParams *set_backup_params,
+do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params,
bool no_validate, bool no_sync, bool backup_logs)
{
PGconn *backup_conn = NULL;
@@ -751,7 +710,7 @@ do_backup(pgSetBackupParams *set_backup_params,
current.external_dir_str = instance_config.external_dir_str;
/* Create backup directory and BACKUP_CONTROL_FILE */
- pgBackupCreateDir(¤t, backup_instance_path);
+ pgBackupCreateDir(¤t, instanceState->instance_backup_subdir_path);
if (!instance_config.pgdata)
elog(ERROR, "required parameter not specified: PGDATA "
@@ -769,7 +728,7 @@ do_backup(pgSetBackupParams *set_backup_params,
elog(INFO, "Backup start, pg_probackup version: %s, instance: %s, backup ID: %s, backup mode: %s, "
"wal mode: %s, remote: %s, compress-algorithm: %s, compress-level: %i",
- PROGRAM_VERSION, instance_name, base36enc(current.backup_id), pgBackupGetBackupMode(¤t),
+ PROGRAM_VERSION, instanceState->instance_name, base36enc(current.backup_id), pgBackupGetBackupMode(¤t, false),
current.stream ? "STREAM" : "ARCHIVE", IsSshProtocol() ? "true" : "false",
deparse_compress_alg(current.compress_alg), current.compress_level);
@@ -812,15 +771,16 @@ do_backup(pgSetBackupParams *set_backup_params,
// elog(WARNING, "ptrack_version_num %d", ptrack_version_num);
if (nodeInfo.ptrack_version_num > 0)
- nodeInfo.is_ptrack_enable = pg_ptrack_enable(backup_conn, nodeInfo.ptrack_version_num);
+ nodeInfo.is_ptrack_enabled = pg_is_ptrack_enabled(backup_conn, nodeInfo.ptrack_version_num);
if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
{
+ /* ptrack_version_num < 2.0 was already checked in get_ptrack_version() */
if (nodeInfo.ptrack_version_num == 0)
elog(ERROR, "This PostgreSQL instance does not support ptrack");
else
{
- if (!nodeInfo.is_ptrack_enable)
+ if (!nodeInfo.is_ptrack_enabled)
elog(ERROR, "Ptrack is disabled");
}
}
@@ -835,7 +795,7 @@ do_backup(pgSetBackupParams *set_backup_params,
add_note(¤t, set_backup_params->note);
/* backup data */
- do_backup_instance(backup_conn, &nodeInfo, no_sync, backup_logs);
+ do_backup_pg(instanceState, backup_conn, &nodeInfo, no_sync, backup_logs);
pgut_atexit_pop(backup_cleanup, NULL);
/* compute size of wal files of this backup stored in the archive */
@@ -890,7 +850,7 @@ do_backup(pgSetBackupParams *set_backup_params,
* which are expired according to retention policies
*/
if (delete_expired || merge_expired || delete_wal)
- do_retention(no_validate, no_sync);
+ do_retention(instanceState, no_validate, no_sync);
return 0;
}
@@ -978,12 +938,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo)
* All system identifiers must be equal.
*/
void
-check_system_identifiers(PGconn *conn, char *pgdata)
+check_system_identifiers(PGconn *conn, const char *pgdata)
{
uint64 system_id_conn;
uint64 system_id_pgdata;
- system_id_pgdata = get_system_identifier(pgdata);
+ system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST);
system_id_conn = get_remote_system_identifier(conn);
/* for checkdb check only system_id_pgdata and system_id_conn */
@@ -1036,7 +996,7 @@ confirm_block_size(PGconn *conn, const char *name, int blcksz)
/*
* Notify start of backup to PostgreSQL server.
*/
-static void
+void
pg_start_backup(const char *label, bool smooth, pgBackup *backup,
PGNodeInfo *nodeInfo, PGconn *conn)
{
@@ -1044,7 +1004,6 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup,
const char *params[2];
uint32 lsn_hi;
uint32 lsn_lo;
-
params[0] = label;
elog(INFO, "wait for pg_start_backup()");
@@ -1067,7 +1026,9 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup,
* is necessary to call pg_stop_backup() in backup_cleanup().
*/
backup_in_progress = true;
- pgut_atexit_push(backup_stopbackup_callback, conn);
+ stop_callback_params.conn = conn;
+ stop_callback_params.server_version = nodeInfo->server_version;
+ pgut_atexit_push(backup_stopbackup_callback, &stop_callback_params);
/* Extract timeline and LSN from results of pg_start_backup() */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
@@ -1076,7 +1037,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup,
PQclear(res);
- if ((!stream_wal || current.backup_mode == BACKUP_MODE_DIFF_PAGE) &&
+ if ((!backup->stream || backup->backup_mode == BACKUP_MODE_DIFF_PAGE) &&
!backup->from_replica &&
!(nodeInfo->server_version < 90600 &&
!nodeInfo->is_superuser))
@@ -1093,14 +1054,12 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup,
* Switch to a new WAL segment. It should be called only for master.
* For PG 9.5 it should be called only if pguser is superuser.
*/
-static void
+void
pg_switch_wal(PGconn *conn)
{
PGresult *res;
- /* Remove annoying NOTICE messages generated by backend */
- res = pgut_execute(conn, "SET client_min_messages = warning;", 0, NULL);
- PQclear(res);
+ pg_silent_client_messages(conn);
#if PG_VERSION_NUM >= 100000
res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_wal()", 0, NULL);
@@ -1197,7 +1156,7 @@ get_database_map(PGconn *conn)
/* Check if ptrack is enabled in target instance */
static bool
-pg_checksum_enable(PGconn *conn)
+pg_is_checksum_enabled(PGconn *conn)
{
PGresult *res_db;
@@ -1263,7 +1222,7 @@ pg_is_superuser(PGconn *conn)
* previous segment.
*
* Flag 'in_stream_dir' determine whether we looking for WAL in 'pg_wal' directory or
- * in archive. Do note, that we cannot rely sorely on global variable 'stream_wal' because,
+ * in archive. Note that we cannot rely solely on the global variable 'stream_wal' (current.stream) because,
* for example, PAGE backup must(!) look for start_lsn in archive regardless of wal_mode.
*
* 'timeout_elevel' determine the elevel for timeout elog message. If elevel lighter than
@@ -1272,15 +1231,13 @@ pg_is_superuser(PGconn *conn)
* Returns target LSN if such is found, failing that returns LSN of record prior to target LSN.
* Returns InvalidXLogRecPtr if 'segment_only' flag is used.
*/
-static XLogRecPtr
-wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli,
+XLogRecPtr
+wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli,
bool in_prev_segment, bool segment_only,
- int timeout_elevel, bool in_stream_dir, pgBackup *backup)
+ int timeout_elevel, bool in_stream_dir)
{
XLogSegNo targetSegNo;
- char pg_wal_dir[MAXPGPATH];
char wal_segment_path[MAXPGPATH],
- *wal_segment_dir,
wal_segment[MAXFNAMELEN];
bool file_exists = false;
uint32 try_count = 0,
@@ -1298,6 +1255,7 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli,
GetXLogFileName(wal_segment, tli, targetSegNo,
instance_config.xlog_seg_size);
+ join_path_components(wal_segment_path, wal_segment_dir, wal_segment);
/*
* In pg_start_backup we wait for 'target_lsn' in 'pg_wal' directory if it is
* stream and non-page backup. Page backup needs archived WAL files, so we
@@ -1305,17 +1263,6 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli,
*
* In pg_stop_backup it depends only on stream_wal.
*/
- if (in_stream_dir)
- {
- join_path_components(pg_wal_dir, backup->database_dir, PG_XLOG_DIR);
- join_path_components(wal_segment_path, pg_wal_dir, wal_segment);
- wal_segment_dir = pg_wal_dir;
- }
- else
- {
- join_path_components(wal_segment_path, arclog_path, wal_segment);
- wal_segment_dir = arclog_path; /* global var */
- }
/* TODO: remove this in 3.0 (it is a cludge against some old bug with archive_timeout) */
if (instance_config.archive_timeout > 0)
@@ -1421,7 +1368,7 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli,
wal_delivery_str, wal_segment_path);
}
- if (!stream_wal && is_start_lsn && try_count == 30)
+ if (!current.stream && is_start_lsn && try_count == 30)
elog(WARNING, "By default pg_probackup assume WAL delivery method to be ARCHIVE. "
"If continuous archiving is not set up, use '--stream' option to make autonomous backup. "
"Otherwise check that continuous archiving works correctly.");
@@ -1446,481 +1393,529 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli,
}
/*
- * Notify end of backup to PostgreSQL server.
+ * Check stop_lsn (returned from pg_stop_backup()) and update backup->stop_lsn
*/
-static void
-pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn,
- PGNodeInfo *nodeInfo)
+void
+wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup)
{
- PGconn *conn;
- PGresult *res;
- PGresult *tablespace_map_content = NULL;
- uint32 lsn_hi;
- uint32 lsn_lo;
- //XLogRecPtr restore_lsn = InvalidXLogRecPtr;
- int pg_stop_backup_timeout = 0;
- char path[MAXPGPATH];
- char backup_label[MAXPGPATH];
- FILE *fp;
- pgFile *file;
- size_t len;
- char *val = NULL;
- char *stop_backup_query = NULL;
- bool stop_lsn_exists = false;
- XLogRecPtr stop_backup_lsn_tmp = InvalidXLogRecPtr;
+ bool stop_lsn_exists = false;
- /*
- * We will use this values if there are no transactions between start_lsn
- * and stop_lsn.
+ /* It is ok for replica to return invalid STOP LSN
+ * UPD: Apparently it is ok even for a master.
*/
- time_t recovery_time;
- TransactionId recovery_xid;
+ if (!XRecOffIsValid(stop_lsn))
+ {
+ XLogSegNo segno = 0;
+ XLogRecPtr lsn_tmp = InvalidXLogRecPtr;
- if (!backup_in_progress)
- elog(ERROR, "backup is not in progress");
+ /*
+ * Even though the value is invalid, it's expected postgres behaviour
+ * and we're trying to fix it below.
+ */
+ elog(LOG, "Invalid offset in stop_lsn value %X/%X, trying to fix",
+ (uint32) (stop_lsn >> 32), (uint32) (stop_lsn));
- conn = pg_startbackup_conn;
+ /*
+ * Note: even with gdb it is very hard to produce automated tests for
+ * contrecord + invalid LSN, so emulate it for manual testing.
+ */
+ //lsn = lsn - XLOG_SEG_SIZE;
+ //elog(WARNING, "New Invalid stop_backup_lsn value %X/%X",
+ // (uint32) (stop_lsn >> 32), (uint32) (stop_lsn));
- /* Remove annoying NOTICE messages generated by backend */
- res = pgut_execute(conn, "SET client_min_messages = warning;",
- 0, NULL);
- PQclear(res);
+ GetXLogSegNo(stop_lsn, segno, instance_config.xlog_seg_size);
- /* Make proper timestamp format for parse_time() */
- res = pgut_execute(conn, "SET datestyle = 'ISO, DMY';", 0, NULL);
- PQclear(res);
+ /*
+ * Note, that there is no guarantee that corresponding WAL file even exists.
+ * Replica may return LSN from future and keep staying in present.
+ * Or it can return invalid LSN.
+ *
+ * That's bad, since we want to get real LSN to save it in backup label file
+ * and to use it in WAL validation.
+ *
+ * So we try to do the following:
+ * 1. Wait 'archive_timeout' seconds for segment containing stop_lsn and
+ * look for the first valid record in it.
+ * It solves the problem of occasional invalid LSN on write-busy system.
+ * 2. Failing that, look for record in previous segment with endpoint
+ * equal or greater than stop_lsn. It may(!) solve the problem of invalid LSN
+ * on write-idle system. If that fails too, error out.
+ */
- /* Create restore point
- * Only if backup is from master.
- * For PG 9.5 create restore point only if pguser is superuser.
- */
- if (backup != NULL && !backup->from_replica &&
- !(nodeInfo->server_version < 90600 &&
- !nodeInfo->is_superuser))
- {
- const char *params[1];
- char name[1024];
+ /* stop_lsn is pointing to a 0 byte of xlog segment */
+ if (stop_lsn % instance_config.xlog_seg_size == 0)
+ {
+ /* Wait for segment with current stop_lsn, it is ok for it to never arrive */
+ wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli,
+ false, true, WARNING, backup->stream);
+
+ /* Get the first record in segment with current stop_lsn */
+ lsn_tmp = get_first_record_lsn(xlog_path, segno, backup->tli,
+ instance_config.xlog_seg_size,
+ instance_config.archive_timeout);
+
+ /* Check that returned LSN is valid and greater than stop_lsn */
+ if (XLogRecPtrIsInvalid(lsn_tmp) ||
+ !XRecOffIsValid(lsn_tmp) ||
+ lsn_tmp < stop_lsn)
+ {
+ /* Backup from master should error out here */
+ if (!backup->from_replica)
+ elog(ERROR, "Failed to get next WAL record after %X/%X",
+ (uint32) (stop_lsn >> 32),
+ (uint32) (stop_lsn));
+
+ /* No luck, falling back to looking up for previous record */
+ elog(WARNING, "Failed to get next WAL record after %X/%X, "
+ "looking for previous WAL record",
+ (uint32) (stop_lsn >> 32),
+ (uint32) (stop_lsn));
+
+ /* Despite looking for the previous record, there is no guarantee of success
+ * because the previous record can be the contrecord.
+ */
+ lsn_tmp = wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli,
+ true, false, ERROR, backup->stream);
- snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
- base36enc(backup->start_time));
- params[0] = name;
+ /* sanity */
+ if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp))
+ elog(ERROR, "Failed to get WAL record prior to %X/%X",
+ (uint32) (stop_lsn >> 32),
+ (uint32) (stop_lsn));
+ }
+ }
+ /* stop lsn is aligned to xlog block size, just find next lsn */
+ else if (stop_lsn % XLOG_BLCKSZ == 0)
+ {
+ /* Wait for segment with current stop_lsn */
+ wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli,
+ false, true, ERROR, backup->stream);
+
+ /* Get the next closest record in segment with current stop_lsn */
+ lsn_tmp = get_next_record_lsn(xlog_path, segno, backup->tli,
+ instance_config.xlog_seg_size,
+ instance_config.archive_timeout,
+ stop_lsn);
+
+ /* sanity */
+ if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp))
+ elog(ERROR, "Failed to get WAL record next to %X/%X",
+ (uint32) (stop_lsn >> 32),
+ (uint32) (stop_lsn));
+ }
+ /* PostgreSQL returned something very illegal as STOP_LSN, error out */
+ else
+ elog(ERROR, "Invalid stop_backup_lsn value %X/%X",
+ (uint32) (stop_lsn >> 32), (uint32) (stop_lsn));
- res = pgut_execute(conn, "SELECT pg_catalog.pg_create_restore_point($1)",
- 1, params);
- PQclear(res);
+ /* Setting stop_backup_lsn will set stop point for streaming */
+ stop_backup_lsn = lsn_tmp;
+ stop_lsn_exists = true;
}
+ elog(LOG, "stop_lsn: %X/%X",
+ (uint32) (stop_lsn >> 32), (uint32) (stop_lsn));
+
/*
- * send pg_stop_backup asynchronously because we could came
- * here from backup_cleanup() after some error caused by
- * postgres archive_command problem and in this case we will
- * wait for pg_stop_backup() forever.
+ * Wait for stop_lsn to be archived or streamed.
+ * If replica returned valid STOP_LSN of not actually existing record,
+ * look for previous record with endpoint >= STOP_LSN.
*/
+ if (!stop_lsn_exists)
+ stop_backup_lsn = wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli,
+ false, false, ERROR, backup->stream);
- if (!pg_stop_backup_is_sent)
- {
- bool sent = false;
+ backup->stop_lsn = stop_backup_lsn;
+}
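
/* Typical call site (a sketch): after pg_stop_backup_consume() has filled the
 * result structure, the caller presumably finalizes the stop LSN like this
 * (xlog_path and stop_backup_result as declared in pg_stop_backup() below):
 *
 * wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup);
 */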
- if (!exclusive_backup)
- {
+/* Remove annoying NOTICE messages generated by backend */
+void
+pg_silent_client_messages(PGconn *conn)
+{
+ PGresult *res;
+ res = pgut_execute(conn, "SET client_min_messages = warning;",
+ 0, NULL);
+ PQclear(res);
+}
+
+void
+pg_create_restore_point(PGconn *conn, time_t backup_start_time)
+{
+ PGresult *res;
+ const char *params[1];
+ char name[1024];
+
+ snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
+ base36enc(backup_start_time));
+ params[0] = name;
+
+ res = pgut_execute(conn, "SELECT pg_catalog.pg_create_restore_point($1)",
+ 1, params);
+ PQclear(res);
+}
+
+void
+pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text)
+{
+ static const char
+ stop_exclusive_backup_query[] =
/*
* Stop the non-exclusive backup. Besides stop_lsn it returns from
* pg_stop_backup(false) copy of the backup label and tablespace map
* so they can be written to disk by the caller.
- * In case of backup from replica >= 9.6 we do not trust minRecPoint
- * and stop_backup LSN, so we use latest replayed LSN as STOP LSN.
+ * TODO, question: add NULLs as backup_label and tablespace_map?
*/
+ "SELECT"
+ " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
+ " current_timestamp(0)::timestamptz,"
+ " pg_catalog.pg_stop_backup() as lsn",
+ stop_backup_on_master_query[] =
+ "SELECT"
+ " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
+ " current_timestamp(0)::timestamptz,"
+ " lsn,"
+ " labelfile,"
+ " spcmapfile"
+ " FROM pg_catalog.pg_stop_backup(false, false)",
+ stop_backup_on_master_before10_query[] =
+ "SELECT"
+ " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
+ " current_timestamp(0)::timestamptz,"
+ " lsn,"
+ " labelfile,"
+ " spcmapfile"
+ " FROM pg_catalog.pg_stop_backup(false)",
+ /*
+ * In case of backup from replica >= 9.6 we do not trust minRecPoint
+ * and stop_backup LSN, so we use latest replayed LSN as STOP LSN.
+ */
+ stop_backup_on_replica_query[] =
+ "SELECT"
+ " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
+ " current_timestamp(0)::timestamptz,"
+ " pg_catalog.pg_last_wal_replay_lsn(),"
+ " labelfile,"
+ " spcmapfile"
+ " FROM pg_catalog.pg_stop_backup(false, false)",
+ stop_backup_on_replica_before10_query[] =
+ "SELECT"
+ " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
+ " current_timestamp(0)::timestamptz,"
+ " pg_catalog.pg_last_xlog_replay_location(),"
+ " labelfile,"
+ " spcmapfile"
+ " FROM pg_catalog.pg_stop_backup(false)";
+
+ const char * const stop_backup_query =
+ is_exclusive ?
+ stop_exclusive_backup_query :
+ server_version >= 100000 ?
+ (is_started_on_replica ?
+ stop_backup_on_replica_query :
+ stop_backup_on_master_query
+ ) :
+ (is_started_on_replica ?
+ stop_backup_on_replica_before10_query :
+ stop_backup_on_master_before10_query
+ );
+ bool sent = false;
+
+ /* Make proper timestamp format for parse_time(recovery_time) */
+ pgut_execute(conn, "SET datestyle = 'ISO, DMY';", 0, NULL);
+ // TODO: check result
- /* current is used here because of cleanup */
- if (current.from_replica)
- stop_backup_query = "SELECT"
- " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
- " current_timestamp(0)::timestamptz,"
-#if PG_VERSION_NUM >= 100000
- " pg_catalog.pg_last_wal_replay_lsn(),"
-#else
- " pg_catalog.pg_last_xlog_replay_location(),"
-#endif
- " labelfile,"
- " spcmapfile"
-#if PG_VERSION_NUM >= 100000
- " FROM pg_catalog.pg_stop_backup(false, false)";
-#else
- " FROM pg_catalog.pg_stop_backup(false)";
-#endif
- else
- stop_backup_query = "SELECT"
- " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
- " current_timestamp(0)::timestamptz,"
- " lsn,"
- " labelfile,"
- " spcmapfile"
-#if PG_VERSION_NUM >= 100000
- " FROM pg_catalog.pg_stop_backup(false, false)";
-#else
- " FROM pg_catalog.pg_stop_backup(false)";
-#endif
-
- }
- else
- {
- stop_backup_query = "SELECT"
- " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
- " current_timestamp(0)::timestamptz,"
- " pg_catalog.pg_stop_backup() as lsn";
- }
-
- sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING);
- pg_stop_backup_is_sent = true;
- if (!sent)
- elog(ERROR, "Failed to send pg_stop_backup query");
- }
+ /*
+ * send pg_stop_backup asynchronously because we could come
+ * here from backup_cleanup() after some error caused by
+ * a postgres archive_command problem, and in this case we would
+ * wait for pg_stop_backup() forever.
+ */
+ sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING);
+ if (!sent)
+ elog(ERROR, "Failed to send pg_stop_backup query");
/* After we have sent pg_stop_backup, we don't need this callback anymore */
- pgut_atexit_pop(backup_stopbackup_callback, pg_startbackup_conn);
+ pgut_atexit_pop(backup_stopbackup_callback, &stop_callback_params);
- /*
- * Wait for the result of pg_stop_backup(), but no longer than
- * archive_timeout seconds
- */
- if (pg_stop_backup_is_sent && !in_cleanup)
- {
- int timeout = ARCHIVE_TIMEOUT_DEFAULT;
- res = NULL;
+ if (query_text)
+ *query_text = pgut_strdup(stop_backup_query);
+}
- /* kludge against some old bug in archive_timeout. TODO: remove in 3.0.0 */
- if (instance_config.archive_timeout > 0)
- timeout = instance_config.archive_timeout;
+/*
+ * pg_stop_backup_consume -- get 'pg_stop_backup' query results
+ * side effects:
+ * - allocates memory for tablespace_map and backup_label contents, so it must be freed by the caller (if it is not NULL)
+ * parameters:
+ * -
+ */
+void
+pg_stop_backup_consume(PGconn *conn, int server_version,
+ bool is_exclusive, uint32 timeout, const char *query_text,
+ PGStopBackupResult *result)
+{
+ PGresult *query_result;
+ uint32 pg_stop_backup_timeout = 0;
+ enum stop_backup_query_result_column_numbers {
+ recovery_xid_colno = 0,
+ recovery_time_colno,
+ lsn_colno,
+ backup_label_colno,
+ tablespace_map_colno
+ };
+
+ /* and now wait */
+ while (1)
+ {
+ if (!PQconsumeInput(conn))
+ elog(ERROR, "pg_stop backup() failed: %s",
+ PQerrorMessage(conn));
- while (1)
+ if (PQisBusy(conn))
{
- if (!PQconsumeInput(conn))
- elog(ERROR, "pg_stop backup() failed: %s",
- PQerrorMessage(conn));
+ pg_stop_backup_timeout++;
+ sleep(1);
- if (PQisBusy(conn))
+ if (interrupted)
{
- pg_stop_backup_timeout++;
- sleep(1);
-
- if (interrupted)
- {
- pgut_cancel(conn);
- elog(ERROR, "interrupted during waiting for pg_stop_backup");
- }
+ pgut_cancel(conn);
+ elog(ERROR, "interrupted during waiting for pg_stop_backup");
+ }
- if (pg_stop_backup_timeout == 1)
- elog(INFO, "wait for pg_stop_backup()");
+ if (pg_stop_backup_timeout == 1)
+ elog(INFO, "wait for pg_stop_backup()");
- /*
- * If postgres haven't answered in archive_timeout seconds,
- * send an interrupt.
- */
- if (pg_stop_backup_timeout > timeout)
- {
- pgut_cancel(conn);
- elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout);
- }
- }
- else
+ /*
+ * If postgres haven't answered in archive_timeout seconds,
+ * send an interrupt.
+ */
+ if (pg_stop_backup_timeout > timeout)
{
- res = PQgetResult(conn);
- break;
+ pgut_cancel(conn);
+ elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout);
}
}
-
- /* Check successfull execution of pg_stop_backup() */
- if (!res)
- elog(ERROR, "pg_stop backup() failed");
else
{
- switch (PQresultStatus(res))
- {
- /*
- * We should expect only PGRES_TUPLES_OK since pg_stop_backup
- * returns tuples.
- */
- case PGRES_TUPLES_OK:
- break;
- default:
- elog(ERROR, "query failed: %s query was: %s",
- PQerrorMessage(conn), stop_backup_query);
- }
- elog(INFO, "pg_stop backup() successfully executed");
+ query_result = PQgetResult(conn);
+ break;
}
+ }
/* Check successful execution of pg_stop_backup() */
+ if (!query_result)
+ elog(ERROR, "pg_stop_backup() failed");
+ else
+ {
+ switch (PQresultStatus(query_result))
+ {
+ /*
+ * We should expect only PGRES_TUPLES_OK since pg_stop_backup
+ * returns tuples.
+ */
+ case PGRES_TUPLES_OK:
+ break;
+ default:
+ elog(ERROR, "query failed: %s query was: %s",
+ PQerrorMessage(conn), query_text);
+ }
backup_in_progress = false;
+ elog(INFO, "pg_stop backup() successfully executed");
+ }
+
+ /* get results and fill result structure */
+ /* get&check recovery_xid */
+ if (sscanf(PQgetvalue(query_result, 0, recovery_xid_colno), XID_FMT, &result->snapshot_xid) != 1)
+ elog(ERROR,
+ "result of txid_snapshot_xmax() is invalid: %s",
+ PQgetvalue(query_result, 0, recovery_xid_colno));
+
+ /* get&check recovery_time */
+ if (!parse_time(PQgetvalue(query_result, 0, recovery_time_colno), &result->invocation_time, true))
+ elog(ERROR,
+ "result of current_timestamp is invalid: %s",
+ PQgetvalue(query_result, 0, recovery_time_colno));
+
+ /* get stop_backup_lsn */
+ {
+ uint32 lsn_hi;
+ uint32 lsn_lo;
// char *target_lsn = "2/F578A000";
// XLogDataFromLSN(target_lsn, &lsn_hi, &lsn_lo);
/* Extract timeline and LSN from results of pg_stop_backup() */
- XLogDataFromLSN(PQgetvalue(res, 0, 2), &lsn_hi, &lsn_lo);
+ XLogDataFromLSN(PQgetvalue(query_result, 0, lsn_colno), &lsn_hi, &lsn_lo);
/* Calculate LSN */
- stop_backup_lsn_tmp = ((uint64) lsn_hi) << 32 | lsn_lo;
-
- /* It is ok for replica to return invalid STOP LSN
- * UPD: Apparently it is ok even for a master.
- */
- if (!XRecOffIsValid(stop_backup_lsn_tmp))
- {
- char *xlog_path,
- stream_xlog_path[MAXPGPATH];
- XLogSegNo segno = 0;
- XLogRecPtr lsn_tmp = InvalidXLogRecPtr;
+ result->lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
+ }
- /*
- * Even though the value is invalid, it's expected postgres behaviour
- * and we're trying to fix it below.
- */
- elog(LOG, "Invalid offset in stop_lsn value %X/%X, trying to fix",
- (uint32) (stop_backup_lsn_tmp >> 32), (uint32) (stop_backup_lsn_tmp));
+ /* get backup_label_content */
+ result->backup_label_content = NULL;
+ // if (!PQgetisnull(query_result, 0, backup_label_colno))
+ if (!is_exclusive)
+ {
+ result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno);
+ if (result->backup_label_content_len > 0)
+ result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno),
+ result->backup_label_content_len);
+ } else {
+ result->backup_label_content_len = 0;
+ }
- /*
- * Note: even with gdb it is very hard to produce automated tests for
- * contrecord + invalid LSN, so emulate it for manual testing.
- */
- //stop_backup_lsn_tmp = stop_backup_lsn_tmp - XLOG_SEG_SIZE;
- //elog(WARNING, "New Invalid stop_backup_lsn value %X/%X",
- // (uint32) (stop_backup_lsn_tmp >> 32), (uint32) (stop_backup_lsn_tmp));
+ /* get tablespace_map_content */
+ result->tablespace_map_content = NULL;
+ // if (!PQgetisnull(query_result, 0, tablespace_map_colno))
+ if (!is_exclusive)
+ {
+ result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno);
+ if (result->tablespace_map_content_len > 0)
+ result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno),
+ result->tablespace_map_content_len);
+ } else {
+ result->tablespace_map_content_len = 0;
+ }
+}
- if (stream_wal)
- {
- pgBackupGetPath2(backup, stream_xlog_path,
- lengthof(stream_xlog_path),
- DATABASE_DIR, PG_XLOG_DIR);
- xlog_path = stream_xlog_path;
- }
- else
- xlog_path = arclog_path;
+/*
+ * helper routine used to write backup_label and tablespace_map in pg_stop_backup()
+ */
+void
+pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename,
+ const void *data, size_t len, parray *file_list)
+{
+ FILE *fp;
+ pgFile *file;
+ char full_filename[MAXPGPATH];
+
+ join_path_components(full_filename, path, filename);
+ fp = fio_fopen(full_filename, PG_BINARY_W, FIO_BACKUP_HOST);
+ if (fp == NULL)
+ elog(ERROR, "can't open %s file \"%s\": %s",
+ error_msg_filename, full_filename, strerror(errno));
+
+ if (fio_fwrite(fp, data, len) != len ||
+ fio_fflush(fp) != 0 ||
+ fio_fclose(fp))
+ elog(ERROR, "can't write %s file \"%s\": %s",
+ error_msg_filename, full_filename, strerror(errno));
- GetXLogSegNo(stop_backup_lsn_tmp, segno, instance_config.xlog_seg_size);
+ /*
+ * It's vital to check if files_list is initialized,
+ * because we could get here because the backup was interrupted
+ */
+ if (file_list)
+ {
+ file = pgFileNew(full_filename, filename, true, 0,
+ FIO_BACKUP_HOST);
- /*
- * Note, that there is no guarantee that corresponding WAL file even exists.
- * Replica may return LSN from future and keep staying in present.
- * Or it can return invalid LSN.
- *
- * That's bad, since we want to get real LSN to save it in backup label file
- * and to use it in WAL validation.
- *
- * So we try to do the following:
- * 1. Wait 'archive_timeout' seconds for segment containing stop_lsn and
- * look for the first valid record in it.
- * It solves the problem of occasional invalid LSN on write-busy system.
- * 2. Failing that, look for record in previous segment with endpoint
- * equal or greater than stop_lsn. It may(!) solve the problem of invalid LSN
- * on write-idle system. If that fails too, error out.
- */
+ if (S_ISREG(file->mode))
+ {
+ file->crc = pgFileGetCRC(full_filename, true, false);
- /* stop_lsn is pointing to a 0 byte of xlog segment */
- if (stop_backup_lsn_tmp % instance_config.xlog_seg_size == 0)
- {
- /* Wait for segment with current stop_lsn, it is ok for it to never arrive */
- wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli,
- false, true, WARNING, stream_wal, backup);
-
- /* Get the first record in segment with current stop_lsn */
- lsn_tmp = get_first_record_lsn(xlog_path, segno, backup->tli,
- instance_config.xlog_seg_size,
- instance_config.archive_timeout);
-
- /* Check that returned LSN is valid and greater than stop_lsn */
- if (XLogRecPtrIsInvalid(lsn_tmp) ||
- !XRecOffIsValid(lsn_tmp) ||
- lsn_tmp < stop_backup_lsn_tmp)
- {
- /* Backup from master should error out here */
- if (!backup->from_replica)
- elog(ERROR, "Failed to get next WAL record after %X/%X",
- (uint32) (stop_backup_lsn_tmp >> 32),
- (uint32) (stop_backup_lsn_tmp));
-
- /* No luck, falling back to looking up for previous record */
- elog(WARNING, "Failed to get next WAL record after %X/%X, "
- "looking for previous WAL record",
- (uint32) (stop_backup_lsn_tmp >> 32),
- (uint32) (stop_backup_lsn_tmp));
-
- /* Despite looking for previous record there is not guarantee of success
- * because previous record can be the contrecord.
- */
- lsn_tmp = wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli,
- true, false, ERROR, stream_wal, backup);
-
- /* sanity */
- if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp))
- elog(ERROR, "Failed to get WAL record prior to %X/%X",
- (uint32) (stop_backup_lsn_tmp >> 32),
- (uint32) (stop_backup_lsn_tmp));
- }
- }
- /* stop lsn is aligned to xlog block size, just find next lsn */
- else if (stop_backup_lsn_tmp % XLOG_BLCKSZ == 0)
- {
- /* Wait for segment with current stop_lsn */
- wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli,
- false, true, ERROR, stream_wal, backup);
+ file->write_size = file->size;
+ file->uncompressed_size = file->size;
+ }
+ parray_append(file_list, file);
+ }
+}
- /* Get the next closest record in segment with current stop_lsn */
- lsn_tmp = get_next_record_lsn(xlog_path, segno, backup->tli,
- instance_config.xlog_seg_size,
- instance_config.archive_timeout,
- stop_backup_lsn_tmp);
+/*
+ * Notify end of backup to PostgreSQL server.
+ */
+static void
+pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn,
+ PGNodeInfo *nodeInfo)
+{
+ PGStopBackupResult stop_backup_result;
+ char *xlog_path, stream_xlog_path[MAXPGPATH];
+ /* kludge against some old bug in archive_timeout. TODO: remove in 3.0.0 */
+ int timeout = (instance_config.archive_timeout > 0) ?
+ instance_config.archive_timeout : ARCHIVE_TIMEOUT_DEFAULT;
+ char *query_text = NULL;
+
+	/* TODO: can this check be removed? */
+ if (!backup_in_progress)
+ elog(ERROR, "backup is not in progress");
- /* sanity */
- if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp))
- elog(ERROR, "Failed to get WAL record next to %X/%X",
- (uint32) (stop_backup_lsn_tmp >> 32),
- (uint32) (stop_backup_lsn_tmp));
- }
- /* PostgreSQL returned something very illegal as STOP_LSN, error out */
- else
- elog(ERROR, "Invalid stop_backup_lsn value %X/%X",
- (uint32) (stop_backup_lsn_tmp >> 32), (uint32) (stop_backup_lsn_tmp));
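+	/* keep the connection quiet: suppress server messages (NOTICEs etc.)
+	 * while the stop-backup commands below are executed */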
+ pg_silent_client_messages(pg_startbackup_conn);
- /* Setting stop_backup_lsn will set stop point for streaming */
- stop_backup_lsn = lsn_tmp;
- stop_lsn_exists = true;
- }
+	/*
+	 * Create a restore point, but only if the backup is taken from master.
+	 * For PG 9.5 create it only if pguser is a superuser.
+	 */
+ if (!backup->from_replica &&
+ !(nodeInfo->server_version < 90600 &&
+ !nodeInfo->is_superuser)) //TODO: check correctness
+ pg_create_restore_point(pg_startbackup_conn, backup->start_time);
- elog(LOG, "stop_lsn: %X/%X",
- (uint32) (stop_backup_lsn_tmp >> 32), (uint32) (stop_backup_lsn_tmp));
+ /* Execute pg_stop_backup using PostgreSQL connection */
+ pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, exclusive_backup, &query_text);
- /* Write backup_label and tablespace_map */
- if (!exclusive_backup)
- {
- Assert(PQnfields(res) >= 4);
- pgBackupGetPath(backup, path, lengthof(path), DATABASE_DIR);
-
- /* Write backup_label */
- join_path_components(backup_label, path, PG_BACKUP_LABEL_FILE);
- fp = fio_fopen(backup_label, PG_BINARY_W, FIO_BACKUP_HOST);
- if (fp == NULL)
- elog(ERROR, "can't open backup label file \"%s\": %s",
- backup_label, strerror(errno));
-
- len = strlen(PQgetvalue(res, 0, 3));
- if (fio_fwrite(fp, PQgetvalue(res, 0, 3), len) != len ||
- fio_fflush(fp) != 0 ||
- fio_fclose(fp))
- elog(ERROR, "can't write backup label file \"%s\": %s",
- backup_label, strerror(errno));
+ /*
+ * Wait for the result of pg_stop_backup(), but no longer than
+ * archive_timeout seconds
+ */
+ pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, exclusive_backup, timeout, query_text, &stop_backup_result);
- /*
- * It's vital to check if backup_files_list is initialized,
- * because we could get here because the backup was interrupted
- */
- if (backup_files_list)
- {
- file = pgFileNew(backup_label, PG_BACKUP_LABEL_FILE, true, 0,
- FIO_BACKUP_HOST);
+ if (backup->stream)
+ {
+ join_path_components(stream_xlog_path, backup->database_dir, PG_XLOG_DIR);
+ xlog_path = stream_xlog_path;
+ }
+ else
+ xlog_path = instanceState->instance_wal_subdir_path;
- file->crc = pgFileGetCRC(backup_label, true, false);
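+	/* Wait until stop_lsn is streamed or archived and, if the server returned
+	 * an unreliable LSN, derive a valid one from nearby WAL records; this helper
+	 * consolidates the logic that previously lived inline here */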
+ wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup);
- file->write_size = file->size;
- file->uncompressed_size = file->size;
- parray_append(backup_files_list, file);
- }
- }
+ /* Write backup_label and tablespace_map */
+ if (!exclusive_backup)
+ {
+ Assert(stop_backup_result.backup_label_content != NULL);
- if (sscanf(PQgetvalue(res, 0, 0), XID_FMT, &recovery_xid) != 1)
- elog(ERROR,
- "result of txid_snapshot_xmax() is invalid: %s",
- PQgetvalue(res, 0, 0));
- if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time, true))
- elog(ERROR,
- "result of current_timestamp is invalid: %s",
- PQgetvalue(res, 0, 1));
-
- /* Get content for tablespace_map from stop_backup results
- * in case of non-exclusive backup
- */
- if (!exclusive_backup)
- val = PQgetvalue(res, 0, 4);
+ /* Write backup_label */
+ pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label",
+ stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len,
+ backup_files_list);
+ free(stop_backup_result.backup_label_content);
+ stop_backup_result.backup_label_content = NULL;
+ stop_backup_result.backup_label_content_len = 0;
/* Write tablespace_map */
- if (!exclusive_backup && val && strlen(val) > 0)
+ if (stop_backup_result.tablespace_map_content != NULL)
{
- char tablespace_map[MAXPGPATH];
-
- join_path_components(tablespace_map, path, PG_TABLESPACE_MAP_FILE);
- fp = fio_fopen(tablespace_map, PG_BINARY_W, FIO_BACKUP_HOST);
- if (fp == NULL)
- elog(ERROR, "can't open tablespace map file \"%s\": %s",
- tablespace_map, strerror(errno));
-
- len = strlen(val);
- if (fio_fwrite(fp, val, len) != len ||
- fio_fflush(fp) != 0 ||
- fio_fclose(fp))
- elog(ERROR, "can't write tablespace map file \"%s\": %s",
- tablespace_map, strerror(errno));
-
- if (backup_files_list)
- {
- file = pgFileNew(tablespace_map, PG_TABLESPACE_MAP_FILE, true, 0,
- FIO_BACKUP_HOST);
- if (S_ISREG(file->mode))
- {
- file->crc = pgFileGetCRC(tablespace_map, true, false);
- file->write_size = file->size;
- }
-
- parray_append(backup_files_list, file);
- }
+ pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map",
+ stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len,
+ backup_files_list);
+ free(stop_backup_result.tablespace_map_content);
+ stop_backup_result.tablespace_map_content = NULL;
+ stop_backup_result.tablespace_map_content_len = 0;
}
-
- if (tablespace_map_content)
- PQclear(tablespace_map_content);
- PQclear(res);
}
- /* Fill in fields if that is the correct end of backup. */
- if (backup != NULL)
+ if (backup->stream)
{
- char *xlog_path,
- stream_xlog_path[MAXPGPATH];
-
- /*
- * Wait for stop_lsn to be archived or streamed.
- * If replica returned valid STOP_LSN of not actually existing record,
- * look for previous record with endpoint >= STOP_LSN.
- */
- if (!stop_lsn_exists)
- stop_backup_lsn = wait_wal_lsn(stop_backup_lsn_tmp, false, backup->tli,
- false, false, ERROR, stream_wal, backup);
-
- if (stream_wal)
- {
- /* This function will also add list of xlog files
- * to the passed filelist */
- if(wait_WAL_streaming_end(backup_files_list))
- elog(ERROR, "WAL streaming failed");
-
- pgBackupGetPath2(backup, stream_xlog_path,
- lengthof(stream_xlog_path),
- DATABASE_DIR, PG_XLOG_DIR);
- xlog_path = stream_xlog_path;
- }
- else
- xlog_path = arclog_path;
+		/* This function will also add the list of xlog files
+		 * to the passed filelist */
+ if(wait_WAL_streaming_end(backup_files_list))
+ elog(ERROR, "WAL streaming failed");
+ }
- backup->stop_lsn = stop_backup_lsn;
- backup->recovery_xid = recovery_xid;
+ backup->recovery_xid = stop_backup_result.snapshot_xid;
- elog(LOG, "Getting the Recovery Time from WAL");
+ elog(LOG, "Getting the Recovery Time from WAL");
- /* iterate over WAL from stop_backup lsn to start_backup lsn */
- if (!read_recovery_info(xlog_path, backup->tli,
- instance_config.xlog_seg_size,
- backup->start_lsn, backup->stop_lsn,
- &backup->recovery_time))
- {
- elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp");
- backup->recovery_time = recovery_time;
- }
+ /* iterate over WAL from stop_backup lsn to start_backup lsn */
+ if (!read_recovery_info(xlog_path, backup->tli,
+ instance_config.xlog_seg_size,
+ backup->start_lsn, backup->stop_lsn,
+ &backup->recovery_time))
+ {
+ elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp");
+ backup->recovery_time = stop_backup_result.invocation_time;
}
+
+ /* Cleanup */
+ pg_free(query_text);
}
/*
@@ -2050,15 +2045,15 @@ backup_files(void *arg)
/* backup file */
if (file->is_datafile && !file->is_cfs)
{
- backup_data_file(&(arguments->conn_arg), file, from_fullpath, to_fullpath,
- arguments->prev_start_lsn,
- current.backup_mode,
- instance_config.compress_alg,
- instance_config.compress_level,
- arguments->nodeInfo->checksum_version,
- arguments->nodeInfo->ptrack_version_num,
- arguments->nodeInfo->ptrack_schema,
- arguments->hdr_map, false);
+ backup_data_file(file, from_fullpath, to_fullpath,
+ arguments->prev_start_lsn,
+ current.backup_mode,
+ instance_config.compress_alg,
+ instance_config.compress_level,
+ arguments->nodeInfo->checksum_version,
+ arguments->nodeInfo->ptrack_version_num,
+ arguments->nodeInfo->ptrack_schema,
+ arguments->hdr_map, false);
}
else
{
@@ -2082,10 +2077,6 @@ backup_files(void *arg)
/* ssh connection is no longer needed */
fio_disconnect();
- /* Close connection */
- if (arguments->conn_arg.conn)
- pgut_disconnect(arguments->conn_arg.conn);
-
/* Data file transfer was successful */
arguments->ret = 0;
@@ -2276,7 +2267,7 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
}
-static void
+void
check_external_for_tablespaces(parray *external_list, PGconn *backup_conn)
{
PGresult *res;
@@ -2340,3 +2331,36 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn)
}
}
}
+
+/*
+ * Calculate pgdata_bytes: accepts a (parray *) of (pgFile *);
+ * used by do_catchup() to report the source PGDATA size
+ */
+int64
+calculate_datasize_of_filelist(parray *filelist)
+{
+ int64 bytes = 0;
+ int i;
+
+	/* parray_num() doesn't check for NULL */
+ if (filelist == NULL)
+ return 0;
+
+ for (i = 0; i < parray_num(filelist); i++)
+ {
+ pgFile *file = (pgFile *) parray_get(filelist, i);
+
+ if (file->external_dir_num != 0)
+ continue;
+
+ if (S_ISDIR(file->mode))
+ {
+ // TODO is a dir always 4K?
+ bytes += 4096;
+ continue;
+ }
+
+ bytes += file->size;
+ }
+ return bytes;
+}
diff --git a/src/catalog.c b/src/catalog.c
index 3ba17e9fd..9775968b8 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -124,7 +124,7 @@ read_backup(const char *root_dir)
*/
void
write_backup_status(pgBackup *backup, BackupStatus status,
- const char *instance_name, bool strict)
+ bool strict)
{
pgBackup *tmp;
@@ -818,9 +818,22 @@ release_shared_lock_file(const char *backup_dir)
* Get backup_mode in string representation.
*/
const char *
-pgBackupGetBackupMode(pgBackup *backup)
+pgBackupGetBackupMode(pgBackup *backup, bool show_color)
{
- return backupModes[backup->backup_mode];
+ if (show_color)
+ {
+ /* color the Backup mode */
+ char *mode = pgut_malloc(24); /* leaking memory here */
+
+ if (backup->backup_mode == BACKUP_MODE_FULL)
+ snprintf(mode, 24, "%s%s%s", TC_GREEN_BOLD, backupModes[backup->backup_mode], TC_RESET);
+ else
+ snprintf(mode, 24, "%s%s%s", TC_BLUE_BOLD, backupModes[backup->backup_mode], TC_RESET);
+
+ return mode;
+ }
+ else
+ return backupModes[backup->backup_mode];
}
static bool
@@ -837,13 +850,11 @@ IsDir(const char *dirpath, const char *entry, fio_location location)
/*
* Create list of instances in given backup catalog.
*
- * Returns parray of "InstanceConfig" structures, filled with
- * actual config of each instance.
+ * Returns parray of InstanceState structures.
*/
parray *
-catalog_get_instance_list(void)
+catalog_get_instance_list(CatalogState *catalogState)
{
- char path[MAXPGPATH];
DIR *dir;
struct dirent *dent;
parray *instances;
@@ -851,24 +862,23 @@ catalog_get_instance_list(void)
instances = parray_new();
/* open directory and list contents */
- join_path_components(path, backup_path, BACKUPS_DIR);
- dir = opendir(path);
+ dir = opendir(catalogState->backup_subdir_path);
if (dir == NULL)
elog(ERROR, "Cannot open directory \"%s\": %s",
- path, strerror(errno));
+ catalogState->backup_subdir_path, strerror(errno));
while (errno = 0, (dent = readdir(dir)) != NULL)
{
char child[MAXPGPATH];
struct stat st;
- InstanceConfig *instance;
+ InstanceState *instanceState = NULL;
/* skip entries point current dir or parent dir */
if (strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0)
continue;
- join_path_components(child, path, dent->d_name);
+ join_path_components(child, catalogState->backup_subdir_path, dent->d_name);
if (lstat(child, &st) == -1)
elog(ERROR, "Cannot stat file \"%s\": %s",
@@ -877,9 +887,18 @@ catalog_get_instance_list(void)
if (!S_ISDIR(st.st_mode))
continue;
- instance = readInstanceConfigFile(dent->d_name);
+ instanceState = pgut_new(InstanceState);
- parray_append(instances, instance);
+ strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH);
+ join_path_components(instanceState->instance_backup_subdir_path,
+ catalogState->backup_subdir_path, instanceState->instance_name);
+ join_path_components(instanceState->instance_wal_subdir_path,
+ catalogState->wal_subdir_path, instanceState->instance_name);
+ join_path_components(instanceState->instance_config_path,
+ instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE);
+
+ instanceState->config = readInstanceConfigFile(instanceState);
+ parray_append(instances, instanceState);
}
/* TODO 3.0: switch to ERROR */
@@ -888,11 +907,11 @@ catalog_get_instance_list(void)
if (errno)
elog(ERROR, "Cannot read directory \"%s\": %s",
- path, strerror(errno));
+ catalogState->backup_subdir_path, strerror(errno));
if (closedir(dir))
elog(ERROR, "Cannot close directory \"%s\": %s",
- path, strerror(errno));
+ catalogState->backup_subdir_path, strerror(errno));
return instances;
}
@@ -904,22 +923,18 @@ catalog_get_instance_list(void)
* If valid backup id is passed only matching backup will be added to the list.
*/
parray *
-catalog_get_backup_list(const char *instance_name, time_t requested_backup_id)
+catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id)
{
DIR *data_dir = NULL;
struct dirent *data_ent = NULL;
parray *backups = NULL;
int i;
- char backup_instance_path[MAXPGPATH];
-
- sprintf(backup_instance_path, "%s/%s/%s",
- backup_path, BACKUPS_DIR, instance_name);
/* open backup instance backups directory */
- data_dir = fio_opendir(backup_instance_path, FIO_BACKUP_HOST);
+ data_dir = fio_opendir(instanceState->instance_backup_subdir_path, FIO_BACKUP_HOST);
if (data_dir == NULL)
{
- elog(WARNING, "cannot open directory \"%s\": %s", backup_instance_path,
+ elog(WARNING, "cannot open directory \"%s\": %s", instanceState->instance_backup_subdir_path,
strerror(errno));
goto err_proc;
}
@@ -933,12 +948,12 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id)
pgBackup *backup = NULL;
/* skip not-directory entries and hidden entries */
- if (!IsDir(backup_instance_path, data_ent->d_name, FIO_BACKUP_HOST)
+ if (!IsDir(instanceState->instance_backup_subdir_path, data_ent->d_name, FIO_BACKUP_HOST)
|| data_ent->d_name[0] == '.')
continue;
/* open subdirectory of specific backup */
- join_path_components(data_path, backup_instance_path, data_ent->d_name);
+ join_path_components(data_path, instanceState->instance_backup_subdir_path, data_ent->d_name);
/* read backup information from BACKUP_CONTROL_FILE */
join_path_components(backup_conf_path, data_path, BACKUP_CONTROL_FILE);
@@ -978,7 +993,7 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id)
if (errno)
{
elog(WARNING, "Cannot read backup root directory \"%s\": %s",
- backup_instance_path, strerror(errno));
+ instanceState->instance_backup_subdir_path, strerror(errno));
goto err_proc;
}
@@ -1019,19 +1034,118 @@ catalog_get_backup_list(const char *instance_name, time_t requested_backup_id)
}
/*
- * Create list of backup datafiles.
- * If 'requested_backup_id' is INVALID_BACKUP_ID, exit with error.
- * If valid backup id is passed only matching backup will be added to the list.
- * TODO this function only used once. Is it really needed?
+ * Get list of files in the backup from the DATABASE_FILE_LIST.
*/
parray *
get_backup_filelist(pgBackup *backup, bool strict)
{
parray *files = NULL;
char backup_filelist_path[MAXPGPATH];
+ FILE *fp;
+ char buf[BLCKSZ];
+ char stdio_buf[STDIO_BUFSIZE];
+ pg_crc32 content_crc = 0;
join_path_components(backup_filelist_path, backup->root_dir, DATABASE_FILE_LIST);
- files = dir_read_file_list(NULL, NULL, backup_filelist_path, FIO_BACKUP_HOST, backup->content_crc);
+
+ fp = fio_open_stream(backup_filelist_path, FIO_BACKUP_HOST);
+ if (fp == NULL)
+ elog(ERROR, "cannot open \"%s\": %s", backup_filelist_path, strerror(errno));
+
+ /* enable stdio buffering for local file */
+ if (!fio_is_remote(FIO_BACKUP_HOST))
+ setvbuf(fp, stdio_buf, _IOFBF, STDIO_BUFSIZE);
+
+ files = parray_new();
+
+ INIT_FILE_CRC32(true, content_crc);
+
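+	/* Each line of DATABASE_FILE_LIST describes one file as a set of
+	 * "key":"value" pairs extracted below by get_control_value(); the CRC is
+	 * accumulated over the raw lines and checked against backup->content_crc
+	 * after the loop */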
+ while (fgets(buf, lengthof(buf), fp))
+ {
+ char path[MAXPGPATH];
+ char linked[MAXPGPATH];
+ char compress_alg_string[MAXPGPATH];
+ int64 write_size,
+ mode, /* bit length of mode_t depends on platforms */
+ is_datafile,
+ is_cfs,
+ external_dir_num,
+ crc,
+ segno,
+ n_blocks,
+ n_headers,
+ dbOid, /* used for partial restore */
+ hdr_crc,
+ hdr_off,
+ hdr_size;
+ pgFile *file;
+
+ COMP_FILE_CRC32(true, content_crc, buf, strlen(buf));
+
+ get_control_value(buf, "path", path, NULL, true);
+ get_control_value(buf, "size", NULL, &write_size, true);
+ get_control_value(buf, "mode", NULL, &mode, true);
+ get_control_value(buf, "is_datafile", NULL, &is_datafile, true);
+ get_control_value(buf, "is_cfs", NULL, &is_cfs, false);
+ get_control_value(buf, "crc", NULL, &crc, true);
+ get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
+ get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false);
+ get_control_value(buf, "dbOid", NULL, &dbOid, false);
+
+ file = pgFileInit(path);
+ file->write_size = (int64) write_size;
+ file->mode = (mode_t) mode;
+ file->is_datafile = is_datafile ? true : false;
+ file->is_cfs = is_cfs ? true : false;
+ file->crc = (pg_crc32) crc;
+ file->compress_alg = parse_compress_alg(compress_alg_string);
+ file->external_dir_num = external_dir_num;
+ file->dbOid = dbOid ? dbOid : 0;
+
+ /*
+ * Optional fields
+ */
+ if (get_control_value(buf, "linked", linked, NULL, false) && linked[0])
+ {
+ file->linked = pgut_strdup(linked);
+ canonicalize_path(file->linked);
+ }
+
+ if (get_control_value(buf, "segno", NULL, &segno, false))
+ file->segno = (int) segno;
+
+ if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false))
+ file->n_blocks = (int) n_blocks;
+
+ if (get_control_value(buf, "n_headers", NULL, &n_headers, false))
+ file->n_headers = (int) n_headers;
+
+ if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false))
+ file->hdr_crc = (pg_crc32) hdr_crc;
+
+ if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false))
+ file->hdr_off = hdr_off;
+
+ if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false))
+ file->hdr_size = (int) hdr_size;
+
+ parray_append(files, file);
+ }
+
+ FIN_FILE_CRC32(true, content_crc);
+
+ if (ferror(fp))
+ elog(ERROR, "Failed to read from file: \"%s\"", backup_filelist_path);
+
+ fio_close_stream(fp);
+
+ if (backup->content_crc != 0 &&
+ backup->content_crc != content_crc)
+ {
+ elog(WARNING, "Invalid CRC of backup control file '%s': %u. Expected: %u",
+ backup_filelist_path, content_crc, backup->content_crc);
+ return NULL;
+ }
/* redundant sanity? */
if (!files)
@@ -1389,22 +1503,21 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path)
* TODO: '.partial' and '.part' segno information should be added to tlinfo.
*/
parray *
-catalog_get_timelines(InstanceConfig *instance)
+catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance)
{
int i,j,k;
parray *xlog_files_list = parray_new();
parray *timelineinfos;
parray *backups;
timelineInfo *tlinfo;
- char arclog_path[MAXPGPATH];
/* for fancy reporting */
char begin_segno_str[MAXFNAMELEN];
char end_segno_str[MAXFNAMELEN];
/* read all xlog files that belong to this archive */
- sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance->name);
- dir_list_file(xlog_files_list, arclog_path, false, true, false, false, true, 0, FIO_BACKUP_HOST);
+ dir_list_file(xlog_files_list, instanceState->instance_wal_subdir_path,
+ false, true, false, false, true, 0, FIO_BACKUP_HOST);
parray_qsort(xlog_files_list, pgFileCompareName);
timelineinfos = parray_new();
@@ -1574,7 +1687,7 @@ catalog_get_timelines(InstanceConfig *instance)
TimeLineHistoryEntry *tln;
sscanf(file->name, "%08X.history", &tli);
- timelines = read_timeline_history(arclog_path, tli, true);
+ timelines = read_timeline_history(instanceState->instance_wal_subdir_path, tli, true);
/* History file is empty or corrupted, disregard it */
if (!timelines)
@@ -1612,7 +1725,7 @@ catalog_get_timelines(InstanceConfig *instance)
}
/* save information about backups belonging to each timeline */
- backups = catalog_get_backup_list(instance->name, INVALID_BACKUP_ID);
+ backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
for (i = 0; i < parray_num(timelineinfos); i++)
{
@@ -2085,7 +2198,7 @@ get_oldest_backup(timelineInfo *tlinfo)
* Overwrite backup metadata.
*/
void
-do_set_backup(const char *instance_name, time_t backup_id,
+do_set_backup(InstanceState *instanceState, time_t backup_id,
pgSetBackupParams *set_backup_params)
{
pgBackup *target_backup = NULL;
@@ -2094,7 +2207,7 @@ do_set_backup(const char *instance_name, time_t backup_id,
if (!set_backup_params)
elog(ERROR, "Nothing to set by 'set-backup' command");
- backup_list = catalog_get_backup_list(instance_name, backup_id);
+ backup_list = catalog_get_backup_list(instanceState, backup_id);
if (parray_num(backup_list) != 1)
elog(ERROR, "Failed to find backup %s", base36enc(backup_id));
@@ -2208,7 +2321,7 @@ pgBackupWriteControl(FILE *out, pgBackup *backup, bool utc)
char timestamp[100];
fio_fprintf(out, "#Configuration\n");
- fio_fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup));
+ fio_fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup, false));
fio_fprintf(out, "stream = %s\n", backup->stream ? "true" : "false");
fio_fprintf(out, "compress-alg = %s\n",
deparse_compress_alg(backup->compress_alg));
@@ -2770,7 +2883,7 @@ pgNodeInit(PGNodeInfo *node)
node->server_version_str[0] = '\0';
node->ptrack_version_num = 0;
- node->is_ptrack_enable = false;
+ node->is_ptrack_enabled = false;
node->ptrack_schema = NULL;
}
@@ -2857,64 +2970,6 @@ pgBackupCompareIdDesc(const void *l, const void *r)
return -pgBackupCompareId(l, r);
}
-/*
- * Construct absolute path of the backup directory.
- * If subdir is not NULL, it will be appended after the path.
- */
-void
-pgBackupGetPath(const pgBackup *backup, char *path, size_t len, const char *subdir)
-{
- pgBackupGetPath2(backup, path, len, subdir, NULL);
-}
-
-/*
- * Construct absolute path of the backup directory.
- * Append "subdir1" and "subdir2" to the backup directory.
- */
-void
-pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
- const char *subdir1, const char *subdir2)
-{
- /* If "subdir1" is NULL do not check "subdir2" */
- if (!subdir1)
- snprintf(path, len, "%s/%s", backup_instance_path,
- base36enc(backup->start_time));
- else if (!subdir2)
- snprintf(path, len, "%s/%s/%s", backup_instance_path,
- base36enc(backup->start_time), subdir1);
- /* "subdir1" and "subdir2" is not NULL */
- else
- snprintf(path, len, "%s/%s/%s/%s", backup_instance_path,
- base36enc(backup->start_time), subdir1, subdir2);
-}
-
-/*
- * independent from global variable backup_instance_path
- * Still depends from backup_path
- */
-void
-pgBackupGetPathInInstance(const char *instance_name,
- const pgBackup *backup, char *path, size_t len,
- const char *subdir1, const char *subdir2)
-{
- char backup_instance_path[MAXPGPATH];
-
- sprintf(backup_instance_path, "%s/%s/%s",
- backup_path, BACKUPS_DIR, instance_name);
-
- /* If "subdir1" is NULL do not check "subdir2" */
- if (!subdir1)
- snprintf(path, len, "%s/%s", backup_instance_path,
- base36enc(backup->start_time));
- else if (!subdir2)
- snprintf(path, len, "%s/%s/%s", backup_instance_path,
- base36enc(backup->start_time), subdir1);
- /* "subdir1" and "subdir2" is not NULL */
- else
- snprintf(path, len, "%s/%s/%s/%s", backup_instance_path,
- base36enc(backup->start_time), subdir1, subdir2);
-}
-
/*
* Check if multiple backups consider target backup to be their direct parent
*/
@@ -3064,26 +3119,6 @@ is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive)
return false;
}
-/*
- * Return backup index number.
- * Note: this index number holds true until new sorting of backup list
- */
-int
-get_backup_index_number(parray *backup_list, pgBackup *backup)
-{
- int i;
-
- for (i = 0; i < parray_num(backup_list); i++)
- {
- pgBackup *tmp_backup = (pgBackup *) parray_get(backup_list, i);
-
- if (tmp_backup->start_time == backup->start_time)
- return i;
- }
- elog(WARNING, "Failed to find backup %s", base36enc(backup->start_time));
- return -1;
-}
-
/* On backup_list lookup children of target_backup and append them to append_list */
void
append_children(parray *backup_list, pgBackup *target_backup, parray *append_list)
diff --git a/src/catchup.c b/src/catchup.c
new file mode 100644
index 000000000..58ce13c10
--- /dev/null
+++ b/src/catchup.c
@@ -0,0 +1,1020 @@
+/*-------------------------------------------------------------------------
+ *
+ * catchup.c: sync DB cluster
+ *
+ * Copyright (c) 2021, Postgres Professional
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "pg_probackup.h"
+
+#if PG_VERSION_NUM < 110000
+#include "catalog/catalog.h"
+#endif
+#include "catalog/pg_tablespace.h"
+#include "access/timeline.h"
+#include "pgtar.h"
+#include "streamutil.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <time.h>
+
+#include "utils/thread.h"
+#include "utils/file.h"
+
+/*
+ * Catchup routines
+ */
+static PGconn *catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata);
+static void catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, const char *source_pgdata,
+ const char *dest_pgdata);
+static void catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn);
+static parray* catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli);
+
+//REVIEW The name of this function looks strange to me.
+//Maybe catchup_init_state() or catchup_setup() will do better?
+//I'd also suggest to wrap all these fields into some CatchupState, but it isn't urgent.
+/*
+ * Prepare for work: fill some globals, open connection to source database
+ */
+static PGconn *
+catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata)
+{
+ PGconn *source_conn;
+
+	/* Initialize PGNodeInfo struct */
+ pgNodeInit(source_node_info);
+
+ /* Get WAL segments size and system ID of source PG instance */
+ instance_config.xlog_seg_size = get_xlog_seg_size(source_pgdata);
+ instance_config.system_identifier = get_system_identifier(source_pgdata, FIO_DB_HOST);
+ current.start_time = time(NULL);
+
+ strlcpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version));
+
+ /* Do some compatibility checks and fill basic info about PG instance */
+ source_conn = pgdata_basic_setup(instance_config.conn_opt, source_node_info);
+
+#if PG_VERSION_NUM >= 110000
+ if (!RetrieveWalSegSize(source_conn))
+ elog(ERROR, "Failed to retrieve wal_segment_size");
+#endif
+
+ get_ptrack_version(source_conn, source_node_info);
+ if (source_node_info->ptrack_version_num > 0)
+ source_node_info->is_ptrack_enabled = pg_is_ptrack_enabled(source_conn, source_node_info->ptrack_version_num);
+
+ /* Obtain current timeline */
+#if PG_VERSION_NUM >= 90600
+ current.tli = get_current_timeline(source_conn);
+#else
+ instance_config.pgdata = source_pgdata;
+ current.tli = get_current_timeline_from_control(source_pgdata, FIO_DB_HOST, false);
+#endif
+
+ elog(INFO, "Catchup start, pg_probackup version: %s, "
+ "PostgreSQL version: %s, "
+ "remote: %s, source-pgdata: %s, destination-pgdata: %s",
+ PROGRAM_VERSION, source_node_info->server_version_str,
+ IsSshProtocol() ? "true" : "false",
+ source_pgdata, dest_pgdata);
+
+ if (current.from_replica)
+ elog(INFO, "Running catchup from standby");
+
+ return source_conn;
+}
+
+/*
+ * Check that catchup can be performed on source and dest.
+ * This function covers only the checks that can be done without
+ * modifying any data on disk.
+ */
+static void
+catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn,
+ const char *source_pgdata, const char *dest_pgdata)
+{
+	/* TODO
+	 * gsmol - fallback to FULL mode if dest PGDATA is empty
+	 * kulaginm -- I think this is a harmful feature. If the user requested an incremental catchup,
+	 * they expect it to be done quickly and efficiently. If, for example, they made a mistake
+	 * with dest_dir, they would receive a second full copy instead of an error message, and I think
+	 * that in some cases they would prefer the error.
+	 * I propose to offer a backup_mode "auto" in future versions, in which we will look at dest_dir
+	 * and decide which of the modes will be the most effective.
+	 * I.e.:
+	 * if (requested_backup_mode == BACKUP_MODE_DIFF_AUTO)
+	 * {
+	 *     if (dest_pgdata_is_empty)
+	 *         backup_mode = BACKUP_MODE_FULL;
+	 *     else if (ptrack supported and applicable)
+	 *         backup_mode = BACKUP_MODE_DIFF_PTRACK;
+	 *     else
+	 *         backup_mode = BACKUP_MODE_DIFF_DELTA;
+	 * }
+	 */
+
+ if (dir_is_empty(dest_pgdata, FIO_LOCAL_HOST))
+ {
+ if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK ||
+ current.backup_mode == BACKUP_MODE_DIFF_DELTA)
+ elog(ERROR, "\"%s\" is empty, but incremental catchup mode requested.",
+ dest_pgdata);
+ }
+ else /* dest dir not empty */
+ {
+ if (current.backup_mode == BACKUP_MODE_FULL)
+ elog(ERROR, "Can't perform full catchup into non-empty directory \"%s\".",
+ dest_pgdata);
+ }
+
+ /* check that postmaster is not running in destination */
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ {
+ pid_t pid;
+ pid = fio_check_postmaster(dest_pgdata, FIO_LOCAL_HOST);
+ if (pid == 1) /* postmaster.pid is mangled */
+ {
+ char pid_filename[MAXPGPATH];
+ join_path_components(pid_filename, dest_pgdata, "postmaster.pid");
+ elog(ERROR, "Pid file \"%s\" is mangled, cannot determine whether postmaster is running or not",
+ pid_filename);
+ }
+ else if (pid > 1) /* postmaster is up */
+ {
+ elog(ERROR, "Postmaster with pid %u is running in destination directory \"%s\"",
+ pid, dest_pgdata);
+ }
+ }
+
+ /* check backup_label absence in dest */
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ {
+ char backup_label_filename[MAXPGPATH];
+
+ join_path_components(backup_label_filename, dest_pgdata, PG_BACKUP_LABEL_FILE);
+ if (fio_access(backup_label_filename, F_OK, FIO_LOCAL_HOST) == 0)
+ elog(ERROR, "Destination directory contains \"" PG_BACKUP_LABEL_FILE "\" file");
+ }
+
+	/* check that the destination database was shut down cleanly */
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ {
+ DBState state;
+ state = get_system_dbstate(dest_pgdata, FIO_LOCAL_HOST);
+ /* see states in postgres sources (src/include/catalog/pg_control.h) */
+ if (state != DB_SHUTDOWNED && state != DB_SHUTDOWNED_IN_RECOVERY)
+ elog(ERROR, "Postmaster in destination directory \"%s\" must be stopped cleanly",
+ dest_pgdata);
+ }
+
+	/* Check that the connected PG instance and the source and destination PGDATA belong to the same system */
+ {
+ uint64 source_conn_id, source_id, dest_id;
+
+ source_conn_id = get_remote_system_identifier(source_conn);
+ source_id = get_system_identifier(source_pgdata, FIO_DB_HOST); /* same as instance_config.system_identifier */
+
+ if (source_conn_id != source_id)
+ elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu",
+ source_conn_id, source_pgdata, source_id);
+
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ {
+ dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST);
+ if (source_conn_id != dest_id)
+ elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu",
+ source_conn_id, dest_pgdata, dest_id);
+ }
+ }
+
+ /* check PTRACK version */
+ if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
+ {
+ if (source_node_info->ptrack_version_num == 0)
+ elog(ERROR, "This PostgreSQL instance does not support ptrack");
+ else if (source_node_info->ptrack_version_num < 200)
+ elog(ERROR, "ptrack extension is too old.\n"
+ "Upgrade ptrack to version >= 2");
+ else if (!source_node_info->is_ptrack_enabled)
+ elog(ERROR, "Ptrack is disabled");
+ }
+
+ if (current.from_replica && exclusive_backup)
+ elog(ERROR, "Catchup from standby is only available for PostgreSQL >= 9.6");
+
+ /* check that we don't overwrite tablespace in source pgdata */
+ catchup_check_tablespaces_existance_in_tbsmapping(source_conn);
+
+ /* check timelines */
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ {
+ RedoParams dest_redo = { 0, InvalidXLogRecPtr, 0 };
+
+ /* fill dest_redo.lsn and dest_redo.tli */
+ get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo);
+
+ if (current.tli != 1)
+ {
+ parray *source_timelines; /* parray* of TimeLineHistoryEntry* */
+ source_timelines = catchup_get_tli_history(&instance_config.conn_opt, current.tli);
+
+ if (source_timelines == NULL)
+ elog(ERROR, "Cannot get source timeline history");
+
+ if (!satisfy_timeline(source_timelines, dest_redo.tli, dest_redo.lsn))
+ elog(ERROR, "Destination is not in source timeline history");
+
+ parray_walk(source_timelines, pfree);
+ parray_free(source_timelines);
+ }
+ else /* special case -- no history files in source */
+ {
+ if (dest_redo.tli != 1)
+ elog(ERROR, "Source is behind destination in timeline history");
+ }
+ }
+}
+
+/*
+ * Check that all tablespaces exist in the tablespace mapping (--tablespace-mapping option)
+ * Check that all locally mapped directories are empty if this is a local FULL catchup
+ * Emit a fatal error if a tablespace is missing from the map or its target is not empty
+ */
+static void
+catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn)
+{
+ PGresult *res;
+ int i;
+ char *tablespace_path = NULL;
+ const char *linked_path = NULL;
+ char *query = "SELECT pg_catalog.pg_tablespace_location(oid) "
+ "FROM pg_catalog.pg_tablespace "
+ "WHERE pg_catalog.pg_tablespace_location(oid) <> '';";
+
+ res = pgut_execute(conn, query, 0, NULL);
+
+ if (!res)
+ elog(ERROR, "Failed to get list of tablespaces");
+
+ for (i = 0; i < res->ntups; i++)
+ {
+ tablespace_path = PQgetvalue(res, i, 0);
+ Assert (strlen(tablespace_path) > 0);
+
+ canonicalize_path(tablespace_path);
+ linked_path = get_tablespace_mapping(tablespace_path);
+
+ if (strcmp(tablespace_path, linked_path) == 0)
+ /* same result -> not found in mapping */
+ {
+ if (!fio_is_remote(FIO_DB_HOST))
+ elog(ERROR, "Local catchup executed, but source database contains "
+ "tablespace (\"%s\"), that is not listed in the map", tablespace_path);
+ else
+ elog(WARNING, "Remote catchup executed and source database contains "
+ "tablespace (\"%s\"), that is not listed in the map", tablespace_path);
+ }
+
+ if (!is_absolute_path(linked_path))
+ elog(ERROR, "Tablespace directory path must be an absolute path: \"%s\"",
+ linked_path);
+
+ if (current.backup_mode == BACKUP_MODE_FULL
+ && !dir_is_empty(linked_path, FIO_LOCAL_HOST))
+ elog(ERROR, "Target mapped tablespace directory (\"%s\") is not empty in FULL catchup",
+ linked_path);
+ }
+ PQclear(res);
+}
+
+/*
+ * Get timeline history via replication connection
+ * returns parray* of TimeLineHistoryEntry*
+ */
+static parray*
+catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli)
+{
+ PGresult *res;
+ PGconn *conn;
+ char *history;
+ char query[128];
+ parray *result = NULL;
+
+ snprintf(query, sizeof(query), "TIMELINE_HISTORY %u", tli);
+
+ /*
+ * Connect in replication mode to the server.
+ */
+ conn = pgut_connect_replication(conn_opt->pghost,
+ conn_opt->pgport,
+ conn_opt->pgdatabase,
+ conn_opt->pguser,
+ false);
+
+ if (!conn)
+ return NULL;
+
+ res = PQexec(conn, query);
+ PQfinish(conn);
+
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ elog(WARNING, "Could not send replication command \"%s\": %s",
+ query, PQresultErrorMessage(res));
+ PQclear(res);
+ return NULL;
+ }
+
+ /*
+ * The response to TIMELINE_HISTORY is a single row result set
+ * with two fields: filename and content
+ */
+ if (PQnfields(res) != 2 || PQntuples(res) != 1)
+ {
+ elog(ERROR, "Unexpected response to TIMELINE_HISTORY command: "
+ "got %d rows and %d fields, expected %d rows and %d fields",
+ PQntuples(res), PQnfields(res), 1, 2);
+ PQclear(res);
+ return NULL;
+ }
+
+ history = pgut_strdup(PQgetvalue(res, 0, 1));
+ result = parse_tli_history_buffer(history, tli);
+
+ /* some cleanup */
+ pg_free(history);
+ PQclear(res);
+
+ return result;
+}
+
+/*
+ * Catchup multithreaded copy routine and its helper structure and thread function
+ */
+
+/* parameters for catchup_thread_runner() passed from catchup_multithreaded_copy() */
+typedef struct
+{
+ PGNodeInfo *nodeInfo;
+ const char *from_root;
+ const char *to_root;
+ parray *source_filelist;
+ parray *dest_filelist;
+ XLogRecPtr sync_lsn;
+ BackupMode backup_mode;
+ int thread_num;
+ bool completed;
+} catchup_thread_runner_arg;
+
+/* Catchup file copier executed in separate thread */
+static void *
+catchup_thread_runner(void *arg)
+{
+ int i;
+ char from_fullpath[MAXPGPATH];
+ char to_fullpath[MAXPGPATH];
+
+ catchup_thread_runner_arg *arguments = (catchup_thread_runner_arg *) arg;
+ int n_files = parray_num(arguments->source_filelist);
+
+ /* catchup a file */
+ for (i = 0; i < n_files; i++)
+ {
+ pgFile *file = (pgFile *) parray_get(arguments->source_filelist, i);
+ pgFile *dest_file = NULL;
+
+ /* We have already copied all directories */
+ if (S_ISDIR(file->mode))
+ continue;
+
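+		/* files are distributed between threads by atomically claiming the
+		 * per-file lock flag; skip files already claimed by another thread */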
+ if (!pg_atomic_test_set_flag(&file->lock))
+ continue;
+
+ /* check for interrupt */
+ if (interrupted || thread_interrupted)
+ elog(ERROR, "Interrupted during catchup");
+
+ if (progress)
+ elog(INFO, "Progress: (%d/%d). Process file \"%s\"",
+ i + 1, n_files, file->rel_path);
+
+ /* construct destination filepath */
+ Assert(file->external_dir_num == 0);
+ join_path_components(from_fullpath, arguments->from_root, file->rel_path);
+ join_path_components(to_fullpath, arguments->to_root, file->rel_path);
+
+ /* Encountered some strange beast */
+ if (!S_ISREG(file->mode))
+ elog(WARNING, "Unexpected type %d of file \"%s\", skipping",
+ file->mode, from_fullpath);
+
+		/* Check that the file exists in dest pgdata */
+ if (arguments->backup_mode != BACKUP_MODE_FULL)
+ {
+ pgFile **dest_file_tmp = NULL;
+ dest_file_tmp = (pgFile **) parray_bsearch(arguments->dest_filelist,
+ file, pgFileCompareRelPathWithExternal);
+ if (dest_file_tmp)
+ {
+ /* File exists in destination PGDATA */
+ file->exists_in_prev = true;
+ dest_file = *dest_file_tmp;
+ }
+ }
+
+ /* Do actual work */
+ if (file->is_datafile && !file->is_cfs)
+ {
+ catchup_data_file(file, from_fullpath, to_fullpath,
+ arguments->sync_lsn,
+ arguments->backup_mode,
+ NONE_COMPRESS,
+ 0,
+ arguments->nodeInfo->checksum_version,
+ arguments->nodeInfo->ptrack_version_num,
+ arguments->nodeInfo->ptrack_schema,
+ false,
+ dest_file != NULL ? dest_file->size : 0);
+ }
+ else
+ {
+ backup_non_data_file(file, dest_file, from_fullpath, to_fullpath,
+ arguments->backup_mode, current.parent_backup, true);
+ }
+
+ if (file->write_size == FILE_NOT_FOUND)
+ continue;
+
+ if (file->write_size == BYTES_INVALID)
+ {
+ elog(VERBOSE, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size);
+ continue;
+ }
+
+ elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes",
+ from_fullpath, file->write_size);
+ }
+
+	/* ssh connection is no longer needed */
+ fio_disconnect();
+
+	/* Data file transfer was successful */
+ arguments->completed = true;
+
+ return NULL;
+}
+
+/*
+ * main multithreaded copier
+ */
+static bool
+catchup_multithreaded_copy(int num_threads,
+ PGNodeInfo *source_node_info,
+ const char *source_pgdata_path,
+ const char *dest_pgdata_path,
+ parray *source_filelist,
+ parray *dest_filelist,
+ XLogRecPtr sync_lsn,
+ BackupMode backup_mode)
+{
+ /* arrays with meta info for multi threaded catchup */
+ catchup_thread_runner_arg *threads_args;
+ pthread_t *threads;
+
+ bool all_threads_successful = true;
+ int i;
+
+ /* init thread args */
+ threads_args = (catchup_thread_runner_arg *) palloc(sizeof(catchup_thread_runner_arg) * num_threads);
+ for (i = 0; i < num_threads; i++)
+ threads_args[i] = (catchup_thread_runner_arg){
+ .nodeInfo = source_node_info,
+ .from_root = source_pgdata_path,
+ .to_root = dest_pgdata_path,
+ .source_filelist = source_filelist,
+ .dest_filelist = dest_filelist,
+ .sync_lsn = sync_lsn,
+ .backup_mode = backup_mode,
+ .thread_num = i + 1,
+ .completed = false,
+ };
+
+ /* Run threads */
+ thread_interrupted = false;
+ threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
+ for (i = 0; i < num_threads; i++)
+ {
+ elog(VERBOSE, "Start thread num: %i", i);
+ pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i]));
+ }
+
+ /* Wait threads */
+ for (i = 0; i < num_threads; i++)
+ {
+ pthread_join(threads[i], NULL);
+ all_threads_successful &= threads_args[i].completed;
+ }
+
+ free(threads);
+ free(threads_args);
+ return all_threads_successful;
+}
+
+/*
+ * Sync all copied files in the destination directory to disk (pg_control last)
+ */
+static void
+catchup_sync_destination_files(const char* pgdata_path, fio_location location, parray *filelist, pgFile *pg_control_file)
+{
+ char fullpath[MAXPGPATH];
+ time_t start_time, end_time;
+ char pretty_time[20];
+ int i;
+
+ elog(INFO, "Syncing copied files to disk");
+ time(&start_time);
+
+ for (i = 0; i < parray_num(filelist); i++)
+ {
+ pgFile *file = (pgFile *) parray_get(filelist, i);
+
+ /* TODO: sync directory ? */
+ if (S_ISDIR(file->mode))
+ continue;
+
+ Assert(file->external_dir_num == 0);
+ join_path_components(fullpath, pgdata_path, file->rel_path);
+ if (fio_sync(fullpath, location) != 0)
+ elog(ERROR, "Cannot sync file \"%s\": %s", fullpath, strerror(errno));
+ }
+
+ /*
+ * sync pg_control file
+ */
+ join_path_components(fullpath, pgdata_path, pg_control_file->rel_path);
+ if (fio_sync(fullpath, location) != 0)
+ elog(ERROR, "Cannot sync file \"%s\": %s", fullpath, strerror(errno));
+
+ time(&end_time);
+ pretty_time_interval(difftime(end_time, start_time),
+ pretty_time, lengthof(pretty_time));
+ elog(INFO, "Files are synced, time elapsed: %s", pretty_time);
+}
+
+/*
+ * Entry point of pg_probackup CATCHUP subcommand.
+ */
+int
+do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files)
+{
+ PGconn *source_conn = NULL;
+ PGNodeInfo source_node_info;
+ bool backup_logs = false;
+ parray *source_filelist = NULL;
+ pgFile *source_pg_control_file = NULL;
+ parray *dest_filelist = NULL;
+ char dest_xlog_path[MAXPGPATH];
+
+ RedoParams dest_redo = { 0, InvalidXLogRecPtr, 0 };
+ PGStopBackupResult stop_backup_result;
+ bool catchup_isok = true;
+
+ int i;
+
+ /* for fancy reporting */
+ time_t start_time, end_time;
+ char pretty_time[20];
+ char pretty_bytes[20];
+
+ source_conn = catchup_collect_info(&source_node_info, source_pgdata, dest_pgdata);
+ catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata);
+
+ elog(LOG, "Database catchup start");
+
+ {
+ char label[1024];
+ /* notify start of backup to PostgreSQL server */
+ time2iso(label, lengthof(label), current.start_time, false);
+ strncat(label, " with pg_probackup", lengthof(label) -
+ strlen(" with pg_probackup"));
+
+ /* Call pg_start_backup function in PostgreSQL connect */
+		pg_start_backup(label, smooth_checkpoint, &current, &source_node_info, source_conn);
+ elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn));
+ }
+
+ //REVIEW I wonder, if we can move this piece above and call before pg_start backup()?
+ //It seems to be a part of setup phase.
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ {
+ dest_filelist = parray_new();
+ dir_list_file(dest_filelist, dest_pgdata,
+ true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST);
+
+ // fill dest_redo.lsn and dest_redo.tli
+ get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo);
+ elog(INFO, "syncLSN = %X/%X", (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn);
+
+ /*
+ * Future improvement to catch partial catchup:
+ * 1. rename dest pg_control into something like pg_control.pbk
+ * (so user can't start partial catchup'ed instance from this point)
+ * 2. try to read by get_redo() pg_control and pg_control.pbk (to detect partial catchup)
+ * 3. at the end (after copy of correct pg_control), remove pg_control.pbk
+ */
+ }
+
+ //REVIEW I wonder, if we can move this piece above and call before pg_start backup()?
+ //It seems to be a part of setup phase.
+ /*
+ * TODO: move to separate function to use in both backup.c and catchup.c
+ */
+ if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
+ {
+ XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(source_conn, &source_node_info);
+
+		// new ptrack (>= 2.0) is more robust and checks Start LSN: an invalid LSN,
+		// or one ahead of the destination's checkpoint, means the ptrack map does
+		// not cover all changes made since dest_redo.lsn
+ if (ptrack_lsn > dest_redo.lsn || ptrack_lsn == InvalidXLogRecPtr)
+ elog(ERROR, "LSN from ptrack_control in source %X/%X is greater than checkpoint LSN in destination %X/%X.\n"
+ "You can perform only FULL catchup.",
+ (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn),
+ (uint32) (dest_redo.lsn >> 32),
+ (uint32) (dest_redo.lsn));
+ }
+
+ /* Check that dest_redo.lsn is less than current.start_lsn */
+ if (current.backup_mode != BACKUP_MODE_FULL &&
+ dest_redo.lsn > current.start_lsn)
+ elog(ERROR, "Current START LSN %X/%X is lower than SYNC LSN %X/%X, "
+ "it may indicate that we are trying to catchup with PostgreSQL instance from the past",
+ (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn),
+ (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn));
+
+ /* Start stream replication */
+ join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR);
+ fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
+ start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
+ current.start_lsn, current.tli);
+
+ source_filelist = parray_new();
+
+	/* list files with their logical paths, omitting $PGDATA itself */
+ if (fio_is_remote(FIO_DB_HOST))
+ fio_list_dir(source_filelist, source_pgdata,
+ true, true, false, backup_logs, true, 0);
+ else
+ dir_list_file(source_filelist, source_pgdata,
+ true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST);
+
+ //REVIEW FIXME. Let's fix that before release.
+ // TODO filter pg_xlog/wal?
+ // TODO what if wal is not a dir (symlink to a dir)?
+
+ /* close ssh session in main thread */
+ fio_disconnect();
+
+ //REVIEW Do we want to do similar calculation for dest?
+ current.pgdata_bytes += calculate_datasize_of_filelist(source_filelist);
+ pretty_size(current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes));
+ elog(INFO, "Source PGDATA size: %s", pretty_bytes);
+
+ /*
+ * Sort pathname ascending. It is necessary to create intermediate
+ * directories sequentially.
+ *
+ * For example:
+ * 1 - create 'base'
+ * 2 - create 'base/1'
+ *
+ * Sorted array is used at least in parse_filelist_filenames(),
+ * extractPageMap(), make_pagemap_from_ptrack().
+ */
+ parray_qsort(source_filelist, pgFileCompareRelPathWithExternal);
+
+	/* Extract information about files in source_filelist by parsing their names: */
+ parse_filelist_filenames(source_filelist, source_pgdata);
+
+ elog(LOG, "Start LSN (source): %X/%X, TLI: %X",
+ (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn),
+ current.tli);
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ elog(LOG, "LSN in destination: %X/%X, TLI: %X",
+ (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn),
+ dest_redo.tli);
+
+ /* Build page mapping in PTRACK mode */
+ if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
+ {
+ time(&start_time);
+ elog(INFO, "Extracting pagemap of changed blocks");
+
+ /* Build the page map from ptrack information */
+ make_pagemap_from_ptrack_2(source_filelist, source_conn,
+ source_node_info.ptrack_schema,
+ source_node_info.ptrack_version_num,
+ dest_redo.lsn);
+ time(&end_time);
+ elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec",
+ difftime(end_time, start_time));
+ }
+
+ /*
+ * Make directories before catchup
+ */
+	/*
+	 * We iterate over source_filelist and for every directory whose parent is 'pg_tblspc'
+	 * we look the directory name up in the tablespace map.
+	 * On a match we treat the directory as a tablespace: we create the directory
+	 * specified in the tablespace map and create the original directory as a symlink to it.
+	 */
+ for (i = 0; i < parray_num(source_filelist); i++)
+ {
+ pgFile *file = (pgFile *) parray_get(source_filelist, i);
+ char parent_dir[MAXPGPATH];
+
+ if (!S_ISDIR(file->mode))
+ continue;
+
+		/*
+		 * check whether this is a fake "directory" that is actually a tablespace link;
+		 * symlinks appear as directories because we passed follow_symlink when building the list
+		 */
+ /* get parent dir of rel_path */
+ strncpy(parent_dir, file->rel_path, MAXPGPATH);
+ get_parent_directory(parent_dir);
+
+		/* check if the directory is actually a link to a tablespace */
+ if (strcmp(parent_dir, PG_TBLSPC_DIR) != 0)
+ {
+ /* if the entry is a regular directory, create it in the destination */
+ char dirpath[MAXPGPATH];
+
+ join_path_components(dirpath, dest_pgdata, file->rel_path);
+
+ elog(VERBOSE, "Create directory '%s'", dirpath);
+ fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
+ }
+ else
+ {
+			/* this directory is located in pg_tblspc */
+ const char *linked_path = NULL;
+ char to_path[MAXPGPATH];
+
+ // TODO perform additional check that this is actually symlink?
+ { /* get full symlink path and map this path to new location */
+ char source_full_path[MAXPGPATH];
+ char symlink_content[MAXPGPATH];
+ join_path_components(source_full_path, source_pgdata, file->rel_path);
+ fio_readlink(source_full_path, symlink_content, sizeof(symlink_content), FIO_DB_HOST);
+ /* we checked that mapping exists in preflight_checks for local catchup */
+ linked_path = get_tablespace_mapping(symlink_content);
+ elog(INFO, "Map tablespace full_path: \"%s\" old_symlink_content: \"%s\" new_symlink_content: \"%s\"\n",
+ source_full_path,
+ symlink_content,
+ linked_path);
+ }
+
+ if (!is_absolute_path(linked_path))
+ elog(ERROR, "Tablespace directory path must be an absolute path: %s\n",
+ linked_path);
+
+ join_path_components(to_path, dest_pgdata, file->rel_path);
+
+ elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
+ linked_path, to_path);
+
+ /* create tablespace directory */
+ if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0)
+ elog(ERROR, "Could not create tablespace directory \"%s\": %s",
+ linked_path, strerror(errno));
+
+ /* create link to linked_path */
+ if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0)
+ elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s",
+ linked_path, to_path, strerror(errno));
+ }
+ }
+
+	/*
+	 * find the pg_control file (in the already sorted source_filelist)
+	 * and exclude it from the list for special processing later
+	 */
+ {
+ int control_file_elem_index;
+ pgFile search_key;
+ MemSet(&search_key, 0, sizeof(pgFile));
+		/* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparison */
+ search_key.rel_path = XLOG_CONTROL_FILE;
+ search_key.external_dir_num = 0;
+ control_file_elem_index = parray_bsearch_index(source_filelist, &search_key, pgFileCompareRelPathWithExternal);
+ if(control_file_elem_index < 0)
+ elog(ERROR, "\"%s\" not found in \"%s\"\n", XLOG_CONTROL_FILE, source_pgdata);
+ source_pg_control_file = parray_remove(source_filelist, control_file_elem_index);
+ }
+
+	/*
+	 * remove files from dest that are absent in source (dropped tables, etc...)
+	 * note: global/pg_control will also be deleted here
+	 */
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ {
+ elog(INFO, "Removing redundant files in destination directory");
+ parray_qsort(dest_filelist, pgFileCompareRelPathWithExternalDesc);
+ for (i = 0; i < parray_num(dest_filelist); i++)
+ {
+ bool redundant = true;
+ pgFile *file = (pgFile *) parray_get(dest_filelist, i);
+
+ //TODO optimize it and use some merge-like algorithm
+ //instead of bsearch for each file.
+ if (parray_bsearch(source_filelist, file, pgFileCompareRelPathWithExternal))
+ redundant = false;
+
+			/* pg_filenode.map is always restored, because its CRC cannot be trusted */
+ Assert(file->external_dir_num == 0);
+ if (pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0)
+ redundant = true;
+
+			//REVIEW This check seems unneeded. Anyway we delete only redundant stuff below.
+ /* do not delete the useful internal directories */
+ if (S_ISDIR(file->mode) && !redundant)
+ continue;
+
+			/* if the file does not exist in the source list, we can safely unlink it from dest */
+ if (redundant)
+ {
+ char fullpath[MAXPGPATH];
+
+ join_path_components(fullpath, dest_pgdata, file->rel_path);
+
+ fio_delete(file->mode, fullpath, FIO_DB_HOST);
+ elog(VERBOSE, "Deleted file \"%s\"", fullpath);
+
+ /* shrink pgdata list */
+ pgFileFree(file);
+ parray_remove(dest_filelist, i);
+ i--;
+ }
+ }
+ }
+
+ /* clear file locks */
+ pfilearray_clear_locks(source_filelist);
+
+ /* Sort by size for load balancing */
+ parray_qsort(source_filelist, pgFileCompareSizeDesc);
+
+ /* Sort the array for binary search */
+ if (dest_filelist)
+ parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal);
+
+ /* run copy threads */
+ elog(INFO, "Start transferring data files");
+ time(&start_time);
+ catchup_isok = catchup_multithreaded_copy(num_threads, &source_node_info,
+ source_pgdata, dest_pgdata,
+ source_filelist, dest_filelist,
+ dest_redo.lsn, current.backup_mode);
+
+	/* copy pg_control last, so a partially caught-up destination does not look complete (see the partial-catchup note above) */
+ if (catchup_isok)
+ {
+ char from_fullpath[MAXPGPATH];
+ char to_fullpath[MAXPGPATH];
+ join_path_components(from_fullpath, source_pgdata, source_pg_control_file->rel_path);
+ join_path_components(to_fullpath, dest_pgdata, source_pg_control_file->rel_path);
+ copy_pgcontrol_file(from_fullpath, FIO_DB_HOST,
+ to_fullpath, FIO_LOCAL_HOST, source_pg_control_file);
+ }
+
+ time(&end_time);
+ pretty_time_interval(difftime(end_time, start_time),
+ pretty_time, lengthof(pretty_time));
+ if (catchup_isok)
+ elog(INFO, "Data files are transferred, time elapsed: %s",
+ pretty_time);
+ else
+ elog(ERROR, "Data files transferring failed, time elapsed: %s",
+ pretty_time);
+
+ /* Notify end of backup */
+ {
+ //REVIEW Is it relevant to catchup? I suppose it isn't, since catchup is a new code.
+ //If we do need it, please write a comment explaining that.
+ /* kludge against some old bug in archive_timeout. TODO: remove in 3.0.0 */
+ int timeout = (instance_config.archive_timeout > 0) ?
+ instance_config.archive_timeout : ARCHIVE_TIMEOUT_DEFAULT;
+ char *stop_backup_query_text = NULL;
+
+ pg_silent_client_messages(source_conn);
+
+ //REVIEW. Do we want to support pg 9.5? I suppose we never test it...
+ //Maybe check it and error out early?
+		/*
+		 * Create a restore point, but only if the backup is taken from master.
+		 * For PG 9.5 create it only if pguser is a superuser.
+		 */
+ if (!current.from_replica &&
+ !(source_node_info.server_version < 90600 &&
+ !source_node_info.is_superuser)) //TODO: check correctness
+ pg_create_restore_point(source_conn, current.start_time);
+
+ /* Execute pg_stop_backup using PostgreSQL connection */
+ pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, exclusive_backup, &stop_backup_query_text);
+
+ /*
+ * Wait for the result of pg_stop_backup(), but no longer than
+ * archive_timeout seconds
+ */
+ pg_stop_backup_consume(source_conn, source_node_info.server_version, exclusive_backup, timeout, stop_backup_query_text, &stop_backup_result);
+
+ /* Cleanup */
+ pg_free(stop_backup_query_text);
+ }
+
+	wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, &current);
+
+#if PG_VERSION_NUM >= 90600
+ /* Write backup_label */
+ Assert(stop_backup_result.backup_label_content != NULL);
+ pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label",
+ stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len,
+ NULL);
+ free(stop_backup_result.backup_label_content);
+ stop_backup_result.backup_label_content = NULL;
+ stop_backup_result.backup_label_content_len = 0;
+
+ /* tablespace_map */
+ if (stop_backup_result.tablespace_map_content != NULL)
+ {
+ // TODO what if tablespace is created during catchup?
+ /* Because we have already created symlinks in pg_tblspc earlier,
+ * we do not need to write the tablespace_map file.
+ * So this call is unnecessary:
+ * pg_stop_backup_write_file_helper(dest_pgdata, PG_TABLESPACE_MAP_FILE, "tablespace map",
+ * stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len,
+ * NULL);
+ */
+ free(stop_backup_result.tablespace_map_content);
+ stop_backup_result.tablespace_map_content = NULL;
+ stop_backup_result.tablespace_map_content_len = 0;
+ }
+#endif
+
+ if (wait_WAL_streaming_end(NULL))
+ elog(ERROR, "WAL streaming failed");
+
+ //REVIEW Please add a comment about these LSNs. It is a crucial part of the algorithm.
+ current.recovery_xid = stop_backup_result.snapshot_xid;
+
+ elog(LOG, "Getting the Recovery Time from WAL");
+
+ /* iterate over WAL from stop_backup lsn to start_backup lsn */
+ if (!read_recovery_info(dest_xlog_path, current.tli,
+ instance_config.xlog_seg_size,
+ current.start_lsn, current.stop_lsn,
+ &current.recovery_time))
+ {
+ elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp");
+ current.recovery_time = stop_backup_result.invocation_time;
+ }
+
+ /*
+ * In case of backup from replica >= 9.6 we must fix minRecPoint
+ */
+ if (current.from_replica && !exclusive_backup)
+ {
+ set_min_recovery_point(source_pg_control_file, dest_pgdata, current.stop_lsn);
+ }
+
+ /* close ssh session in main thread */
+ fio_disconnect();
+
+ /* Sync all copied files unless '--no-sync' flag is used */
+ if (catchup_isok)
+ {
+ if (sync_dest_files)
+ catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file);
+ else
+ elog(WARNING, "Files are not synced to disk");
+ }
+
+ /* Cleanup */
+ if (dest_filelist)
+ {
+ parray_walk(dest_filelist, pgFileFree);
+ parray_free(dest_filelist);
+ }
+ parray_walk(source_filelist, pgFileFree);
+ parray_free(source_filelist);
+ pgFileFree(source_pg_control_file);
+
+ //REVIEW: Are we going to do that before release?
+ /* TODO: show the amount of transferred data in bytes and calculate incremental ratio */
+
+ return 0;
+}
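
Editor's note: when --no-sync is not given, catchup_sync_destination_files flushes everything that was copied. A hedged sketch of the usual POSIX pattern for making one copied file durable: fsync the file itself, then its parent directory so the directory entry is persisted too (paths below are invented for the example; pg_probackup routes this through its fio layer, and fsync on a directory fd is a Linux-oriented idiom):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* fsync an arbitrary path opened read-only; works for directories on Linux */
    static int fsync_path(const char *path)
    {
        int fd = open(path, O_RDONLY);
        int rc;

        if (fd < 0)
            return -1;
        rc = fsync(fd);
        close(fd);
        return rc;
    }

    int main(void)
    {
        if (fsync_path("/tmp/dest/base/1/1259") != 0 ||
            fsync_path("/tmp/dest/base/1") != 0)
            perror("sync failed");
        return 0;
    }
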
diff --git a/src/configure.c b/src/configure.c
index cf172242a..9ffe2d7a7 100644
--- a/src/configure.c
+++ b/src/configure.c
@@ -277,18 +277,16 @@ do_show_config(void)
* values into the file.
*/
void
-do_set_config(bool missing_ok)
+do_set_config(InstanceState *instanceState, bool missing_ok)
{
- char path[MAXPGPATH];
char path_temp[MAXPGPATH];
FILE *fp;
int i;
- join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
- snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);
+ snprintf(path_temp, sizeof(path_temp), "%s.tmp", instanceState->instance_config_path);
- if (!missing_ok && !fileExists(path, FIO_LOCAL_HOST))
- elog(ERROR, "Configuration file \"%s\" doesn't exist", path);
+ if (!missing_ok && !fileExists(instanceState->instance_config_path, FIO_LOCAL_HOST))
+ elog(ERROR, "Configuration file \"%s\" doesn't exist", instanceState->instance_config_path);
fp = fopen(path_temp, "wt");
if (fp == NULL)
@@ -340,12 +338,12 @@ do_set_config(bool missing_ok)
elog(ERROR, "Failed to sync temp configuration file \"%s\": %s",
path_temp, strerror(errno));
- if (rename(path_temp, path) < 0)
+ if (rename(path_temp, instanceState->instance_config_path) < 0)
{
int errno_temp = errno;
unlink(path_temp);
elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s",
- path_temp, path, strerror(errno_temp));
+ path_temp, instanceState->instance_config_path, strerror(errno_temp));
}
}
@@ -354,8 +352,6 @@ init_config(InstanceConfig *config, const char *instance_name)
{
MemSet(config, 0, sizeof(InstanceConfig));
- config->name = pgut_strdup(instance_name);
-
/*
* Starting from PostgreSQL 11 WAL segment size may vary. Prior to
* PostgreSQL 10 xlog_seg_size is equal to XLOG_SEG_SIZE.
@@ -387,9 +383,8 @@ init_config(InstanceConfig *config, const char *instance_name)
* read instance config from file
*/
InstanceConfig *
-readInstanceConfigFile(const char *instance_name)
+readInstanceConfigFile(InstanceState *instanceState)
{
- char path[MAXPGPATH];
InstanceConfig *instance = pgut_new(InstanceConfig);
char *log_level_console = NULL;
char *log_level_file = NULL;
@@ -605,31 +600,21 @@ readInstanceConfigFile(const char *instance_name)
};
- init_config(instance, instance_name);
-
- sprintf(instance->backup_instance_path, "%s/%s/%s",
- backup_path, BACKUPS_DIR, instance_name);
- canonicalize_path(instance->backup_instance_path);
-
- sprintf(instance->arclog_path, "%s/%s/%s",
- backup_path, "wal", instance_name);
- canonicalize_path(instance->arclog_path);
+ init_config(instance, instanceState->instance_name);
- join_path_components(path, instance->backup_instance_path,
- BACKUP_CATALOG_CONF_FILE);
-
- if (fio_access(path, F_OK, FIO_BACKUP_HOST) != 0)
+ if (fio_access(instanceState->instance_config_path, F_OK, FIO_BACKUP_HOST) != 0)
{
- elog(WARNING, "Control file \"%s\" doesn't exist", path);
+ elog(WARNING, "Control file \"%s\" doesn't exist", instanceState->instance_config_path);
pfree(instance);
return NULL;
}
- parsed_options = config_read_opt(path, instance_options, WARNING, true, true);
+ parsed_options = config_read_opt(instanceState->instance_config_path,
+ instance_options, WARNING, true, true);
if (parsed_options == 0)
{
- elog(WARNING, "Control file \"%s\" is empty", path);
+ elog(WARNING, "Control file \"%s\" is empty", instanceState->instance_config_path);
pfree(instance);
return NULL;
}
@@ -650,7 +635,6 @@ readInstanceConfigFile(const char *instance_name)
#endif
return instance;
-
}
static void
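
Editor's note: the do_set_config change above keeps the write-to-temp-then-rename pattern, just with the precomputed instanceState->instance_config_path. The pattern is worth spelling out, since rename() is what makes the update atomic; a self-contained sketch under simplified assumptions (fixed buffer size, coarse error reporting):

    #include <stdio.h>
    #include <unistd.h>

    /* Replace a config file atomically: write a sibling .tmp, flush it
     * to disk, then rename() it over the original path. */
    static int write_config_atomically(const char *path, const char *content)
    {
        char path_temp[4096];
        FILE *fp;

        snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);

        fp = fopen(path_temp, "wt");
        if (fp == NULL)
            return -1;

        if (fputs(content, fp) == EOF ||
            fflush(fp) != 0 ||
            fsync(fileno(fp)) != 0)
        {
            fclose(fp);
            unlink(path_temp);
            return -1;
        }
        fclose(fp);

        if (rename(path_temp, path) != 0)
        {
            unlink(path_temp);
            return -1;
        }
        return 0;
    }
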
diff --git a/src/data.c b/src/data.c
index 60986fd5c..49b696059 100644
--- a/src/data.c
+++ b/src/data.c
@@ -268,7 +268,7 @@ get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno)
* PageIsOk(0) if page was successfully retrieved
* PageIsTruncated(-1) if the page was truncated
* SkipCurrentPage(-2) if we need to skip this page,
- * only used for DELTA backup
+ * only used for DELTA and PTRACK backup
* PageIsCorrupted(-3) if the page checksum mismatch
* or header corruption,
* only used for checkdb
@@ -276,8 +276,7 @@ get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno)
* return it to the caller
*/
static int32
-prepare_page(ConnectionArgs *conn_arg,
- pgFile *file, XLogRecPtr prev_backup_start_lsn,
+prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
BlockNumber blknum, FILE *in,
BackupMode backup_mode,
Page page, bool strict,
@@ -290,6 +289,7 @@ prepare_page(ConnectionArgs *conn_arg,
int try_again = PAGE_READ_ATTEMPTS;
bool page_is_valid = false;
BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum;
+ int rc = 0;
/* check for interrupt */
if (interrupted || thread_interrupted)
@@ -300,171 +300,112 @@ prepare_page(ConnectionArgs *conn_arg,
* Under high write load it's possible that we've read partly
* flushed page, so try several times before throwing an error.
*/
- if (backup_mode != BACKUP_MODE_DIFF_PTRACK || ptrack_version_num >= 200)
+ while (!page_is_valid && try_again--)
{
- int rc = 0;
- while (!page_is_valid && try_again--)
- {
- /* read the block */
- int read_len = fio_pread(in, page, blknum * BLCKSZ);
+ /* read the block */
+ int read_len = fio_pread(in, page, blknum * BLCKSZ);
- /* The block could have been truncated. It is fine. */
- if (read_len == 0)
- {
- elog(VERBOSE, "Cannot read block %u of \"%s\": "
- "block truncated", blknum, from_fullpath);
- return PageIsTruncated;
- }
- else if (read_len < 0)
- elog(ERROR, "Cannot read block %u of \"%s\": %s",
- blknum, from_fullpath, strerror(errno));
- else if (read_len != BLCKSZ)
- elog(WARNING, "Cannot read block %u of \"%s\": "
- "read %i of %d, try again",
- blknum, from_fullpath, read_len, BLCKSZ);
- else
+ /* The block could have been truncated. It is fine. */
+ if (read_len == 0)
+ {
+ elog(VERBOSE, "Cannot read block %u of \"%s\": "
+ "block truncated", blknum, from_fullpath);
+ return PageIsTruncated;
+ }
+ else if (read_len < 0)
+ elog(ERROR, "Cannot read block %u of \"%s\": %s",
+ blknum, from_fullpath, strerror(errno));
+ else if (read_len != BLCKSZ)
+ elog(WARNING, "Cannot read block %u of \"%s\": "
+ "read %i of %d, try again",
+ blknum, from_fullpath, read_len, BLCKSZ);
+ else
+ {
+ /* We have BLCKSZ of raw data, validate it */
+ rc = validate_one_page(page, absolute_blknum,
+ InvalidXLogRecPtr, page_st,
+ checksum_version);
+ switch (rc)
{
- /* We have BLCKSZ of raw data, validate it */
- rc = validate_one_page(page, absolute_blknum,
- InvalidXLogRecPtr, page_st,
- checksum_version);
- switch (rc)
- {
- case PAGE_IS_ZEROED:
- elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum);
+ case PAGE_IS_ZEROED:
+ elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum);
+ return PageIsOk;
+
+ case PAGE_IS_VALID:
+ /* in DELTA or PTRACK modes we must compare lsn */
+ if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK)
+ page_is_valid = true;
+ else
return PageIsOk;
-
- case PAGE_IS_VALID:
- /* in DELTA mode we must compare lsn */
- if (backup_mode == BACKUP_MODE_DIFF_DELTA)
- page_is_valid = true;
- else
- return PageIsOk;
- break;
-
- case PAGE_HEADER_IS_INVALID:
- elog(VERBOSE, "File: \"%s\" blknum %u have wrong page header, try again",
- from_fullpath, blknum);
- break;
-
- case PAGE_CHECKSUM_MISMATCH:
- elog(VERBOSE, "File: \"%s\" blknum %u have wrong checksum, try again",
- from_fullpath, blknum);
- break;
- default:
- Assert(false);
- }
+ break;
+
+ case PAGE_HEADER_IS_INVALID:
+ elog(VERBOSE, "File: \"%s\" blknum %u have wrong page header, try again",
+ from_fullpath, blknum);
+ break;
+
+ case PAGE_CHECKSUM_MISMATCH:
+ elog(VERBOSE, "File: \"%s\" blknum %u have wrong checksum, try again",
+ from_fullpath, blknum);
+ break;
+ default:
+ Assert(false);
}
}
-
- /*
- * If page is not valid after 100 attempts to read it
- * throw an error.
- */
- if (!page_is_valid)
- {
- int elevel = ERROR;
- char *errormsg = NULL;
-
- /* Get the details of corruption */
- if (rc == PAGE_HEADER_IS_INVALID)
- get_header_errormsg(page, &errormsg);
- else if (rc == PAGE_CHECKSUM_MISMATCH)
- get_checksum_errormsg(page, &errormsg,
- file->segno * RELSEG_SIZE + blknum);
-
- /* Error out in case of merge or backup without ptrack support;
- * issue warning in case of checkdb or backup with ptrack support
- */
- if (!strict)
- elevel = WARNING;
-
- if (errormsg)
- elog(elevel, "Corruption detected in file \"%s\", block %u: %s",
- from_fullpath, blknum, errormsg);
- else
- elog(elevel, "Corruption detected in file \"%s\", block %u",
- from_fullpath, blknum);
-
- pg_free(errormsg);
- return PageIsCorrupted;
- }
-
- /* Checkdb not going futher */
- if (!strict)
- return PageIsOk;
}
/*
- * Get page via ptrack interface from PostgreSQL shared buffer.
- * We do this only in the cases of PTRACK 1.x versions backup
+ * If the page is still not valid after PAGE_READ_ATTEMPTS attempts,
+ * throw an error.
*/
- if (backup_mode == BACKUP_MODE_DIFF_PTRACK
- && (ptrack_version_num >= 105 && ptrack_version_num < 200))
- {
- int rc = 0;
- size_t page_size = 0;
- Page ptrack_page = NULL;
- ptrack_page = (Page) pg_ptrack_get_block(conn_arg, file->dbOid, file->tblspcOid,
- file->relOid, absolute_blknum, &page_size,
- ptrack_version_num, ptrack_schema);
-
- if (ptrack_page == NULL)
- /* This block was truncated.*/
- return PageIsTruncated;
-
- if (page_size != BLCKSZ)
- elog(ERROR, "File: \"%s\", block %u, expected block size %d, but read %zu",
- from_fullpath, blknum, BLCKSZ, page_size);
-
- /*
- * We need to copy the page that was successfully
- * retrieved from ptrack into our output "page" parameter.
- */
- memcpy(page, ptrack_page, BLCKSZ);
- pg_free(ptrack_page);
-
- /*
- * UPD: It apprears that is possible to get zeroed page or page with invalid header
- * from shared buffer.
- * Note, that getting page with wrong checksumm from shared buffer is
- * acceptable.
- */
- rc = validate_one_page(page, absolute_blknum,
- InvalidXLogRecPtr, page_st,
- checksum_version);
-
- /* It is ok to get zeroed page */
- if (rc == PAGE_IS_ZEROED)
- return PageIsOk;
+ if (!page_is_valid)
+ {
+ int elevel = ERROR;
+ char *errormsg = NULL;
- /* Getting page with invalid header from shared buffers is unacceptable */
+ /* Get the details of corruption */
if (rc == PAGE_HEADER_IS_INVALID)
- {
- char *errormsg = NULL;
get_header_errormsg(page, &errormsg);
- elog(ERROR, "Corruption detected in file \"%s\", block %u: %s",
- from_fullpath, blknum, errormsg);
- }
+ else if (rc == PAGE_CHECKSUM_MISMATCH)
+ get_checksum_errormsg(page, &errormsg,
+ file->segno * RELSEG_SIZE + blknum);
- /*
- * We must set checksum here, because it is outdated
- * in the block recieved from shared buffers.
+ /* Error out in case of merge or backup without ptrack support;
+ * issue warning in case of checkdb or backup with ptrack support
*/
- if (checksum_version)
- page_st->checksum = ((PageHeader) page)->pd_checksum = pg_checksum_page(page, absolute_blknum);
+ if (!strict)
+ elevel = WARNING;
+
+ if (errormsg)
+ elog(elevel, "Corruption detected in file \"%s\", block %u: %s",
+ from_fullpath, blknum, errormsg);
+ else
+ elog(elevel, "Corruption detected in file \"%s\", block %u",
+ from_fullpath, blknum);
+
+ pg_free(errormsg);
+ return PageIsCorrupted;
}
+ /* Checkdb does not go further */
+ if (!strict)
+ return PageIsOk;
+
/*
* Skip page if page lsn is less than START_LSN of parent backup.
* Nullified pages must be copied by DELTA backup, just to be safe.
*/
- if (backup_mode == BACKUP_MODE_DIFF_DELTA &&
+ if ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
file->exists_in_prev &&
page_st->lsn > 0 &&
page_st->lsn < prev_backup_start_lsn)
{
- elog(VERBOSE, "Skipping blknum %u in file: \"%s\"", blknum, from_fullpath);
+ elog(VERBOSE, "Skipping blknum %u in file: \"%s\", file->exists_in_prev: %s, page_st->lsn: %X/%X, prev_backup_start_lsn: %X/%X",
+ blknum, from_fullpath,
+ file->exists_in_prev ? "true" : "false",
+ (uint32) (page_st->lsn >> 32), (uint32) page_st->lsn,
+ (uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn
+ );
return SkipCurrentPage;
}
@@ -522,6 +463,23 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
return compressed_size;
}
+/* Derived from compress_and_backup_page, but with all the header and compression magic stripped out; it is a plain one-to-one copy */
+static int
+copy_page(pgFile *file, BlockNumber blknum,
+ FILE *in, FILE *out, Page page,
+ const char *to_fullpath)
+{
+ /* write data page */
+ if (fio_fwrite(out, page, BLCKSZ) != BLCKSZ)
+ elog(ERROR, "File: \"%s\", cannot write at block %u: %s",
+ to_fullpath, blknum, strerror(errno));
+
+ file->write_size += BLCKSZ;
+ file->uncompressed_size += BLCKSZ;
+
+ return BLCKSZ;
+}
+
/*
* Backup data file in the from_root directory to the to_root directory with
* same relative path. If prev_backup_start_lsn is not NULL, only pages with
@@ -531,8 +489,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
* backup with special header.
*/
void
-backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
- const char *from_fullpath, const char *to_fullpath,
+backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
CompressAlg calg, int clevel, uint32 checksum_version,
int ptrack_version_num, const char *ptrack_schema,
@@ -603,7 +560,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
rc = fio_send_pages(to_fullpath, from_fullpath, file,
/* send prev backup START_LSN */
- backup_mode == BACKUP_MODE_DIFF_DELTA &&
+ (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
calg, clevel, checksum_version,
/* send pagemap if any */
@@ -614,9 +571,9 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
else
{
/* TODO: stop handling errors internally */
- rc = send_pages(conn_arg, to_fullpath, from_fullpath, file,
+ rc = send_pages(to_fullpath, from_fullpath, file,
/* send prev backup START_LSN */
- backup_mode == BACKUP_MODE_DIFF_DELTA &&
+ (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
calg, clevel, checksum_version, use_pagemap,
&headers, backup_mode, ptrack_version_num, ptrack_schema);
@@ -688,6 +645,169 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
pg_free(headers);
}
+/*
+ * Backup data file in the from_root directory to the to_root directory with
+ * same relative path. If prev_backup_start_lsn is not NULL, only pages with
+ * higher lsn will be copied.
+ * Not just copy file, but read it block by block (use bitmap in case of
+ * incremental backup), validate checksum, optionally compress and write to
+ * backup with special header.
+ */
+void
+catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
+ XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
+ CompressAlg calg, int clevel, uint32 checksum_version,
+ int ptrack_version_num, const char *ptrack_schema,
+ bool is_merge, size_t prev_size)
+{
+ int rc;
+ bool use_pagemap;
+ char *errmsg = NULL;
+ BlockNumber err_blknum = 0;
+ /* page headers */
+ BackupPageHeader2 *headers = NULL;
+
+ /* sanity */
+ if (file->size % BLCKSZ != 0)
+ elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, file->size);
+
+ /*
+ * Compute the expected number of blocks in the file.
+ * NOTE: it is normal for the file size to have changed
+ * since the moment we last computed it.
+ */
+ file->n_blocks = file->size/BLCKSZ;
+
+ /*
+ * Skip unchanged file only if it exists in previous backup.
+ * This way we can correctly handle null-sized files which are
+ * not tracked by pagemap and thus always marked as unchanged.
+ */
+ if (backup_mode == BACKUP_MODE_DIFF_PTRACK &&
+ file->pagemap.bitmapsize == PageBitmapIsEmpty &&
+ file->exists_in_prev && file->size == prev_size && !file->pagemap_isabsent)
+ {
+ /*
+ * There are no changed blocks since last backup. We want to make
+ * incremental backup, so we should exit.
+ */
+ file->write_size = BYTES_INVALID;
+ return;
+ }
+
+ /* reset size summary */
+ file->read_size = 0;
+ file->write_size = 0;
+ file->uncompressed_size = 0;
+ INIT_FILE_CRC32(true, file->crc);
+
+ /*
+ * Read each page, verify checksum and write it to backup.
+ * If page map is empty or file is not present in previous backup
+ * backup all pages of the relation.
+ *
+ * In PTRACK 1.x there was a problem
+ * of data files with missing _ptrack map.
+ * Such files should be fully copied.
+ */
+
+ if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
+ file->pagemap_isabsent || !file->exists_in_prev ||
+ !file->pagemap.bitmap)
+ use_pagemap = false;
+ else
+ use_pagemap = true;
+
+ if (use_pagemap)
+ elog(VERBOSE, "Using pagemap for file \"%s\"", file->rel_path);
+
+ /* Remote mode */
+ if (fio_is_remote(FIO_DB_HOST))
+ {
+ rc = fio_copy_pages(to_fullpath, from_fullpath, file,
+ /* send prev backup START_LSN */
+ (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
+ file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
+ calg, clevel, checksum_version,
+ /* send pagemap if any */
+ use_pagemap,
+ /* variables for error reporting */
+ &err_blknum, &errmsg, &headers);
+ }
+ else
+ {
+ /* TODO: stop handling errors internally */
+ rc = copy_pages(to_fullpath, from_fullpath, file,
+ /* send prev backup START_LSN */
+ (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
+ file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
+ checksum_version, use_pagemap,
+ backup_mode, ptrack_version_num, ptrack_schema);
+ }
+
+ /* check for errors */
+ if (rc == FILE_MISSING)
+ {
+ elog(is_merge ? ERROR : LOG, "File not found: \"%s\"", from_fullpath);
+ file->write_size = FILE_NOT_FOUND;
+ goto cleanup;
+ }
+
+ else if (rc == WRITE_FAILED)
+ elog(ERROR, "Cannot write block %u of \"%s\": %s",
+ err_blknum, to_fullpath, strerror(errno));
+
+ else if (rc == PAGE_CORRUPTION)
+ {
+ if (errmsg)
+ elog(ERROR, "Corruption detected in file \"%s\", block %u: %s",
+ from_fullpath, err_blknum, errmsg);
+ else
+ elog(ERROR, "Corruption detected in file \"%s\", block %u",
+ from_fullpath, err_blknum);
+ }
+ /* OPEN_FAILED and READ_FAILED */
+ else if (rc == OPEN_FAILED)
+ {
+ if (errmsg)
+ elog(ERROR, "%s", errmsg);
+ else
+ elog(ERROR, "Cannot open file \"%s\"", from_fullpath);
+ }
+ else if (rc == READ_FAILED)
+ {
+ if (errmsg)
+ elog(ERROR, "%s", errmsg);
+ else
+ elog(ERROR, "Cannot read file \"%s\"", from_fullpath);
+ }
+
+ file->read_size = rc * BLCKSZ;
+
+ /* refresh n_blocks for FULL and DELTA */
+ if (backup_mode == BACKUP_MODE_FULL ||
+ backup_mode == BACKUP_MODE_DIFF_DELTA)
+ file->n_blocks = file->read_size / BLCKSZ;
+
+ /* Determine that the file did not change in case of incremental catchup */
+ if (backup_mode != BACKUP_MODE_FULL &&
+ file->exists_in_prev &&
+ file->write_size == 0 &&
+ file->n_blocks > 0)
+ {
+ file->write_size = BYTES_INVALID;
+ }
+
+cleanup:
+
+ /* finish CRC calculation */
+ FIN_FILE_CRC32(true, file->crc);
+
+ pg_free(errmsg);
+ pg_free(file->pagemap.bitmap);
+ pg_free(headers);
+}
+
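
Editor's note: catchup_data_file copies only the blocks whose bits are set in the file's pagemap when one is available. datapagemap_iterate/datapagemap_next are PostgreSQL helpers; the sketch below is a self-contained approximation of that walk over a standard byte-per-8-blocks bitmap:

    #include <stdint.h>
    #include <stdio.h>

    /* Find the next set bit at or after *blkno; return 0 when exhausted. */
    static int bitmap_next(const uint8_t *map, unsigned nbits, unsigned *blkno)
    {
        unsigned i;

        for (i = *blkno; i < nbits; i++)
        {
            if (map[i / 8] & (1 << (i % 8)))
            {
                *blkno = i;
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        uint8_t map[2] = {0x05, 0x80};   /* blocks 0, 2 and 15 changed */
        unsigned blkno = 0;

        while (bitmap_next(map, 16, &blkno))
        {
            printf("copy block %u\n", blkno);
            blkno++;                     /* resume after the hit */
        }
        return 0;
    }
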
/*
* Backup non data file
* We do not apply compression to this file.
@@ -1563,10 +1683,10 @@ check_data_file(ConnectionArgs *arguments, pgFile *file,
for (blknum = 0; blknum < nblocks; blknum++)
{
PageState page_st;
- page_state = prepare_page(NULL, file, InvalidXLogRecPtr,
- blknum, in, BACKUP_MODE_FULL,
- curr_page, false, checksum_version,
- 0, NULL, from_fullpath, &page_st);
+ page_state = prepare_page(file, InvalidXLogRecPtr,
+ blknum, in, BACKUP_MODE_FULL,
+ curr_page, false, checksum_version,
+ 0, NULL, from_fullpath, &page_st);
if (page_state == PageIsTruncated)
break;
@@ -1994,7 +2114,7 @@ open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size)
/* backup local file */
int
-send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_fullpath,
+send_pages(const char *to_fullpath, const char *from_fullpath,
pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel,
uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers,
BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema)
@@ -2052,11 +2172,12 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f
while (blknum < file->n_blocks)
{
PageState page_st;
- int rc = prepare_page(conn_arg, file, prev_backup_start_lsn,
- blknum, in, backup_mode, curr_page,
- true, checksum_version,
- ptrack_version_num, ptrack_schema,
- from_fullpath, &page_st);
+ int rc = prepare_page(file, prev_backup_start_lsn,
+ blknum, in, backup_mode, curr_page,
+ true, checksum_version,
+ ptrack_version_num, ptrack_schema,
+ from_fullpath, &page_st);
+
if (rc == PageIsTruncated)
break;
@@ -2133,6 +2254,130 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f
return n_blocks_read;
}
+/* copy local file (derived from send_pages, but pages are copied verbatim, without adding headers or compression) */
+int
+copy_pages(const char *to_fullpath, const char *from_fullpath,
+ pgFile *file, XLogRecPtr sync_lsn,
+ uint32 checksum_version, bool use_pagemap,
+ BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema)
+{
+ FILE *in = NULL;
+ FILE *out = NULL;
+ char curr_page[BLCKSZ];
+ int n_blocks_read = 0;
+ BlockNumber blknum = 0;
+ datapagemap_iterator_t *iter = NULL;
+
+ /* stdio buffers */
+ char *in_buf = NULL;
+ char *out_buf = NULL;
+
+ /* open source file for read */
+ in = fopen(from_fullpath, PG_BINARY_R);
+ if (in == NULL)
+ {
+ /*
+ * If the file is not found, this is not an error:
+ * it could have been deleted by a concurrent postgres transaction.
+ */
+ if (errno == ENOENT)
+ return FILE_MISSING;
+
+ elog(ERROR, "Cannot open file \"%s\": %s", from_fullpath, strerror(errno));
+ }
+
+ /*
+ * Enable stdio buffering for local input file,
+ * unless the pagemap is involved, which
+ * implies a lot of random access.
+ */
+
+ if (use_pagemap)
+ {
+ iter = datapagemap_iterate(&file->pagemap);
+ datapagemap_next(iter, &blknum); /* set first block */
+
+ setvbuf(in, NULL, _IONBF, BUFSIZ);
+ }
+ else
+ {
+ in_buf = pgut_malloc(STDIO_BUFSIZE);
+ setvbuf(in, in_buf, _IOFBF, STDIO_BUFSIZE);
+ }
+
+ out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_BACKUP_HOST);
+ if (out == NULL)
+ elog(ERROR, "Cannot open destination file \"%s\": %s",
+ to_fullpath, strerror(errno));
+
+ /* update file permission */
+ if (fio_chmod(to_fullpath, file->mode, FIO_BACKUP_HOST) == -1)
+ elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath,
+ strerror(errno));
+
+ elog(VERBOSE, "ftruncate file \"%s\" to size %lu",
+ to_fullpath, file->size);
+ if (fio_ftruncate(out, file->size) == -1)
+ elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
+ to_fullpath, file->size, strerror(errno));
+
+ if (!fio_is_remote_file(out))
+ {
+ out_buf = pgut_malloc(STDIO_BUFSIZE);
+ setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE);
+ }
+
+ while (blknum < file->n_blocks)
+ {
+ PageState page_st;
+ int rc = prepare_page(file, sync_lsn,
+ blknum, in, backup_mode, curr_page,
+ true, checksum_version,
+ ptrack_version_num, ptrack_schema,
+ from_fullpath, &page_st);
+ if (rc == PageIsTruncated)
+ break;
+
+ else if (rc == PageIsOk)
+ {
+ if (fio_fseek(out, blknum * BLCKSZ) < 0)
+ {
+ elog(ERROR, "Cannot seek block %u of \"%s\": %s",
+ blknum, to_fullpath, strerror(errno));
+ }
+ copy_page(file, blknum, in, out, curr_page, to_fullpath);
+ }
+
+ n_blocks_read++;
+
+ /* next block */
+ if (use_pagemap)
+ {
+ /* exit if pagemap is exhausted */
+ if (!datapagemap_next(iter, &blknum))
+ break;
+ }
+ else
+ blknum++;
+ }
+
+ /* cleanup */
+ if (in && fclose(in))
+ elog(ERROR, "Cannot close the source file \"%s\": %s",
+ from_fullpath, strerror(errno));
+
+ /* close local output file */
+ if (out && fio_fclose(out))
+ elog(ERROR, "Cannot close the destination file \"%s\": %s",
+ to_fullpath, strerror(errno));
+
+ pg_free(iter);
+ pg_free(in_buf);
+ pg_free(out_buf);
+
+ return n_blocks_read;
+}
+
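
Editor's note: copy_pages seeks the destination to blknum * BLCKSZ before each write, so blocks outside the pagemap keep whatever the destination already had; that is also why the destination is opened with "r+" semantics above instead of being recreated. A reduced sketch of that positioned-write core (BLCKSZ fixed at 8192 and file names invented for the example; error handling trimmed):

    #include <stdio.h>

    #define BLCKSZ 8192

    /* Read block blknum from src and write it at the same offset in dst. */
    static int copy_one_block(FILE *src, FILE *dst, unsigned blknum)
    {
        char page[BLCKSZ];
        long off = (long) blknum * BLCKSZ;

        if (fseek(src, off, SEEK_SET) != 0 ||
            fread(page, 1, BLCKSZ, src) != BLCKSZ)
            return -1;

        if (fseek(dst, off, SEEK_SET) != 0 ||
            fwrite(page, 1, BLCKSZ, dst) != BLCKSZ)
            return -1;

        return 0;
    }

    int main(void)
    {
        FILE *src = fopen("source.data", "rb");
        FILE *dst = fopen("dest.data", "r+b");   /* keep untouched blocks */

        if (src && dst && copy_one_block(src, dst, 3) == 0)
            puts("block 3 copied");
        return 0;
    }
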
/*
* Attempt to open header file, read content and return as
* array of headers.
diff --git a/src/delete.c b/src/delete.c
index d1afa2874..6c70ff81e 100644
--- a/src/delete.c
+++ b/src/delete.c
@@ -14,14 +14,15 @@
#include
#include
-static void delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tli,
+static void delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timelineInfo *tli,
uint32 xlog_seg_size, bool dry_run);
static void do_retention_internal(parray *backup_list, parray *to_keep_list,
parray *to_purge_list);
-static void do_retention_merge(parray *backup_list, parray *to_keep_list,
- parray *to_purge_list, bool no_validate, bool no_sync);
+static void do_retention_merge(InstanceState *instanceState, parray *backup_list,
+ parray *to_keep_list, parray *to_purge_list,
+ bool no_validate, bool no_sync);
static void do_retention_purge(parray *to_keep_list, parray *to_purge_list);
-static void do_retention_wal(bool dry_run);
+static void do_retention_wal(InstanceState *instanceState, bool dry_run);
// TODO: more useful messages for dry run.
static bool backup_deleted = false; /* At least one backup was deleted */
@@ -29,7 +30,7 @@ static bool backup_merged = false; /* At least one merge was enacted */
static bool wal_deleted = false; /* At least one WAL segments was deleted */
void
-do_delete(time_t backup_id)
+do_delete(InstanceState *instanceState, time_t backup_id)
{
int i;
parray *backup_list,
@@ -39,7 +40,7 @@ do_delete(time_t backup_id)
char size_to_delete_pretty[20];
/* Get complete list of backups */
- backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID);
+ backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
delete_list = parray_new();
@@ -105,7 +106,7 @@ do_delete(time_t backup_id)
/* Clean WAL segments */
if (delete_wal)
- do_retention_wal(dry_run);
+ do_retention_wal(instanceState, dry_run);
/* cleanup */
parray_free(delete_list);
@@ -123,7 +124,7 @@ do_delete(time_t backup_id)
* which FULL backup should be keeped for redundancy obligation(only valid do),
* but if invalid backup is not guarded by retention - it is removed
*/
-void do_retention(bool no_validate, bool no_sync)
+void do_retention(InstanceState *instanceState, bool no_validate, bool no_sync)
{
parray *backup_list = NULL;
parray *to_keep_list = parray_new();
@@ -139,7 +140,7 @@ void do_retention(bool no_validate, bool no_sync)
MyLocation = FIO_LOCAL_HOST;
/* Get a complete list of backups. */
- backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID);
+ backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
if (parray_num(backup_list) == 0)
backup_list_is_empty = true;
@@ -172,14 +173,14 @@ void do_retention(bool no_validate, bool no_sync)
do_retention_internal(backup_list, to_keep_list, to_purge_list);
if (merge_expired && !dry_run && !backup_list_is_empty)
- do_retention_merge(backup_list, to_keep_list, to_purge_list, no_validate, no_sync);
+ do_retention_merge(instanceState, backup_list, to_keep_list, to_purge_list, no_validate, no_sync);
if (delete_expired && !dry_run && !backup_list_is_empty)
do_retention_purge(to_keep_list, to_purge_list);
/* TODO: some sort of dry run for delete_wal */
if (delete_wal)
- do_retention_wal(dry_run);
+ do_retention_wal(instanceState, dry_run);
/* TODO: consider dry-run flag */
@@ -406,7 +407,7 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
/* TODO: add ancestor(chain full backup) ID */
elog(INFO, "Backup %s, mode: %s, status: %s. Redundancy: %i/%i, Time Window: %ud/%ud. %s",
base36enc(backup->start_time),
- pgBackupGetBackupMode(backup),
+ pgBackupGetBackupMode(backup, false),
status2str(backup->status),
cur_full_backup_num,
instance_config.retention_redundancy,
@@ -424,7 +425,8 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
/* Merge partially expired incremental chains */
static void
-do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_list,
+do_retention_merge(InstanceState *instanceState, parray *backup_list,
+ parray *to_keep_list, parray *to_purge_list,
bool no_validate, bool no_sync)
{
int i;
@@ -542,9 +544,8 @@ do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_l
*
* Merge incremental chain from PAGE3 into FULL.
*/
-
keep_backup = parray_get(merge_list, 0);
- merge_chain(merge_list, full_backup, keep_backup, no_validate, no_sync);
+ merge_chain(instanceState, merge_list, full_backup, keep_backup, no_validate, no_sync);
backup_merged = true;
for (j = parray_num(merge_list) - 2; j >= 0; j--)
@@ -657,12 +658,13 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list)
* and delete them.
*/
static void
-do_retention_wal(bool dry_run)
+do_retention_wal(InstanceState *instanceState, bool dry_run)
{
parray *tli_list;
int i;
- tli_list = catalog_get_timelines(&instance_config);
+ //TODO check that instanceState is not NULL
+ tli_list = catalog_get_timelines(instanceState, &instance_config);
for (i = 0; i < parray_num(tli_list); i++)
{
@@ -701,22 +703,22 @@ do_retention_wal(bool dry_run)
{
if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
{
- delete_walfiles_in_tli(tlinfo->anchor_lsn,
+ delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn,
tlinfo, instance_config.xlog_seg_size, dry_run);
}
else
{
- delete_walfiles_in_tli(tlinfo->oldest_backup->start_lsn,
+ delete_walfiles_in_tli(instanceState, tlinfo->oldest_backup->start_lsn,
tlinfo, instance_config.xlog_seg_size, dry_run);
}
}
else
{
if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
- delete_walfiles_in_tli(tlinfo->anchor_lsn,
+ delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn,
tlinfo, instance_config.xlog_seg_size, dry_run);
else
- delete_walfiles_in_tli(InvalidXLogRecPtr,
+ delete_walfiles_in_tli(instanceState, InvalidXLogRecPtr,
tlinfo, instance_config.xlog_seg_size, dry_run);
}
}
@@ -758,7 +760,7 @@ delete_backup_files(pgBackup *backup)
* Update STATUS to BACKUP_STATUS_DELETING in preparation for the case which
* the error occurs before deleting all backup files.
*/
- write_backup_status(backup, BACKUP_STATUS_DELETING, instance_name, false);
+ write_backup_status(backup, BACKUP_STATUS_DELETING, false);
/* list files to be deleted */
files = parray_new();
@@ -812,7 +814,7 @@ delete_backup_files(pgBackup *backup)
* Q: Maybe we should stop treating partial WAL segments as second-class citizens?
*/
static void
-delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tlinfo,
+delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timelineInfo *tlinfo,
uint32 xlog_seg_size, bool dry_run)
{
XLogSegNo FirstToDeleteSegNo;
@@ -937,7 +939,7 @@ delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tlinfo,
{
char wal_fullpath[MAXPGPATH];
- join_path_components(wal_fullpath, instance_config.arclog_path, wal_file->file.name);
+ join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name);
/* save segment from purging */
if (instance_config.wal_depth >= 0 && wal_file->keep)
@@ -974,15 +976,13 @@ delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tlinfo,
/* Delete all backup files and wal files of given instance. */
int
-do_delete_instance(void)
+do_delete_instance(InstanceState *instanceState)
{
parray *backup_list;
int i;
- char instance_config_path[MAXPGPATH];
-
/* Delete all backups. */
- backup_list = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID);
+ backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
catalog_lock_backup_list(backup_list, 0, parray_num(backup_list) - 1, true, true);
@@ -997,32 +997,31 @@ do_delete_instance(void)
parray_free(backup_list);
/* Delete all wal files. */
- pgut_rmtree(arclog_path, false, true);
+ pgut_rmtree(instanceState->instance_wal_subdir_path, false, true);
/* Delete backup instance config file */
- join_path_components(instance_config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
- if (remove(instance_config_path))
+ if (remove(instanceState->instance_config_path))
{
- elog(ERROR, "Can't remove \"%s\": %s", instance_config_path,
+ elog(ERROR, "Can't remove \"%s\": %s", instanceState->instance_config_path,
strerror(errno));
}
/* Delete instance root directories */
- if (rmdir(backup_instance_path) != 0)
- elog(ERROR, "Can't remove \"%s\": %s", backup_instance_path,
+ if (rmdir(instanceState->instance_backup_subdir_path) != 0)
+ elog(ERROR, "Can't remove \"%s\": %s", instanceState->instance_backup_subdir_path,
strerror(errno));
- if (rmdir(arclog_path) != 0)
- elog(ERROR, "Can't remove \"%s\": %s", arclog_path,
+ if (rmdir(instanceState->instance_wal_subdir_path) != 0)
+ elog(ERROR, "Can't remove \"%s\": %s", instanceState->instance_wal_subdir_path,
strerror(errno));
- elog(INFO, "Instance '%s' successfully deleted", instance_name);
+ elog(INFO, "Instance '%s' successfully deleted", instanceState->instance_name);
return 0;
}
/* Delete all backups of given status in instance */
void
-do_delete_status(InstanceConfig *instance_config, const char *status)
+do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, const char *status)
{
int i;
parray *backup_list, *delete_list;
@@ -1045,11 +1044,11 @@ do_delete_status(InstanceConfig *instance_config, const char *status)
*/
pretty_status = status2str(status_for_delete);
- backup_list = catalog_get_backup_list(instance_config->name, INVALID_BACKUP_ID);
+ backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
if (parray_num(backup_list) == 0)
{
- elog(WARNING, "Instance '%s' has no backups", instance_config->name);
+ elog(WARNING, "Instance '%s' has no backups", instanceState->instance_name);
return;
}
@@ -1108,12 +1107,12 @@ do_delete_status(InstanceConfig *instance_config, const char *status)
if (!dry_run && n_deleted > 0)
elog(INFO, "Successfully deleted %i %s from instance '%s'",
n_deleted, n_deleted == 1 ? "backup" : "backups",
- instance_config->name);
+ instanceState->instance_name);
if (n_found == 0)
elog(WARNING, "Instance '%s' has no backups with status '%s'",
- instance_config->name, pretty_status);
+ instanceState->instance_name, pretty_status);
// we don`t do WAL purge here, because it is impossible to correctly handle
// dry-run case.
diff --git a/src/dir.c b/src/dir.c
index c5c5b3297..473534c8b 100644
--- a/src/dir.c
+++ b/src/dir.c
@@ -28,7 +28,7 @@
* start so they are not included in backups. The directories themselves are
* kept and included as empty to preserve access permissions.
*/
-const char *pgdata_exclude_dir[] =
+static const char *pgdata_exclude_dir[] =
{
PG_XLOG_DIR,
/*
@@ -222,6 +222,8 @@ pgFileInit(const char *rel_path)
/* Number of blocks backed up during backup */
file->n_headers = 0;
+ // Maybe add?
+ // pg_atomic_clear_flag(&file->lock);
return file;
}
@@ -483,6 +485,13 @@ pgFileCompareSize(const void *f1, const void *f2)
return 0;
}
+/* Compare two pgFile with their size in descending order */
+int
+pgFileCompareSizeDesc(const void *f1, const void *f2)
+{
+ return -1 * pgFileCompareSize(f1, f2);
+}
+
static int
pgCompareString(const void *str1, const void *str2)
{
@@ -675,26 +684,16 @@ dir_check_file(pgFile *file, bool backup_logs)
*/
if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0)
return CHECK_FALSE;
-
- if (sscanf_res == 3 && S_ISDIR(file->mode) &&
- strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) == 0)
- file->is_database = true;
}
else if (path_is_prefix_of_path("global", file->rel_path))
{
file->tblspcOid = GLOBALTABLESPACE_OID;
-
- if (S_ISDIR(file->mode) && strcmp(file->name, "global") == 0)
- file->is_database = true;
}
else if (path_is_prefix_of_path("base", file->rel_path))
{
file->tblspcOid = DEFAULTTABLESPACE_OID;
sscanf(file->rel_path, "base/%u/", &(file->dbOid));
-
- if (S_ISDIR(file->mode) && strcmp(file->name, "base") != 0)
- file->is_database = true;
}
/* Do not backup ptrack_init files */
@@ -895,7 +894,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir,
*
* Copy of function get_tablespace_mapping() from pg_basebackup.c.
*/
-static const char *
+const char *
get_tablespace_mapping(const char *dir)
{
TablespaceListCell *cell;
@@ -1450,7 +1449,7 @@ get_external_remap(char *current_dir)
*
* Returns true if the value was found in the line.
*/
-static bool
+bool
get_control_value(const char *str, const char *name,
char *value_str, int64 *value_int64, bool is_mandatory)
{
@@ -1574,123 +1573,6 @@ get_control_value(const char *str, const char *name,
return false; /* Make compiler happy */
}
-/*
- * Construct parray of pgFile from the backup content list.
- * If root is not NULL, path will be absolute path.
- */
-parray *
-dir_read_file_list(const char *root, const char *external_prefix,
- const char *file_txt, fio_location location, pg_crc32 expected_crc)
-{
- FILE *fp;
- parray *files;
- char buf[BLCKSZ];
- char stdio_buf[STDIO_BUFSIZE];
- pg_crc32 content_crc = 0;
-
- fp = fio_open_stream(file_txt, location);
- if (fp == NULL)
- elog(ERROR, "cannot open \"%s\": %s", file_txt, strerror(errno));
-
- /* enable stdio buffering for local file */
- if (!fio_is_remote(location))
- setvbuf(fp, stdio_buf, _IOFBF, STDIO_BUFSIZE);
-
- files = parray_new();
-
- INIT_FILE_CRC32(true, content_crc);
-
- while (fgets(buf, lengthof(buf), fp))
- {
- char path[MAXPGPATH];
- char linked[MAXPGPATH];
- char compress_alg_string[MAXPGPATH];
- int64 write_size,
- mode, /* bit length of mode_t depends on platforms */
- is_datafile,
- is_cfs,
- external_dir_num,
- crc,
- segno,
- n_blocks,
- n_headers,
- dbOid, /* used for partial restore */
- hdr_crc,
- hdr_off,
- hdr_size;
- pgFile *file;
-
- COMP_FILE_CRC32(true, content_crc, buf, strlen(buf));
-
- get_control_value(buf, "path", path, NULL, true);
- get_control_value(buf, "size", NULL, &write_size, true);
- get_control_value(buf, "mode", NULL, &mode, true);
- get_control_value(buf, "is_datafile", NULL, &is_datafile, true);
- get_control_value(buf, "is_cfs", NULL, &is_cfs, false);
- get_control_value(buf, "crc", NULL, &crc, true);
- get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
- get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false);
- get_control_value(buf, "dbOid", NULL, &dbOid, false);
-
- file = pgFileInit(path);
- file->write_size = (int64) write_size;
- file->mode = (mode_t) mode;
- file->is_datafile = is_datafile ? true : false;
- file->is_cfs = is_cfs ? true : false;
- file->crc = (pg_crc32) crc;
- file->compress_alg = parse_compress_alg(compress_alg_string);
- file->external_dir_num = external_dir_num;
- file->dbOid = dbOid ? dbOid : 0;
-
- /*
- * Optional fields
- */
-
- if (get_control_value(buf, "linked", linked, NULL, false) && linked[0])
- {
- file->linked = pgut_strdup(linked);
- canonicalize_path(file->linked);
- }
-
- if (get_control_value(buf, "segno", NULL, &segno, false))
- file->segno = (int) segno;
-
- if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false))
- file->n_blocks = (int) n_blocks;
-
- if (get_control_value(buf, "n_headers", NULL, &n_headers, false))
- file->n_headers = (int) n_headers;
-
- if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false))
- file->hdr_crc = (pg_crc32) hdr_crc;
-
- if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false))
- file->hdr_off = hdr_off;
-
- if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false))
- file->hdr_size = (int) hdr_size;
-
- parray_append(files, file);
- }
-
- FIN_FILE_CRC32(true, content_crc);
-
- if (ferror(fp))
- elog(ERROR, "Failed to read from file: \"%s\"", file_txt);
-
- fio_close_stream(fp);
-
- if (expected_crc != 0 &&
- expected_crc != content_crc)
- {
- elog(WARNING, "Invalid CRC of backup control file '%s': %u. Expected: %u",
- file_txt, content_crc, expected_crc);
- return NULL;
- }
-
- return files;
-}
-
/*
* Check if directory empty.
*/
@@ -1900,7 +1782,6 @@ read_database_map(pgBackup *backup)
char path[MAXPGPATH];
char database_map_path[MAXPGPATH];
-// pgBackupGetPath(backup, path, lengthof(path), DATABASE_DIR);
join_path_components(path, backup->root_dir, DATABASE_DIR);
join_path_components(database_map_path, path, DATABASE_MAP);
@@ -1977,3 +1858,17 @@ cleanup_tablespace(const char *path)
parray_walk(files, pgFileFree);
parray_free(files);
}
+
+/*
+ * Clear the synchronisation locks in a parray of (pgFile *)'s
+ */
+void
+pfilearray_clear_locks(parray *file_list)
+{
+ int i;
+ for (i = 0; i < parray_num(file_list); i++)
+ {
+ pgFile *file = (pgFile *) parray_get(file_list, i);
+ pg_atomic_clear_flag(&file->lock);
+ }
+}
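
Editor's note: pfilearray_clear_locks resets the per-file flags that copy threads use to claim files, so a file list can be reused for another pass. A hedged sketch of that claim/clear cycle with C11 atomic_flag (pg_atomic_flag is PostgreSQL's own portability wrapper for the same idea; the path is invented for the example):

    #include <stdatomic.h>
    #include <stdio.h>

    struct work_item { const char *path; atomic_flag lock; };

    /* A thread owns the item only if it is the first to set the flag. */
    static int try_claim(struct work_item *item)
    {
        return !atomic_flag_test_and_set(&item->lock);
    }

    int main(void)
    {
        struct work_item item = {"base/1/1259", ATOMIC_FLAG_INIT};

        printf("first claim:  %d\n", try_claim(&item));   /* 1: success */
        printf("second claim: %d\n", try_claim(&item));   /* 0: already taken */

        /* between passes the flags must be cleared again */
        atomic_flag_clear(&item.lock);
        printf("after clear:  %d\n", try_claim(&item));   /* 1: claimable again */
        return 0;
    }
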
diff --git a/src/help.c b/src/help.c
index f72dc90dc..921feaec0 100644
--- a/src/help.c
+++ b/src/help.c
@@ -2,13 +2,16 @@
*
* help.c
*
- * Copyright (c) 2017-2019, Postgres Professional
+ * Copyright (c) 2017-2021, Postgres Professional
*
*-------------------------------------------------------------------------
*/
+#include
#include "pg_probackup.h"
+static void help_nocmd(void);
+static void help_internal(void);
static void help_init(void);
static void help_backup(void);
static void help_restore(void);
@@ -24,58 +27,63 @@ static void help_del_instance(void);
static void help_archive_push(void);
static void help_archive_get(void);
static void help_checkdb(void);
+static void help_help(void);
+static void help_version(void);
+static void help_catchup(void);
void
-help_command(char *command)
+help_print_version(void)
{
- if (strcmp(command, "init") == 0)
- help_init();
- else if (strcmp(command, "backup") == 0)
- help_backup();
- else if (strcmp(command, "restore") == 0)
- help_restore();
- else if (strcmp(command, "validate") == 0)
- help_validate();
- else if (strcmp(command, "show") == 0)
- help_show();
- else if (strcmp(command, "delete") == 0)
- help_delete();
- else if (strcmp(command, "merge") == 0)
- help_merge();
- else if (strcmp(command, "set-backup") == 0)
- help_set_backup();
- else if (strcmp(command, "set-config") == 0)
- help_set_config();
- else if (strcmp(command, "show-config") == 0)
- help_show_config();
- else if (strcmp(command, "add-instance") == 0)
- help_add_instance();
- else if (strcmp(command, "del-instance") == 0)
- help_del_instance();
- else if (strcmp(command, "archive-push") == 0)
- help_archive_push();
- else if (strcmp(command, "archive-get") == 0)
- help_archive_get();
- else if (strcmp(command, "checkdb") == 0)
- help_checkdb();
- else if (strcmp(command, "--help") == 0
- || strcmp(command, "help") == 0
- || strcmp(command, "-?") == 0
- || strcmp(command, "--version") == 0
- || strcmp(command, "version") == 0
- || strcmp(command, "-V") == 0)
- printf(_("No help page for \"%s\" command. Try pg_probackup help\n"), command);
- else
- printf(_("Unknown command \"%s\". Try pg_probackup help\n"), command);
- exit(0);
+#ifdef PGPRO_VERSION
+ fprintf(stdout, "%s %s (Postgres Pro %s %s)\n",
+ PROGRAM_NAME, PROGRAM_VERSION,
+ PGPRO_VERSION, PGPRO_EDITION);
+#else
+ fprintf(stdout, "%s %s (PostgreSQL %s)\n",
+ PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION);
+#endif
+}
+
+void
+help_command(ProbackupSubcmd const subcmd)
+{
+ typedef void (* help_function_ptr)(void);
+ /* Order is important, keep it in sync with utils/configuration.h:enum ProbackupSubcmd declaration */
+ static help_function_ptr const help_functions[] =
+ {
+ &help_nocmd,
+ &help_init,
+ &help_add_instance,
+ &help_del_instance,
+ &help_archive_push,
+ &help_archive_get,
+ &help_backup,
+ &help_restore,
+ &help_validate,
+ &help_delete,
+ &help_merge,
+ &help_show,
+ &help_set_config,
+ &help_set_backup,
+ &help_show_config,
+ &help_checkdb,
+ &help_internal, // SSH_CMD
+ &help_internal, // AGENT_CMD
+ &help_help,
+ &help_version,
+ &help_catchup,
+ };
+
+ Assert((int)subcmd < sizeof(help_functions) / sizeof(help_functions[0]));
+ help_functions[(int)subcmd]();
}
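
Editor's note: the new help_command dispatches through an array indexed by the ProbackupSubcmd enum, which is why the comment warns that order matters. A small illustrative sketch of hardening such a table with a compile-time size check (the CMD_* names and help texts below are invented for the example, not the real enum); this catches a missing entry earlier than the runtime Assert above, though neither catches reordering:

    #include <assert.h>
    #include <stdio.h>

    typedef enum { CMD_INIT, CMD_BACKUP, CMD_RESTORE, CMD_COUNT } Subcmd;

    static void help_init(void)    { puts("init: create a backup catalog"); }
    static void help_backup(void)  { puts("backup: take a backup"); }
    static void help_restore(void) { puts("restore: restore from a backup"); }

    /* Order must match the Subcmd declaration above. */
    static void (*const help_funcs[])(void) = { help_init, help_backup, help_restore };

    static_assert(sizeof(help_funcs) / sizeof(help_funcs[0]) == CMD_COUNT,
                  "help_funcs is out of sync with Subcmd");

    int main(void)
    {
        help_funcs[CMD_BACKUP]();
        return 0;
    }
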
void
help_pg_probackup(void)
{
- printf(_("\n%s - utility to manage backup/recovery of PostgreSQL database.\n\n"), PROGRAM_NAME);
+ printf(_("\n%s - utility to manage backup/recovery of PostgreSQL database.\n"), PROGRAM_NAME);
- printf(_(" %s help [COMMAND]\n"), PROGRAM_NAME);
+ printf(_("\n %s help [COMMAND]\n"), PROGRAM_NAME);
printf(_("\n %s version\n"), PROGRAM_NAME);
@@ -127,7 +135,7 @@ help_pg_probackup(void)
printf(_(" [--error-log-filename=error-log-filename]\n"));
printf(_(" [--log-directory=log-directory]\n"));
printf(_(" [--log-rotation-size=log-rotation-size]\n"));
- printf(_(" [--log-rotation-age=log-rotation-age]\n"));
+ printf(_(" [--log-rotation-age=log-rotation-age] [--no-color]\n"));
printf(_(" [--delete-expired] [--delete-wal] [--merge-expired]\n"));
printf(_(" [--retention-redundancy=retention-redundancy]\n"));
printf(_(" [--retention-window=retention-window]\n"));
@@ -188,7 +196,7 @@ help_pg_probackup(void)
printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME);
printf(_(" [--instance=instance_name [-i backup-id]]\n"));
printf(_(" [--format=format] [--archive]\n"));
- printf(_(" [--help]\n"));
+ printf(_(" [--no-color] [--help]\n"));
printf(_("\n %s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-j num-threads] [--progress]\n"));
@@ -240,6 +248,19 @@ help_pg_probackup(void)
printf(_(" [--ssh-options]\n"));
printf(_(" [--help]\n"));
+ printf(_("\n %s catchup -b catchup-mode\n"), PROGRAM_NAME);
+ printf(_(" --source-pgdata=path_to_pgdata_on_remote_server\n"));
+ printf(_(" --destination-pgdata=path_to_local_dir\n"));
+ printf(_(" [--stream [-S slot-name]] [--temp-slot]\n"));
+ printf(_(" [-j num-threads]\n"));
+ printf(_(" [-T OLDDIR=NEWDIR]\n"));
+ printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
+ printf(_(" [-w --no-password] [-W --password]\n"));
+ printf(_(" [--remote-proto] [--remote-host]\n"));
+ printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
+ printf(_(" [--ssh-options]\n"));
+ printf(_(" [--help]\n"));
+
if ((PROGRAM_URL || PROGRAM_EMAIL))
{
printf("\n");
@@ -248,7 +269,18 @@ help_pg_probackup(void)
if (PROGRAM_EMAIL)
printf("Report bugs to <%s>.\n", PROGRAM_EMAIL);
}
- exit(0);
+}
+
+static void
+help_nocmd(void)
+{
+ printf(_("\nUnknown command. Try pg_probackup help\n\n"));
+}
+
+static void
+help_internal(void)
+{
+ printf(_("\nThis command is intended for internal use\n\n"));
}
static void
@@ -274,7 +306,7 @@ help_backup(void)
printf(_(" [--error-log-filename=error-log-filename]\n"));
printf(_(" [--log-directory=log-directory]\n"));
printf(_(" [--log-rotation-size=log-rotation-size]\n"));
- printf(_(" [--log-rotation-age=log-rotation-age]\n"));
+ printf(_(" [--log-rotation-age=log-rotation-age] [--no-color]\n"));
printf(_(" [--delete-expired] [--delete-wal] [--merge-expired]\n"));
printf(_(" [--retention-redundancy=retention-redundancy]\n"));
printf(_(" [--retention-window=retention-window]\n"));
@@ -330,6 +362,7 @@ help_backup(void)
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
+ printf(_(" --no-color disable the coloring of error and warning console messages\n"));
printf(_("\n Retention options:\n"));
printf(_(" --delete-expired delete backups expired according to current\n"));
@@ -490,6 +523,7 @@ help_restore(void)
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
+ printf(_(" --no-color disable the coloring of error and warning console messages\n"));
printf(_("\n Remote options:\n"));
printf(_(" --remote-proto=protocol remote protocol to use\n"));
@@ -555,7 +589,8 @@ help_validate(void)
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
- printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n\n"));
+ printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
+ printf(_(" --no-color disable the coloring of error and warning console messages\n\n"));
}
static void
@@ -600,6 +635,7 @@ help_checkdb(void)
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
+ printf(_(" --no-color disable the coloring of error and warning console messages\n"));
printf(_("\n Connection options:\n"));
printf(_(" -U, --pguser=USERNAME user name to connect as (default: current local user)\n"));
@@ -621,7 +657,8 @@ help_show(void)
printf(_(" --instance=instance_name show info about specific instance\n"));
printf(_(" -i, --backup-id=backup-id show info about specific backups\n"));
printf(_(" --archive show WAL archive information\n"));
- printf(_(" --format=format show format=PLAIN|JSON\n\n"));
+ printf(_(" --format=format show format=PLAIN|JSON\n"));
+ printf(_(" --no-color disable the coloring for plain format\n\n"));
}
static void
@@ -677,7 +714,8 @@ help_delete(void)
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
- printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n\n"));
+ printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
+ printf(_(" --no-color disable the coloring of error and warning console messages\n\n"));
}
static void
@@ -722,7 +760,8 @@ help_merge(void)
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
- printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n\n"));
+ printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
+ printf(_(" --no-color disable the coloring of error and warning console messages\n\n"));
}
static void
@@ -971,3 +1010,63 @@ help_archive_get(void)
printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n"));
printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n"));
}
+
+static void
+help_help(void)
+{
+ printf(_("\n%s help [command]\n"), PROGRAM_NAME);
+ printf(_("%s command --help\n\n"), PROGRAM_NAME);
+}
+
+static void
+help_version(void)
+{
+ printf(_("\n%s version\n"), PROGRAM_NAME);
+ printf(_("%s --version\n\n"), PROGRAM_NAME);
+}
+
+static void
+help_catchup(void)
+{
+ printf(_("\n%s catchup -b catchup-mode\n"), PROGRAM_NAME);
+ printf(_(" --source-pgdata=path_to_pgdata_on_remote_server\n"));
+ printf(_(" --destination-pgdata=path_to_local_dir\n"));
+ printf(_(" [--stream [-S slot-name]] [--temp-slot]\n"));
+ printf(_(" [-j num-threads]\n"));
+ printf(_(" [-T OLDDIR=NEWDIR]\n"));
+ printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
+ printf(_(" [-w --no-password] [-W --password]\n"));
+ printf(_(" [--remote-proto] [--remote-host]\n"));
+ printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
+ printf(_(" [--ssh-options]\n"));
+ printf(_(" [--help]\n\n"));
+
+ printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n"));
+ printf(_(" --stream stream the transaction log (only supported mode)\n"));
+ printf(_(" -S, --slot=SLOTNAME replication slot to use\n"));
+ printf(_(" --temp-slot use temporary replication slot\n"));
+
+ printf(_(" -j, --threads=NUM number of parallel threads\n"));
+
+ printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"));
+ printf(_(" relocate the tablespace from directory OLDDIR to NEWDIR\n"));
+
+ printf(_("\n Connection options:\n"));
+ printf(_(" -U, --pguser=USERNAME user name to connect as (default: current local user)\n"));
+ printf(_(" -d, --pgdatabase=DBNAME database to connect (default: username)\n"));
+ printf(_(" -h, --pghost=HOSTNAME database server host or socket directory(default: 'local socket')\n"));
+ printf(_(" -p, --pgport=PORT database server port (default: 5432)\n"));
+ printf(_(" -w, --no-password never prompt for password\n"));
+ printf(_(" -W, --password force password prompt\n\n"));
+
+ printf(_("\n Remote options:\n"));
+ printf(_(" --remote-proto=protocol remote protocol to use\n"));
+ printf(_(" available options: 'ssh', 'none' (default: ssh)\n"));
+ printf(_(" --remote-host=hostname remote host address or hostname\n"));
+ printf(_(" --remote-port=port remote host port (default: 22)\n"));
+ printf(_(" --remote-path=path path to directory with pg_probackup binary on remote host\n"));
+ printf(_(" (default: current binary path)\n"));
+ printf(_(" --remote-user=username user name for ssh connection (default: current user)\n"));
+ printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n"));
+ printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n"));
+}
diff --git a/src/init.c b/src/init.c
index 1ab6dc0f9..a4911cb5c 100644
--- a/src/init.c
+++ b/src/init.c
@@ -17,43 +17,39 @@
* Initialize backup catalog.
*/
int
-do_init(void)
+do_init(CatalogState *catalogState)
{
- char path[MAXPGPATH];
- char arclog_path_dir[MAXPGPATH];
int results;
- results = pg_check_dir(backup_path);
+ results = pg_check_dir(catalogState->catalog_path);
+
if (results == 4) /* exists and not empty*/
elog(ERROR, "backup catalog already exist and it's not empty");
else if (results == -1) /*trouble accessing directory*/
{
int errno_tmp = errno;
elog(ERROR, "cannot open backup catalog directory \"%s\": %s",
- backup_path, strerror(errno_tmp));
+ catalogState->catalog_path, strerror(errno_tmp));
}
/* create backup catalog root directory */
- dir_create_dir(backup_path, DIR_PERMISSION, false);
+ dir_create_dir(catalogState->catalog_path, DIR_PERMISSION, false);
/* create backup catalog data directory */
- join_path_components(path, backup_path, BACKUPS_DIR);
- dir_create_dir(path, DIR_PERMISSION, false);
+ dir_create_dir(catalogState->backup_subdir_path, DIR_PERMISSION, false);
/* create backup catalog wal directory */
- join_path_components(arclog_path_dir, backup_path, "wal");
- dir_create_dir(arclog_path_dir, DIR_PERMISSION, false);
+ dir_create_dir(catalogState->wal_subdir_path, DIR_PERMISSION, false);
- elog(INFO, "Backup catalog '%s' successfully inited", backup_path);
+ elog(INFO, "Backup catalog '%s' successfully inited", catalogState->catalog_path);
return 0;
}
int
-do_add_instance(InstanceConfig *instance)
+do_add_instance(InstanceState *instanceState, InstanceConfig *instance)
{
- char path[MAXPGPATH];
- char arclog_path_dir[MAXPGPATH];
struct stat st;
+ CatalogState *catalogState = instanceState->catalog_state;
/* PGDATA is always required */
if (instance->pgdata == NULL)
@@ -61,38 +57,37 @@ do_add_instance(InstanceConfig *instance)
"(-D, --pgdata)");
/* Read system_identifier from PGDATA */
- instance->system_identifier = get_system_identifier(instance->pgdata);
+ instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST);
/* Starting from PostgreSQL 11 read WAL segment size from PGDATA */
instance->xlog_seg_size = get_xlog_seg_size(instance->pgdata);
/* Ensure that all root directories already exist */
- if (access(backup_path, F_OK) != 0)
- elog(ERROR, "Directory does not exist: '%s'", backup_path);
+ /* TODO maybe call do_init() here instead of error?*/
+ if (access(catalogState->catalog_path, F_OK) != 0)
+ elog(ERROR, "Directory does not exist: '%s'", catalogState->catalog_path);
- join_path_components(path, backup_path, BACKUPS_DIR);
- if (access(path, F_OK) != 0)
- elog(ERROR, "Directory does not exist: '%s'", path);
+ if (access(catalogState->backup_subdir_path, F_OK) != 0)
+ elog(ERROR, "Directory does not exist: '%s'", catalogState->backup_subdir_path);
- join_path_components(arclog_path_dir, backup_path, "wal");
- if (access(arclog_path_dir, F_OK) != 0)
- elog(ERROR, "Directory does not exist: '%s'", arclog_path_dir);
+ if (access(catalogState->wal_subdir_path, F_OK) != 0)
+ elog(ERROR, "Directory does not exist: '%s'", catalogState->wal_subdir_path);
- if (stat(instance->backup_instance_path, &st) == 0 && S_ISDIR(st.st_mode))
+ if (stat(instanceState->instance_backup_subdir_path, &st) == 0 && S_ISDIR(st.st_mode))
elog(ERROR, "Instance '%s' backup directory already exists: '%s'",
- instance->name, instance->backup_instance_path);
+ instanceState->instance_name, instanceState->instance_backup_subdir_path);
/*
* Create directory for wal files of this specific instance.
* Existence check is extra paranoid because if we don't have such a
* directory in data dir, we shouldn't have it in wal as well.
*/
- if (stat(instance->arclog_path, &st) == 0 && S_ISDIR(st.st_mode))
+ if (stat(instanceState->instance_wal_subdir_path, &st) == 0 && S_ISDIR(st.st_mode))
elog(ERROR, "Instance '%s' WAL archive directory already exists: '%s'",
- instance->name, instance->arclog_path);
+ instanceState->instance_name, instanceState->instance_wal_subdir_path);
/* Create directory for data files of this specific instance */
- dir_create_dir(instance->backup_instance_path, DIR_PERMISSION, false);
- dir_create_dir(instance->arclog_path, DIR_PERMISSION, false);
+ dir_create_dir(instanceState->instance_backup_subdir_path, DIR_PERMISSION, false);
+ dir_create_dir(instanceState->instance_wal_subdir_path, DIR_PERMISSION, false);
/*
* Write initial configuration file.
@@ -124,8 +119,8 @@ do_add_instance(InstanceConfig *instance)
SOURCE_DEFAULT);
/* pgdata was set through command line */
- do_set_config(true);
+ do_set_config(instanceState, true);
- elog(INFO, "Instance '%s' successfully inited", instance_name);
+ elog(INFO, "Instance '%s' successfully inited", instanceState->instance_name);
return 0;
}
diff --git a/src/merge.c b/src/merge.c
index f351975d3..cd070fce4 100644
--- a/src/merge.c
+++ b/src/merge.c
@@ -69,7 +69,7 @@ static bool is_forward_compatible(parray *parent_chain);
* - Remove unnecessary files, which doesn't exist in the target backup anymore
*/
void
-do_merge(time_t backup_id, bool no_validate, bool no_sync)
+do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool no_sync)
{
parray *backups;
parray *merge_list = parray_new();
@@ -81,13 +81,13 @@ do_merge(time_t backup_id, bool no_validate, bool no_sync)
if (backup_id == INVALID_BACKUP_ID)
elog(ERROR, "required parameter is not specified: --backup-id");
- if (instance_name == NULL)
+ if (instanceState == NULL)
elog(ERROR, "required parameter is not specified: --instance");
elog(INFO, "Merge started");
/* Get list of all backups sorted in order of descending start time */
- backups = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID);
+ backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
/* Find destination backup first */
for (i = 0; i < parray_num(backups); i++)
@@ -406,7 +406,7 @@ do_merge(time_t backup_id, bool no_validate, bool no_sync)
catalog_lock_backup_list(merge_list, parray_num(merge_list) - 1, 0, true, true);
/* do actual merge */
- merge_chain(merge_list, full_backup, dest_backup, no_validate, no_sync);
+ merge_chain(instanceState, merge_list, full_backup, dest_backup, no_validate, no_sync);
if (!no_validate)
pgBackupValidate(full_backup, NULL);
@@ -436,7 +436,8 @@ do_merge(time_t backup_id, bool no_validate, bool no_sync)
* that chain is ok.
*/
void
-merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup,
+merge_chain(InstanceState *instanceState,
+ parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup,
bool no_validate, bool no_sync)
{
int i;
@@ -603,7 +604,7 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup,
write_backup(backup, true);
}
else
- write_backup_status(backup, BACKUP_STATUS_MERGING, instance_name, true);
+ write_backup_status(backup, BACKUP_STATUS_MERGING, true);
}
/* Construct path to database dir: /backup_dir/instance_name/FULL/database */
@@ -853,13 +854,9 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup,
else
{
/* Ugly */
- char backups_dir[MAXPGPATH];
- char instance_dir[MAXPGPATH];
char destination_path[MAXPGPATH];
- join_path_components(backups_dir, backup_path, BACKUPS_DIR);
- join_path_components(instance_dir, backups_dir, instance_name);
- join_path_components(destination_path, instance_dir,
+ join_path_components(destination_path, instanceState->instance_backup_subdir_path,
base36enc(full_backup->merge_dest_backup));
elog(LOG, "Rename %s to %s", full_backup->root_dir, destination_path);
@@ -1256,7 +1253,7 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup,
* 2 backups of old versions, where n_blocks is missing.
*/
- backup_data_file(NULL, tmp_file, to_fullpath_tmp1, to_fullpath_tmp2,
+ backup_data_file(tmp_file, to_fullpath_tmp1, to_fullpath_tmp2,
InvalidXLogRecPtr, BACKUP_MODE_FULL,
dest_backup->compress_alg, dest_backup->compress_level,
dest_backup->checksum_version, 0, NULL,
diff --git a/src/parsexlog.c b/src/parsexlog.c
index 19078fb64..7f1ca9c75 100644
--- a/src/parsexlog.c
+++ b/src/parsexlog.c
@@ -385,7 +385,7 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup,
* If we don't have WAL between start_lsn and stop_lsn,
* the backup is definitely corrupted. Update its status.
*/
- write_backup_status(backup, BACKUP_STATUS_CORRUPT, instance_name, true);
+ write_backup_status(backup, BACKUP_STATUS_CORRUPT, true);
elog(WARNING, "There are not enough WAL records to consistenly restore "
"backup %s from START LSN: %X/%X to STOP LSN: %X/%X",
diff --git a/src/pg_probackup.c b/src/pg_probackup.c
index 854493bdc..00796be04 100644
--- a/src/pg_probackup.c
+++ b/src/pg_probackup.c
@@ -2,6 +2,38 @@
*
* pg_probackup.c: Backup/Recovery manager for PostgreSQL.
*
+ * This is the entry point of the program.
+ * Parse the command name and its options, verify them, and call the
+ * do_***() function that implements the command.
+ *
+ * Avoid using global variables in the code.
+ * Pass all needed information as function arguments:
+ *
+ * TODO (see pg_probackup_state.h):
+ *
+ * Functions that work with the backup catalog accept catalogState,
+ * which currently contains only paths to backup catalog subdirectories,
+ * plus function-specific options.
+ *
+ * Functions that work with an instance accept an instanceState argument, which
+ * includes catalogState, instance_name,
+ * info about the pgdata associated with the instance (see pgState),
+ * various instance config options, and the list of backups belonging to the instance,
+ * plus function-specific options.
+ *
+ * Functions that work with multiple backups in the catalog
+ * accept instanceState and the info needed to determine the range of backups to handle,
+ * plus function-specific options.
+ *
+ * Functions that work with a single backup accept a backupState argument,
+ * which includes a link to the instanceState, the backup_id, and backup-specific info,
+ * plus function-specific options.
+ *
+ * Functions that work with a PostgreSQL instance (e.g. checkdb) accept pgState,
+ * which includes info about the pgdata directory and connection.
+ *
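+ * A minimal sketch of the intended calling convention (do_frobnicate()
+ * is a hypothetical command, shown for illustration only):
+ *
+ *     int
+ *     do_frobnicate(InstanceState *instanceState, bool some_option)
+ *     {
+ *         parray *backups = catalog_get_backup_list(instanceState,
+ *                                                   INVALID_BACKUP_ID);
+ *         ...
+ *     }
+ *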
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2019, Postgres Professional
*
@@ -9,6 +41,7 @@
*/
#include "pg_probackup.h"
+#include "pg_probackup_state.h"
#include "pg_getopt.h"
#include "streamutil.h"
@@ -27,46 +60,19 @@ const char *PROGRAM_FULL_PATH = NULL;
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
-typedef enum ProbackupSubcmd
-{
- NO_CMD = 0,
- INIT_CMD,
- ADD_INSTANCE_CMD,
- DELETE_INSTANCE_CMD,
- ARCHIVE_PUSH_CMD,
- ARCHIVE_GET_CMD,
- BACKUP_CMD,
- RESTORE_CMD,
- VALIDATE_CMD,
- DELETE_CMD,
- MERGE_CMD,
- SHOW_CMD,
- SET_CONFIG_CMD,
- SET_BACKUP_CMD,
- SHOW_CONFIG_CMD,
- CHECKDB_CMD
-} ProbackupSubcmd;
-
-
+/* ================ catalogState =========== */
/* directory options */
-char *backup_path = NULL;
-/*
- * path or to the data files in the backup catalog
- * $BACKUP_PATH/backups/instance_name
- */
-char backup_instance_path[MAXPGPATH];
-/*
- * path or to the wal files in the backup catalog
- * $BACKUP_PATH/wal/instance_name
- */
-char arclog_path[MAXPGPATH] = "";
+/* TODO: make it a local variable and pass it as an argument to all commands that need it. */
+static char *backup_path = NULL;
+
+static CatalogState *catalogState = NULL;
+/* ================ catalogState (END) =========== */
-/* colon separated external directories list ("/path1:/path2") */
-char *externaldir = NULL;
/* common options */
-static char *backup_id_string = NULL;
int num_threads = 1;
bool stream_wal = false;
+bool no_color = false;
+bool show_color = true;
bool is_archive_cmd = false;
pid_t my_pid = 0;
__thread int my_thread_num = 1;
@@ -82,6 +88,9 @@ bool backup_logs = false;
bool smooth_checkpoint;
char *remote_agent;
static char *backup_note = NULL;
+/* catchup options */
+static char *catchup_source_pgdata = NULL;
+static char *catchup_destination_pgdata = NULL;
/* restore options */
static char *target_time = NULL;
static char *target_xid = NULL;
@@ -123,10 +132,14 @@ bool force = false;
bool dry_run = false;
static char *delete_status = NULL;
/* compression options */
-bool compress_shortcut = false;
+static bool compress_shortcut = false;
+
+/* ================ instanceState =========== */
+static char *instance_name;
+
+static InstanceState *instanceState = NULL;
-/* other options */
-char *instance_name;
+/* ================ instanceState (END) =========== */
/* archive push options */
int batch_size = 1;
@@ -148,9 +161,10 @@ int64 ttl = -1;
static char *expire_time_string = NULL;
static pgSetBackupParams *set_backup_params = NULL;
-/* current settings */
+/* ================ backupState =========== */
+static char *backup_id_string = NULL;
pgBackup current;
-static ProbackupSubcmd backup_subcmd = NO_CMD;
+/* ================ backupState (END) =========== */
static bool help_opt = false;
@@ -158,7 +172,7 @@ static void opt_incr_restore_mode(ConfigOption *opt, const char *arg);
static void opt_backup_mode(ConfigOption *opt, const char *arg);
static void opt_show_format(ConfigOption *opt, const char *arg);
-static void compress_init(void);
+static void compress_init(ProbackupSubcmd const subcmd);
static void opt_datname_exclude_list(ConfigOption *opt, const char *arg);
static void opt_datname_include_list(ConfigOption *opt, const char *arg);
@@ -178,6 +192,7 @@ static ConfigOption cmd_options[] =
{ 'b', 132, "progress", &progress, SOURCE_CMD_STRICT },
{ 's', 'i', "backup-id", &backup_id_string, SOURCE_CMD_STRICT },
{ 'b', 133, "no-sync", &no_sync, SOURCE_CMD_STRICT },
+ { 'b', 134, "no-color", &no_color, SOURCE_CMD_STRICT },
/* backup options */
{ 'b', 180, "backup-pg-log", &backup_logs, SOURCE_CMD_STRICT },
{ 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT },
@@ -189,6 +204,9 @@ static ConfigOption cmd_options[] =
{ 'b', 184, "merge-expired", &merge_expired, SOURCE_CMD_STRICT },
{ 'b', 185, "dry-run", &dry_run, SOURCE_CMD_STRICT },
{ 's', 238, "note", &backup_note, SOURCE_CMD_STRICT },
+ /* catchup options */
+ { 's', 239, "source-pgdata", &catchup_source_pgdata, SOURCE_CMD_STRICT },
+ { 's', 240, "destination-pgdata", &catchup_destination_pgdata, SOURCE_CMD_STRICT },
/* restore options */
{ 's', 136, "recovery-target-time", &target_time, SOURCE_CMD_STRICT },
{ 's', 137, "recovery-target-xid", &target_xid, SOURCE_CMD_STRICT },
@@ -256,39 +274,25 @@ static ConfigOption cmd_options[] =
{ 0 }
};
-static void
-setMyLocation(void)
-{
-
-#ifdef WIN32
- if (IsSshProtocol())
- elog(ERROR, "Currently remote operations on Windows are not supported");
-#endif
-
- MyLocation = IsSshProtocol()
- ? (backup_subcmd == ARCHIVE_PUSH_CMD || backup_subcmd == ARCHIVE_GET_CMD)
- ? FIO_DB_HOST
- : (backup_subcmd == BACKUP_CMD || backup_subcmd == RESTORE_CMD || backup_subcmd == ADD_INSTANCE_CMD)
- ? FIO_BACKUP_HOST
- : FIO_LOCAL_HOST
- : FIO_LOCAL_HOST;
-}
-
/*
* Entry point of pg_probackup command.
*/
int
main(int argc, char *argv[])
{
- char *command = NULL,
- *command_name;
+ char *command = NULL;
+ ProbackupSubcmd backup_subcmd = NO_CMD;
PROGRAM_NAME_FULL = argv[0];
+ /* Check terminal presence and initialize ANSI escape codes for Windows */
+ init_console();
+
/* Initialize current backup */
pgBackupInit(¤t);
/* Initialize current instance configuration */
+ // TODO: get rid of this global variable craziness
init_config(&instance_config, instance_name);
PROGRAM_NAME = get_progname(argv[0]);
@@ -316,91 +320,59 @@ main(int argc, char *argv[])
/* Parse subcommands and non-subcommand options */
if (argc > 1)
{
- if (strcmp(argv[1], "archive-push") == 0)
- backup_subcmd = ARCHIVE_PUSH_CMD;
- else if (strcmp(argv[1], "archive-get") == 0)
- backup_subcmd = ARCHIVE_GET_CMD;
- else if (strcmp(argv[1], "add-instance") == 0)
- backup_subcmd = ADD_INSTANCE_CMD;
- else if (strcmp(argv[1], "del-instance") == 0)
- backup_subcmd = DELETE_INSTANCE_CMD;
- else if (strcmp(argv[1], "init") == 0)
- backup_subcmd = INIT_CMD;
- else if (strcmp(argv[1], "backup") == 0)
- backup_subcmd = BACKUP_CMD;
- else if (strcmp(argv[1], "restore") == 0)
- backup_subcmd = RESTORE_CMD;
- else if (strcmp(argv[1], "validate") == 0)
- backup_subcmd = VALIDATE_CMD;
- else if (strcmp(argv[1], "delete") == 0)
- backup_subcmd = DELETE_CMD;
- else if (strcmp(argv[1], "merge") == 0)
- backup_subcmd = MERGE_CMD;
- else if (strcmp(argv[1], "show") == 0)
- backup_subcmd = SHOW_CMD;
- else if (strcmp(argv[1], "set-config") == 0)
- backup_subcmd = SET_CONFIG_CMD;
- else if (strcmp(argv[1], "set-backup") == 0)
- backup_subcmd = SET_BACKUP_CMD;
- else if (strcmp(argv[1], "show-config") == 0)
- backup_subcmd = SHOW_CONFIG_CMD;
- else if (strcmp(argv[1], "checkdb") == 0)
- backup_subcmd = CHECKDB_CMD;
-#ifdef WIN32
- else if (strcmp(argv[1], "ssh") == 0)
- launch_ssh(argv);
-#endif
- else if (strcmp(argv[1], "agent") == 0)
- {
- /* 'No forward compatibility' sanity:
- * /old/binary -> ssh execute -> /newer/binary agent version_num
- * If we are executed as an agent for older binary, then exit with error
- */
- if (argc > 2)
- {
- elog(ERROR, "Version mismatch, pg_probackup binary with version '%s' "
- "is launched as an agent for pg_probackup binary with version '%s'",
- PROGRAM_VERSION, argv[2]);
- }
- fio_communicate(STDIN_FILENO, STDOUT_FILENO);
- return 0;
- }
- else if (strcmp(argv[1], "--help") == 0 ||
- strcmp(argv[1], "-?") == 0 ||
- strcmp(argv[1], "help") == 0)
- {
- if (argc > 2)
- help_command(argv[2]);
- else
- help_pg_probackup();
- }
- else if (strcmp(argv[1], "--version") == 0
- || strcmp(argv[1], "version") == 0
- || strcmp(argv[1], "-V") == 0)
+ backup_subcmd = parse_subcmd(argv[1]);
+ switch(backup_subcmd)
{
-#ifdef PGPRO_VERSION
- fprintf(stdout, "%s %s (Postgres Pro %s %s)\n",
- PROGRAM_NAME, PROGRAM_VERSION,
- PGPRO_VERSION, PGPRO_EDITION);
+ case SSH_CMD:
+#ifdef WIN32
+ launch_ssh(argv);
+ break;
#else
- fprintf(stdout, "%s %s (PostgreSQL %s)\n",
- PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION);
+ elog(ERROR, "\"ssh\" command implemented only for Windows");
+ break;
#endif
- exit(0);
+ case AGENT_CMD:
+ /* 'No forward compatibility' sanity:
+ * /old/binary -> ssh execute -> /newer/binary agent version_num
+ * If we are executed as an agent for older binary, then exit with error
+ */
+ if (argc > 2)
+ elog(ERROR, "Version mismatch, pg_probackup binary with version '%s' "
+ "is launched as an agent for pg_probackup binary with version '%s'",
+ PROGRAM_VERSION, argv[2]);
+ fio_communicate(STDIN_FILENO, STDOUT_FILENO);
+ return 0;
+ case HELP_CMD:
+ if (argc > 2)
+ {
+ /* 'pg_probackup help command' style */
+ help_command(parse_subcmd(argv[2]));
+ exit(0);
+ }
+ else
+ {
+ help_pg_probackup();
+ exit(0);
+ }
+ break;
+ case VERSION_CMD:
+ help_print_version();
+ exit(0);
+ case NO_CMD:
+ elog(ERROR, "Unknown subcommand \"%s\"", argv[1]);
+ default:
+ /* Silence compiler warnings */
+ break;
}
- else
- elog(ERROR, "Unknown subcommand \"%s\"", argv[1]);
}
-
- if (backup_subcmd == NO_CMD)
- elog(ERROR, "No subcommand specified");
+ else
+ elog(ERROR, "No subcommand specified. Please run with \"help\" argument to see possible subcommands.");
/*
* Make command string before getopt_long() will call. It permutes the
* content of argv.
*/
/* TODO why do we do that only for some commands? */
- command_name = pstrdup(argv[1]);
if (backup_subcmd == BACKUP_CMD ||
backup_subcmd == RESTORE_CMD ||
backup_subcmd == VALIDATE_CMD ||
@@ -440,10 +412,20 @@ main(int argc, char *argv[])
pgut_init();
+ if (no_color)
+ show_color = false;
+
if (help_opt)
- help_command(command_name);
+ {
+ /* 'pg_probackup command --help' style */
+ help_command(backup_subcmd);
+ exit(0);
+ }
- /* backup_path is required for all pg_probackup commands except help and checkdb */
+ /* set location based on cmdline options only */
+ setMyLocation(backup_subcmd);
+
+ /* ===== catalogState ======*/
if (backup_path == NULL)
{
/*
@@ -451,12 +433,8 @@ main(int argc, char *argv[])
* from environment variable
*/
backup_path = getenv("BACKUP_PATH");
- if (backup_path == NULL && backup_subcmd != CHECKDB_CMD)
- elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)");
}
- setMyLocation();
-
if (backup_path != NULL)
{
canonicalize_path(backup_path);
@@ -464,26 +442,52 @@ main(int argc, char *argv[])
/* Ensure that backup_path is an absolute path */
if (!is_absolute_path(backup_path))
elog(ERROR, "-B, --backup-path must be an absolute path");
+
+ catalogState = pgut_new(CatalogState);
+ strncpy(catalogState->catalog_path, backup_path, MAXPGPATH);
+ join_path_components(catalogState->backup_subdir_path,
+ catalogState->catalog_path, BACKUPS_DIR);
+ join_path_components(catalogState->wal_subdir_path,
+ catalogState->catalog_path, WAL_SUBDIR);
}
- /* Ensure that backup_path is an absolute path */
- if (backup_path && !is_absolute_path(backup_path))
- elog(ERROR, "-B, --backup-path must be an absolute path");
+ /* backup_path is required for all pg_probackup commands except help, version, checkdb and catchup */
+ if (backup_path == NULL &&
+ backup_subcmd != CHECKDB_CMD &&
+ backup_subcmd != HELP_CMD &&
+ backup_subcmd != VERSION_CMD &&
+ backup_subcmd != CATCHUP_CMD)
+ elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)");
+ /* ===== catalogState (END) ======*/
+
+ /* ===== instanceState ======*/
/*
* Option --instance is required for all commands except
- * init, show, checkdb and validate
+ * init, show, checkdb, validate and catchup
*/
if (instance_name == NULL)
{
if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD &&
- backup_subcmd != VALIDATE_CMD && backup_subcmd != CHECKDB_CMD)
+ backup_subcmd != VALIDATE_CMD && backup_subcmd != CHECKDB_CMD && backup_subcmd != CATCHUP_CMD)
elog(ERROR, "required parameter not specified: --instance");
}
else
- /* Set instance name */
- instance_config.name = pgut_strdup(instance_name);
+ {
+ instanceState = pgut_new(InstanceState);
+ instanceState->catalog_state = catalogState;
+
+ strncpy(instanceState->instance_name, instance_name, MAXPGPATH);
+ join_path_components(instanceState->instance_backup_subdir_path,
+ catalogState->backup_subdir_path, instanceState->instance_name);
+ join_path_components(instanceState->instance_wal_subdir_path,
+ catalogState->wal_subdir_path, instanceState->instance_name);
+ join_path_components(instanceState->instance_config_path,
+ instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE);
+
+ }
+ /* ===== instanceState (END) ======*/
/*
* If --instance option was passed, construct paths for backup data and
@@ -491,28 +495,6 @@ main(int argc, char *argv[])
*/
if ((backup_path != NULL) && instance_name)
{
- /*
- * Fill global variables used to generate pathes inside the instance's
- * backup catalog.
- * TODO replace global variables with InstanceConfig structure fields
- */
- sprintf(backup_instance_path, "%s/%s/%s",
- backup_path, BACKUPS_DIR, instance_name);
- sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
-
- /*
- * Fill InstanceConfig structure fields used to generate pathes inside
- * the instance's backup catalog.
- * TODO continue refactoring to use these fields instead of global vars
- */
- sprintf(instance_config.backup_instance_path, "%s/%s/%s",
- backup_path, BACKUPS_DIR, instance_name);
- canonicalize_path(instance_config.backup_instance_path);
-
- sprintf(instance_config.arclog_path, "%s/%s/%s",
- backup_path, "wal", instance_name);
- canonicalize_path(instance_config.arclog_path);
-
/*
* Ensure that requested backup instance exists.
* for all commands except init, which doesn't take this parameter,
@@ -524,10 +506,11 @@ main(int argc, char *argv[])
{
struct stat st;
- if (fio_stat(backup_instance_path, &st, true, FIO_BACKUP_HOST) != 0)
+ if (fio_stat(instanceState->instance_backup_subdir_path,
+ &st, true, FIO_BACKUP_HOST) != 0)
{
elog(WARNING, "Failed to access directory \"%s\": %s",
- backup_instance_path, strerror(errno));
+ instanceState->instance_backup_subdir_path, strerror(errno));
// TODO: redundant message, should we get rid of it?
elog(ERROR, "Instance '%s' does not exist in this backup catalog",
@@ -549,7 +532,6 @@ main(int argc, char *argv[])
*/
if (instance_name)
{
- char path[MAXPGPATH];
/* Read environment variables */
config_get_opt_env(instance_options);
@@ -557,15 +539,22 @@ main(int argc, char *argv[])
if (backup_subcmd != ADD_INSTANCE_CMD &&
backup_subcmd != ARCHIVE_GET_CMD)
{
- join_path_components(path, backup_instance_path,
- BACKUP_CATALOG_CONF_FILE);
-
if (backup_subcmd == CHECKDB_CMD)
- config_read_opt(path, instance_options, ERROR, true, true);
+ config_read_opt(instanceState->instance_config_path, instance_options, ERROR, true, true);
else
- config_read_opt(path, instance_options, ERROR, true, false);
+ config_read_opt(instanceState->instance_config_path, instance_options, ERROR, true, false);
+
+ /*
+ * We can determine our location only after reading the configuration file,
+ * unless we are running archive-push/archive-get, which are allowed to trust
+ * the command line only.
+ */
+ setMyLocation(backup_subcmd);
}
- setMyLocation();
+ }
+ else if (backup_subcmd == CATCHUP_CMD)
+ {
+ config_get_opt_env(instance_options);
}
/*
@@ -609,6 +598,13 @@ main(int argc, char *argv[])
"You must specify --log-directory option when running checkdb with "
"--log-level-file option enabled.");
+ if (backup_subcmd == CATCHUP_CMD &&
+ instance_config.logger.log_level_file != LOG_OFF &&
+ instance_config.logger.log_directory == NULL)
+ elog(ERROR, "Cannot save catchup logs to a file. "
+ "You must specify --log-directory option when running catchup with "
+ "--log-level-file option enabled.");
+
/* Initialize logger */
init_logger(backup_path, &instance_config.logger);
@@ -667,7 +663,7 @@ main(int argc, char *argv[])
backup_subcmd != SET_BACKUP_CMD &&
backup_subcmd != SHOW_CMD)
elog(ERROR, "Cannot use -i (--backup-id) option together with the \"%s\" command",
- command_name);
+ get_subcmd_name(backup_subcmd));
current.backup_id = base36dec(backup_id_string);
if (current.backup_id == 0)
@@ -700,7 +696,7 @@ main(int argc, char *argv[])
if (force && backup_subcmd != RESTORE_CMD)
elog(ERROR, "You cannot specify \"--force\" flag with the \"%s\" command",
- command_name);
+ get_subcmd_name(backup_subcmd));
if (force)
no_validate = true;
@@ -767,10 +763,29 @@ main(int argc, char *argv[])
}
}
+ /* checking required options */
+ if (backup_subcmd == CATCHUP_CMD)
+ {
+ if (catchup_source_pgdata == NULL)
+ elog(ERROR, "You must specify \"--source-pgdata\" option with the \"%s\" command", get_subcmd_name(backup_subcmd));
+ if (catchup_destination_pgdata == NULL)
+ elog(ERROR, "You must specify \"--destination-pgdata\" option with the \"%s\" command", get_subcmd_name(backup_subcmd));
+ if (current.backup_mode == BACKUP_MODE_INVALID)
+ elog(ERROR, "Required parameter not specified: BACKUP_MODE (-b, --backup-mode)");
+ if (current.backup_mode != BACKUP_MODE_FULL && current.backup_mode != BACKUP_MODE_DIFF_PTRACK && current.backup_mode != BACKUP_MODE_DIFF_DELTA)
+ elog(ERROR, "Only \"FULL\", \"PTRACK\" and \"DELTA\" modes are supported with the \"%s\" command", get_subcmd_name(backup_subcmd));
+ if (!stream_wal)
+ elog(INFO, "--stream is required, forcing stream mode");
+ current.stream = stream_wal = true;
+ if (instance_config.external_dir_str)
+ elog(ERROR, "external directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd));
+ // TODO: check instance_config.conn_opt
+ }
+
/* sanity */
if (backup_subcmd == VALIDATE_CMD && restore_params->no_validate)
elog(ERROR, "You cannot specify \"--no-validate\" option with the \"%s\" command",
- command_name);
+ get_subcmd_name(backup_subcmd));
if (num_threads < 1)
num_threads = 1;
@@ -778,25 +793,25 @@ main(int argc, char *argv[])
if (batch_size < 1)
batch_size = 1;
- compress_init();
+ compress_init(backup_subcmd);
/* do actual operation */
switch (backup_subcmd)
{
case ARCHIVE_PUSH_CMD:
- do_archive_push(&instance_config, wal_file_path, wal_file_name,
+ do_archive_push(instanceState, &instance_config, wal_file_path, wal_file_name,
batch_size, file_overwrite, no_sync, no_ready_rename);
break;
case ARCHIVE_GET_CMD:
- do_archive_get(&instance_config, prefetch_dir,
+ do_archive_get(instanceState, &instance_config, prefetch_dir,
wal_file_path, wal_file_name, batch_size, !no_validate_wal);
break;
case ADD_INSTANCE_CMD:
- return do_add_instance(&instance_config);
+ return do_add_instance(instanceState, &instance_config);
case DELETE_INSTANCE_CMD:
- return do_delete_instance();
+ return do_delete_instance(instanceState);
case INIT_CMD:
- return do_init();
+ return do_init(catalogState);
case BACKUP_CMD:
{
current.stream = stream_wal;
@@ -806,10 +821,13 @@ main(int argc, char *argv[])
elog(ERROR, "required parameter not specified: BACKUP_MODE "
"(-b, --backup-mode)");
- return do_backup(set_backup_params, no_validate, no_sync, backup_logs);
+ return do_backup(instanceState, set_backup_params,
+ no_validate, no_sync, backup_logs);
}
+ case CATCHUP_CMD:
+ return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync);
case RESTORE_CMD:
- return do_restore_or_validate(current.backup_id,
+ return do_restore_or_validate(instanceState, current.backup_id,
recovery_target_options,
restore_params, no_sync);
case VALIDATE_CMD:
@@ -819,16 +837,16 @@ main(int argc, char *argv[])
if (datname_exclude_list || datname_include_list)
elog(ERROR, "You must specify parameter (-i, --backup-id) for partial validation");
- return do_validate_all();
+ return do_validate_all(catalogState, instanceState);
}
else
/* PITR validation and, optionally, partial validation */
- return do_restore_or_validate(current.backup_id,
+ return do_restore_or_validate(instanceState, current.backup_id,
recovery_target_options,
restore_params,
no_sync);
case SHOW_CMD:
- return do_show(instance_name, current.backup_id, show_archive);
+ return do_show(catalogState, instanceState, current.backup_id, show_archive);
case DELETE_CMD:
if (delete_expired && backup_id_string)
@@ -843,26 +861,26 @@ main(int argc, char *argv[])
if (!backup_id_string)
{
if (delete_status)
- do_delete_status(&instance_config, delete_status);
+ do_delete_status(instanceState, &instance_config, delete_status);
else
- do_retention(no_validate, no_sync);
+ do_retention(instanceState, no_validate, no_sync);
}
else
- do_delete(current.backup_id);
+ do_delete(instanceState, current.backup_id);
break;
case MERGE_CMD:
- do_merge(current.backup_id, no_validate, no_sync);
+ do_merge(instanceState, current.backup_id, no_validate, no_sync);
break;
case SHOW_CONFIG_CMD:
do_show_config();
break;
case SET_CONFIG_CMD:
- do_set_config(false);
+ do_set_config(instanceState, false);
break;
case SET_BACKUP_CMD:
if (!backup_id_string)
elog(ERROR, "You must specify parameter (-i, --backup-id) for 'set-backup' command");
- do_set_backup(instance_name, current.backup_id, set_backup_params);
+ do_set_backup(instanceState, current.backup_id, set_backup_params);
break;
case CHECKDB_CMD:
do_checkdb(need_amcheck,
@@ -871,6 +889,13 @@ main(int argc, char *argv[])
case NO_CMD:
/* Should not happen */
elog(ERROR, "Unknown subcommand");
+ case SSH_CMD:
+ case AGENT_CMD:
+ /* Maybe switch to using some do_agent() here for uniformity? */
+ case HELP_CMD:
+ case VERSION_CMD:
+ /* Silence compiler warnings, these already handled earlier */
+ break;
}
return 0;
@@ -933,13 +958,13 @@ opt_show_format(ConfigOption *opt, const char *arg)
* Initialize compress and sanity checks for compress.
*/
static void
-compress_init(void)
+compress_init(ProbackupSubcmd const subcmd)
{
/* Default algorithm is zlib */
if (compress_shortcut)
instance_config.compress_alg = ZLIB_COMPRESS;
- if (backup_subcmd != SET_CONFIG_CMD)
+ if (subcmd != SET_CONFIG_CMD)
{
if (instance_config.compress_level != COMPRESS_LEVEL_DEFAULT
&& instance_config.compress_alg == NOT_DEFINED_COMPRESS)
@@ -953,7 +978,7 @@ compress_init(void)
if (instance_config.compress_alg == ZLIB_COMPRESS && instance_config.compress_level == 0)
elog(WARNING, "Compression level 0 will lead to data bloat!");
- if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD)
+ if (subcmd == BACKUP_CMD || subcmd == ARCHIVE_PUSH_CMD)
{
#ifndef HAVE_LIBZ
if (instance_config.compress_alg == ZLIB_COMPRESS)
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index fca08bdac..1cad526dd 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -10,12 +10,14 @@
#ifndef PG_PROBACKUP_H
#define PG_PROBACKUP_H
+
#include "postgres_fe.h"
#include "libpq-fe.h"
#include "libpq-int.h"
#include "access/xlog_internal.h"
#include "utils/pg_crc.h"
+#include "catalog/pg_control.h"
#if PG_VERSION_NUM >= 120000
#include "common/logging.h"
@@ -39,12 +41,18 @@
#include "datapagemap.h"
#include "utils/thread.h"
+#include "pg_probackup_state.h"
+
+
#ifdef WIN32
#define __thread __declspec(thread)
#else
#include
#endif
+/* Wrap the code that we're going to delete after refactoring in this define */
+#define REFACTORE_ME
+
/* pgut client variables and full path */
extern const char *PROGRAM_NAME;
extern const char *PROGRAM_NAME_FULL;
@@ -55,6 +63,7 @@ extern const char *PROGRAM_EMAIL;
/* Directory/File names */
#define DATABASE_DIR "database"
#define BACKUPS_DIR "backups"
+#define WAL_SUBDIR "wal"
#if PG_VERSION_NUM >= 100000
#define PG_XLOG_DIR "pg_wal"
#define PG_LOG_DIR "log"
@@ -116,6 +125,23 @@ extern const char *PROGRAM_EMAIL;
#define XRecOffIsNull(xlrp) \
((xlrp) % XLOG_BLCKSZ == 0)
+/* Text Coloring macro */
+#define TC_LEN 11
+#define TC_RED "\033[0;31m"
+#define TC_RED_BOLD "\033[1;31m"
+#define TC_BLUE "\033[0;34m"
+#define TC_BLUE_BOLD "\033[1;34m"
+#define TC_GREEN "\033[0;32m"
+#define TC_GREEN_BOLD "\033[1;32m"
+#define TC_YELLOW "\033[0;33m"
+#define TC_YELLOW_BOLD "\033[1;33m"
+#define TC_MAGENTA "\033[0;35m"
+#define TC_MAGENTA_BOLD "\033[1;35m"
+#define TC_CYAN "\033[0;36m"
+#define TC_CYAN_BOLD "\033[1;36m"
+#define TC_RESET "\033[0m"
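+
+/*
+ * Usage sketch (the caller is expected to have checked show_color first):
+ *
+ *   snprintf(buf, buf_len, "%s%s%s", TC_GREEN_BOLD, "OK", TC_RESET);
+ */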
+
+
typedef struct RedoParams
{
TimeLineID tli;
@@ -239,7 +265,6 @@ typedef struct pgFile
int segno; /* Segment number for ptrack */
int n_blocks; /* number of blocks in the data file in data directory */
bool is_cfs; /* Flag to distinguish files compressed by CFS*/
- bool is_database; /* Flag used strictly by ptrack 1.x backup */
int external_dir_num; /* Number of external directory. 0 if not external */
bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */
CompressAlg compress_alg; /* compression algorithm applied to the file */
@@ -308,14 +333,14 @@ typedef enum ShowFormat
#define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */
#define FILE_NOT_FOUND (-2) /* file disappeared during backup */
#define BLOCKNUM_INVALID (-1)
-#define PROGRAM_VERSION "2.4.16"
+#define PROGRAM_VERSION "2.5.0"
/* update when remote agent API or behaviour changes */
-#define AGENT_PROTOCOL_VERSION 20409
-#define AGENT_PROTOCOL_VERSION_STR "2.4.9"
+#define AGENT_PROTOCOL_VERSION 20500
+#define AGENT_PROTOCOL_VERSION_STR "2.5.0"
/* update only when changing storage format */
-#define STORAGE_FORMAT_VERSION "2.4.4"
+#define STORAGE_FORMAT_VERSION "2.5.0"
typedef struct ConnectionOptions
{
@@ -345,10 +370,6 @@ typedef struct ArchiveOptions
*/
typedef struct InstanceConfig
{
- char *name;
- char arclog_path[MAXPGPATH];
- char backup_instance_path[MAXPGPATH];
-
uint64 system_identifier;
uint32 xlog_seg_size;
@@ -358,7 +379,7 @@ typedef struct InstanceConfig
ConnectionOptions conn_opt;
ConnectionOptions master_conn_opt;
- uint32 replica_timeout;
+ uint32 replica_timeout; // Deprecated: not used anywhere
/* Wait timeout for WAL segment archiving */
uint32 archive_timeout;
@@ -400,7 +421,7 @@ typedef struct PGNodeInfo
char server_version_str[100];
int ptrack_version_num;
- bool is_ptrack_enable;
+ bool is_ptrack_enabled;
const char *ptrack_schema; /* used only for ptrack 2.x */
} PGNodeInfo;
@@ -568,7 +589,6 @@ typedef struct
parray *external_dirs;
XLogRecPtr prev_start_lsn;
- ConnectionArgs conn_arg;
int thread_num;
HeaderMap *hdr_map;
@@ -579,7 +599,6 @@ typedef struct
int ret;
} backup_files_arg;
-
typedef struct timelineInfo timelineInfo;
/* struct to collect info about timelines in WAL archive */
@@ -658,22 +677,18 @@ typedef struct BackupPageHeader2
uint16 checksum;
} BackupPageHeader2;
+typedef struct StopBackupCallbackParams
+{
+ PGconn *conn;
+ int server_version;
+} StopBackupCallbackParams;
+
/* Special value for compressed_size field */
#define PageIsOk 0
#define SkipCurrentPage -1
#define PageIsTruncated -2
#define PageIsCorrupted -3 /* used by checkdb */
-
-/*
- * return pointer that exceeds the length of prefix from character string.
- * ex. str="/xxx/yyy/zzz", prefix="/xxx/yyy", return="zzz".
- *
- * Deprecated. Do not use this in new code.
- */
-#define GetRelativePath(str, prefix) \
- ((strlen(str) <= strlen(prefix)) ? "" : str + strlen(prefix) + 1)
-
/*
* Return timeline, xlog ID and record offset from an LSN of the type
* 0/B000188, usual result from pg_stop_backup() and friends.
@@ -748,16 +763,12 @@ typedef struct BackupPageHeader2
#define IsSshProtocol() (instance_config.remote.host && strcmp(instance_config.remote.proto, "ssh") == 0)
-/* directory options */
-extern char *backup_path;
-extern char backup_instance_path[MAXPGPATH];
-extern char arclog_path[MAXPGPATH];
-
/* common options */
extern pid_t my_pid;
extern __thread int my_thread_num;
extern int num_threads;
extern bool stream_wal;
+extern bool show_color;
extern bool progress;
extern bool is_archive_cmd; /* true for archive-{get,push} */
#if PG_VERSION_NUM >= 100000
@@ -780,11 +791,32 @@ extern bool delete_expired;
extern bool merge_expired;
extern bool dry_run;
-/* compression options */
-extern bool compress_shortcut;
+/* ===== instanceState ===== */
+
+typedef struct InstanceState
+{
+ /* catalog, this instance belongs to */
+ CatalogState *catalog_state;
+
+ char instance_name[MAXPGPATH]; //previously global var instance_name
+ /* $BACKUP_PATH/backups/instance_name */
+ char instance_backup_subdir_path[MAXPGPATH];
+
+ /* $BACKUP_PATH/backups/instance_name/BACKUP_CATALOG_CONF_FILE */
+ char instance_config_path[MAXPGPATH];
+
+ /* $BACKUP_PATH/wal/instance_name */
+ char instance_wal_subdir_path[MAXPGPATH]; // previously global var arclog_path
+
+ /* TODO: Make it more specific */
+ PGconn *conn;
-/* other options */
-extern char *instance_name;
+
+ // TODO: split into some more meaningful parts
+ InstanceConfig *config;
+} InstanceState;
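+
+/*
+ * Construction sketch (mirrors main(); assumes catalogState and
+ * instance_name are already set):
+ *
+ *   InstanceState *state = pgut_new(InstanceState);
+ *   state->catalog_state = catalogState;
+ *   strncpy(state->instance_name, instance_name, MAXPGPATH);
+ *   join_path_components(state->instance_backup_subdir_path,
+ *                        catalogState->backup_subdir_path, state->instance_name);
+ *   join_path_components(state->instance_wal_subdir_path,
+ *                        catalogState->wal_subdir_path, state->instance_name);
+ */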
+
+/* ===== instanceState (END) ===== */
/* show options */
extern ShowFormat show_format;
@@ -799,12 +831,8 @@ extern pgBackup current;
/* argv of the process */
extern char** commands_args;
-/* in dir.c */
-/* exclude directory list for $PGDATA file listing */
-extern const char *pgdata_exclude_dir[];
-
/* in backup.c */
-extern int do_backup(pgSetBackupParams *set_backup_params,
+extern int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params,
bool no_validate, bool no_sync, bool backup_logs);
extern void do_checkdb(bool need_amcheck, ConnectionOptions conn_opt,
char *pgdata);
@@ -813,16 +841,16 @@ extern const char *deparse_backup_mode(BackupMode mode);
extern void process_block_change(ForkNumber forknum, RelFileNode rnode,
BlockNumber blkno);
-extern char *pg_ptrack_get_block(ConnectionArgs *arguments,
- Oid dbOid, Oid tblsOid, Oid relOid,
- BlockNumber blknum, size_t *result_size,
- int ptrack_version_num, const char *ptrack_schema);
+/* in catchup.c */
+extern int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files);
+
/* in restore.c */
-extern int do_restore_or_validate(time_t target_backup_id,
+extern int do_restore_or_validate(InstanceState *instanceState,
+ time_t target_backup_id,
pgRecoveryTarget *rt,
pgRestoreParams *params,
bool no_sync);
-extern bool satisfy_timeline(const parray *timelines, const pgBackup *backup);
+extern bool satisfy_timeline(const parray *timelines, TimeLineID tli, XLogRecPtr lsn);
extern bool satisfy_recovery_target(const pgBackup *backup,
const pgRecoveryTarget *rt);
extern pgRecoveryTarget *parseRecoveryTargetOptions(
@@ -837,41 +865,46 @@ extern parray *get_dbOid_exclude_list(pgBackup *backup, parray *datname_list,
extern parray *get_backup_filelist(pgBackup *backup, bool strict);
extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict);
extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli);
+extern DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
+ IncrRestoreMode incremental_mode);
/* in merge.c */
-extern void do_merge(time_t backup_id, bool no_validate, bool no_sync);
+extern void do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool no_sync);
extern void merge_backups(pgBackup *backup, pgBackup *next_backup);
-extern void merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup,
+extern void merge_chain(InstanceState *instanceState, parray *parent_chain,
+ pgBackup *full_backup, pgBackup *dest_backup,
bool no_validate, bool no_sync);
extern parray *read_database_map(pgBackup *backup);
/* in init.c */
-extern int do_init(void);
-extern int do_add_instance(InstanceConfig *instance);
+extern int do_init(CatalogState *catalogState);
+extern int do_add_instance(InstanceState *instanceState, InstanceConfig *instance);
/* in archive.c */
-extern void do_archive_push(InstanceConfig *instance, char *wal_file_path,
+extern void do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wal_file_path,
char *wal_file_name, int batch_size, bool overwrite,
bool no_sync, bool no_ready_rename);
-extern void do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg, char *wal_file_path,
+extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const char *prefetch_dir_arg, char *wal_file_path,
char *wal_file_name, int batch_size, bool validate_wal);
/* in configure.c */
extern void do_show_config(void);
-extern void do_set_config(bool missing_ok);
+extern void do_set_config(InstanceState *instanceState, bool missing_ok);
extern void init_config(InstanceConfig *config, const char *instance_name);
-extern InstanceConfig *readInstanceConfigFile(const char *instance_name);
+extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState);
/* in show.c */
-extern int do_show(const char *instance_name, time_t requested_backup_id, bool show_archive);
+extern int do_show(CatalogState *catalogState, InstanceState *instanceState,
+ time_t requested_backup_id, bool show_archive);
/* in delete.c */
-extern void do_delete(time_t backup_id);
+extern void do_delete(InstanceState *instanceState, time_t backup_id);
extern void delete_backup_files(pgBackup *backup);
-extern void do_retention(bool no_validate, bool no_sync);
-extern int do_delete_instance(void);
-extern void do_delete_status(InstanceConfig *instance_config, const char *status);
+extern void do_retention(InstanceState *instanceState, bool no_validate, bool no_sync);
+extern int do_delete_instance(InstanceState *instanceState);
+extern void do_delete_status(InstanceState *instanceState,
+ InstanceConfig *instance_config, const char *status);
/* in fetch.c */
extern char *slurpFile(const char *datadir,
@@ -882,12 +915,13 @@ extern char *slurpFile(const char *datadir,
extern char *fetchFile(PGconn *conn, const char *filename, size_t *filesize);
/* in help.c */
+extern void help_print_version(void);
extern void help_pg_probackup(void);
-extern void help_command(char *command);
+extern void help_command(ProbackupSubcmd const subcmd);
/* in validate.c */
extern void pgBackupValidate(pgBackup* backup, pgRestoreParams *params);
-extern int do_validate_all(void);
+extern int do_validate_all(CatalogState *catalogState, InstanceState *instanceState);
extern int validate_one_page(Page page, BlockNumber absolute_blkno,
XLogRecPtr stop_lsn, PageState *page_st,
uint32 checksum_version);
@@ -908,14 +942,16 @@ extern parray* get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli
extern pgBackup *read_backup(const char *root_dir);
extern void write_backup(pgBackup *backup, bool strict);
extern void write_backup_status(pgBackup *backup, BackupStatus status,
- const char *instance_name, bool strict);
+ bool strict);
extern void write_backup_data_bytes(pgBackup *backup);
extern bool lock_backup(pgBackup *backup, bool strict, bool exclusive);
-extern const char *pgBackupGetBackupMode(pgBackup *backup);
+extern const char *pgBackupGetBackupMode(pgBackup *backup, bool show_color);
+extern void pgBackupGetBackupModeColor(pgBackup *backup, char *mode);
+
+extern parray *catalog_get_instance_list(CatalogState *catalogState);
-extern parray *catalog_get_instance_list(void);
-extern parray *catalog_get_backup_list(const char *instance_name, time_t requested_backup_id);
+extern parray *catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id);
extern void catalog_lock_backup_list(parray *backup_list, int from_idx,
int to_idx, bool strict, bool exclusive);
extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
@@ -926,8 +962,8 @@ extern pgBackup *get_multi_timeline_parent(parray *backup_list, parray *tli_list
InstanceConfig *instance);
extern timelineInfo *timelineInfoNew(TimeLineID tli);
extern void timelineInfoFree(void *tliInfo);
-extern parray *catalog_get_timelines(InstanceConfig *instance);
-extern void do_set_backup(const char *instance_name, time_t backup_id,
+extern parray *catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance);
+extern void do_set_backup(InstanceState *instanceState, time_t backup_id,
pgSetBackupParams *set_backup_params);
extern void pin_backup(pgBackup *target_backup,
pgSetBackupParams *set_backup_params);
@@ -936,13 +972,7 @@ extern void pgBackupWriteControl(FILE *out, pgBackup *backup, bool utc);
extern void write_backup_filelist(pgBackup *backup, parray *files,
const char *root, parray *external_list, bool sync);
-extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len,
- const char *subdir);
-extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
- const char *subdir1, const char *subdir2);
-extern void pgBackupGetPathInInstance(const char *instance_name,
- const pgBackup *backup, char *path, size_t len,
- const char *subdir1, const char *subdir2);
+
extern void pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path);
extern void pgNodeInit(PGNodeInfo *node);
extern void pgBackupInit(pgBackup *backup);
@@ -960,7 +990,6 @@ extern int scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup)
extern bool is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive);
extern bool is_prolific(parray *backup_list, pgBackup *target_backup);
-extern int get_backup_index_number(parray *backup_list, pgBackup *backup);
extern void append_children(parray *backup_list, pgBackup *target_backup, parray *append_list);
extern bool launch_agent(void);
extern void launch_ssh(char* argv[]);
@@ -973,10 +1002,13 @@ extern CompressAlg parse_compress_alg(const char *arg);
extern const char* deparse_compress_alg(int alg);
/* in dir.c */
+extern bool get_control_value(const char *str, const char *name,
+ char *value_str, int64 *value_int64, bool is_mandatory);
extern void dir_list_file(parray *files, const char *root, bool exclude,
bool follow_symlink, bool add_root, bool backup_logs,
bool skip_hidden, int external_dir_num, fio_location location);
+extern const char *get_tablespace_mapping(const char *dir);
extern void create_data_directories(parray *dest_files,
const char *data_dir,
const char *backup_dir,
@@ -998,8 +1030,6 @@ extern void db_map_entry_free(void *map);
extern void print_file_list(FILE *out, const parray *files, const char *root,
const char *external_prefix, parray *external_list);
-extern parray *dir_read_file_list(const char *root, const char *external_prefix,
- const char *file_txt, fio_location location, pg_crc32 expected_crc);
extern parray *make_external_directory_list(const char *colon_separated_dirs,
bool remap);
extern void free_dir_list(parray *list);
@@ -1031,18 +1061,25 @@ extern int pgFileCompareRelPathWithExternal(const void *f1, const void *f2);
extern int pgFileCompareRelPathWithExternalDesc(const void *f1, const void *f2);
extern int pgFileCompareLinked(const void *f1, const void *f2);
extern int pgFileCompareSize(const void *f1, const void *f2);
+extern int pgFileCompareSizeDesc(const void *f1, const void *f2);
extern int pgCompareOid(const void *f1, const void *f2);
+extern void pfilearray_clear_locks(parray *file_list);
/* in data.c */
extern bool check_data_file(ConnectionArgs *arguments, pgFile *file,
const char *from_fullpath, uint32 checksum_version);
-extern void backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
- const char *from_fullpath, const char *to_fullpath,
+
+extern void catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
CompressAlg calg, int clevel, uint32 checksum_version,
int ptrack_version_num, const char *ptrack_schema,
- HeaderMap *hdr_map, bool missing_ok);
+ bool is_merge, size_t prev_size);
+extern void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
+ XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
+ CompressAlg calg, int clevel, uint32 checksum_version,
+ int ptrack_version_num, const char *ptrack_schema,
+ HeaderMap *hdr_map, bool missing_ok);
extern void backup_non_data_file(pgFile *file, pgFile *prev_file,
const char *from_fullpath, const char *to_fullpath,
BackupMode backup_mode, time_t parent_backup_time,
@@ -1106,14 +1143,15 @@ extern XLogRecPtr get_next_record_lsn(const char *archivedir, XLogSegNo segno, T
/* in util.c */
extern TimeLineID get_current_timeline(PGconn *conn);
-extern TimeLineID get_current_timeline_from_control(bool safe);
+extern TimeLineID get_current_timeline_from_control(const char *pgdata_path, fio_location location, bool safe);
extern XLogRecPtr get_checkpoint_location(PGconn *conn);
-extern uint64 get_system_identifier(const char *pgdata_path);
+extern uint64 get_system_identifier(const char *pgdata_path, fio_location location);
extern uint64 get_remote_system_identifier(PGconn *conn);
extern uint32 get_data_checksum_version(bool safe);
extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path);
-extern uint32 get_xlog_seg_size(char *pgdata_path);
-extern void get_redo(const char *pgdata_path, RedoParams *redo);
+extern DBState get_system_dbstate(const char *pgdata_path, fio_location location);
+extern uint32 get_xlog_seg_size(const char *pgdata_path);
+extern void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo);
extern void set_min_recovery_point(pgFile *file, const char *backup_path,
XLogRecPtr stop_backup_lsn);
extern void copy_pgcontrol_file(const char *from_fullpath, fio_location from_location,
@@ -1121,6 +1159,7 @@ extern void copy_pgcontrol_file(const char *from_fullpath, fio_location from_loc
extern void time2iso(char *buf, size_t len, time_t time, bool utc);
extern const char *status2str(BackupStatus status);
+extern const char *status2str_color(BackupStatus status);
extern BackupStatus str2status(const char *status);
extern const char *base36enc(long unsigned int value);
extern char *base36enc_dup(long unsigned int value);
@@ -1137,24 +1176,17 @@ extern void pretty_size(int64 size, char *buf, size_t len);
extern void pretty_time_interval(double time, char *buf, size_t len);
extern PGconn *pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo);
-extern void check_system_identifiers(PGconn *conn, char *pgdata);
+extern void check_system_identifiers(PGconn *conn, const char *pgdata);
extern void parse_filelist_filenames(parray *files, const char *root);
/* in ptrack.c */
-extern void make_pagemap_from_ptrack_1(parray* files, PGconn* backup_conn);
extern void make_pagemap_from_ptrack_2(parray* files, PGconn* backup_conn,
const char *ptrack_schema,
int ptrack_version_num,
XLogRecPtr lsn);
-extern void pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num);
extern void get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo);
-extern bool pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num);
-extern bool pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn);
-extern char *pg_ptrack_get_and_clear(Oid tablespace_oid,
- Oid db_oid,
- Oid rel_oid,
- size_t *result_size,
- PGconn *backup_conn);
+extern bool pg_is_ptrack_enabled(PGconn *backup_conn, int ptrack_version_num);
+
extern XLogRecPtr get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo);
extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema,
int ptrack_version_num, XLogRecPtr lsn);
@@ -1162,17 +1194,26 @@ extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack
/* open local file to writing */
extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size);
-extern int send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_fullpath,
+extern int send_pages(const char *to_fullpath, const char *from_fullpath,
pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel,
uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers,
BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema);
+extern int copy_pages(const char *to_fullpath, const char *from_fullpath,
+ pgFile *file, XLogRecPtr prev_backup_start_lsn,
+ uint32 checksum_version, bool use_pagemap,
+ BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema);
/* FIO */
+extern void setMyLocation(ProbackupSubcmd const subcmd);
extern void fio_delete(mode_t mode, const char *fullpath, fio_location location);
extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version,
bool use_pagemap, BlockNumber *err_blknum, char **errormsg,
BackupPageHeader2 **headers);
+extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
+ XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version,
+ bool use_pagemap, BlockNumber *err_blknum, char **errormsg,
+ BackupPageHeader2 **headers);
/* return codes for fio_send_pages */
extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg);
extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out,
@@ -1226,4 +1267,44 @@ extern void start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path,
ConnectionOptions *conn_opt,
XLogRecPtr startpos, TimeLineID starttli);
extern int wait_WAL_streaming_end(parray *backup_files_list);
+extern parray* parse_tli_history_buffer(char *history, TimeLineID tli);
+
+/* external variables and functions, implemented in backup.c */
+typedef struct PGStopBackupResult
+{
+ /*
+ * We will use values of snapshot_xid and invocation_time if there are
+ * no transactions between start_lsn and stop_lsn.
+ */
+ TransactionId snapshot_xid;
+ time_t invocation_time;
+ /*
+ * Fields that store pg_catalog.pg_stop_backup() result
+ */
+ XLogRecPtr lsn;
+ size_t backup_label_content_len;
+ char *backup_label_content;
+ size_t tablespace_map_content_len;
+ char *tablespace_map_content;
+} PGStopBackupResult;
+
+extern bool backup_in_progress;
+extern parray *backup_files_list;
+
+extern void pg_start_backup(const char *label, bool smooth, pgBackup *backup,
+ PGNodeInfo *nodeInfo, PGconn *conn);
+extern void pg_silent_client_messages(PGconn *conn);
+extern void pg_create_restore_point(PGconn *conn, time_t backup_start_time);
+extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text);
+extern void pg_stop_backup_consume(PGconn *conn, int server_version,
+ bool is_exclusive, uint32 timeout, const char *query_text,
+ PGStopBackupResult *result);
+extern void pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename,
+ const void *data, size_t len, parray *file_list);
+extern XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli,
+ bool in_prev_segment, bool segment_only,
+ int timeout_elevel, bool in_stream_dir);
+extern void wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup);
+extern int64 calculate_datasize_of_filelist(parray *filelist);
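+
+/*
+ * Typical stop-backup call sequence (a sketch; error handling and the
+ * surrounding backup bookkeeping are omitted):
+ *
+ *   char *query_text = NULL;
+ *   PGStopBackupResult stop_result;
+ *
+ *   pg_silent_client_messages(conn);
+ *   pg_create_restore_point(conn, backup->start_time);
+ *   pg_stop_backup_send(conn, server_version, is_replica, is_exclusive, &query_text);
+ *   pg_stop_backup_consume(conn, server_version, is_exclusive,
+ *                          timeout, query_text, &stop_result);
+ */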
+
#endif /* PG_PROBACKUP_H */
diff --git a/src/pg_probackup_state.h b/src/pg_probackup_state.h
new file mode 100644
index 000000000..56d852537
--- /dev/null
+++ b/src/pg_probackup_state.h
@@ -0,0 +1,31 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_probackup_state.h: Definitions of internal pg_probackup states
+ *
+ * Portions Copyright (c) 2021, Postgres Professional
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PG_PROBACKUP_STATE_H
+#define PG_PROBACKUP_STATE_H
+
+/* ====== CatalogState ======= */
+
+typedef struct CatalogState
+{
+ /* $BACKUP_PATH */
+ char catalog_path[MAXPGPATH]; //previously global var backup_path
+ /* $BACKUP_PATH/backups */
+ char backup_subdir_path[MAXPGPATH];
+ /* $BACKUP_PATH/wal */
+ char wal_subdir_path[MAXPGPATH]; // previously global var arclog_path
+} CatalogState;
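+
+/*
+ * Filled in once at startup, e.g. (a sketch mirroring main(); assumes
+ * backup_path is already canonicalized and absolute):
+ *
+ *   CatalogState *st = pgut_new(CatalogState);
+ *   strncpy(st->catalog_path, backup_path, MAXPGPATH);
+ *   join_path_components(st->backup_subdir_path, st->catalog_path, BACKUPS_DIR);
+ *   join_path_components(st->wal_subdir_path, st->catalog_path, WAL_SUBDIR);
+ */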
+
+/* ====== CatalogState (END) ======= */
+
+
+/* ===== instanceState ===== */
+
+/* ===== instanceState (END) ===== */
+
+#endif /* PG_PROBACKUP_STATE_H */
diff --git a/src/ptrack.c b/src/ptrack.c
index 5a2b9f046..c631d7386 100644
--- a/src/ptrack.c
+++ b/src/ptrack.c
@@ -2,7 +2,7 @@
*
* ptrack.c: support functions for ptrack backups
*
- * Copyright (c) 2019 Postgres Professional
+ * Copyright (c) 2021 Postgres Professional
*
*-------------------------------------------------------------------------
*/
@@ -21,124 +21,6 @@
#define PTRACK_BITS_PER_HEAPBLOCK 1
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
-/*
- * Given a list of files in the instance to backup, build a pagemap for each
- * data file that has ptrack. Result is saved in the pagemap field of pgFile.
- * NOTE we rely on the fact that provided parray is sorted by file->rel_path.
- */
-void
-make_pagemap_from_ptrack_1(parray *files, PGconn *backup_conn)
-{
- size_t i;
- Oid dbOid_with_ptrack_init = 0;
- Oid tblspcOid_with_ptrack_init = 0;
- char *ptrack_nonparsed = NULL;
- size_t ptrack_nonparsed_size = 0;
-
- for (i = 0; i < parray_num(files); i++)
- {
- pgFile *file = (pgFile *) parray_get(files, i);
- size_t start_addr;
-
- /*
- * If there is a ptrack_init file in the database,
- * we must backup all its files, ignoring ptrack files for relations.
- */
- if (file->is_database)
- {
- /*
- * The function pg_ptrack_get_and_clear_db returns true
- * if there was a ptrack_init file.
- * Also ignore ptrack files for global tablespace,
- * to avoid any possible specific errors.
- */
- if ((file->tblspcOid == GLOBALTABLESPACE_OID) ||
- pg_ptrack_get_and_clear_db(file->dbOid, file->tblspcOid, backup_conn))
- {
- dbOid_with_ptrack_init = file->dbOid;
- tblspcOid_with_ptrack_init = file->tblspcOid;
- }
- }
-
- if (file->is_datafile)
- {
- if (file->tblspcOid == tblspcOid_with_ptrack_init &&
- file->dbOid == dbOid_with_ptrack_init)
- {
- /* ignore ptrack if ptrack_init exists */
- elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->rel_path);
- file->pagemap_isabsent = true;
- continue;
- }
-
- /* get ptrack bitmap once for all segments of the file */
- if (file->segno == 0)
- {
- /* release previous value */
- pg_free(ptrack_nonparsed);
- ptrack_nonparsed_size = 0;
-
- ptrack_nonparsed = pg_ptrack_get_and_clear(file->tblspcOid, file->dbOid,
- file->relOid, &ptrack_nonparsed_size, backup_conn);
- }
-
- if (ptrack_nonparsed != NULL)
- {
- /*
- * pg_ptrack_get_and_clear() returns ptrack with VARHDR cut out.
- * Compute the beginning of the ptrack map related to this segment
- *
- * HEAPBLOCKS_PER_BYTE. Number of heap pages one ptrack byte can track: 8
- * RELSEG_SIZE. Number of Pages per segment: 131072
- * RELSEG_SIZE/HEAPBLOCKS_PER_BYTE. number of bytes in ptrack file needed
- * to keep track on one relsegment: 16384
- */
- start_addr = (RELSEG_SIZE/HEAPBLOCKS_PER_BYTE)*file->segno;
-
- /*
- * If file segment was created after we have read ptrack,
- * we won't have a bitmap for this segment.
- */
- if (start_addr > ptrack_nonparsed_size)
- {
- elog(VERBOSE, "Ptrack is missing for file: %s", file->rel_path);
- file->pagemap_isabsent = true;
- }
- else
- {
-
- if (start_addr + RELSEG_SIZE/HEAPBLOCKS_PER_BYTE > ptrack_nonparsed_size)
- {
- file->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr;
- elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
- }
- else
- {
- file->pagemap.bitmapsize = RELSEG_SIZE/HEAPBLOCKS_PER_BYTE;
- elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
- }
-
- file->pagemap.bitmap = pg_malloc(file->pagemap.bitmapsize);
- memcpy(file->pagemap.bitmap, ptrack_nonparsed+start_addr, file->pagemap.bitmapsize);
- }
- }
- else
- {
- /*
- * If ptrack file is missing, try to copy the entire file.
- * It can happen in two cases:
- * - files were created by commands that bypass buffer manager
- * and, correspondingly, ptrack mechanism.
- * i.e. CREATE DATABASE
- * - target relation was deleted.
- */
- elog(VERBOSE, "Ptrack is missing for file: %s", file->rel_path);
- file->pagemap_isabsent = true;
- }
- }
- }
-}
-
/*
* Parse a string like "2.1" into int
* result: int by formula major_number * 100 + minor_number
@@ -218,7 +100,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo)
nodeInfo->ptrack_version_num = ptrack_version_num;
/* ptrack 1.X is buggy, so fall back to DELTA backup strategy for safety */
- if (nodeInfo->ptrack_version_num >= 105 && nodeInfo->ptrack_version_num < 200)
+ if (nodeInfo->ptrack_version_num < 200)
{
if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
{
@@ -236,17 +118,12 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo)
* Check if ptrack is enabled in target instance
*/
bool
-pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num)
+pg_is_ptrack_enabled(PGconn *backup_conn, int ptrack_version_num)
{
PGresult *res_db;
bool result = false;
- if (ptrack_version_num < 200)
- {
- res_db = pgut_execute(backup_conn, "SHOW ptrack_enable", 0, NULL);
- result = strcmp(PQgetvalue(res_db, 0, 0), "on") == 0;
- }
- else if (ptrack_version_num == 200)
+ if (ptrack_version_num == 200)
{
res_db = pgut_execute(backup_conn, "SHOW ptrack_map_size", 0, NULL);
result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0;
@@ -262,214 +139,6 @@ pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num)
return result;
}
-
-/* ----------------------------
- * Ptrack 1.* support functions
- * ----------------------------
- */
-
-/* Clear ptrack files in all databases of the instance we connected to */
-void
-pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num)
-{
- PGresult *res_db,
- *res;
- const char *dbname;
- int i;
- Oid dbOid, tblspcOid;
- char *params[2];
-
- // FIXME Perform this check on caller's side
- if (ptrack_version_num >= 200)
- return;
-
- params[0] = palloc(64);
- params[1] = palloc(64);
- res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_database",
- 0, NULL);
-
- for(i = 0; i < PQntuples(res_db); i++)
- {
- PGconn *tmp_conn;
-
- dbname = PQgetvalue(res_db, i, 0);
- if (strcmp(dbname, "template0") == 0)
- continue;
-
- dbOid = atoll(PQgetvalue(res_db, i, 1));
- tblspcOid = atoll(PQgetvalue(res_db, i, 2));
-
- tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport,
- dbname,
- instance_config.conn_opt.pguser);
-
- res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()",
- 0, NULL);
- PQclear(res);
-
- sprintf(params[0], "%i", dbOid);
- sprintf(params[1], "%i", tblspcOid);
- res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
- 2, (const char **)params);
- PQclear(res);
-
- pgut_disconnect(tmp_conn);
- }
-
- pfree(params[0]);
- pfree(params[1]);
- PQclear(res_db);
-}
-
-bool
-pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn)
-{
- char *params[2];
- char *dbname;
- PGresult *res_db;
- PGresult *res;
- bool result;
-
- params[0] = palloc(64);
- params[1] = palloc(64);
-
- sprintf(params[0], "%i", dbOid);
- res_db = pgut_execute(backup_conn,
- "SELECT datname FROM pg_database WHERE oid=$1",
- 1, (const char **) params);
- /*
- * If database is not found, it's not an error.
- * It could have been deleted since previous backup.
- */
- if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
- return false;
-
- dbname = PQgetvalue(res_db, 0, 0);
-
- /* Always backup all files from template0 database */
- if (strcmp(dbname, "template0") == 0)
- {
- PQclear(res_db);
- return true;
- }
- PQclear(res_db);
-
- sprintf(params[0], "%i", dbOid);
- sprintf(params[1], "%i", tblspcOid);
- res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
- 2, (const char **)params);
-
- if (PQnfields(res) != 1)
- elog(ERROR, "cannot perform pg_ptrack_get_and_clear_db()");
-
- if (!parse_bool(PQgetvalue(res, 0, 0), &result))
- elog(ERROR,
- "result of pg_ptrack_get_and_clear_db() is invalid: %s",
- PQgetvalue(res, 0, 0));
-
- PQclear(res);
- pfree(params[0]);
- pfree(params[1]);
-
- return result;
-}
-
-/* Read and clear ptrack files of the target relation.
- * Result is a bytea ptrack map of all segments of the target relation.
- * case 1: we know a tablespace_oid, db_oid, and rel_filenode
- * case 2: we know db_oid and rel_filenode (no tablespace_oid, because file in pg_default)
- * case 3: we know only rel_filenode (because file in pg_global)
- */
-char *
-pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
- size_t *result_size, PGconn *backup_conn)
-{
- PGconn *tmp_conn;
- PGresult *res_db,
- *res;
- char *params[2];
- char *result;
- char *val;
-
- params[0] = palloc(64);
- params[1] = palloc(64);
-
- /* regular file (not in directory 'global') */
- if (db_oid != 0)
- {
- char *dbname;
-
- sprintf(params[0], "%i", db_oid);
- res_db = pgut_execute(backup_conn,
- "SELECT datname FROM pg_database WHERE oid=$1",
- 1, (const char **) params);
- /*
- * If database is not found, it's not an error.
- * It could have been deleted since previous backup.
- */
- if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
- return NULL;
-
- dbname = PQgetvalue(res_db, 0, 0);
-
- if (strcmp(dbname, "template0") == 0)
- {
- PQclear(res_db);
- return NULL;
- }
-
- tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport,
- dbname,
- instance_config.conn_opt.pguser);
- sprintf(params[0], "%i", tablespace_oid);
- sprintf(params[1], "%i", rel_filenode);
- res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
- 2, (const char **)params);
-
- if (PQnfields(res) != 1)
- elog(ERROR, "cannot get ptrack file from database \"%s\" by tablespace oid %u and relation oid %u",
- dbname, tablespace_oid, rel_filenode);
- PQclear(res_db);
- pgut_disconnect(tmp_conn);
- }
- /* file in directory 'global' */
- else
- {
- /*
- * execute ptrack_get_and_clear for relation in pg_global
- * Use backup_conn, cause we can do it from any database.
- */
- sprintf(params[0], "%i", tablespace_oid);
- sprintf(params[1], "%i", rel_filenode);
- res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
- 2, (const char **)params);
-
- if (PQnfields(res) != 1)
- elog(ERROR, "cannot get ptrack file from pg_global tablespace and relation oid %u",
- rel_filenode);
- }
-
- val = PQgetvalue(res, 0, 0);
-
- /* TODO Now pg_ptrack_get_and_clear() returns bytea ending with \x.
- * It should be fixed in future ptrack releases, but till then we
- * can parse it.
- */
- if (strcmp("x", val+1) == 0)
- {
- /* Ptrack file is missing */
- return NULL;
- }
-
- result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
- result_size);
- PQclear(res);
- pfree(params[0]);
- pfree(params[1]);
-
- return result;
-}
-
/*
* Get lsn of the moment when ptrack was enabled the last time.
*/
@@ -482,20 +151,14 @@ get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo)
uint32 lsn_lo;
XLogRecPtr lsn;
- if (nodeInfo->ptrack_version_num < 200)
- res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_control_lsn()",
- 0, NULL);
- else
- {
- char query[128];
+ char query[128];
- if (nodeInfo->ptrack_version_num == 200)
- sprintf(query, "SELECT %s.pg_ptrack_control_lsn()", nodeInfo->ptrack_schema);
- else
- sprintf(query, "SELECT %s.ptrack_init_lsn()", nodeInfo->ptrack_schema);
+ if (nodeInfo->ptrack_version_num == 200)
+ sprintf(query, "SELECT %s.pg_ptrack_control_lsn()", nodeInfo->ptrack_schema);
+ else
+ sprintf(query, "SELECT %s.ptrack_init_lsn()", nodeInfo->ptrack_schema);
- res = pgut_execute(backup_conn, query, 0, NULL);
- }
+ res = pgut_execute(backup_conn, query, 0, NULL);
/* Extract timeline and LSN from results of pg_start_backup() */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
@@ -506,99 +169,6 @@ get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo)
return lsn;
}
-char *
-pg_ptrack_get_block(ConnectionArgs *arguments,
- Oid dbOid,
- Oid tblsOid,
- Oid relOid,
- BlockNumber blknum,
- size_t *result_size,
- int ptrack_version_num,
- const char *ptrack_schema)
-{
- PGresult *res;
- char *params[4];
- char *result;
-
- params[0] = palloc(64);
- params[1] = palloc(64);
- params[2] = palloc(64);
- params[3] = palloc(64);
-
- /*
- * Use tmp_conn, since we may work in parallel threads.
- * We can connect to any database.
- */
- sprintf(params[0], "%i", tblsOid);
- sprintf(params[1], "%i", dbOid);
- sprintf(params[2], "%i", relOid);
- sprintf(params[3], "%u", blknum);
-
- if (arguments->conn == NULL)
- {
- arguments->conn = pgut_connect(instance_config.conn_opt.pghost,
- instance_config.conn_opt.pgport,
- instance_config.conn_opt.pgdatabase,
- instance_config.conn_opt.pguser);
- }
-
- if (arguments->cancel_conn == NULL)
- arguments->cancel_conn = PQgetCancel(arguments->conn);
-
- // elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)",dbOid, tblsOid, relOid, blknum);
-
- if (ptrack_version_num < 200)
- res = pgut_execute_parallel(arguments->conn,
- arguments->cancel_conn,
- "SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)",
- 4, (const char **)params, true, false, false);
- else
- {
- char query[128];
-
- /* sanity */
- if (!ptrack_schema)
- elog(ERROR, "Schema name of ptrack extension is missing");
-
- if (ptrack_version_num == 200)
- sprintf(query, "SELECT %s.pg_ptrack_get_block($1, $2, $3, $4)", ptrack_schema);
- else
- elog(ERROR, "ptrack >= 2.1.0 does not support pg_ptrack_get_block()");
- // sprintf(query, "SELECT %s.ptrack_get_block($1, $2, $3, $4)", ptrack_schema);
-
- res = pgut_execute_parallel(arguments->conn,
- arguments->cancel_conn,
- query, 4, (const char **)params,
- true, false, false);
- }
-
- if (PQnfields(res) != 1)
- {
- elog(VERBOSE, "cannot get file block for relation oid %u",
- relOid);
- return NULL;
- }
-
- if (PQgetisnull(res, 0, 0))
- {
- elog(VERBOSE, "cannot get file block for relation oid %u",
- relOid);
- return NULL;
- }
-
- result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
- result_size);
-
- PQclear(res);
-
- pfree(params[0]);
- pfree(params[1]);
- pfree(params[2]);
- pfree(params[3]);
-
- return result;
-}
-
/* ----------------------------
* Ptrack 2.* support functions
* ----------------------------
diff --git a/src/restore.c b/src/restore.c
index 9c0b059e9..005984aed 100644
--- a/src/restore.c
+++ b/src/restore.c
@@ -41,22 +41,22 @@ typedef struct
static void
-print_recovery_settings(FILE *fp, pgBackup *backup,
+print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup,
pgRestoreParams *params, pgRecoveryTarget *rt);
static void
print_standby_settings_common(FILE *fp, pgBackup *backup, pgRestoreParams *params);
#if PG_VERSION_NUM >= 120000
static void
-update_recovery_options(pgBackup *backup,
+update_recovery_options(InstanceState *instanceState, pgBackup *backup,
pgRestoreParams *params, pgRecoveryTarget *rt);
#else
static void
-update_recovery_options_before_v12(pgBackup *backup,
+update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backup,
pgRestoreParams *params, pgRecoveryTarget *rt);
#endif
-static void create_recovery_conf(time_t backup_id,
+static void create_recovery_conf(InstanceState *instanceState, time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup,
pgRestoreParams *params);
@@ -67,8 +67,6 @@ static void restore_chain(pgBackup *dest_backup, parray *parent_chain,
parray *dbOid_exclude_list, pgRestoreParams *params,
const char *pgdata_path, bool no_sync, bool cleanup_pgdata,
bool backup_has_tblspc);
-static DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
- IncrRestoreMode incremental_mode);
/*
* Iterate over backup list to find all ancestors of the broken parent_backup
@@ -94,7 +92,7 @@ set_orphan_status(parray *backups, pgBackup *parent_backup)
if (backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE)
{
- write_backup_status(backup, BACKUP_STATUS_ORPHAN, instance_name, true);
+ write_backup_status(backup, BACKUP_STATUS_ORPHAN, true);
elog(WARNING,
"Backup %s is orphaned because his parent %s has status: %s",
@@ -117,7 +115,7 @@ set_orphan_status(parray *backups, pgBackup *parent_backup)
* Entry point of pg_probackup RESTORE and VALIDATE subcommands.
*/
int
-do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
+do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pgRecoveryTarget *rt,
pgRestoreParams *params, bool no_sync)
{
int i = 0;
@@ -136,7 +134,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
bool backup_has_tblspc = true; /* backup contain tablespace */
XLogRecPtr shift_lsn = InvalidXLogRecPtr;
- if (instance_name == NULL)
+ if (instanceState == NULL)
elog(ERROR, "required parameter not specified: --instance");
if (params->is_restore)
@@ -216,7 +214,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
elog(LOG, "%s begin.", action);
/* Get list of all backups sorted in order of descending start time */
- backups = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID);
+ backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
/* Find backup range we should restore or validate. */
while ((i < parray_num(backups)) && !dest_backup)
@@ -287,12 +285,13 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
// elog(LOG, "target timeline ID = %u", rt->target_tli);
/* Read timeline history files from archives */
- timelines = read_timeline_history(arclog_path, rt->target_tli, true);
+ timelines = read_timeline_history(instanceState->instance_wal_subdir_path,
+ rt->target_tli, true);
if (!timelines)
elog(ERROR, "Failed to get history file for target timeline %i", rt->target_tli);
- if (!satisfy_timeline(timelines, current_backup))
+ if (!satisfy_timeline(timelines, current_backup->tli, current_backup->stop_lsn))
{
if (target_backup_id != INVALID_BACKUP_ID)
elog(ERROR, "target backup %s does not satisfy target timeline",
@@ -367,7 +366,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if (backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE)
{
- write_backup_status(backup, BACKUP_STATUS_ORPHAN, instance_name, true);
+ write_backup_status(backup, BACKUP_STATUS_ORPHAN, true);
elog(WARNING, "Backup %s is orphaned because his parent %s is missing",
base36enc(backup->start_time), missing_backup_id);
@@ -486,13 +485,14 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
{
RedoParams redo;
parray *timelines = NULL;
- get_redo(instance_config.pgdata, &redo);
+ get_redo(instance_config.pgdata, FIO_DB_HOST, &redo);
if (redo.checksum_version == 0)
elog(ERROR, "Incremental restore in 'lsn' mode require "
"data_checksums to be enabled in destination data directory");
- timelines = read_timeline_history(arclog_path, redo.tli, false);
+ timelines = read_timeline_history(instanceState->instance_wal_subdir_path,
+ redo.tli, false);
if (!timelines)
elog(WARNING, "Failed to get history for redo timeline %i, "
@@ -607,7 +607,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
* We pass base_full_backup timeline as last argument to this function,
* because it's needed to form the name of xlog file.
*/
- validate_wal(dest_backup, arclog_path, rt->target_time,
+ validate_wal(dest_backup, instanceState->instance_wal_subdir_path, rt->target_time,
rt->target_xid, rt->target_lsn,
dest_backup->tli, instance_config.xlog_seg_size);
}
@@ -676,7 +676,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
//TODO rename and update comment
/* Create recovery.conf with given recovery target parameters */
- create_recovery_conf(target_backup_id, rt, dest_backup, params);
+ create_recovery_conf(instanceState, target_backup_id, rt, dest_backup, params);
}
/* ssh connection to longer needed */
@@ -822,7 +822,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
}
/*
- * Setup directory structure for external directories and file locks
+ * Setup directory structure for external directories
*/
for (i = 0; i < parray_num(dest_files); i++)
{
@@ -846,11 +846,11 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
elog(VERBOSE, "Create external directory \"%s\"", dirpath);
fio_mkdir(dirpath, file->mode, FIO_DB_HOST);
}
-
- /* setup threads */
- pg_atomic_clear_flag(&file->lock);
}
+ /* setup threads */
+ pfilearray_clear_locks(dest_files);
+
/* Get list of files in destination directory and remove redundant files */
if (params->incremental_mode != INCR_NONE || cleanup_pgdata)
{
@@ -1306,7 +1306,7 @@ restore_files(void *arg)
* with given recovery target parameters
*/
static void
-create_recovery_conf(time_t backup_id,
+create_recovery_conf(InstanceState *instanceState, time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup,
pgRestoreParams *params)
@@ -1353,16 +1353,16 @@ create_recovery_conf(time_t backup_id,
elog(LOG, "----------------------------------------");
#if PG_VERSION_NUM >= 120000
- update_recovery_options(backup, params, rt);
+ update_recovery_options(instanceState, backup, params, rt);
#else
- update_recovery_options_before_v12(backup, params, rt);
+ update_recovery_options_before_v12(instanceState, backup, params, rt);
#endif
}
-/* TODO get rid of using global variables: instance_config, backup_path, instance_name */
+/* TODO get rid of using global variables: instance_config */
static void
-print_recovery_settings(FILE *fp, pgBackup *backup,
+print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup,
pgRestoreParams *params, pgRecoveryTarget *rt)
{
char restore_command_guc[16384];
@@ -1378,7 +1378,8 @@ print_recovery_settings(FILE *fp, pgBackup *backup,
sprintf(restore_command_guc, "\"%s\" archive-get -B \"%s\" --instance \"%s\" "
"--wal-file-path=%%p --wal-file-name=%%f",
PROGRAM_FULL_PATH ? PROGRAM_FULL_PATH : PROGRAM_NAME,
- backup_path, instance_name);
+ /* TODO What is going on here? Why do we use catalog path as wal-file-path? */
+ instanceState->catalog_state->catalog_path, instanceState->instance_name);
/* append --remote-* parameters provided via --archive-* settings */
if (instance_config.archive.host)
@@ -1463,7 +1464,7 @@ print_standby_settings_common(FILE *fp, pgBackup *backup, pgRestoreParams *param
#if PG_VERSION_NUM < 120000
static void
-update_recovery_options_before_v12(pgBackup *backup,
+update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backup,
pgRestoreParams *params, pgRecoveryTarget *rt)
{
FILE *fp;
@@ -1494,7 +1495,7 @@ update_recovery_options_before_v12(pgBackup *backup,
PROGRAM_VERSION);
if (params->recovery_settings_mode == PITR_REQUESTED)
- print_recovery_settings(fp, backup, params, rt);
+ print_recovery_settings(instanceState, fp, backup, params, rt);
if (params->restore_as_replica)
{
@@ -1516,7 +1517,7 @@ update_recovery_options_before_v12(pgBackup *backup,
*/
#if PG_VERSION_NUM >= 120000
static void
-update_recovery_options(pgBackup *backup,
+update_recovery_options(InstanceState *instanceState, pgBackup *backup,
pgRestoreParams *params, pgRecoveryTarget *rt)
{
@@ -1624,7 +1625,7 @@ update_recovery_options(pgBackup *backup,
base36enc(backup->start_time), current_time_str);
if (params->recovery_settings_mode == PITR_REQUESTED)
- print_recovery_settings(fp, backup, params, rt);
+ print_recovery_settings(instanceState, fp, backup, params, rt);
if (params->restore_as_replica)
print_standby_settings_common(fp, backup, params);
@@ -1816,7 +1817,7 @@ satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt)
/* TODO description */
bool
-satisfy_timeline(const parray *timelines, const pgBackup *backup)
+satisfy_timeline(const parray *timelines, TimeLineID tli, XLogRecPtr lsn)
{
int i;
@@ -1825,9 +1826,9 @@ satisfy_timeline(const parray *timelines, const pgBackup *backup)
TimeLineHistoryEntry *timeline;
timeline = (TimeLineHistoryEntry *) parray_get(timelines, i);
- if (backup->tli == timeline->tli &&
+ if (tli == timeline->tli &&
(XLogRecPtrIsInvalid(timeline->end) ||
- backup->stop_lsn <= timeline->end))
+ lsn <= timeline->end))
return true;
}
return false;
@@ -2183,9 +2184,9 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
* data files content, because based on pg_control information we will
* choose a backup suitable for lsn based incremental restore.
*/
- elog(INFO, "Trying to read pg_control file in destination direstory");
+ elog(INFO, "Trying to read pg_control file in destination directory");
- system_id_pgdata = get_system_identifier(pgdata);
+ system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST);
if (system_id_pgdata == instance_config.system_identifier)
system_id_match = true;
diff --git a/src/show.c b/src/show.c
index 496c9d833..22c40cf43 100644
--- a/src/show.c
+++ b/src/show.c
@@ -54,14 +54,14 @@ typedef struct ShowArchiveRow
static void show_instance_start(void);
static void show_instance_end(void);
-static void show_instance(const char *instance_name, time_t requested_backup_id, bool show_name);
+static void show_instance(InstanceState *instanceState, time_t requested_backup_id, bool show_name);
static void print_backup_json_object(PQExpBuffer buf, pgBackup *backup);
-static int show_backup(const char *instance_name, time_t requested_backup_id);
+static int show_backup(InstanceState *instanceState, time_t requested_backup_id);
static void show_instance_plain(const char *instance_name, parray *backup_list, bool show_name);
static void show_instance_json(const char *instance_name, parray *backup_list);
-static void show_instance_archive(InstanceConfig *instance);
+static void show_instance_archive(InstanceState *instanceState, InstanceConfig *instance);
static void show_archive_plain(const char *instance_name, uint32 xlog_seg_size,
parray *timelines_list, bool show_name);
static void show_archive_json(const char *instance_name, uint32 xlog_seg_size,
@@ -75,11 +75,12 @@ static int32 json_level = 0;
* Entry point of pg_probackup SHOW subcommand.
*/
int
-do_show(const char *instance_name, time_t requested_backup_id, bool show_archive)
+do_show(CatalogState *catalogState, InstanceState *instanceState,
+ time_t requested_backup_id, bool show_archive)
{
int i;
- if (instance_name == NULL &&
+ if (instanceState == NULL &&
requested_backup_id != INVALID_BACKUP_ID)
elog(ERROR, "You must specify --instance to use (-i, --backup-id) option");
@@ -88,28 +89,25 @@ do_show(const char *instance_name, time_t requested_backup_id, bool show_archive
elog(ERROR, "You cannot specify --archive and (-i, --backup-id) options together");
/*
- * if instance_name is not specified,
+ * if instance is not specified,
* show information about all instances in this backup catalog
*/
- if (instance_name == NULL)
+ if (instanceState == NULL)
{
- parray *instances = catalog_get_instance_list();
+ parray *instances = catalog_get_instance_list(catalogState);
show_instance_start();
for (i = 0; i < parray_num(instances); i++)
{
- InstanceConfig *instance = parray_get(instances, i);
- char backup_instance_path[MAXPGPATH];
+ InstanceState *instanceState = parray_get(instances, i);
if (interrupted)
elog(ERROR, "Interrupted during show");
- sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance->name);
-
if (show_archive)
- show_instance_archive(instance);
+ show_instance_archive(instanceState, instanceState->config);
else
- show_instance(instance->name, INVALID_BACKUP_ID, true);
+ show_instance(instanceState, INVALID_BACKUP_ID, true);
}
show_instance_end();
@@ -123,11 +121,11 @@ do_show(const char *instance_name, time_t requested_backup_id, bool show_archive
if (show_archive)
{
- InstanceConfig *instance = readInstanceConfigFile(instance_name);
- show_instance_archive(instance);
+ InstanceConfig *instance = readInstanceConfigFile(instanceState);
+ show_instance_archive(instanceState, instance);
}
else
- show_instance(instance_name, requested_backup_id, false);
+ show_instance(instanceState, requested_backup_id, false);
show_instance_end();
@@ -137,11 +135,11 @@ do_show(const char *instance_name, time_t requested_backup_id, bool show_archive
{
if (show_archive)
{
- InstanceConfig *instance = readInstanceConfigFile(instance_name);
- show_instance_archive(instance);
+ InstanceConfig *instance = readInstanceConfigFile(instanceState);
+ show_instance_archive(instanceState, instance);
}
else
- show_backup(instance_name, requested_backup_id);
+ show_backup(instanceState, requested_backup_id);
return 0;
}
@@ -289,16 +287,16 @@ show_instance_end(void)
* Show brief meta information about all backups in the backup instance.
*/
static void
-show_instance(const char *instance_name, time_t requested_backup_id, bool show_name)
+show_instance(InstanceState *instanceState, time_t requested_backup_id, bool show_name)
{
parray *backup_list;
- backup_list = catalog_get_backup_list(instance_name, requested_backup_id);
+ backup_list = catalog_get_backup_list(instanceState, requested_backup_id);
if (show_format == SHOW_PLAIN)
- show_instance_plain(instance_name, backup_list, show_name);
+ show_instance_plain(instanceState->instance_name, backup_list, show_name);
else if (show_format == SHOW_JSON)
- show_instance_json(instance_name, backup_list);
+ show_instance_json(instanceState->instance_name, backup_list);
else
elog(ERROR, "Invalid show format %d", (int) show_format);
@@ -324,7 +322,7 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup)
json_add_value(buf, "parent-backup-id",
base36enc(backup->parent_backup), json_level, true);
- json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup),
+ json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup, false),
json_level, true);
json_add_value(buf, "wal", backup->stream ? "STREAM": "ARCHIVE",
@@ -450,13 +448,14 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup)
* Show detailed meta information about specified backup.
*/
static int
-show_backup(const char *instance_name, time_t requested_backup_id)
+show_backup(InstanceState *instanceState, time_t requested_backup_id)
{
int i;
pgBackup *backup = NULL;
parray *backups;
- backups = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID);
+ // TODO: pass requested_backup_id to the function
+ backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
/* Find requested backup */
for (i = 0; i < parray_num(backups); i++)
@@ -557,8 +556,8 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na
cur++;
/* Mode */
- row->mode = pgBackupGetBackupMode(backup);
- widths[cur] = Max(widths[cur], strlen(row->mode));
+ row->mode = pgBackupGetBackupMode(backup, show_color);
+ widths[cur] = Max(widths[cur], strlen(row->mode) - (show_color ? TC_LEN : 0));
cur++;
/* WAL mode*/
@@ -631,8 +630,9 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na
cur++;
/* Status */
- row->status = status2str(backup->status);
- widths[cur] = Max(widths[cur], strlen(row->status));
+ row->status = show_color ? status2str_color(backup->status) : status2str(backup->status);
+ widths[cur] = Max(widths[cur], strlen(row->status) - (show_color ? TC_LEN : 0));
+
}
for (i = 0; i < SHOW_FIELDS_COUNT; i++)
@@ -682,7 +682,7 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na
row->recovery_time);
cur++;
- appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
+ appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur] + (show_color ? TC_LEN : 0),
row->mode);
cur++;
@@ -718,7 +718,7 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na
row->stop_lsn);
cur++;
- appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
+ appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur] + (show_color ? TC_LEN : 0),
row->status);
cur++;
@@ -774,16 +774,16 @@ show_instance_json(const char *instance_name, parray *backup_list)
* show information about WAL archive of the instance
*/
static void
-show_instance_archive(InstanceConfig *instance)
+show_instance_archive(InstanceState *instanceState, InstanceConfig *instance)
{
parray *timelineinfos;
- timelineinfos = catalog_get_timelines(instance);
+ timelineinfos = catalog_get_timelines(instanceState, instance);
if (show_format == SHOW_PLAIN)
- show_archive_plain(instance->name, instance->xlog_seg_size, timelineinfos, true);
+ show_archive_plain(instanceState->instance_name, instance->xlog_seg_size, timelineinfos, true);
else if (show_format == SHOW_JSON)
- show_archive_json(instance->name, instance->xlog_seg_size, timelineinfos);
+ show_archive_json(instanceState->instance_name, instance->xlog_seg_size, timelineinfos);
else
elog(ERROR, "Invalid show format %d", (int) show_format);
}
diff --git a/src/stream.c b/src/stream.c
index 01161f720..5912ff44b 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -70,7 +70,6 @@ static void add_walsegment_to_filelist(parray *filelist, uint32 timeline,
uint32 xlog_seg_size);
static void add_history_file_to_filelist(parray *filelist, uint32 timeline,
char *basedir);
-static parray* parse_tli_history_buffer(char *history, TimeLineID tli);
/*
* Run IDENTIFY_SYSTEM through a given connection and
@@ -173,7 +172,7 @@ StreamLog(void *arg)
*/
stream_arg->startpos -= stream_arg->startpos % instance_config.xlog_seg_size;
- xlog_files_list = parray_new();
+ xlog_files_list = parray_new();
/* Initialize timeout */
stream_stop_begin = 0;
@@ -185,7 +184,12 @@ StreamLog(void *arg)
#endif
-#if PG_VERSION_NUM >= 110000
+#if PG_VERSION_NUM >= 150000
+ /* Create temp repslot */
+ if (temp_slot)
+ CreateReplicationSlot(stream_arg->conn, replication_slot,
+ NULL, temp_slot, true, true, false, false);
+#elif PG_VERSION_NUM >= 110000
/* Create temp repslot */
if (temp_slot)
CreateReplicationSlot(stream_arg->conn, replication_slot,
@@ -308,14 +312,14 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
/* we assume that we get called once at the end of each segment */
if (segment_finished)
- {
- elog(VERBOSE, _("finished segment at %X/%X (timeline %u)"),
- (uint32) (xlogpos >> 32), (uint32) xlogpos, timeline);
+ {
+ elog(VERBOSE, _("finished segment at %X/%X (timeline %u)"),
+ (uint32) (xlogpos >> 32), (uint32) xlogpos, timeline);
- add_walsegment_to_filelist(xlog_files_list, timeline, xlogpos,
- (char*) stream_thread_arg.basedir,
- instance_config.xlog_seg_size);
- }
+ add_walsegment_to_filelist(xlog_files_list, timeline, xlogpos,
+ (char*) stream_thread_arg.basedir,
+ instance_config.xlog_seg_size);
+ }
/*
* Note that we report the previous, not current, position here. After a
@@ -588,20 +592,25 @@ start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOption
/* Set error exit code as default */
stream_thread_arg.ret = 1;
/* we must use startpos as start_lsn from start_backup */
- stream_thread_arg.startpos = current.start_lsn;
- stream_thread_arg.starttli = current.tli;
+ stream_thread_arg.startpos = startpos;
+ stream_thread_arg.starttli = starttli;
thread_interrupted = false;
pthread_create(&stream_thread, NULL, StreamLog, &stream_thread_arg);
}
-/* Wait for the completion of stream */
+/*
+ * Wait for the streaming to complete and append
+ * the list of streamed xlog files to
+ * backup_files_list (if it is not NULL)
+ */
int
wait_WAL_streaming_end(parray *backup_files_list)
{
pthread_join(stream_thread, NULL);
- parray_concat(backup_files_list, xlog_files_list);
+ if (backup_files_list != NULL)
+ parray_concat(backup_files_list, xlog_files_list);
parray_free(xlog_files_list);
return stream_thread_arg.ret;
}
diff --git a/src/util.c b/src/util.c
index 9fd0114bb..4e32e0639 100644
--- a/src/util.c
+++ b/src/util.c
@@ -10,8 +10,6 @@
#include "pg_probackup.h"
-#include "catalog/pg_control.h"
-
#include
#include
@@ -174,7 +172,7 @@ get_current_timeline(PGconn *conn)
if (PQresultStatus(res) == PGRES_TUPLES_OK)
val = PQgetvalue(res, 0, 0);
else
- return get_current_timeline_from_control(false);
+ return get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false);
if (!parse_uint32(val, &tli, 0))
{
@@ -182,7 +180,7 @@ get_current_timeline(PGconn *conn)
elog(WARNING, "Invalid value of timeline_id %s", val);
/* TODO 3.0 remove it and just error out */
- return get_current_timeline_from_control(false);
+ return get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false);
}
return tli;
@@ -190,15 +188,15 @@ get_current_timeline(PGconn *conn)
/* Get timeline from pg_control file */
TimeLineID
-get_current_timeline_from_control(bool safe)
+get_current_timeline_from_control(const char *pgdata_path, fio_location location, bool safe)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... */
- buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size,
- safe, FIO_DB_HOST);
+ buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size,
+ safe, location);
if (safe && buffer == NULL)
return 0;
@@ -249,14 +247,14 @@ get_checkpoint_location(PGconn *conn)
}
uint64
-get_system_identifier(const char *pgdata_path)
+get_system_identifier(const char *pgdata_path, fio_location location)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... */
- buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST);
+ buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location);
if (buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
@@ -299,7 +297,7 @@ get_remote_system_identifier(PGconn *conn)
}
uint32
-get_xlog_seg_size(char *pgdata_path)
+get_xlog_seg_size(const char *pgdata_path)
{
#if PG_VERSION_NUM >= 110000
ControlFileData ControlFile;
@@ -351,15 +349,31 @@ get_pgcontrol_checksum(const char *pgdata_path)
return ControlFile.crc;
}
+DBState
+get_system_dbstate(const char *pgdata_path, fio_location location)
+{
+ ControlFileData ControlFile;
+ char *buffer;
+ size_t size;
+
+ buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location);
+ if (buffer == NULL)
+ return 0;
+ digestControlFile(&ControlFile, buffer, size);
+ pg_free(buffer);
+
+ return ControlFile.state;
+}
+
void
-get_redo(const char *pgdata_path, RedoParams *redo)
+get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... */
- buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST);
+ buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, pgdata_location);
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
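
get_system_dbstate() exposes the cluster state word from pg_control, letting catchup validate a data directory without connecting to it. A hedged usage sketch — DBState and the DB_* constants come from catalog/pg_control.h, and both dest_pgdata and the policy shown are assumptions:

    DBState state = get_system_dbstate(dest_pgdata, FIO_LOCAL_HOST);

    if (state != DB_SHUTDOWNED && state != DB_SHUTDOWNED_IN_RECOVERY)
        elog(ERROR, "Destination directory contains a cluster that was not shut down cleanly");
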
@@ -516,6 +530,29 @@ status2str(BackupStatus status)
return statusName[status];
}
+const char *
+status2str_color(BackupStatus status)
+{
+ char *status_str = pgut_malloc(20);
+
+ /* UNKNOWN */
+ if (status == BACKUP_STATUS_INVALID)
+ snprintf(status_str, 20, "%s%s%s", TC_YELLOW_BOLD, "UNKNOWN", TC_RESET);
+ /* CORRUPT, ERROR and ORPHAN */
+ else if (status == BACKUP_STATUS_CORRUPT || status == BACKUP_STATUS_ERROR ||
+ status == BACKUP_STATUS_ORPHAN)
+ snprintf(status_str, 20, "%s%s%s", TC_RED_BOLD, statusName[status], TC_RESET);
+ /* MERGING, MERGED, DELETING and DELETED */
+ else if (status == BACKUP_STATUS_MERGING || status == BACKUP_STATUS_MERGED ||
+ status == BACKUP_STATUS_DELETING || status == BACKUP_STATUS_DELETED)
+ snprintf(status_str, 20, "%s%s%s", TC_YELLOW_BOLD, statusName[status], TC_RESET);
+ /* OK and DONE */
+ else
+ snprintf(status_str, 20, "%s%s%s", TC_GREEN_BOLD, statusName[status], TC_RESET);
+
+ return status_str;
+}
+
BackupStatus
str2status(const char *status)
{
diff --git a/src/utils/configuration.c b/src/utils/configuration.c
index d6a7d069e..04bfbbe3b 100644
--- a/src/utils/configuration.c
+++ b/src/utils/configuration.c
@@ -87,6 +87,63 @@ static const unit_conversion time_unit_conversion_table[] =
{""} /* end of table marker */
};
+/* Order is important, keep it in sync with utils/configuration.h:enum ProbackupSubcmd declaration */
+static char const * const subcmd_names[] =
+{
+ "NO_CMD",
+ "init",
+ "add-instance",
+ "del-instance",
+ "archive-push",
+ "archive-get",
+ "backup",
+ "restore",
+ "validate",
+ "delete",
+ "merge",
+ "show",
+ "set-config",
+ "set-backup",
+ "show-config",
+ "checkdb",
+ "ssh",
+ "agent",
+ "help",
+ "version",
+ "catchup",
+};
+
+ProbackupSubcmd
+parse_subcmd(char const * const subcmd_str)
+{
+ struct {
+ ProbackupSubcmd id;
+ char *name;
+ }
+ static const subcmd_additional_names[] = {
+ { HELP_CMD, "--help" },
+ { HELP_CMD, "-?" },
+ { VERSION_CMD, "--version" },
+ { VERSION_CMD, "-V" },
+ };
+
+ int i;
+ for(i = (int)NO_CMD + 1; i < sizeof(subcmd_names) / sizeof(subcmd_names[0]); ++i)
+ if(strcmp(subcmd_str, subcmd_names[i]) == 0)
+ return (ProbackupSubcmd)i;
+ for(i = 0; i < sizeof(subcmd_additional_names) / sizeof(subcmd_additional_names[0]); ++i)
+ if(strcmp(subcmd_str, subcmd_additional_names[i].name) == 0)
+ return subcmd_additional_names[i].id;
+ return NO_CMD;
+}
+
+char const *
+get_subcmd_name(ProbackupSubcmd const subcmd)
+{
+ Assert((int)subcmd < sizeof(subcmd_names) / sizeof(subcmd_names[0]));
+ return subcmd_names[(int)subcmd];
+}
+
/*
* Reading functions.
*/
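
A short sketch of the intended use of the new helpers in main(): argv[1] is parsed into the enum once, and get_subcmd_name() turns it back into text for messages (the surrounding argv handling is assumed):

    ProbackupSubcmd backup_subcmd = NO_CMD;

    if (argc > 1)
        backup_subcmd = parse_subcmd(argv[1]);
    if (backup_subcmd == NO_CMD)
        elog(ERROR, "Unknown subcommand \"%s\"", argc > 1 ? argv[1] : "");

    elog(LOG, "command: %s", get_subcmd_name(backup_subcmd));
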
diff --git a/src/utils/configuration.h b/src/utils/configuration.h
index eea8c7746..3a5de4b83 100644
--- a/src/utils/configuration.h
+++ b/src/utils/configuration.h
@@ -16,6 +16,32 @@
#define INFINITE_STR "INFINITE"
+/* Order is important, keep it in sync with configuration.c:subcmd_names[] and help.c:help_command() */
+typedef enum ProbackupSubcmd
+{
+ NO_CMD = 0,
+ INIT_CMD,
+ ADD_INSTANCE_CMD,
+ DELETE_INSTANCE_CMD,
+ ARCHIVE_PUSH_CMD,
+ ARCHIVE_GET_CMD,
+ BACKUP_CMD,
+ RESTORE_CMD,
+ VALIDATE_CMD,
+ DELETE_CMD,
+ MERGE_CMD,
+ SHOW_CMD,
+ SET_CONFIG_CMD,
+ SET_BACKUP_CMD,
+ SHOW_CONFIG_CMD,
+ CHECKDB_CMD,
+ SSH_CMD,
+ AGENT_CMD,
+ HELP_CMD,
+ VERSION_CMD,
+ CATCHUP_CMD,
+} ProbackupSubcmd;
+
typedef enum OptionSource
{
SOURCE_DEFAULT,
@@ -75,6 +101,8 @@ struct ConfigOption
#define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME)
+extern ProbackupSubcmd parse_subcmd(char const * const subcmd_str);
+extern char const *get_subcmd_name(ProbackupSubcmd const subcmd);
extern int config_get_opt(int argc, char **argv, ConfigOption cmd_options[],
ConfigOption options[]);
extern int config_read_opt(const char *path, ConfigOption options[], int elevel,
diff --git a/src/utils/file.c b/src/utils/file.c
index ef322997f..b808d6293 100644
--- a/src/utils/file.c
+++ b/src/utils/file.c
@@ -48,7 +48,6 @@ typedef struct
size_t size;
time_t mtime;
bool is_datafile;
- bool is_database;
Oid tblspcOid;
Oid dbOid;
Oid relOid;
@@ -83,6 +82,24 @@ typedef struct
#undef fopen(a, b)
#endif
+void
+setMyLocation(ProbackupSubcmd const subcmd)
+{
+
+#ifdef WIN32
+ if (IsSshProtocol())
+ elog(ERROR, "Currently remote operations on Windows are not supported");
+#endif
+
+ MyLocation = IsSshProtocol()
+ ? (subcmd == ARCHIVE_PUSH_CMD || subcmd == ARCHIVE_GET_CMD)
+ ? FIO_DB_HOST
+ : (subcmd == BACKUP_CMD || subcmd == RESTORE_CMD || subcmd == ADD_INSTANCE_CMD || subcmd == CATCHUP_CMD)
+ ? FIO_BACKUP_HOST
+ : FIO_LOCAL_HOST
+ : FIO_LOCAL_HOST;
+}
+
/* Use specified file descriptors as stdin/stdout for FIO functions */
void
fio_redirect(int in, int out, int err)
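
The nested conditional in setMyLocation() above is compact but hard to scan; the same mapping written out as if/else, purely for readability:

    if (!IsSshProtocol())
        MyLocation = FIO_LOCAL_HOST;
    else if (subcmd == ARCHIVE_PUSH_CMD || subcmd == ARCHIVE_GET_CMD)
        MyLocation = FIO_DB_HOST;       /* archive commands run on the database host */
    else if (subcmd == BACKUP_CMD || subcmd == RESTORE_CMD ||
             subcmd == ADD_INSTANCE_CMD || subcmd == CATCHUP_CMD)
        MyLocation = FIO_BACKUP_HOST;
    else
        MyLocation = FIO_LOCAL_HOST;
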
@@ -589,7 +606,15 @@ fio_close(int fd)
fio_fdset &= ~(1 << hdr.handle);
IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr));
- /* Note, that file is closed without waiting for confirmation */
+
+ /* Wait for response */
+ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr));
+
+ if (hdr.arg != 0)
+ {
+ errno = hdr.arg;
+ return -1;
+ }
return 0;
}
@@ -599,6 +624,22 @@ fio_close(int fd)
}
}
+/* Close remote file implementation */
+static void
+fio_close_impl(int fd, int out)
+{
+ fio_header hdr;
+
+ hdr.cop = FIO_CLOSE;
+ hdr.arg = 0;
+
+ if (close(fd) != 0)
+ hdr.arg = errno;
+
+ /* send header */
+ IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
+}
+
/* Truncate stdio file */
int
fio_ftruncate(FILE* f, off_t size)
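
Since fio_close() now waits for the agent's confirmation, a failed remote close() (e.g. a deferred write error on the backup host) finally reaches the caller; an illustrative check:

    if (fio_close(fd) != 0)
        elog(ERROR, "Cannot close file \"%s\": %s", path, strerror(errno));
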
@@ -1098,6 +1139,46 @@ fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location lo
}
}
+/*
+ * Read the value of a symbolic link.
+ * This is a wrapper around the readlink() syscall.
+ * Side effect: the result may be truncated (the caller
+ * can detect this by checking whether the returned
+ * value is >= valsiz).
+ */
+ssize_t
+fio_readlink(const char *path, char *value, size_t valsiz, fio_location location)
+{
+ if (!fio_is_remote(location))
+ {
+ /* readlink does not append a trailing \0 */
+ ssize_t len = readlink(path, value, valsiz);
+ value[len < valsiz ? len : valsiz] = '\0';
+ return len;
+ }
+ else
+ {
+ fio_header hdr;
+ size_t path_len = strlen(path) + 1;
+
+ hdr.cop = FIO_READLINK;
+ hdr.handle = -1;
+ Assert(valsiz <= UINT_MAX); /* max value of fio_header.arg */
+ hdr.arg = valsiz;
+ hdr.size = path_len;
+
+ IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr));
+ IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len);
+
+ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr));
+ Assert(hdr.cop == FIO_READLINK);
+ Assert(hdr.size <= valsiz);
+ IO_CHECK(fio_read_all(fio_stdin, value, hdr.size), hdr.size);
+ value[hdr.size < valsiz ? hdr.size : valsiz] = '\0';
+ return hdr.size;
+ }
+}
+
/* Check presence of the file */
int
fio_access(char const* path, int mode, fio_location location)
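
A usage sketch for fio_readlink(), e.g. resolving a tablespace symlink under pg_tblspc on the database host; path here is an assumption:

    char    linked[MAXPGPATH];
    ssize_t len = fio_readlink(path, linked, sizeof(linked), FIO_DB_HOST);

    if (len < 0)
        elog(ERROR, "Cannot read symbolic link \"%s\": %s", path, strerror(errno));
    else if ((size_t) len >= sizeof(linked))
        elog(ERROR, "Symbolic link \"%s\" target is too long", path);
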
@@ -1728,7 +1809,7 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
/* send message with header
- 8bytes 24bytes var var
+ 16bytes 24bytes var var
--------------------------------------------------------------
| fio_header | fio_send_request | FILE PATH | BITMAP(if any) |
--------------------------------------------------------------
@@ -1862,6 +1943,198 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
return n_blocks_read;
}
+/*
+ * Return the number of blocks actually(!) read; attempts or
+ * half-read blocks are not counted.
+ * Return values in case of error:
+ * FILE_MISSING
+ * OPEN_FAILED
+ * READ_FAILED
+ * PAGE_CORRUPTION
+ * WRITE_FAILED
+ *
+ * If none of the above, this function returns the number of
+ * blocks read by the remote agent.
+ *
+ * In DELTA mode horizonLsn must be a valid LSN,
+ * otherwise it should be set to InvalidXLogRecPtr.
+ * Adapted from fio_send_pages.
+ */
+int
+fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
+ XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version,
+ bool use_pagemap, BlockNumber* err_blknum, char **errormsg,
+ BackupPageHeader2 **headers)
+{
+ FILE *out = NULL;
+ char *out_buf = NULL;
+ struct {
+ fio_header hdr;
+ fio_send_request arg;
+ } req;
+ BlockNumber n_blocks_read = 0;
+ BlockNumber blknum = 0;
+
+ /* send message with header
+
+ 16bytes 24bytes var var
+ --------------------------------------------------------------
+ | fio_header | fio_send_request | FILE PATH | BITMAP(if any) |
+ --------------------------------------------------------------
+ */
+
+ req.hdr.cop = FIO_SEND_PAGES;
+
+ if (use_pagemap)
+ {
+ req.hdr.size = sizeof(fio_send_request) + (*file).pagemap.bitmapsize + strlen(from_fullpath) + 1;
+ req.arg.bitmapsize = (*file).pagemap.bitmapsize;
+
+ /* TODO: add optimization for the case of pagemap
+ * containing small number of blocks with big serial numbers:
+ * https://github.com/postgrespro/pg_probackup/blob/remote_page_backup/src/utils/file.c#L1211
+ */
+ }
+ else
+ {
+ req.hdr.size = sizeof(fio_send_request) + strlen(from_fullpath) + 1;
+ req.arg.bitmapsize = 0;
+ }
+
+ req.arg.nblocks = file->size/BLCKSZ;
+ req.arg.segmentno = file->segno * RELSEG_SIZE;
+ req.arg.horizonLsn = horizonLsn;
+ req.arg.checksumVersion = checksum_version;
+ req.arg.calg = calg;
+ req.arg.clevel = clevel;
+ req.arg.path_len = strlen(from_fullpath) + 1;
+
+ file->compress_alg = calg; /* TODO: why is compress_alg set here? */
+
+//<-----
+// datapagemap_iterator_t *iter;
+// BlockNumber blkno;
+// iter = datapagemap_iterate(pagemap);
+// while (datapagemap_next(iter, &blkno))
+// elog(INFO, "block %u", blkno);
+// pg_free(iter);
+//<-----
+
+ /* send header */
+ IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req));
+
+ /* send file path */
+ IO_CHECK(fio_write_all(fio_stdout, from_fullpath, req.arg.path_len), req.arg.path_len);
+
+ /* send pagemap if any */
+ if (use_pagemap)
+ IO_CHECK(fio_write_all(fio_stdout, (*file).pagemap.bitmap, (*file).pagemap.bitmapsize), (*file).pagemap.bitmapsize);
+
+ out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_BACKUP_HOST);
+ if (out == NULL)
+ elog(ERROR, "Cannot open restore target file \"%s\": %s", to_fullpath, strerror(errno));
+
+ /* update file permission */
+ if (fio_chmod(to_fullpath, file->mode, FIO_BACKUP_HOST) == -1)
+ elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath,
+ strerror(errno));
+
+ elog(VERBOSE, "ftruncate file \"%s\" to size %lu",
+ to_fullpath, file->size);
+ if (fio_ftruncate(out, file->size) == -1)
+ elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
+ to_fullpath, file->size, strerror(errno));
+
+ if (!fio_is_remote_file(out))
+ {
+ out_buf = pgut_malloc(STDIO_BUFSIZE);
+ setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE);
+ }
+
+ while (true)
+ {
+ fio_header hdr;
+ char buf[BLCKSZ + sizeof(BackupPageHeader)];
+ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr));
+
+ if (interrupted)
+ elog(ERROR, "Interrupted during page reading");
+
+ if (hdr.cop == FIO_ERROR)
+ {
+ /* FILE_MISSING, OPEN_FAILED and READ_FAILED */
+ if (hdr.size > 0)
+ {
+ IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size);
+ *errormsg = pgut_malloc(hdr.size);
+ snprintf(*errormsg, hdr.size, "%s", buf);
+ }
+
+ return hdr.arg;
+ }
+ else if (hdr.cop == FIO_SEND_FILE_CORRUPTION)
+ {
+ *err_blknum = hdr.arg;
+
+ if (hdr.size > 0)
+ {
+ IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size);
+ *errormsg = pgut_malloc(hdr.size);
+ snprintf(*errormsg, hdr.size, "%s", buf);
+ }
+ return PAGE_CORRUPTION;
+ }
+ else if (hdr.cop == FIO_SEND_FILE_EOF)
+ {
+ /* n_blocks_read reported by EOF */
+ n_blocks_read = hdr.arg;
+
+ /* receive headers if any */
+ if (hdr.size > 0)
+ {
+ *headers = pgut_malloc(hdr.size);
+ IO_CHECK(fio_read_all(fio_stdin, *headers, hdr.size), hdr.size);
+ file->n_headers = (hdr.size / sizeof(BackupPageHeader2)) -1;
+ }
+
+ break;
+ }
+ else if (hdr.cop == FIO_PAGE)
+ {
+ blknum = hdr.arg;
+
+ Assert(hdr.size <= sizeof(buf));
+ IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size);
+
+ COMP_FILE_CRC32(true, file->crc, buf, hdr.size);
+
+ if (fio_fseek(out, blknum * BLCKSZ) < 0)
+ {
+ elog(ERROR, "Cannot seek block %u of \"%s\": %s",
+ blknum, to_fullpath, strerror(errno));
+ }
+ // an uncompressed block with a BackupPageHeader is expected here
+ // TODO: add an assert for this?
+ if (fio_fwrite(out, buf + sizeof(BackupPageHeader), hdr.size - sizeof(BackupPageHeader)) != BLCKSZ)
+ {
+ fio_fclose(out);
+ *err_blknum = blknum;
+ return WRITE_FAILED;
+ }
+ file->write_size += BLCKSZ;
+ file->uncompressed_size += BLCKSZ;
+ }
+ else
+ elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop);
+ }
+
+ if (out)
+ fclose(out);
+ pg_free(out_buf);
+
+ return n_blocks_read;
+}
+
/* TODO: read file using large buffer
* Return codes:
* FIO_ERROR:
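
A hedged caller sketch for fio_copy_pages(), modeled on how fio_send_pages() is driven; the variables and the error-handling policy are assumptions, not code from this patch:

    char              *errormsg = NULL;
    BlockNumber        err_blknum = 0;
    BackupPageHeader2 *headers = NULL;

    int rc = fio_copy_pages(to_fullpath, from_fullpath, file,
                            InvalidXLogRecPtr,   /* no horizon LSN: copy every page */
                            NONE_COMPRESS, 1, checksum_version,
                            false /* use_pagemap */,
                            &err_blknum, &errormsg, &headers);

    if (rc == PAGE_CORRUPTION)
        elog(ERROR, "Corruption in file \"%s\", block %u: %s",
             from_fullpath, err_blknum, errormsg ? errormsg : "");
    else if (rc < 0)
        elog(ERROR, "Cannot copy file \"%s\": %s",
             from_fullpath, errormsg ? errormsg : strerror(errno));
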
@@ -2043,13 +2316,13 @@ fio_send_pages_impl(int out, char* buf)
n_blocks_read++;
/*
- * horizonLsn is not 0 only in case of delta backup.
+ * horizonLsn is not 0 only in case of delta and ptrack backup.
* As far as unsigned number are always greater or equal than zero,
* there is no sense to add more checks.
*/
- if ((req->horizonLsn == InvalidXLogRecPtr) || /* full, page, ptrack */
+ if ((req->horizonLsn == InvalidXLogRecPtr) || /* full, page */
(page_st.lsn == InvalidXLogRecPtr) || /* zeroed page */
- (req->horizonLsn > 0 && page_st.lsn > req->horizonLsn)) /* delta */
+ (req->horizonLsn > 0 && page_st.lsn > req->horizonLsn)) /* delta, ptrack */
{
int compressed_size = 0;
char write_buffer[BLCKSZ*2];
@@ -2529,7 +2802,6 @@ fio_list_dir_internal(parray *files, const char *root, bool exclude,
file->size = fio_file.size;
file->mtime = fio_file.mtime;
file->is_datafile = fio_file.is_datafile;
- file->is_database = fio_file.is_database;
file->tblspcOid = fio_file.tblspcOid;
file->dbOid = fio_file.dbOid;
file->relOid = fio_file.relOid;
@@ -2603,7 +2875,6 @@ fio_list_dir_impl(int out, char* buf)
fio_file.size = file->size;
fio_file.mtime = file->mtime;
fio_file.is_datafile = file->is_datafile;
- fio_file.is_database = file->is_database;
fio_file.tblspcOid = file->tblspcOid;
fio_file.dbOid = file->dbOid;
fio_file.relOid = file->relOid;
@@ -2979,7 +3250,7 @@ fio_communicate(int in, int out)
IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
break;
case FIO_CLOSE: /* Close file */
- SYS_CHECK(close(fd[hdr.handle]));
+ fio_close_impl(fd[hdr.handle], out);
break;
case FIO_WRITE: /* Write to the current position in file */
// IO_CHECK(fio_write_all(fd[hdr.handle], buf, hdr.size), hdr.size);
@@ -3108,6 +3379,26 @@ fio_communicate(int in, int out)
case FIO_GET_ASYNC_ERROR:
fio_get_async_error_impl(out);
break;
+ case FIO_READLINK: /* Read content of a symbolic link */
+ {
+ /*
+ * We need buf to hold both the arguments and the result at once:
+ * hdr.size = strlen(symlink_name) + 1
+ * hdr.arg = buffer size for the answer (symlink content)
+ */
+ size_t filename_size = (size_t)hdr.size;
+ if (filename_size + hdr.arg > buf_size) {
+ buf_size = filename_size + hdr.arg;
+ buf = (char*)realloc(buf, buf_size);
+ }
+ rc = readlink(buf, buf + filename_size, hdr.arg);
+ hdr.cop = FIO_READLINK;
+ hdr.size = rc > 0 ? rc : 0;
+ IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
+ if (hdr.size != 0)
+ IO_CHECK(fio_write_all(out, buf + filename_size, hdr.size), hdr.size);
+ }
+ break;
default:
Assert(false);
}
diff --git a/src/utils/file.h b/src/utils/file.h
index ad65b9901..edb5ea0f9 100644
--- a/src/utils/file.h
+++ b/src/utils/file.h
@@ -55,7 +55,8 @@ typedef enum
FIO_LIST_DIR,
FIO_CHECK_POSTMASTER,
FIO_GET_ASYNC_ERROR,
- FIO_WRITE_ASYNC
+ FIO_WRITE_ASYNC,
+ FIO_READLINK
} fio_operations;
typedef enum
@@ -128,6 +129,7 @@ extern int fio_mkdir(char const* path, int mode, fio_location location);
extern int fio_chmod(char const* path, int mode, fio_location location);
extern int fio_access(char const* path, int mode, fio_location location);
extern int fio_stat(char const* path, struct stat* st, bool follow_symlinks, fio_location location);
+extern ssize_t fio_readlink(const char *path, char *value, size_t valsiz, fio_location location);
extern DIR* fio_opendir(char const* path, fio_location location);
extern struct dirent * fio_readdir(DIR *dirp);
extern int fio_closedir(DIR *dirp);
diff --git a/src/utils/logger.c b/src/utils/logger.c
index f039d4a5d..70bd5dcc4 100644
--- a/src/utils/logger.c
+++ b/src/utils/logger.c
@@ -39,6 +39,10 @@ typedef enum
PG_FATAL
} eLogType;
+#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
+#endif
+
void pg_log(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3);
static void elog_internal(int elevel, bool file_only, const char *message);
@@ -116,6 +120,84 @@ init_logger(const char *root_path, LoggerConfig *config)
#endif
}
+/*
+ * Check that we are connected to terminal and
+ * enable ANSI escape codes for Windows if possible
+ */
+void
+init_console(void)
+{
+
+ /* no point in text coloring if we are not connected to a terminal */
+ if (!isatty(fileno(stderr)) ||
+ !isatty(fileno(stdout)))
+ {
+ show_color = false;
+ return;
+ }
+
+#ifdef WIN32
+ HANDLE hOut = INVALID_HANDLE_VALUE;
+ HANDLE hErr = INVALID_HANDLE_VALUE;
+ DWORD dwMode_out = 0;
+ DWORD dwMode_err = 0;
+
+ hOut = GetStdHandle(STD_OUTPUT_HANDLE);
+ if (hOut == INVALID_HANDLE_VALUE || !hOut)
+ {
+ show_color = false;
+ _dosmaperr(GetLastError());
+ elog(WARNING, "Failed to get terminal stdout handle: %s", strerror(errno));
+ return;
+ }
+
+ hErr = GetStdHandle(STD_ERROR_HANDLE);
+ if (hErr == INVALID_HANDLE_VALUE || !hErr)
+ {
+ show_color = false;
+ _dosmaperr(GetLastError());
+ elog(WARNING, "Failed to get terminal stderror handle: %s", strerror(errno));
+ return;
+ }
+
+ if (!GetConsoleMode(hOut, &dwMode_out))
+ {
+ show_color = false;
+ _dosmaperr(GetLastError());
+ elog(WARNING, "Failed to get console mode for stdout: %s", strerror(errno));
+ return;
+ }
+
+ if (!GetConsoleMode(hErr, &dwMode_err))
+ {
+ show_color = false;
+ _dosmaperr(GetLastError());
+ elog(WARNING, "Failed to get console mode for stderr: %s", strerror(errno));
+ return;
+ }
+
+ /* Add ANSI codes support */
+ dwMode_out |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+ dwMode_err |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+
+ if (!SetConsoleMode(hOut, dwMode_out))
+ {
+ show_color = false;
+ _dosmaperr(GetLastError());
+ elog(WARNING, "Cannot set console mode for stdout: %s", strerror(errno));
+ return;
+ }
+
+ if (!SetConsoleMode(hErr, dwMode_err))
+ {
+ show_color = false;
+ _dosmaperr(GetLastError());
+ elog(WARNING, "Cannot set console mode for stderr: %s", strerror(errno));
+ return;
+ }
+#endif
+}
+
static void
write_elevel(FILE *stream, int elevel)
{
@@ -268,10 +350,26 @@ elog_internal(int elevel, bool file_only, const char *message)
fprintf(stderr, "%s ", str_pid);
fprintf(stderr, "%s ", str_thread);
}
+ else if (show_color)
+ {
+ /* color WARNING and ERROR messages */
+ if (elevel == WARNING)
+ fprintf(stderr, "%s", TC_YELLOW_BOLD);
+ else if (elevel == ERROR)
+ fprintf(stderr, "%s", TC_RED_BOLD);
+ }
write_elevel(stderr, elevel);
- fprintf(stderr, "%s\n", message);
+ /* main payload */
+ fprintf(stderr, "%s", message);
+
+ /* reset color to default */
+ if (show_color && (elevel == WARNING || elevel == ERROR))
+ fprintf(stderr, "%s", TC_RESET);
+
+ fprintf(stderr, "\n");
+
fflush(stderr);
}
diff --git a/src/utils/logger.h b/src/utils/logger.h
index 37b6ff095..6a7407e41 100644
--- a/src/utils/logger.h
+++ b/src/utils/logger.h
@@ -51,6 +51,7 @@ extern void elog(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);
extern void elog_file(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);
extern void init_logger(const char *root_path, LoggerConfig *config);
+extern void init_console(void);
extern int parse_log_level(const char *level);
extern const char *deparse_log_level(int level);
diff --git a/src/utils/parray.c b/src/utils/parray.c
index 31148ee9a..792e26907 100644
--- a/src/utils/parray.c
+++ b/src/utils/parray.c
@@ -175,7 +175,7 @@ parray_rm(parray *array, const void *key, int(*compare)(const void *, const void
size_t
parray_num(const parray *array)
{
- return array->used;
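+ /* tolerate a NULL array by treating it as empty */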
+ return array != NULL ? array->used : (size_t) 0;
}
void
@@ -198,6 +198,13 @@ parray_bsearch(parray *array, const void *key, int(*compare)(const void *, const
return bsearch(&key, array->data, array->used, sizeof(void *), compare);
}
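+/* Like parray_bsearch(), but return the index of the found element, or -1 if it is absent */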
+int
+parray_bsearch_index(parray *array, const void *key, int(*compare)(const void *, const void *))
+{
+ void **elem = parray_bsearch(array, key, compare);
+ return elem != NULL ? elem - array->data : -1;
+}
+
/* checks that parray contains element */
bool parray_contains(parray *array, void *elem)
{
diff --git a/src/utils/parray.h b/src/utils/parray.h
index 85d7383f3..e92ad728c 100644
--- a/src/utils/parray.h
+++ b/src/utils/parray.h
@@ -29,6 +29,7 @@ extern bool parray_rm(parray *array, const void *key, int(*compare)(const void *
extern size_t parray_num(const parray *array);
extern void parray_qsort(parray *array, int(*compare)(const void *, const void *));
extern void *parray_bsearch(parray *array, const void *key, int(*compare)(const void *, const void *));
+extern int parray_bsearch_index(parray *array, const void *key, int(*compare)(const void *, const void *));
extern void parray_walk(parray *array, void (*action)(void *));
extern bool parray_contains(parray *array, void *elem);
diff --git a/src/utils/pgut.c b/src/utils/pgut.c
index 1d8845c23..33de24b3f 100644
--- a/src/utils/pgut.c
+++ b/src/utils/pgut.c
@@ -3,7 +3,7 @@
* pgut.c
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2017-2019, Postgres Professional
+ * Portions Copyright (c) 2017-2021, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@@ -926,6 +926,20 @@ pgut_strdup(const char *str)
return ret;
}
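+/* Length-limited analogue of pgut_strdup(); note that strndup() is POSIX, so platforms lacking it may need a local fallback */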
+char *
+pgut_strndup(const char *str, size_t n)
+{
+ char *ret;
+
+ if (str == NULL)
+ return NULL;
+
+ if ((ret = strndup(str, n)) == NULL)
+ elog(ERROR, "could not duplicate string \"%s\": %s",
+ str, strerror(errno));
+ return ret;
+}
+
FILE *
pgut_fopen(const char *path, const char *mode, bool missing_ok)
{
diff --git a/src/utils/pgut.h b/src/utils/pgut.h
index 77337a945..a1d7b5a93 100644
--- a/src/utils/pgut.h
+++ b/src/utils/pgut.h
@@ -3,7 +3,7 @@
* pgut.h
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2017-2019, Postgres Professional
+ * Portions Copyright (c) 2017-2021, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@@ -62,6 +62,7 @@ extern void *pgut_malloc(size_t size);
extern void *pgut_malloc0(size_t size);
extern void *pgut_realloc(void *p, size_t size);
extern char *pgut_strdup(const char *str);
+extern char *pgut_strndup(const char *str, size_t n);
#define pgut_new(type) ((type *) pgut_malloc(sizeof(type)))
#define pgut_new0(type) ((type *) pgut_malloc0(sizeof(type)))
diff --git a/src/validate.c b/src/validate.c
index 6bedd7269..4044ac158 100644
--- a/src/validate.c
+++ b/src/validate.c
@@ -16,7 +16,7 @@
#include "utils/thread.h"
static void *pgBackupValidateFiles(void *arg);
-static void do_validate_instance(void);
+static void do_validate_instance(InstanceState *instanceState);
static bool corrupted_backup_found = false;
static bool skipped_due_to_lock = false;
@@ -75,7 +75,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params)
{
elog(WARNING, "Backup %s has status %s, change it to ERROR and skip validation",
base36enc(backup->start_time), status2str(backup->status));
- write_backup_status(backup, BACKUP_STATUS_ERROR, instance_name, true);
+ write_backup_status(backup, BACKUP_STATUS_ERROR, true);
corrupted_backup_found = true;
return;
}
@@ -121,7 +121,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params)
{
elog(WARNING, "Backup %s file list is corrupted", base36enc(backup->start_time));
backup->status = BACKUP_STATUS_CORRUPT;
- write_backup_status(backup, BACKUP_STATUS_CORRUPT, instance_name, true);
+ write_backup_status(backup, BACKUP_STATUS_CORRUPT, true);
return;
}
@@ -130,11 +130,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params)
// params->partial_restore_type);
/* setup threads */
- for (i = 0; i < parray_num(files); i++)
- {
- pgFile *file = (pgFile *) parray_get(files, i);
- pg_atomic_clear_flag(&file->lock);
- }
+ pfilearray_clear_locks(files);
/* init thread args with own file lists */
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
@@ -190,7 +186,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params)
backup->status = BACKUP_STATUS_CORRUPT;
write_backup_status(backup, corrupted ? BACKUP_STATUS_CORRUPT :
- BACKUP_STATUS_OK, instance_name, true);
+ BACKUP_STATUS_OK, true);
if (corrupted)
elog(WARNING, "Backup %s data files are corrupted", base36enc(backup->start_time));
@@ -205,7 +201,6 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params)
{
char path[MAXPGPATH];
- //pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
join_path_components(path, backup->root_dir, DATABASE_FILE_LIST);
if (pgFileSize(path) >= (BLCKSZ*500))
@@ -215,7 +210,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params)
"https://github.com/postgrespro/pg_probackup/issues/132",
base36enc(backup->start_time));
backup->status = BACKUP_STATUS_CORRUPT;
- write_backup_status(backup, BACKUP_STATUS_CORRUPT, instance_name, true);
+ write_backup_status(backup, BACKUP_STATUS_CORRUPT, true);
}
}
}
@@ -381,39 +376,40 @@ pgBackupValidateFiles(void *arg)
/*
* Validate all backups in the backup catalog.
* If --instance option was provided, validate only backups of this instance.
+ *
+ * TODO: split into two functions: do_validate_catalog and do_validate_instance.
*/
int
-do_validate_all(void)
+do_validate_all(CatalogState *catalogState, InstanceState *instanceState)
{
corrupted_backup_found = false;
skipped_due_to_lock = false;
- if (instance_name == NULL)
+ if (instanceState == NULL)
{
/* Show list of instances */
- char path[MAXPGPATH];
DIR *dir;
struct dirent *dent;
/* open directory and list contents */
- join_path_components(path, backup_path, BACKUPS_DIR);
- dir = opendir(path);
+ dir = opendir(catalogState->backup_subdir_path);
if (dir == NULL)
- elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno));
+ elog(ERROR, "cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno));
errno = 0;
while ((dent = readdir(dir)))
{
- char conf_path[MAXPGPATH];
char child[MAXPGPATH];
struct stat st;
+ InstanceState *instanceState;
+
/* skip entries point current dir or parent dir */
if (strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0)
continue;
- join_path_components(child, path, dent->d_name);
+ join_path_components(child, catalogState->backup_subdir_path, dent->d_name);
if (lstat(child, &st) == -1)
elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno));
@@ -424,26 +420,30 @@ do_validate_all(void)
/*
* Initialize instance configuration.
*/
- instance_name = dent->d_name;
- sprintf(backup_instance_path, "%s/%s/%s",
- backup_path, BACKUPS_DIR, instance_name);
- sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
- join_path_components(conf_path, backup_instance_path,
- BACKUP_CATALOG_CONF_FILE);
- if (config_read_opt(conf_path, instance_options, ERROR, false,
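+ /* build the per-instance state for this catalog directory entry */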
+ instanceState = pgut_new(InstanceState);
+ strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH);
+
+ join_path_components(instanceState->instance_backup_subdir_path,
+ catalogState->backup_subdir_path, instanceState->instance_name);
+ join_path_components(instanceState->instance_wal_subdir_path,
+ catalogState->wal_subdir_path, instanceState->instance_name);
+ join_path_components(instanceState->instance_config_path,
+ instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE);
+
+ if (config_read_opt(instanceState->instance_config_path, instance_options, ERROR, false,
true) == 0)
{
- elog(WARNING, "Configuration file \"%s\" is empty", conf_path);
+ elog(WARNING, "Configuration file \"%s\" is empty", instanceState->instance_config_path);
corrupted_backup_found = true;
continue;
}
- do_validate_instance();
+ do_validate_instance(instanceState);
}
}
else
{
- do_validate_instance();
+ do_validate_instance(instanceState);
}
/* TODO: Probably we should have different exit code for every condition
@@ -473,17 +473,17 @@ do_validate_all(void)
* Validate all backups in the given instance of the backup catalog.
*/
static void
-do_validate_instance(void)
+do_validate_instance(InstanceState *instanceState)
{
int i;
int j;
parray *backups;
pgBackup *current_backup = NULL;
- elog(INFO, "Validate backups of the instance '%s'", instance_name);
+ elog(INFO, "Validate backups of the instance '%s'", instanceState->instance_name);
/* Get list of all backups sorted in order of descending start time */
- backups = catalog_get_backup_list(instance_name, INVALID_BACKUP_ID);
+ backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID);
/* Examine backups one by one and validate them */
for (i = 0; i < parray_num(backups); i++)
@@ -513,7 +513,7 @@ do_validate_instance(void)
if (current_backup->status == BACKUP_STATUS_OK ||
current_backup->status == BACKUP_STATUS_DONE)
{
- write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, instance_name, true);
+ write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true);
elog(WARNING, "Backup %s is orphaned because his parent %s is missing",
base36enc(current_backup->start_time),
parent_backup_id);
@@ -537,7 +537,7 @@ do_validate_instance(void)
if (current_backup->status == BACKUP_STATUS_OK ||
current_backup->status == BACKUP_STATUS_DONE)
{
- write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, instance_name, true);
+ write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true);
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
base36enc(current_backup->start_time), backup_id,
status2str(tmp_backup->status));
@@ -580,7 +580,7 @@ do_validate_instance(void)
/* Validate corresponding WAL files */
if (current_backup->status == BACKUP_STATUS_OK)
- validate_wal(current_backup, arclog_path, 0,
+ validate_wal(current_backup, instanceState->instance_wal_subdir_path, 0,
0, 0, current_backup->tli,
instance_config.xlog_seg_size);
@@ -610,7 +610,7 @@ do_validate_instance(void)
if (backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE)
{
- write_backup_status(backup, BACKUP_STATUS_ORPHAN, instance_name, true);
+ write_backup_status(backup, BACKUP_STATUS_ORPHAN, true);
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
base36enc(backup->start_time),
@@ -677,7 +677,7 @@ do_validate_instance(void)
{
/* Revalidation successful, validate corresponding WAL files */
- validate_wal(backup, arclog_path, 0,
+ validate_wal(backup, instanceState->instance_wal_subdir_path, 0,
0, 0, backup->tli,
instance_config.xlog_seg_size);
}
diff --git a/tests/__init__.py b/tests/__init__.py
index dbf84feea..080512760 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -6,7 +6,8 @@
retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \
compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \
- locking, remote, external, config, checkdb, set_backup, incr_restore
+ locking, remote, external, config, checkdb, set_backup, incr_restore, \
+ catchup
def load_tests(loader, tests, pattern):
@@ -23,6 +24,7 @@ def load_tests(loader, tests, pattern):
# suite.addTests(loader.loadTestsFromModule(auth_test))
suite.addTests(loader.loadTestsFromModule(archive))
suite.addTests(loader.loadTestsFromModule(backup))
+ suite.addTests(loader.loadTestsFromModule(catchup))
suite.addTests(loader.loadTestsFromModule(compatibility))
suite.addTests(loader.loadTestsFromModule(checkdb))
suite.addTests(loader.loadTestsFromModule(config))
diff --git a/tests/archive.py b/tests/archive.py
index 44fd7bcfb..7f0a69109 100644
--- a/tests/archive.py
+++ b/tests/archive.py
@@ -432,6 +432,11 @@ def test_archive_push_file_exists(self):
'pg_probackup archive-push completed successfully',
log_content)
+ # also check that console coloring codes have not slipped into the log file
+ self.assertNotIn('[0m', log_content)
+
# Clean after yourself
self.del_test_dir(module_name, fname)
@@ -728,7 +733,7 @@ def test_replica_archive(self):
# to original data
master.psql(
"postgres",
- "insert into t_heap as select i as id, md5(i::text) as text, "
+ "insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,512) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -763,7 +768,7 @@ def test_replica_archive(self):
# to original data
master.psql(
"postgres",
- "insert into t_heap as select i as id, md5(i::text) as text, "
+ "insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,80680) i")
@@ -908,6 +913,11 @@ def test_basic_master_and_replica_concurrent_archiving(self):
'checkpoint_timeout': '30s',
'archive_timeout': '10s'})
+ if self.get_version(master) < self.version_to_num('9.6.0'):
+ self.del_test_dir(module_name, fname)
+ return unittest.skip(
+ 'Skipped because backup from replica is not supported in PG 9.5')
+
replica = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
@@ -953,7 +963,7 @@ def test_basic_master_and_replica_concurrent_archiving(self):
master.psql(
"postgres",
- "insert into t_heap as select i as id, md5(i::text) as text, "
+ "insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
diff --git a/tests/auth_test.py b/tests/auth_test.py
index c84fdb981..78af21be9 100644
--- a/tests/auth_test.py
+++ b/tests/auth_test.py
@@ -190,9 +190,7 @@ def setUpClass(cls):
"GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; "
"GRANT EXECUTE ON FUNCTION txid_current() TO backup; "
"GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; "
- "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup; "
- "GRANT EXECUTE ON FUNCTION pg_ptrack_clear() TO backup; "
- "GRANT EXECUTE ON FUNCTION pg_ptrack_get_and_clear(oid, oid) TO backup;")
+ "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;")
cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass')
@classmethod
diff --git a/tests/backup.py b/tests/backup.py
index d713263c3..45fd137eb 100644
--- a/tests/backup.py
+++ b/tests/backup.py
@@ -18,9 +18,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# PGPRO-707
def test_backup_modes_archive(self):
"""standart backup modes with ARCHIVE WAL method"""
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -33,12 +30,7 @@ def test_backup_modes_archive(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
-
- backup_id = self.backup_node(backup_dir, 'node', node)
+ full_backup_id = self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
self.assertEqual(show_backup['status'], "OK")
@@ -47,7 +39,7 @@ def test_backup_modes_archive(self):
# postmaster.pid and postmaster.opts shouldn't be copied
excluded = True
db_dir = os.path.join(
- backup_dir, "backups", 'node', backup_id, "database")
+ backup_dir, "backups", 'node', full_backup_id, "database")
for f in os.listdir(db_dir):
if (
@@ -64,31 +56,30 @@ def test_backup_modes_archive(self):
page_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type="page")
- # print self.show_pb(node)
- show_backup = self.show_pb(backup_dir, 'node')[1]
+ show_backup_1 = self.show_pb(backup_dir, 'node')[1]
- self.assertEqual(show_backup['status'], "OK")
- self.assertEqual(show_backup['backup-mode'], "PAGE")
+ self.assertEqual(show_backup_1['status'], "OK")
+ self.assertEqual(show_backup_1['backup-mode'], "PAGE")
+ # delta backup mode
+ delta_backup_id = self.backup_node(
+ backup_dir, 'node', node, backup_type="delta")
+
+ show_backup_2 = self.show_pb(backup_dir, 'node')[2]
+ self.assertEqual(show_backup_2['status'], "OK")
+ self.assertEqual(show_backup_2['backup-mode'], "DELTA")
+
# Check parent backup
self.assertEqual(
- backup_id,
+ full_backup_id,
self.show_pb(
backup_dir, 'node',
- backup_id=show_backup['id'])["parent-backup-id"])
+ backup_id=show_backup_1['id'])["parent-backup-id"])
- # ptrack backup mode
- self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
-
- show_backup = self.show_pb(backup_dir, 'node')[2]
- self.assertEqual(show_backup['status'], "OK")
- self.assertEqual(show_backup['backup-mode'], "PTRACK")
-
- # Check parent backup
self.assertEqual(
page_backup_id,
self.show_pb(
backup_dir, 'node',
- backup_id=show_backup['id'])["parent-backup-id"])
+ backup_id=show_backup_2['id'])["parent-backup-id"])
# Clean after yourself
self.del_test_dir(module_name, fname)
@@ -118,10 +109,7 @@ def test_smooth_checkpoint(self):
# @unittest.skip("skip")
def test_incremental_backup_without_full(self):
- """page-level backup without validated full backup"""
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
+ """page backup without validated full backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -134,11 +122,6 @@ def test_incremental_backup_without_full(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
-
try:
self.backup_node(backup_dir, 'node', node, backup_type="page")
# we should die here because exception is what we expect to happen
@@ -154,29 +137,10 @@ def test_incremental_backup_without_full(self):
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
- try:
- self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
- # we should die here because exception is what we expect to happen
- self.assertEqual(
- 1, 0,
- "Expecting Error because page backup should not be possible "
- "without valid full backup.\n Output: {0} \n CMD: {1}".format(
- repr(self.output), self.cmd))
- except ProbackupException as e:
- self.assertTrue(
- "WARNING: Valid full backup on current timeline 1 is not found" in e.message and
- "ERROR: Create new full backup before an incremental one" in e.message,
- "\n Unexpected Error Message: {0}\n CMD: {1}".format(
- repr(e.message), self.cmd))
-
self.assertEqual(
self.show_pb(backup_dir, 'node')[0]['status'],
"ERROR")
- self.assertEqual(
- self.show_pb(backup_dir, 'node')[1]['status'],
- "ERROR")
-
# Clean after yourself
self.del_test_dir(module_name, fname)
@@ -242,64 +206,19 @@ def test_incremental_backup_corrupt_full(self):
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
- def test_ptrack_threads(self):
- """ptrack multi thread backup mode"""
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
- fname = self.id().split('.')[3]
- node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
- initdb_params=['--data-checksums'],
- ptrack_enable=True)
-
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
- self.init_pb(backup_dir)
- self.add_instance(backup_dir, 'node', node)
- self.set_archiving(backup_dir, 'node', node)
- node.slow_start()
-
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
-
- self.backup_node(
- backup_dir, 'node', node,
- backup_type="full", options=["-j", "4"])
- self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
-
- self.backup_node(
- backup_dir, 'node', node,
- backup_type="ptrack", options=["-j", "4"])
- self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
-
- # Clean after yourself
- self.del_test_dir(module_name, fname)
-
- # @unittest.skip("skip")
- def test_ptrack_threads_stream(self):
- """ptrack multi thread backup mode and stream"""
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
+ def test_delta_threads_stream(self):
+ """delta multi thread backup mode and stream"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
- initdb_params=['--data-checksums'],
- ptrack_enable=True)
+ initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
-
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
@@ -307,7 +226,7 @@ def test_ptrack_threads_stream(self):
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
self.backup_node(
backup_dir, 'node', node,
- backup_type="ptrack", options=["-j", "4", "--stream"])
+ backup_type="delta", options=["-j", "4", "--stream"])
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
# Clean after yourself
@@ -1459,76 +1378,6 @@ def test_drop_rel_during_backup_page(self):
# Clean after yourself
self.del_test_dir(module_name, fname)
- # @unittest.skip("skip")
- def test_drop_rel_during_backup_ptrack(self):
- """"""
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
- node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
- set_replication=True,
- ptrack_enable=self.ptrack,
- initdb_params=['--data-checksums'])
-
- self.init_pb(backup_dir)
- self.add_instance(backup_dir, 'node', node)
- self.set_archiving(backup_dir, 'node', node)
- node.slow_start()
-
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
-
- node.safe_psql(
- "postgres",
- "create table t_heap as select i"
- " as id from generate_series(0,100) i")
-
- relative_path = node.safe_psql(
- "postgres",
- "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()
-
- absolute_path = os.path.join(node.data_dir, relative_path)
-
- # FULL backup
- self.backup_node(backup_dir, 'node', node, options=['--stream'])
-
- # PTRACK backup
- gdb = self.backup_node(
- backup_dir, 'node', node, backup_type='ptrack',
- gdb=True, options=['--log-level-file=LOG'])
-
- gdb.set_breakpoint('backup_files')
- gdb.run_until_break()
-
- # REMOVE file
- os.remove(absolute_path)
-
- # File removed, we can proceed with backup
- gdb.continue_execution_until_exit()
-
- pgdata = self.pgdata_content(node.data_dir)
-
- with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
- log_content = f.read()
- self.assertTrue(
- 'LOG: File not found: "{0}"'.format(absolute_path) in log_content,
- 'File "{0}" should be deleted but it`s not'.format(absolute_path))
-
- node.cleanup()
- self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
-
- # Physical comparison
- pgdata_restored = self.pgdata_content(node.data_dir)
- self.compare_pgdata(pgdata, pgdata_restored)
-
- # Clean after yourself
- self.del_test_dir(module_name, fname)
-
# @unittest.skip("skip")
def test_persistent_slot_for_stream_backup(self):
""""""
@@ -1992,10 +1841,11 @@ def test_backup_with_least_privileges_role(self):
'postgres',
'CREATE DATABASE backupdb')
- if self.ptrack and node.major_version >= 12:
+ if self.ptrack:
node.safe_psql(
"backupdb",
- "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")
+ "CREATE SCHEMA ptrack; "
+ "CREATE EXTENSION ptrack WITH SCHEMA ptrack")
# PG 9.5
if self.get_version(node) < 90600:
@@ -2105,32 +1955,14 @@ def test_backup_with_least_privileges_role(self):
)
if self.ptrack:
- if node.major_version < 12:
- for fname in [
- 'pg_catalog.oideq(oid, oid)',
- 'pg_catalog.ptrack_version()',
- 'pg_catalog.pg_ptrack_clear()',
- 'pg_catalog.pg_ptrack_control_lsn()',
- 'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)',
- 'pg_catalog.pg_ptrack_get_and_clear(oid, oid)',
- 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)',
- 'pg_catalog.pg_stop_backup()']:
-
- node.safe_psql(
- "backupdb",
- "GRANT EXECUTE ON FUNCTION {0} "
- "TO backup".format(fname))
- else:
- fnames = [
- 'pg_catalog.ptrack_get_pagemapset(pg_lsn)',
- 'pg_catalog.ptrack_init_lsn()'
- ]
-
- for fname in fnames:
- node.safe_psql(
- "backupdb",
- "GRANT EXECUTE ON FUNCTION {0} "
- "TO backup".format(fname))
+ node.safe_psql(
+ "backupdb",
+ "GRANT USAGE ON SCHEMA ptrack TO backup")
+
+ node.safe_psql(
+ "backupdb",
+ "GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; "
+ "GRANT EXECUTE ON FUNCTION 'ptrack.ptrack_init_lsn()' TO backup; ")
if ProbackupTest.enterprise:
node.safe_psql(
@@ -2390,7 +2222,7 @@ def test_backup_with_less_privileges_role(self):
'postgres',
'CREATE DATABASE backupdb')
- if self.ptrack and node.major_version >= 12:
+ if self.ptrack:
node.safe_psql(
'backupdb',
'CREATE EXTENSION ptrack')
@@ -3451,3 +3283,115 @@ def test_basic_backup_default_transaction_read_only(self):
# Clean after yourself
self.del_test_dir(module_name, fname)
+
+ # @unittest.skip("skip")
+ def test_backup_atexit(self):
+ """"""
+ fname = self.id().split('.')[3]
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ set_replication=True,
+ ptrack_enable=self.ptrack,
+ initdb_params=['--data-checksums'])
+
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ self.set_archiving(backup_dir, 'node', node)
+ node.slow_start()
+
+ node.pgbench_init(scale=5)
+
+ # Full backup in streaming mode
+ gdb = self.backup_node(
+ backup_dir, 'node', node,
+ options=['--stream', '--log-level-file=VERBOSE'], gdb=True)
+
+ # break at streaming start
+ gdb.set_breakpoint('backup_data_file')
+ gdb.run_until_break()
+
+ gdb.remove_all_breakpoints()
+ gdb._execute('signal SIGINT')
+ sleep(1)
+
+ self.assertEqual(
+ self.show_pb(
+ backup_dir, 'node')[0]['status'], 'ERROR')
+
+ with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
+ log_content = f.read()
+ self.assertIn(
+ 'WARNING: backup in progress, stop backup',
+ log_content)
+
+ self.assertIn(
+ 'FROM pg_catalog.pg_stop_backup',
+ log_content)
+
+ self.assertIn(
+ 'setting its status to ERROR',
+ log_content)
+
+ # Clean after yourself
+ self.del_test_dir(module_name, fname)
+
+ # @unittest.skip("skip")
+ def test_pg_stop_backup_missing_permissions(self):
+ """"""
+ fname = self.id().split('.')[3]
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ set_replication=True,
+ ptrack_enable=self.ptrack,
+ initdb_params=['--data-checksums'])
+
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ self.set_archiving(backup_dir, 'node', node)
+ node.slow_start()
+
+ node.pgbench_init(scale=5)
+
+ self.simple_bootstrap(node, 'backup')
+
+ if self.get_version(node) < 90600:
+ node.safe_psql(
+ 'postgres',
+ 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup')
+ elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
+ node.safe_psql(
+ 'postgres',
+ 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup')
+ else:
+ node.safe_psql(
+ 'postgres',
+ 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup')
+
+ # Full backup in streaming mode
+ try:
+ self.backup_node(
+ backup_dir, 'node', node,
+ options=['--stream', '-U', 'backup'])
+ # we should die here because exception is what we expect to happen
+ self.assertEqual(
+ 1, 0,
+ "Expecting Error because of missing permissions on pg_stop_backup "
+ "\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ "ERROR: permission denied for function pg_stop_backup",
+ e.message,
+ "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+ repr(e.message), self.cmd))
+ self.assertIn(
+ "query was: SELECT pg_catalog.txid_snapshot_xmax",
+ e.message,
+ "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+ repr(e.message), self.cmd))
+
+ # Clean after yourself
+ self.del_test_dir(module_name, fname)
diff --git a/tests/catchup.py b/tests/catchup.py
new file mode 100644
index 000000000..5df538e42
--- /dev/null
+++ b/tests/catchup.py
@@ -0,0 +1,977 @@
+import os
+import signal
+import unittest
+from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
+
+module_name = 'catchup'
+
+class CatchupTest(ProbackupTest, unittest.TestCase):
+ def setUp(self):
+ self.fname = self.id().split('.')[3]
+
+#########################################
+# Basic tests
+#########################################
+ def test_basic_full_catchup(self):
+ """
+ Test 'multithreaded basebackup' mode (aka FULL catchup)
+ """
+ # preparation
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question AS SELECT 42 AS answer")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # do full catchup
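+ # --stream is required here: catchup supports only the STREAM WAL delivery mode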
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # 1st check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # run&recover catchup'ed instance
+ src_pg.stop()
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+
+ # 2nd check: run verification query
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ dst_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_full_catchup_with_tablespace(self):
+ """
+ Test tablespace transfers
+ """
+ # preparation
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True
+ )
+ src_pg.slow_start()
+ tblspace1_old_path = self.get_tblspace_path(src_pg, 'tblspace1_old')
+ self.create_tblspace_in_node(src_pg, 'tblspace1', tblspc_path = tblspace1_old_path)
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # do full catchup with tablespace mapping
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new')
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres',
+ '-p', str(src_pg.port),
+ '--stream',
+ '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path)
+ ]
+ )
+
+ # 1st check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # make changes in master tablespace
+ src_pg.safe_psql(
+ "postgres",
+ "UPDATE ultimate_question SET answer = -1")
+ src_pg.stop()
+
+ # run&recover catchup'ed instance
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+
+ # 2nd check: run verification query
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ dst_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_basic_delta_catchup(self):
+ """
+ Test delta catchup
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question(answer int)")
+
+ # preparation 2: make a cleanly shut down replica that lags behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.set_replica(src_pg, dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+ dst_pg.stop()
+
+ # preparation 3: make changes on master (source)
+ src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+ pgbench.wait()
+ src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # do delta catchup
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # 1st check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # run&recover catchup'ed instance
+ src_pg.stop()
+ self.set_replica(master = src_pg, replica = dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+
+ # 2nd check: run verification query
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ dst_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_basic_ptrack_catchup(self):
+ """
+ Test ptrack catchup
+ """
+ if not self.ptrack:
+ return unittest.skip('Skipped because ptrack support is disabled')
+
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ ptrack_enable = True,
+ initdb_params = ['--data-checksums']
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack")
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question(answer int)")
+
+ # preparation 2: make a cleanly shut down replica that lags behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.set_replica(src_pg, dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+ dst_pg.stop()
+
+ # preparation 3: make changes on master (source)
+ src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+ pgbench.wait()
+ src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # do ptrack catchup
+ self.catchup_node(
+ backup_mode = 'PTRACK',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # 1st check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # run&recover catchup'ed instance
+ src_pg.stop()
+ self.set_replica(master = src_pg, replica = dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+
+ # 2nd check: run verification query
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ dst_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_tli_delta_catchup(self):
+ """
+ Test that we correctly follow timeline change with delta catchup
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+
+ # preparation 2: destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_pg.stop()
+
+ # preparation 3: promote source
+ src_pg.stop()
+ self.set_replica(dst_pg, src_pg) # fake replication
+ src_pg.slow_start(replica = True)
+ src_pg.promote()
+ src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # do catchup
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # 1st check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # run&recover catchup'ed instance
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+
+ # 2nd check: run verification query
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ src_pg.stop()
+ dst_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_tli_ptrack_catchup(self):
+ """
+ Test that we correctly follow timeline change with ptrack catchup
+ """
+ if not self.ptrack:
+ return unittest.skip('Skipped because ptrack support is disabled')
+
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ ptrack_enable = True,
+ initdb_params = ['--data-checksums']
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack")
+
+ # preparation 2: destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_pg.stop()
+
+ # preparation 3: promote source
+ src_pg.stop()
+ self.set_replica(dst_pg, src_pg) # fake replication
+ src_pg.slow_start(replica = True)
+ src_pg.promote()
+ src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # do catchup
+ self.catchup_node(
+ backup_mode = 'PTRACK',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # 1st check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # run&recover catchup'ed instance
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+
+ # 2nd check: run verification query
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ src_pg.stop()
+ dst_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+#########################################
+# Test various corner conditions
+#########################################
+ def test_table_drop_with_delta(self):
+ """
+ Test that a table dropped in the source is also dropped in the delta catchup'ed instance
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question AS SELECT 42 AS answer")
+
+ # preparation 2: make a cleanly shut down replica that lags behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_pg.stop()
+
+ # preparation 3: make changes on master (source)
+ # perform checkpoint twice to ensure that the datafile is actually deleted from the filesystem
+ src_pg.safe_psql("postgres", "DROP TABLE ultimate_question")
+ src_pg.safe_psql("postgres", "CHECKPOINT")
+ src_pg.safe_psql("postgres", "CHECKPOINT")
+
+ # do delta catchup
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # Check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_table_drop_with_ptrack(self):
+ """
+ Test that a table dropped in the source is also dropped in the ptrack catchup'ed instance
+ """
+ if not self.ptrack:
+ return unittest.skip('Skipped because ptrack support is disabled')
+
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ ptrack_enable = True,
+ initdb_params = ['--data-checksums']
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack")
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question AS SELECT 42 AS answer")
+
+ # preparation 2: make a cleanly shut down replica that lags behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_pg.stop()
+
+ # preparation 3: make changes on master (source)
+ # perform checkpoint twice to ensure that the datafile is actually deleted from the filesystem
+ src_pg.safe_psql("postgres", "DROP TABLE ultimate_question")
+ src_pg.safe_psql("postgres", "CHECKPOINT")
+ src_pg.safe_psql("postgres", "CHECKPOINT")
+
+ # do ptrack catchup
+ self.catchup_node(
+ backup_mode = 'PTRACK',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # Check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_tablefile_truncation_with_delta(self):
+ """
+ Test that a table truncated in the source is also truncated in the delta catchup'ed instance
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE SEQUENCE t_seq; "
+ "CREATE TABLE t_heap AS SELECT i AS id, "
+ "md5(i::text) AS text, "
+ "md5(repeat(i::text, 10))::tsvector AS tsvector "
+ "FROM generate_series(0, 1024) i")
+ src_pg.safe_psql("postgres", "VACUUM t_heap")
+
+ # preparation 2: make a cleanly shut down replica that lags behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_pg.stop()
+
+ # preparation 3: make changes on master (source)
+ src_pg.safe_psql("postgres", "DELETE FROM t_heap WHERE ctid >= '(11,0)'")
+ src_pg.safe_psql("postgres", "VACUUM t_heap")
+
+ # do delta catchup
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # Check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_tablefile_truncation_with_ptrack(self):
+ """
+ Test that a table truncated in the source is also truncated in the ptrack catchup'ed instance
+ """
+ if not self.ptrack:
+ return unittest.skip('Skipped because ptrack support is disabled')
+
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ ptrack_enable = True,
+ initdb_params = ['--data-checksums']
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack")
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE SEQUENCE t_seq; "
+ "CREATE TABLE t_heap AS SELECT i AS id, "
+ "md5(i::text) AS text, "
+ "md5(repeat(i::text, 10))::tsvector AS tsvector "
+ "FROM generate_series(0, 1024) i")
+ src_pg.safe_psql("postgres", "VACUUM t_heap")
+
+ # preparation 2: make a cleanly shut down replica that lags behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_pg.stop()
+
+ # preparation 3: make changes on master (source)
+ src_pg.safe_psql("postgres", "DELETE FROM t_heap WHERE ctid >= '(11,0)'")
+ src_pg.safe_psql("postgres", "VACUUM t_heap")
+
+ # do ptrack catchup
+ self.catchup_node(
+ backup_mode = 'PTRACK',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # Check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+#########################################
+# Test reaction on user errors
+#########################################
+ def test_local_tablespace_without_mapping(self):
+ """
+ Test that we detect the absence of the required --tablespace-mapping option
+ """
+ if self.remote:
+ return unittest.skip('Skipped because this test exercises local catchup error handling')
+
+ src_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'src'))
+ src_pg.slow_start()
+
+ tblspace_path = self.get_tblspace_path(src_pg, 'tblspace')
+ self.create_tblspace_in_node(
+ src_pg, 'tblspace',
+ tblspc_path = tblspace_path)
+
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question TABLESPACE tblspace AS SELECT 42 AS answer")
+
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ try:
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres',
+ '-p', str(src_pg.port),
+ '--stream',
+ ]
+ )
+ self.assertEqual(1, 0, "Expecting Error because '-T' parameter is not specified.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Local catchup executed, but source database contains tablespace',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_running_dest_postmaster(self):
+ """
+ Test that we detect running postmaster in destination
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+
+ # preparation 2: destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ # leave running destination postmaster
+ # so don't call dst_pg.stop()
+
+ # try delta catchup
+ try:
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.assertEqual(1, 0, "Expecting Error because postmaster in destination is running.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Postmaster with pid ',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_same_db_id(self):
+ """
+ Test that we detect mismatched database system identifiers of source and destination
+ """
+ # preparation:
+ # source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True
+ )
+ src_pg.slow_start()
+ # destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_pg.stop()
+ # fake destination
+ fake_dst_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'fake_dst'))
+ # fake source
+ fake_src_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'fake_src'))
+
+ # try delta catchup (src (with correct src conn), fake_dst)
+ try:
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = fake_dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.assertEqual(1, 0, "Expecting Error because database identifiers mismatch.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Database identifiers mismatch: ',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # try delta catchup (fake_src (with wrong src conn), dst)
+ try:
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = fake_src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.assertEqual(1, 0, "Expecting Error because database identifiers mismatch.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Database identifiers mismatch: ',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_destination_dbstate(self):
+ """
+ Test that we detect that the destination instance was not shut down cleanly
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+
+ # preparation 2: destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # try #1
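+ # the destination has never been started, so the backup_label left by the FULL catchup is still present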
+ try:
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Destination directory contains "backup_label" file',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # try #2
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres")
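+ # hard-kill the postmaster so the data directory is left in a crashed (non-clean) state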
+ os.kill(dst_pg.pid, signal.SIGKILL)
+ try:
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'must be stopped cleanly',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_tli_destination_mismatch(self):
+ """
+ Test that we detect TLI mismatch in destination
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+
+ # preparation 2: destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ self.set_replica(src_pg, dst_pg)
+ dst_pg.slow_start(replica = True)
+ dst_pg.promote()
+ dst_pg.stop()
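+ # the promotion above moved the destination onto a new timeline, so a
+ # DELTA catchup from the source (still on the old timeline) must fail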
+
+ # preparation 3: "useful" changes
+ src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # try catchup
+ try:
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ dst_pg.stop()
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Source is behind destination in timeline history',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_tli_source_mismatch(self):
+ """
+ Test that we detect TLI mismatch in source history
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+
+ # preparation 2: fake source (promoted copy)
+ fake_src_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'fake_src'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = fake_src_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ fake_src_options = {}
+ fake_src_options['port'] = str(fake_src_pg.port)
+ self.set_auto_conf(fake_src_pg, fake_src_options)
+ self.set_replica(src_pg, fake_src_pg)
+ fake_src_pg.slow_start(replica = True)
+ fake_src_pg.promote()
+ self.switch_wal_segment(fake_src_pg)
+ fake_src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE t_heap AS SELECT i AS id, "
+ "md5(i::text) AS text, "
+ "md5(repeat(i::text, 10))::tsvector AS tsvector "
+ "FROM generate_series(0, 256) i")
+ self.switch_wal_segment(fake_src_pg)
+ fake_src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 'trash' AS garbage")
+
+ # preparation 3: destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_pg.stop()
+
+ # preparation 4: "useful" changes
+ src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # try catchup
+ try:
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = fake_src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(fake_src_pg.port), '--stream']
+ )
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ dst_pg.stop()
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Destination is not in source timeline history',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # Cleanup
+ src_pg.stop()
+ fake_src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py
index 2e686d46c..d820360fe 100644
--- a/tests/cfs_backup.py
+++ b/tests/cfs_backup.py
@@ -35,10 +35,9 @@ def setUp(self):
self.node.slow_start()
- if self.node.major_version >= 12:
- self.node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ self.node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(self.node, tblspace_name, cfs=True)
diff --git a/tests/compatibility.py b/tests/compatibility.py
index d0fae2528..e274c22be 100644
--- a/tests/compatibility.py
+++ b/tests/compatibility.py
@@ -304,10 +304,9 @@ def test_backward_compatibility_ptrack(self):
self.set_archiving(backup_dir, 'node', node, old_binary=True)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=10)
diff --git a/tests/delete.py b/tests/delete.py
index 8ebd7d13a..345a70284 100644
--- a/tests/delete.py
+++ b/tests/delete.py
@@ -203,10 +203,9 @@ def test_delete_increment_ptrack(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- 'postgres',
- 'CREATE EXTENSION ptrack')
+ node.safe_psql(
+ 'postgres',
+ 'CREATE EXTENSION ptrack')
# full backup mode
self.backup_node(backup_dir, 'node', node)
diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out
index 1330acb5a..560b6b592 100644
--- a/tests/expected/option_version.out
+++ b/tests/expected/option_version.out
@@ -1 +1 @@
-pg_probackup 2.4.16
\ No newline at end of file
+pg_probackup 2.5.0
diff --git a/tests/false_positive.py b/tests/false_positive.py
index d4e7ccf0d..a101f8107 100644
--- a/tests/false_positive.py
+++ b/tests/false_positive.py
@@ -107,192 +107,6 @@ def test_incremental_backup_corrupt_full_1(self):
# Clean after yourself
self.del_test_dir(module_name, fname)
- @unittest.expectedFailure
- def test_ptrack_concurrent_get_and_clear_1(self):
- """make node, make full and ptrack stream backups,"
- " restore them and check data correctness"""
-
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
- node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
- set_replication=True,
- ptrack_enable=True,
- initdb_params=['--data-checksums'])
-
- self.init_pb(backup_dir)
- self.add_instance(backup_dir, 'node', node)
- self.set_archiving(backup_dir, 'node', node)
- node.slow_start()
-
- node.safe_psql(
- "postgres",
- "create table t_heap as select i"
- " as id from generate_series(0,1) i"
- )
-
- self.backup_node(backup_dir, 'node', node, options=['--stream'])
- gdb = self.backup_node(
- backup_dir, 'node', node, backup_type='ptrack',
- options=['--stream'],
- gdb=True
- )
-
- gdb.set_breakpoint('make_pagemap_from_ptrack')
- gdb.run_until_break()
-
- node.safe_psql(
- "postgres",
- "update t_heap set id = 100500")
-
- tablespace_oid = node.safe_psql(
- "postgres",
- "select oid from pg_tablespace where spcname = 'pg_default'").rstrip()
-
- relfilenode = node.safe_psql(
- "postgres",
- "select 't_heap'::regclass::oid").rstrip()
-
- node.safe_psql(
- "postgres",
- "SELECT pg_ptrack_get_and_clear({0}, {1})".format(
- tablespace_oid, relfilenode))
-
- gdb.continue_execution_until_exit()
-
- self.backup_node(
- backup_dir, 'node', node,
- backup_type='ptrack', options=['--stream']
- )
- if self.paranoia:
- pgdata = self.pgdata_content(node.data_dir)
-
- result = node.safe_psql("postgres", "SELECT * FROM t_heap")
- node.cleanup()
- self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
-
- # Physical comparison
- if self.paranoia:
- pgdata_restored = self.pgdata_content(
- node.data_dir, ignore_ptrack=False)
- self.compare_pgdata(pgdata, pgdata_restored)
-
- node.slow_start()
- # Logical comparison
- self.assertEqual(
- result,
- node.safe_psql("postgres", "SELECT * FROM t_heap"))
-
- # Clean after yourself
- self.del_test_dir(module_name, fname)
-
- @unittest.expectedFailure
- def test_ptrack_concurrent_get_and_clear_2(self):
- """make node, make full and ptrack stream backups,"
- " restore them and check data correctness"""
-
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
- node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
- set_replication=True,
- ptrack_enable=True,
- initdb_params=['--data-checksums'])
-
- self.init_pb(backup_dir)
- self.add_instance(backup_dir, 'node', node)
- self.set_archiving(backup_dir, 'node', node)
- node.slow_start()
-
- node.safe_psql(
- "postgres",
- "create table t_heap as select i"
- " as id from generate_series(0,1) i"
- )
-
- self.backup_node(backup_dir, 'node', node, options=['--stream'])
- gdb = self.backup_node(
- backup_dir, 'node', node, backup_type='ptrack',
- options=['--stream'],
- gdb=True
- )
-
- gdb.set_breakpoint('pthread_create')
- gdb.run_until_break()
-
- node.safe_psql(
- "postgres",
- "update t_heap set id = 100500")
-
- tablespace_oid = node.safe_psql(
- "postgres",
- "select oid from pg_tablespace "
- "where spcname = 'pg_default'").rstrip()
-
- relfilenode = node.safe_psql(
- "postgres",
- "select 't_heap'::regclass::oid").rstrip()
-
- node.safe_psql(
- "postgres",
- "SELECT pg_ptrack_get_and_clear({0}, {1})".format(
- tablespace_oid, relfilenode))
-
- gdb._execute("delete breakpoints")
- gdb.continue_execution_until_exit()
-
- try:
- self.backup_node(
- backup_dir, 'node', node,
- backup_type='ptrack', options=['--stream']
- )
- # we should die here because exception is what we expect to happen
- self.assertEqual(
- 1, 0,
- "Expecting Error because of LSN mismatch from ptrack_control "
- "and previous backup ptrack_lsn.\n"
- " Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
- except ProbackupException as e:
- self.assertTrue(
- 'ERROR: LSN from ptrack_control' in e.message,
- '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
- repr(e.message), self.cmd))
-
- if self.paranoia:
- pgdata = self.pgdata_content(node.data_dir)
-
- result = node.safe_psql("postgres", "SELECT * FROM t_heap")
- node.cleanup()
- self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
-
- # Physical comparison
- if self.paranoia:
- pgdata_restored = self.pgdata_content(
- node.data_dir, ignore_ptrack=False)
- self.compare_pgdata(pgdata, pgdata_restored)
-
- node.slow_start()
- # Logical comparison
- self.assertEqual(
- result,
- node.safe_psql("postgres", "SELECT * FROM t_heap")
- )
-
- # Clean after yourself
- self.del_test_dir(module_name, fname)
-
# @unittest.skip("skip")
@unittest.expectedFailure
def test_pg_10_waldir(self):
diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py
index bf84f266e..1de004250 100644
--- a/tests/helpers/ptrack_helpers.py
+++ b/tests/helpers/ptrack_helpers.py
@@ -316,6 +316,12 @@ def __init__(self, *args, **kwargs):
os.environ["PGAPPNAME"] = "pg_probackup"
+ if self.ptrack:
+ self.assertGreaterEqual(
+ self.pg_config_version,
+ self.version_to_num('11.0'),
+ "ptrack testing require PostgreSQL >= 11")
+
@property
def pg_config_version(self):
return self.version_to_num(
@@ -339,14 +345,9 @@ def pg_config_version(self):
# print('PGPROBACKUP_SSH_USER is not set')
# exit(1)
- def make_simple_node(
+ def make_empty_node(
self,
- base_dir=None,
- set_replication=False,
- ptrack_enable=False,
- initdb_params=[],
- pg_options={}):
-
+ base_dir=None):
real_base_dir = os.path.join(self.tmp_path, base_dir)
shutil.rmtree(real_base_dir, ignore_errors=True)
os.makedirs(real_base_dir)
@@ -355,6 +356,17 @@ def make_simple_node(
# bound method slow_start() to 'node' class instance
node.slow_start = slow_start.__get__(node)
node.should_rm_dirs = True
+ return node
+
+ def make_simple_node(
+ self,
+ base_dir=None,
+ set_replication=False,
+ ptrack_enable=False,
+ initdb_params=[],
+ pg_options={}):
+
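+ # reuse make_empty_node to (re)create the base directory, then init the cluster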
+ node = self.make_empty_node(base_dir)
node.init(
initdb_params=initdb_params, allow_streaming=set_replication)
@@ -386,11 +398,8 @@ def make_simple_node(
options['max_wal_senders'] = 10
if ptrack_enable:
- if node.major_version >= 11:
- options['ptrack.map_size'] = '128'
- options['shared_preload_libraries'] = 'ptrack'
- else:
- options['ptrack_enable'] = 'on'
+ options['ptrack.map_size'] = '128'
+ options['shared_preload_libraries'] = 'ptrack'
if node.major_version >= 13:
options['wal_keep_size'] = '200MB'
@@ -410,6 +419,59 @@ def make_simple_node(
self.set_auto_conf(
node, {}, 'postgresql.conf', ['wal_keep_segments'])
return node
+
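+ # create a minimally privileged role for backups: the set of pg_catalog
+ # functions that must be granted differs per server version, see below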
+ def simple_bootstrap(self, node, role) -> None:
+
+ node.safe_psql(
+ 'postgres',
+ 'CREATE ROLE {0} WITH LOGIN REPLICATION'.format(role))
+
+ # PG 9.5 and older
+ if self.get_version(node) < 90600:
+ node.safe_psql(
+ 'postgres',
+ 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0};'.format(role))
+ # PG 9.6
+ elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
+ node.safe_psql(
+ 'postgres',
+ 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role))
+ # >= 10
+ else:
+ node.safe_psql(
+ 'postgres',
+ 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; '
+ 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role))
def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False):
res = node.execute(
@@ -567,9 +629,6 @@ def get_ptrack_bits_per_page_for_fork(self, node, file, size=[]):
return ptrack_bits_for_fork
def check_ptrack_map_sanity(self, node, idx_ptrack):
- if node.major_version >= 12:
- return
-
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
@@ -983,6 +1042,28 @@ def restore_node(
return self.run_pb(cmd_list + options, gdb=gdb, old_binary=old_binary)
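+ # helper for the new 'catchup' command: assembles and runs
+ # 'catchup --backup-mode=... --source-pgdata=... --destination-pgdata=...';
+ # tests typically run a FULL catchup into an empty node and then a
+ # DELTA catchup into the same data directory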
+ def catchup_node(
+ self,
+ backup_mode, source_pgdata, destination_node,
+ options = []
+ ):
+
+ cmd_list = [
+ 'catchup',
+ '--backup-mode={0}'.format(backup_mode),
+ '--source-pgdata={0}'.format(source_pgdata),
+ '--destination-pgdata={0}'.format(destination_node.data_dir)
+ ]
+ if self.remote:
+ cmd_list += ['--remote-proto=ssh', '--remote-host=localhost']
+ if self.verbose:
+ cmd_list += [
+ '--log-level-file=VERBOSE',
+ '--log-directory={0}'.format(destination_node.logs_dir)
+ ]
+
+ return self.run_pb(cmd_list + options)
+
def show_pb(
self, backup_dir, instance=None, backup_id=None,
options=[], as_text=False, as_json=True, old_binary=False,
@@ -1683,10 +1764,10 @@ def compare_pgdata(self, original_pgdata, restored_pgdata):
):
fail = True
error_message += '\nFile permissions mismatch:\n'
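+ # {1:o} renders st_mode in octal (e.g. 100600), the usual
+ # notation for file permissions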
- error_message += ' File_old: {0} Permissions: {1}\n'.format(
+ error_message += ' File_old: {0} Permissions: {1:o}\n'.format(
os.path.join(original_pgdata['pgdata'], file),
original_pgdata['files'][file]['mode'])
- error_message += ' File_new: {0} Permissions: {1}\n'.format(
+ error_message += ' File_new: {0} Permissions: {1:o}\n'.format(
os.path.join(restored_pgdata['pgdata'], file),
restored_pgdata['files'][file]['mode'])
diff --git a/tests/merge.py b/tests/merge.py
index 668691fc8..fe0927f49 100644
--- a/tests/merge.py
+++ b/tests/merge.py
@@ -811,10 +811,9 @@ def test_merge_ptrack_truncate(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
diff --git a/tests/ptrack.py b/tests/ptrack.py
index aa0bbadc1..bcc8dc20a 100644
--- a/tests/ptrack.py
+++ b/tests/ptrack.py
@@ -14,6 +14,140 @@
class PtrackTest(ProbackupTest, unittest.TestCase):
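+ # PTRACK 1.x code paths were dropped from this module: every test now
+ # assumes the ptrack 2.x extension, hence the PostgreSQL >= 11 guard in setUp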
+ def setUp(self):
+ if self.pg_config_version < self.version_to_num('11.0'):
+ self.skipTest('You need PostgreSQL >= 11 for this test')
+ self.fname = self.id().split('.')[3]
+
+ # @unittest.skip("skip")
+ def test_drop_rel_during_backup_ptrack(self):
+ """
+ drop relation during ptrack backup
+ """
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, self.fname, 'node'),
+ set_replication=True,
+ ptrack_enable=self.ptrack,
+ initdb_params=['--data-checksums'])
+
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ self.set_archiving(backup_dir, 'node', node)
+ node.slow_start()
+
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
+
+ node.safe_psql(
+ "postgres",
+ "create table t_heap as select i"
+ " as id from generate_series(0,100) i")
+
+ relative_path = node.safe_psql(
+ "postgres",
+ "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()
+
+ absolute_path = os.path.join(node.data_dir, relative_path)
+
+ # FULL backup
+ self.backup_node(backup_dir, 'node', node, options=['--stream'])
+
+ # PTRACK backup
+ gdb = self.backup_node(
+ backup_dir, 'node', node, backup_type='ptrack',
+ gdb=True, options=['--log-level-file=LOG'])
+
+ gdb.set_breakpoint('backup_files')
+ gdb.run_until_break()
+
+ # REMOVE file
+ os.remove(absolute_path)
+
+ # File removed, we can proceed with backup
+ gdb.continue_execution_until_exit()
+
+ pgdata = self.pgdata_content(node.data_dir)
+
+ with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
+ log_content = f.read()
+ self.assertTrue(
+ 'LOG: File not found: "{0}"'.format(absolute_path) in log_content,
+ 'File "{0}" should be deleted but it`s not'.format(absolute_path))
+
+ node.cleanup()
+ self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
+
+ # Physical comparison
+ pgdata_restored = self.pgdata_content(node.data_dir)
+ self.compare_pgdata(pgdata, pgdata_restored)
+
+ # Clean after yourself
+ self.del_test_dir(module_name, self.fname)
+
+ # @unittest.skip("skip")
+ def test_ptrack_without_full(self):
+ """ptrack backup without validated full backup"""
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, self.fname, 'node'),
+ initdb_params=['--data-checksums'],
+ ptrack_enable=True)
+
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ self.set_archiving(backup_dir, 'node', node)
+ node.slow_start()
+
+ try:
+ self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
+ # we should die here because exception is what we expect to happen
+ self.assertEqual(
+ 1, 0,
+ "Expecting Error because page backup should not be possible "
+ "without valid full backup.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertTrue(
+ "WARNING: Valid full backup on current timeline 1 is not found" in e.message and
+ "ERROR: Create new full backup before an incremental one" in e.message,
+ "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+ repr(e.message), self.cmd))
+
+ self.assertEqual(
+ self.show_pb(backup_dir, 'node')[0]['status'],
+ "ERROR")
+
+ # Clean after yourself
+ self.del_test_dir(module_name, self.fname)
+
+ # @unittest.skip("skip")
+ def test_ptrack_threads(self):
+ """ptrack multi thread backup mode"""
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, self.fname, 'node'),
+ initdb_params=['--data-checksums'],
+ ptrack_enable=True)
+
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ self.set_archiving(backup_dir, 'node', node)
+ node.slow_start()
+
+ self.backup_node(
+ backup_dir, 'node', node,
+ backup_type="full", options=["-j", "4"])
+ self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
+
+ self.backup_node(
+ backup_dir, 'node', node,
+ backup_type="ptrack", options=["-j", "4"])
+ self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
+
+ # Clean after yourself
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_stop_pg(self):
@@ -22,10 +156,9 @@ def test_ptrack_stop_pg(self):
restart node, check that ptrack backup
can be taken
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -34,10 +167,9 @@ def test_ptrack_stop_pg(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=1)
@@ -52,7 +184,7 @@ def test_ptrack_stop_pg(self):
backup_type='ptrack', options=['--stream'])
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_multi_timeline_backup(self):
@@ -60,10 +192,9 @@ def test_ptrack_multi_timeline_backup(self):
t2 /------P2
t1 ------F---*-----P1
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -73,10 +204,9 @@ def test_ptrack_multi_timeline_backup(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=5)
@@ -130,7 +260,7 @@ def test_ptrack_multi_timeline_backup(self):
self.assertEqual('0', balance)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_multi_timeline_backup_1(self):
@@ -142,10 +272,9 @@ def test_ptrack_multi_timeline_backup_1(self):
t2 /------P2
t1 ---F--------*
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -155,10 +284,9 @@ def test_ptrack_multi_timeline_backup_1(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=5)
@@ -206,17 +334,16 @@ def test_ptrack_multi_timeline_backup_1(self):
self.assertEqual('0', balance)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_eat_my_data(self):
"""
PGPRO-4051
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -226,17 +353,16 @@ def test_ptrack_eat_my_data(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=50)
self.backup_node(backup_dir, 'node', node)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum'])
@@ -287,16 +413,15 @@ def test_ptrack_eat_my_data(self):
'Data loss')
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_simple(self):
"""make node, make full and ptrack stream backups,"
" restore them and check data correctness"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -305,10 +430,9 @@ def test_ptrack_simple(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.backup_node(backup_dir, 'node', node, options=['--stream'])
@@ -335,7 +459,7 @@ def test_ptrack_simple(self):
result = node.safe_psql("postgres", "SELECT * FROM t_heap")
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -358,15 +482,14 @@ def test_ptrack_simple(self):
node_restored.safe_psql("postgres", "SELECT * FROM t_heap"))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_unprivileged(self):
""""""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -483,32 +606,15 @@ def test_ptrack_unprivileged(self):
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
)
- if node.major_version < 11:
- fnames = [
- 'pg_catalog.oideq(oid, oid)',
- 'pg_catalog.ptrack_version()',
- 'pg_catalog.pg_ptrack_clear()',
- 'pg_catalog.pg_ptrack_control_lsn()',
- 'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)',
- 'pg_catalog.pg_ptrack_get_and_clear(oid, oid)',
- 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)'
- ]
-
- for fname in fnames:
- node.safe_psql(
- "backupdb",
- "GRANT EXECUTE ON FUNCTION {0} TO backup".format(fname))
-
- else:
- node.safe_psql(
- "backupdb",
- "CREATE SCHEMA ptrack")
- node.safe_psql(
- "backupdb",
- "CREATE EXTENSION ptrack WITH SCHEMA ptrack")
- node.safe_psql(
- "backupdb",
- "GRANT USAGE ON SCHEMA ptrack TO backup")
+ node.safe_psql(
+ "backupdb",
+ "CREATE SCHEMA ptrack")
+ node.safe_psql(
+ "backupdb",
+ "CREATE EXTENSION ptrack WITH SCHEMA ptrack")
+ node.safe_psql(
+ "backupdb",
+ "GRANT USAGE ON SCHEMA ptrack TO backup")
node.safe_psql(
"backupdb",
@@ -536,10 +642,9 @@ def test_ptrack_unprivileged(self):
# @unittest.expectedFailure
def test_ptrack_enable(self):
"""make ptrack without full backup, should result in error"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s',
@@ -549,10 +654,9 @@ def test_ptrack_enable(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# PTRACK BACKUP
try:
@@ -577,7 +681,7 @@ def test_ptrack_enable(self):
)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
@@ -587,10 +691,9 @@ def test_ptrack_disable(self):
enable ptrack, restart postgresql, take ptrack backup
which should fail
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -600,28 +703,21 @@ def test_ptrack_disable(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
self.backup_node(backup_dir, 'node', node, options=['--stream'])
# DISABLE PTRACK
- if node.major_version >= 11:
- node.safe_psql('postgres', "alter system set ptrack.map_size to 0")
- else:
- node.safe_psql('postgres', "alter system set ptrack_enable to off")
+ node.safe_psql('postgres', "alter system set ptrack.map_size to 0")
node.stop()
node.slow_start()
# ENABLE PTRACK
- if node.major_version >= 11:
- node.safe_psql('postgres', "alter system set ptrack.map_size to '128'")
- node.safe_psql('postgres', "alter system set shared_preload_libraries to 'ptrack'")
- else:
- node.safe_psql('postgres', "alter system set ptrack_enable to on")
+ node.safe_psql('postgres', "alter system set ptrack.map_size to '128'")
+ node.safe_psql('postgres', "alter system set shared_preload_libraries to 'ptrack'")
node.stop()
node.slow_start()
@@ -650,15 +746,14 @@ def test_ptrack_disable(self):
)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_uncommitted_xact(self):
"""make ptrack backup while there is uncommitted open transaction"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -669,10 +764,9 @@ def test_ptrack_uncommitted_xact(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.backup_node(backup_dir, 'node', node, options=['--stream'])
@@ -689,7 +783,7 @@ def test_ptrack_uncommitted_xact(self):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -710,16 +804,15 @@ def test_ptrack_uncommitted_xact(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_full(self):
"""make node, make full and ptrack stream backups,
restore them and check data correctness"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -730,10 +823,9 @@ def test_ptrack_vacuum_full(self):
self.create_tblspace_in_node(node, 'somedata')
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.backup_node(backup_dir, 'node', node, options=['--stream'])
@@ -773,7 +865,7 @@ def test_ptrack_vacuum_full(self):
process.join()
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
old_tablespace = self.get_tblspace_path(node, 'somedata')
@@ -797,7 +889,7 @@ def test_ptrack_vacuum_full(self):
node_restored.slow_start()
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_truncate(self):
@@ -805,10 +897,9 @@ def test_ptrack_vacuum_truncate(self):
delete last 3 pages, vacuum relation,
take ptrack backup, take second ptrack backup,
restore last ptrack backup and check data correctness"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -819,10 +910,9 @@ def test_ptrack_vacuum_truncate(self):
self.create_tblspace_in_node(node, 'somedata')
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.safe_psql(
"postgres",
@@ -856,7 +946,7 @@ def test_ptrack_vacuum_truncate(self):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
old_tablespace = self.get_tblspace_path(node, 'somedata')
@@ -882,16 +972,17 @@ def test_ptrack_vacuum_truncate(self):
node_restored.slow_start()
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_get_block(self):
- """make node, make full and ptrack stream backups,"
- " restore them and check data correctness"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ """
+ make node, make full and ptrack stream backups,
+ restore them and check data correctness
+ """
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -900,11 +991,9 @@ def test_ptrack_get_block(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- self.skipTest("skip --- we do not need ptrack_get_block for ptrack 2.*")
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.safe_psql(
"postgres",
@@ -917,10 +1006,7 @@ def test_ptrack_get_block(self):
options=['--stream'],
gdb=True)
- if node.major_version > 11:
- gdb.set_breakpoint('make_pagemap_from_ptrack_2')
- else:
- gdb.set_breakpoint('make_pagemap_from_ptrack_1')
+ gdb.set_breakpoint('make_pagemap_from_ptrack_2')
gdb.run_until_break()
node.safe_psql(
@@ -950,21 +1036,18 @@ def test_ptrack_get_block(self):
# Logical comparison
self.assertEqual(
result,
- node.safe_psql("postgres", "SELECT * FROM t_heap")
- )
+ node.safe_psql("postgres", "SELECT * FROM t_heap"))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_stream(self):
"""make node, make full and ptrack stream backups,
restore them and check data correctness"""
- self.maxDiff = None
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -975,10 +1058,9 @@ def test_ptrack_stream(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
node.safe_psql("postgres", "create sequence t_seq")
@@ -1045,17 +1127,15 @@ def test_ptrack_stream(self):
self.assertEqual(ptrack_result, ptrack_result_new)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_archive(self):
"""make archive node, make full and ptrack backups,
check data correctness in restored instance"""
- self.maxDiff = None
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1067,10 +1147,9 @@ def test_ptrack_archive(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
node.safe_psql(
@@ -1158,20 +1237,18 @@ def test_ptrack_archive(self):
node.cleanup()
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
+ @unittest.skip("skip")
def test_ptrack_pgpro417(self):
- """Make node, take full backup, take ptrack backup,
- delete ptrack backup. Try to take ptrack backup,
- which should fail. Actual only for PTRACK 1.x"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ """
+ Make node, take full backup, take ptrack backup,
+ delete ptrack backup. Try to take ptrack backup,
+ which should fail. Actual only for PTRACK 1.x
+ """
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1238,22 +1315,18 @@ def test_ptrack_pgpro417(self):
repr(e.message), self.cmd))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
+ @unittest.skip("skip")
def test_page_pgpro417(self):
"""
Make archive node, take full backup, take page backup,
delete page backup. Try to take ptrack backup, which should fail.
Actual only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1308,22 +1381,18 @@ def test_page_pgpro417(self):
repr(e.message), self.cmd))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
+ @unittest.skip("skip")
def test_full_pgpro417(self):
"""
Make node, take two full backups, delete full second backup.
Try to take ptrack backup, which should fail.
Relevant only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1384,7 +1453,7 @@ def test_full_pgpro417(self):
repr(e.message), self.cmd))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_create_db(self):
@@ -1392,10 +1461,9 @@ def test_create_db(self):
Make node, take full backup, create database db1, take ptrack backup,
restore database and check it presense
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1406,10 +1474,9 @@ def test_create_db(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
node.safe_psql(
@@ -1439,7 +1506,7 @@ def test_create_db(self):
# RESTORE
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -1501,7 +1568,7 @@ def test_create_db(self):
repr(e.message), self.cmd))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_create_db_on_replica(self):
@@ -1511,10 +1578,9 @@ def test_create_db_on_replica(self):
create database db1, take ptrack backup from replica,
restore database and check it presense
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1525,10 +1591,9 @@ def test_create_db_on_replica(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
node.safe_psql(
@@ -1537,7 +1602,7 @@ def test_create_db_on_replica(self):
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.backup_node(
@@ -1590,7 +1655,7 @@ def test_create_db_on_replica(self):
# RESTORE
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -1604,16 +1669,15 @@ def test_create_db_on_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_alter_table_set_tablespace_ptrack(self):
"""Make node, create tablespace with table, take full backup,
alter tablespace location, take ptrack backup, restore database."""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1624,10 +1688,9 @@ def test_alter_table_set_tablespace_ptrack(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
self.create_tblspace_in_node(node, 'somedata')
@@ -1661,7 +1724,7 @@ def test_alter_table_set_tablespace_ptrack(self):
# RESTORE
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -1696,17 +1759,16 @@ def test_alter_table_set_tablespace_ptrack(self):
# self.assertEqual(result, result_new, 'lost some data after restore')
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_alter_database_set_tablespace_ptrack(self):
"""Make node, create tablespace with database,"
" take full backup, alter tablespace location,"
" take ptrack backup, restore database."""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1717,10 +1779,9 @@ def test_alter_database_set_tablespace_ptrack(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
self.backup_node(backup_dir, 'node', node, options=["--stream"])
@@ -1744,7 +1805,7 @@ def test_alter_database_set_tablespace_ptrack(self):
# RESTORE
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
backup_dir, 'node',
@@ -1766,7 +1827,7 @@ def test_alter_database_set_tablespace_ptrack(self):
node_restored.slow_start()
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_drop_tablespace(self):
@@ -1774,10 +1835,9 @@ def test_drop_tablespace(self):
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1788,10 +1848,9 @@ def test_drop_tablespace(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -1862,7 +1921,7 @@ def test_drop_tablespace(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_alter_tablespace(self):
@@ -1870,10 +1929,9 @@ def test_ptrack_alter_tablespace(self):
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1884,10 +1942,9 @@ def test_ptrack_alter_tablespace(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
tblspc_path = self.get_tblspace_path(node, 'somedata')
@@ -1920,7 +1977,7 @@ def test_ptrack_alter_tablespace(self):
# Restore ptrack backup
restored_node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'restored_node'))
+ base_dir=os.path.join(module_name, self.fname, 'restored_node'))
restored_node.cleanup()
tblspc_path_new = self.get_tblspace_path(
restored_node, 'somedata_restored')
@@ -1979,7 +2036,7 @@ def test_ptrack_alter_tablespace(self):
self.assertEqual(result, result_new)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_multiple_segments(self):
@@ -1987,10 +2044,9 @@ def test_ptrack_multiple_segments(self):
Make node, create table, alter table tablespace,
take ptrack backup, move table from tablespace, take ptrack backup
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -2001,10 +2057,9 @@ def test_ptrack_multiple_segments(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -2056,7 +2111,7 @@ def test_ptrack_multiple_segments(self):
# RESTORE NODE
restored_node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'restored_node'))
+ base_dir=os.path.join(module_name, self.fname, 'restored_node'))
restored_node.cleanup()
tblspc_path = self.get_tblspace_path(node, 'somedata')
tblspc_path_new = self.get_tblspace_path(
@@ -2090,28 +2145,23 @@ def test_ptrack_multiple_segments(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
- # @unittest.expectedFailure
+ @unittest.skip("skip")
def test_atexit_fail(self):
"""
Take backups of every available types and check that PTRACK is clean.
Relevant only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
pg_options={
'max_connections': '15'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -2147,26 +2197,22 @@ def test_atexit_fail(self):
"f")
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
+ @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
"""
Take backups of every available types and check that PTRACK is clean
Relevant only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -2259,29 +2305,24 @@ def test_ptrack_clean(self):
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
- # @unittest.expectedFailure
+ @unittest.skip("skip")
def test_ptrack_clean_replica(self):
"""
Take backups of every available types from
master and check that PTRACK on replica is clean.
Relevant only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
pg_options={
'archive_timeout': '30s'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -2289,7 +2330,7 @@ def test_ptrack_clean_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -2402,26 +2443,24 @@ def test_ptrack_clean_replica(self):
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_cluster_on_btree(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -2467,26 +2506,24 @@ def test_ptrack_cluster_on_btree(self):
self.check_ptrack_map_sanity(node, idx_ptrack)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# Create table and indexes
node.safe_psql(
@@ -2540,18 +2577,17 @@ def test_ptrack_cluster_on_gist(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_btree_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -2564,7 +2600,7 @@ def test_ptrack_cluster_on_btree_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -2628,7 +2664,7 @@ def test_ptrack_cluster_on_btree_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'))
+ base_dir=os.path.join(module_name, self.fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', node)
@@ -2637,17 +2673,16 @@ def test_ptrack_cluster_on_btree_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True)
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -2660,7 +2695,7 @@ def test_ptrack_cluster_on_gist_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -2730,7 +2765,7 @@ def test_ptrack_cluster_on_gist_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'))
+ base_dir=os.path.join(module_name, self.fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', node)
@@ -2740,28 +2775,26 @@ def test_ptrack_cluster_on_gist_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_empty(self):
"""Take backups of every available types and check that PTRACK is clean"""
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -2792,7 +2825,7 @@ def test_ptrack_empty(self):
node.safe_psql('postgres', 'checkpoint')
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
tblspace1 = self.get_tblspace_path(node, 'somedata')
@@ -2818,7 +2851,7 @@ def test_ptrack_empty(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
@@ -2827,14 +2860,13 @@ def test_ptrack_empty_replica(self):
Take backups of all available types from master
and check that PTRACK on replica is clean
"""
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
ptrack_enable=True)
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -2847,7 +2879,7 @@ def test_ptrack_empty_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -2903,7 +2935,7 @@ def test_ptrack_empty_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -2915,27 +2947,25 @@ def test_ptrack_empty_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_truncate(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -2998,13 +3028,12 @@ def test_ptrack_truncate(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_basic_ptrack_truncate_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -3013,7 +3042,7 @@ def test_basic_ptrack_truncate_replica(self):
'archive_timeout': '10s',
'checkpoint_timeout': '5min'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3026,7 +3055,7 @@ def test_basic_ptrack_truncate_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3108,7 +3137,7 @@ def test_basic_ptrack_truncate_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'))
+ base_dir=os.path.join(module_name, self.fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir)
@@ -3127,27 +3156,25 @@ def test_basic_ptrack_truncate_replica(self):
'select 1')
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -3215,20 +3242,19 @@ def test_ptrack_vacuum(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3241,7 +3267,7 @@ def test_ptrack_vacuum_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3314,7 +3340,7 @@ def test_ptrack_vacuum_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'))
+ base_dir=os.path.join(module_name, self.fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir)
@@ -3323,27 +3349,25 @@ def test_ptrack_vacuum_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_bits_frozen(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -3403,18 +3427,17 @@ def test_ptrack_vacuum_bits_frozen(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_bits_frozen_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3427,7 +3450,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3502,27 +3525,25 @@ def test_ptrack_vacuum_bits_frozen_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_bits_visibility(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -3582,26 +3603,24 @@ def test_ptrack_vacuum_bits_visibility(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True)
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
self.create_tblspace_in_node(node, 'somedata')
@@ -3661,19 +3680,18 @@ def test_ptrack_vacuum_full(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3685,7 +3703,7 @@ def test_ptrack_vacuum_full_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3763,27 +3781,25 @@ def test_ptrack_vacuum_full_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# Create table and indexes
res = node.safe_psql(
@@ -3832,7 +3848,7 @@ def test_ptrack_vacuum_truncate(self):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(backup_dir, 'node', node_restored)
@@ -3841,19 +3857,18 @@ def test_ptrack_vacuum_truncate(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3866,7 +3881,7 @@ def test_ptrack_vacuum_truncate_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3938,7 +3953,7 @@ def test_ptrack_vacuum_truncate_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(backup_dir, 'replica', node_restored)
@@ -3947,22 +3962,21 @@ def test_ptrack_vacuum_truncate_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
- # @unittest.expectedFailure
+ @unittest.skip("skip")
def test_ptrack_recovery(self):
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
+ """
+ Check that the ptrack map contains correct bits after recovery.
+ Relevant only for PTRACK 1.x
+ """
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -4009,17 +4023,13 @@ def test_ptrack_recovery(self):
self.check_ptrack_recovery(idx_ptrack[i])
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_recovery_1(self):
- if self.pg_config_version < self.version_to_num('12.0'):
- return unittest.skip('You need PostgreSQL >= 12 for this test')
-
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -4027,7 +4037,7 @@ def test_ptrack_recovery_1(self):
'shared_buffers': '512MB',
'max_wal_size': '3GB'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -4067,9 +4077,9 @@ def test_ptrack_recovery_1(self):
'postgres',
"create extension pg_buffercache")
- print(node.safe_psql(
- 'postgres',
- "SELECT count(*) FROM pg_buffercache WHERE isdirty"))
+ #print(node.safe_psql(
+ # 'postgres',
+ # "SELECT count(*) FROM pg_buffercache WHERE isdirty"))
if self.verbose:
print('Killing postmaster. Losing Ptrack changes')
@@ -4088,7 +4098,7 @@ def test_ptrack_recovery_1(self):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -4098,27 +4108,25 @@ def test_ptrack_recovery_1(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_zero_changes(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# Create table
node.safe_psql(
@@ -4144,14 +4152,13 @@ def test_ptrack_zero_changes(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_pg_resetxlog(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -4159,15 +4166,14 @@ def test_ptrack_pg_resetxlog(self):
'shared_buffers': '512MB',
'max_wal_size': '3GB'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# Create table
node.safe_psql(
@@ -4259,7 +4265,7 @@ def test_ptrack_pg_resetxlog(self):
# pgdata = self.pgdata_content(node.data_dir)
#
# node_restored = self.make_simple_node(
-# base_dir=os.path.join(module_name, fname, 'node_restored'))
+# base_dir=os.path.join(module_name, self.fname, 'node_restored'))
# node_restored.cleanup()
#
# self.restore_node(
@@ -4269,31 +4275,25 @@ def test_ptrack_pg_resetxlog(self):
# self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_corrupt_ptrack_map(self):
-
- if self.pg_config_version < self.version_to_num('12.0'):
- return unittest.skip('You need PostgreSQL >= 12 for this test')
-
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# Create table
node.safe_psql(
@@ -4388,11 +4388,8 @@ def test_corrupt_ptrack_map(self):
node.stop(['-m', 'immediate', '-D', node.data_dir])
self.set_auto_conf(node, {'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'})
-
node.slow_start()
- sleep(1)
-
try:
self.backup_node(
backup_dir, 'node', node,
@@ -4410,8 +4407,6 @@ def test_corrupt_ptrack_map(self):
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(e.message), self.cmd))
- sleep(1)
-
self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--stream'])
@@ -4435,4 +4430,73 @@ def test_corrupt_ptrack_map(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
+
+ # @unittest.skip("skip")
+ def test_horizon_lsn_ptrack(self):
+ """
+ https://github.com/postgrespro/pg_probackup/pull/386
+ """
+ self.assertLessEqual(
+ self.version_to_num(self.old_probackup_version),
+ self.version_to_num('2.4.15'),
+ 'You need pg_probackup old_binary <= 2.4.15 for this test')
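+ # a FULL backup taken with the old binary lets the PTRACK backup below
+ # exercise the horizon-LSN handling from PR 386 (an assumption based on
+ # the PR referenced in the docstring)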
+
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, self.fname, 'node'),
+ set_replication=True,
+ ptrack_enable=True,
+ initdb_params=['--data-checksums'])
+
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ node.slow_start()
+
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
+
+ # TODO: ptrack version must be 2.1
+ ptrack_version = node.safe_psql(
+ "postgres",
+ "SELECT extversion "
+ "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'").decode('utf-8').rstrip()
+
+ self.assertEqual(
+ ptrack_version,
+ "2.1",
+ "You need ptrack 2.1 for this test")
+
+ # set map_size to a minimal value
+ self.set_auto_conf(node, {'ptrack.map_size': '1'})
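+ # ptrack.map_size is specified in MB, so a 1 MB map is quickly
+ # saturated by the scale-100 pgbench load below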
+ node.restart()
+
+ node.pgbench_init(scale=100)
+
+ # FULL backup
+ full_id = self.backup_node(backup_dir, 'node', node, options=['--stream'], old_binary=True)
+
+ # enable archiving so that WAL size does not interfere with the data-bytes comparison later
+ self.set_archiving(backup_dir, 'node', node)
+ node.restart()
+
+ # change data
+ pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
+ pgbench.wait()
+
+ # DELTA serves as the reference backup
+ delta_id = self.backup_node(
+ backup_dir, 'node', node, backup_type='delta')
+ delta_bytes = self.show_pb(backup_dir, 'node', backup_id=delta_id)["data-bytes"]
+ self.delete_pb(backup_dir, 'node', backup_id=delta_id)
+
+ # PTRACK with current binary
+ ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
+ ptrack_bytes = self.show_pb(backup_dir, 'node', backup_id=ptrack_id)["data-bytes"]
+
+ # make sure that backup size is exactly the same
+ self.assertEqual(delta_bytes, ptrack_bytes)
+
+ # Clean after yourself
+ self.del_test_dir(module_name, self.fname)
diff --git a/tests/replica.py b/tests/replica.py
index d59b11dbf..2cd32e70c 100644
--- a/tests/replica.py
+++ b/tests/replica.py
@@ -149,7 +149,7 @@ def test_replica_stream_ptrack_backup(self):
# to original data
master.psql(
"postgres",
- "insert into t_heap as select i as id, md5(i::text) as text, "
+ "insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,512) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -185,7 +185,7 @@ def test_replica_stream_ptrack_backup(self):
# to original data
master.psql(
"postgres",
- "insert into t_heap as select i as id, md5(i::text) as text, "
+ "insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")
@@ -279,7 +279,7 @@ def test_replica_archive_page_backup(self):
# equal to original data
master.psql(
"postgres",
- "insert into t_heap as select i as id, md5(i::text) as text, "
+ "insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,25120) i")
diff --git a/tests/restore.py b/tests/restore.py
index 8ccffa44c..a76272b12 100644
--- a/tests/restore.py
+++ b/tests/restore.py
@@ -512,10 +512,9 @@ def test_restore_full_ptrack_archive(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=2)
@@ -567,10 +566,9 @@ def test_restore_ptrack(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=2)
@@ -630,10 +628,9 @@ def test_restore_full_ptrack_stream(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=2)
@@ -689,10 +686,9 @@ def test_restore_full_ptrack_under_load(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.pgbench_init(scale=2)
@@ -759,10 +755,9 @@ def test_restore_full_under_load_ptrack(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 12:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# wal_segment_size = self.guc_wal_segment_size(node)
node.pgbench_init(scale=2)
@@ -3298,32 +3293,15 @@ def test_missing_database_map(self):
)
if self.ptrack:
- fnames = []
- if node.major_version < 12:
- fnames += [
- 'pg_catalog.oideq(oid, oid)',
- 'pg_catalog.ptrack_version()',
- 'pg_catalog.pg_ptrack_clear()',
- 'pg_catalog.pg_ptrack_control_lsn()',
- 'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)',
- 'pg_catalog.pg_ptrack_get_and_clear(oid, oid)',
- 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)'
- ]
- else:
- # TODO why backup works without these grants ?
-# fnames += [
-# 'pg_ptrack_get_pagemapset(pg_lsn)',
-# 'pg_ptrack_control_lsn()',
-# 'pg_ptrack_get_block(oid, oid, oid, bigint)'
-# ]
- node.safe_psql(
- "backupdb",
- "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")
-
- for fname in fnames:
- node.safe_psql(
- "backupdb",
- "GRANT EXECUTE ON FUNCTION {0} TO backup".format(fname))
+ # TODO: why does backup work without EXECUTE grants on
+ # 'pg_ptrack_get_pagemapset(pg_lsn)' and 'pg_ptrack_control_lsn()'?
+ # Presumably because they are executable by PUBLIC.
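+ # installing the extension in a dedicated schema means the backup role
+ # only needs USAGE on that schema instead of per-function grants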
+ node.safe_psql(
+ "backupdb",
+ "CREATE SCHEMA ptrack; "
+ "GRANT USAGE ON SCHEMA ptrack TO backup; "
+ "CREATE EXTENSION ptrack WITH SCHEMA ptrack")
if ProbackupTest.enterprise:
node.safe_psql(
diff --git a/tests/show.py b/tests/show.py
index 2a13a768b..e1fb9c0dc 100644
--- a/tests/show.py
+++ b/tests/show.py
@@ -535,3 +535,39 @@ def test_corrupt_correctness_2(self):
# Clean after yourself
self.del_test_dir(module_name, fname)
+
+ # @unittest.skip("skip")
+ # @unittest.expectedFailure
+ def test_color_with_no_terminal(self):
+ """backup.control contains invalid option"""
+ fname = self.id().split('.')[3]
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ initdb_params=['--data-checksums'],
+ pg_options={'autovacuum': 'off'})
+
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ node.slow_start()
+
+ node.pgbench_init(scale=1)
+
+ # FULL
+ try:
+ self.backup_node(
+ backup_dir, 'node', node, options=['--archive-timeout=1s'])
+ # we should die here because an exception is what we expect to happen
+ self.assertEqual(
+ 1, 0,
+ "Expecting Error because archiving is disabled\n "
+ "Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
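+ # '[0m' is the tail of the ANSI reset sequence (ESC[0m), which must not
+ # appear when the output stream is not a terminal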
+ self.assertNotIn(
+ '[0m', e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+ repr(e.message), self.cmd))
+
+ # Clean after yourself
+ self.del_test_dir(module_name, fname)