diff --git a/.travis.yml b/.travis.yml
index b6b8fd217..873dd8f20 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -26,24 +26,26 @@ notifications:
# Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON
env:
- - PG_VERSION=14 PG_BRANCH=REL_14_STABLE
- - PG_VERSION=13 PG_BRANCH=REL_13_STABLE
- - PG_VERSION=12 PG_BRANCH=REL_12_STABLE
- - PG_VERSION=11 PG_BRANCH=REL_11_STABLE
+ - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_VERSION=13
+ - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_VERSION=13
+ - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13
+ - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_VERSION=12
+ - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_VERSION=11
- PG_VERSION=10 PG_BRANCH=REL_10_STABLE
- PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE
- PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=archive
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=backup
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=compression
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=delta
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=locking
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=merge
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=page
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=replica
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=retention
-# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE MODE=restore
- - PG_VERSION=15 PG_BRANCH=master
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=archive
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=backup
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=catchup
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=compression
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=delta
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=locking
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=merge
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=page
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=ptrack
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=replica
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=off MODE=retention
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_VERSION=13 MODE=restore
jobs:
allow_failures:
@@ -54,3 +56,4 @@ jobs:
#branches:
# only:
# - master
+
diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml
index f7814c2d2..7178cb14c 100644
--- a/doc/pgprobackup.xml
+++ b/doc/pgprobackup.xml
@@ -3409,16 +3409,29 @@ pg_probackup delete -B backup_dir --instance
- Cloning PostgreSQL Instance
+ Cloning and Synchronizing PostgreSQL Instance
pg_probackup can create a copy of a PostgreSQL
- instance directly, without using the backup catalog. This allows you
- to add a new standby server in a parallel mode or to have a standby
- server that has fallen behind catch up
with master.
+ instance directly, without using the backup catalog. To do this, you can run the command.
+ It can be useful in the following cases:
+
+
+ To add a new standby server.
+ Usually, pg_basebackup
+ is used to create a copy of a PostgreSQL instance. If the data directory of the destination instance
+ is empty, the catchup command works similarly, but it can be faster if run in parallel mode.
+
+
+ To have a fallen-behind standby server catch up
with master.
+ Under high write load, replicas may fail to replay WAL fast enough to keep up with master and hence may lag behind.
+ A usual solution to create a new replica and switch to it requires a lot of extra space and data transfer. The catchup
+ command allows you to update an existing replica much faster by bringing differences from master.
+
+
- Cloning a PostgreSQL instance is different from other pg_probackup
+ catchup is different from other pg_probackup
operations:
@@ -3439,12 +3452,12 @@ pg_probackup delete -B backup_dir --instance
- No SQL commands involving tablespaces, such as
+ DDL commands
CREATE TABLESPACE/DROP TABLESPACE,
- can be run simultaneously with catchup.
+ >DROP TABLESPACE
+ cannot be run simultaneously with catchup.
@@ -3452,14 +3465,16 @@ pg_probackup delete -B backup_dir --instance catchup takes configuration files, such as
postgresql.conf, postgresql.auto.conf,
or pg_hba.conf, from the source server and overwrites them
- on the target server.
+ on the target server. The option allows you to keep
+ the configuration files intact.
- Before cloning a PostgreSQL instance, set up the source database server as follows:
+ To prepare for cloning/synchronizing a PostgreSQL instance,
+ set up the source instance server as follows:
@@ -3481,9 +3496,10 @@ pg_probackup delete -B backup_dir --instance
- To clone a PostgreSQL instance, ensure that the source
- database server is running and accepting connections and
- on the server with the destination database, run the following command:
+ Before cloning/synchronizing a PostgreSQL instance, ensure that the source
+ instance server is running and accepting connections. To clone/sync a PostgreSQL instance,
+ on the server with the destination instance, you can run
+ the command as follows:
pg_probackup catchup -b catchup-mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream [connection_options] [remote_options]
@@ -3496,33 +3512,43 @@ pg_probackup catchup -b catchup-mode --source-pgdata=
FULL — creates a full copy of the PostgreSQL instance.
- The destination directory must be empty for this mode.
+ The data directory of the destination instance must be empty for this mode.
DELTA — reads all data files in the data directory and
creates an incremental copy for pages that have changed
- since the destination database was shut down cleanly.
- For this mode, the destination directory must contain a previous
- copy of the database that was shut down cleanly.
+ since the destination instance was shut down.
PTRACK — tracking page changes on the fly,
- only copies pages that have changed since the point of divergence
- of the source and destination databases.
- For this mode, the destination directory must contain a previous
- copy of the database that was shut down cleanly.
+ only reads and copies pages that have changed since the point of divergence
+ of the source and destination instances.
+
+
+ PTRACK catchup mode requires PTRACK
+ not earlier than 2.0 and hence PostgreSQL not earlier than 11.
+
+
+
+ By specifying the option, you can set
+ STREAM WAL delivery mode
+ of copying, which will include all the necessary WAL files by streaming them from
+ the instance server via replication protocol.
+
You can use connection_options to specify
the connection to the source database cluster. If it is located on a different server,
also specify remote_options.
- If the source database contains tablespaces that must be located in
+
+
+ If the source database cluster contains tablespaces that must be located in
a different directory, additionally specify the
option:
@@ -3538,8 +3564,9 @@ pg_probackup catchup -b catchup-mode --source-pgdata=
For example, assume that a remote standby server with the PostgreSQL instance having /replica-pgdata data directory has fallen behind. To sync this instance with the one in /master-pgdata data directory, you can run
the catchup command in the PTRACK mode on four parallel threads as follows:
-pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=PTRACK --remote-host=remote-hostname --remote-user=remote-unix-username -j 4
+pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=PTRACK --remote-host=remote-hostname --remote-user=remote-unix-username -j 4 --exclude-path=postgresql.conf --exclude-path=postgresql.auto.conf --exclude-path=pg_hba.conf --exclude-path=pg_ident.conf
+ Note that in this example, the configuration files will not be overwritten during synchronization.
Another example shows how you can add a new remote standby server with the PostgreSQL data directory /replica-pgdata by running the catchup command in the FULL mode
@@ -4428,7 +4455,9 @@ pg_probackup archive-get -B backup_dir --instance catchup_mode
--source-pgdata=path_to_pgdata_on_remote_server
--destination-pgdata=path_to_local_dir
-[--help] [--stream] [-j num_threads]
+[--help] [-j | --threads=num_threads] [--stream]
+[--temp-slot] [-P | --perm-slot] [-S | --slot=slot_name]
+[--exclude-path=PATHNAME]
[-T OLDDIR=NEWDIR]
[connection_options] [remote_options]
@@ -4454,14 +4483,20 @@ pg_probackup catchup -b catchup_mode
DELTA — reads all data files in the data directory and
creates an incremental copy for pages that have changed
- since the destination database was shut down cleanly.
+ since the destination instance was shut down.
PTRACK — tracking page changes on the fly,
- only copies pages that have changed since the point of divergence
- of the source and destination databases.
+ only reads and copies pages that have changed since the point of divergence
+ of the source and destination instances.
+
+
+ PTRACK catchup mode requires PTRACK
+ not earlier than 2.0 and hence PostgreSQL not earlier than 11.
+
+
@@ -4487,24 +4522,98 @@ pg_probackup catchup -b catchup_mode
+
+
+
+
+
+ Sets the number of parallel threads for
+ catchup process.
+
+
+
+
- Makes a STREAM backup, which
- includes all the necessary WAL files by streaming them from
- the database server via replication protocol.
+ Copies the instance in STREAM WAL delivery mode,
+ including all the necessary WAL files by streaming them from
+ the instance server via replication protocol.
-
-
+=path_prefix
+=path_prefix
- Sets the number of parallel threads for
- catchup process.
+ Specifies a prefix for files to exclude from the synchronization of PostgreSQL
+ instances during copying. The prefix must contain a path relative to the data directory of an instance.
+ If the prefix specifies a directory,
+ all files in this directory will not be synchronized.
+
+
+ This option is dangerous since excluding files from synchronization can result in
+ incomplete synchronization; use with care.
+
+
+
+
+
+
+
+
+
+
+ Copies the instance in STREAM WAL delivery mode,
+ including all the necessary WAL files by streaming them from
+ the instance server via replication protocol.
+
+
+
+
+
+
+
+
+ Creates a temporary physical replication slot for streaming
+ WAL from the PostgreSQL instance being copied. It ensures that
+ all the required WAL segments remain available if WAL is
+ rotated while the backup is in progress. This flag can only be
+ used together with the flag and
+ cannot be used together with the flag.
+ The default slot name is pg_probackup_slot,
+ which can be changed using the / option.
+
+
+
+
+
+
+
+
+
+ Creates a permanent physical replication slot for streaming
+ WAL from the PostgreSQL instance being copied. This flag can only be
+ used together with the flag and
+ cannot be used together with the flag.
+ The default slot name is pg_probackup_perm_slot,
+ which can be changed using the / option.
+
+
+
+
+
+
+
+
+
+ Specifies the replication slot for WAL streaming. This option
+ can only be used together with the
+ flag.
@@ -4533,7 +4642,7 @@ pg_probackup catchup -b catchup_mode
For details on usage, see the section
- Cloning PostgreSQL Instance.
+ Cloning and Synchronizing PostgreSQL Instance.
diff --git a/src/backup.c b/src/backup.c
index e9c8a22d1..1d08c3828 100644
--- a/src/backup.c
+++ b/src/backup.c
@@ -263,7 +263,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
fio_mkdir(stream_xlog_path, DIR_PERMISSION, FIO_BACKUP_HOST);
start_WAL_streaming(backup_conn, stream_xlog_path, &instance_config.conn_opt,
- current.start_lsn, current.tli);
+ current.start_lsn, current.tli, true);
/* Make sure that WAL streaming is working
* PAGE backup in stream mode is waited twice, first for
@@ -2051,8 +2051,6 @@ backup_files(void *arg)
instance_config.compress_alg,
instance_config.compress_level,
arguments->nodeInfo->checksum_version,
- arguments->nodeInfo->ptrack_version_num,
- arguments->nodeInfo->ptrack_schema,
arguments->hdr_map, false);
}
else
@@ -2350,7 +2348,7 @@ calculate_datasize_of_filelist(parray *filelist)
{
pgFile *file = (pgFile *) parray_get(filelist, i);
- if (file->external_dir_num != 0)
+ if (file->external_dir_num != 0 || file->excluded)
continue;
if (S_ISDIR(file->mode))
diff --git a/src/catchup.c b/src/catchup.c
index 58ce13c10..5a0c8e45a 100644
--- a/src/catchup.c
+++ b/src/catchup.c
@@ -27,20 +27,19 @@
/*
* Catchup routines
*/
-static PGconn *catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata);
+static PGconn *catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata);
static void catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, const char *source_pgdata,
const char *dest_pgdata);
static void catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn);
static parray* catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli);
-//REVIEW The name of this function looks strange to me.
-//Maybe catchup_init_state() or catchup_setup() will do better?
-//I'd also suggest to wrap all these fields into some CatchupState, but it isn't urgent.
+//REVIEW I'd also suggest to wrap all these fields into some CatchupState, but it isn't urgent.
+//REVIEW_ANSWER what for?
/*
* Prepare for work: fill some globals, open connection to source database
*/
static PGconn *
-catchup_collect_info(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata)
+catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata)
{
PGconn *source_conn;
@@ -159,17 +158,6 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn,
elog(ERROR, "Destination directory contains \"" PG_BACKUP_LABEL_FILE "\" file");
}
- /* check that destination database is shutdowned cleanly */
- if (current.backup_mode != BACKUP_MODE_FULL)
- {
- DBState state;
- state = get_system_dbstate(dest_pgdata, FIO_LOCAL_HOST);
- /* see states in postgres sources (src/include/catalog/pg_control.h) */
- if (state != DB_SHUTDOWNED && state != DB_SHUTDOWNED_IN_RECOVERY)
- elog(ERROR, "Postmaster in destination directory \"%s\" must be stopped cleanly",
- dest_pgdata);
- }
-
/* Check that connected PG instance, source and destination PGDATA are the same */
{
uint64 source_conn_id, source_id, dest_id;
@@ -366,6 +354,7 @@ typedef struct
XLogRecPtr sync_lsn;
BackupMode backup_mode;
int thread_num;
+ size_t transfered_bytes;
bool completed;
} catchup_thread_runner_arg;
@@ -390,6 +379,9 @@ catchup_thread_runner(void *arg)
if (S_ISDIR(file->mode))
continue;
+ if (file->excluded)
+ continue;
+
if (!pg_atomic_test_set_flag(&file->lock))
continue;
@@ -431,12 +423,7 @@ catchup_thread_runner(void *arg)
catchup_data_file(file, from_fullpath, to_fullpath,
arguments->sync_lsn,
arguments->backup_mode,
- NONE_COMPRESS,
- 0,
arguments->nodeInfo->checksum_version,
- arguments->nodeInfo->ptrack_version_num,
- arguments->nodeInfo->ptrack_schema,
- false,
dest_file != NULL ? dest_file->size : 0);
}
else
@@ -445,6 +432,7 @@ catchup_thread_runner(void *arg)
arguments->backup_mode, current.parent_backup, true);
}
+ /* file went missing during catchup */
if (file->write_size == FILE_NOT_FOUND)
continue;
@@ -454,6 +442,7 @@ catchup_thread_runner(void *arg)
continue;
}
+ arguments->transfered_bytes += file->write_size;
elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes",
from_fullpath, file->write_size);
}
@@ -469,8 +458,10 @@ catchup_thread_runner(void *arg)
/*
* main multithreaded copier
+ * returns size of transferred data files
+ * or -1 in case of error
*/
-static bool
+static ssize_t
catchup_multithreaded_copy(int num_threads,
PGNodeInfo *source_node_info,
const char *source_pgdata_path,
@@ -485,6 +476,7 @@ catchup_multithreaded_copy(int num_threads,
pthread_t *threads;
bool all_threads_successful = true;
+ ssize_t transfered_bytes_result = 0;
int i;
/* init thread args */
@@ -499,6 +491,7 @@ catchup_multithreaded_copy(int num_threads,
.sync_lsn = sync_lsn,
.backup_mode = backup_mode,
.thread_num = i + 1,
+ .transfered_bytes = 0,
.completed = false,
};
@@ -516,15 +509,16 @@ catchup_multithreaded_copy(int num_threads,
{
pthread_join(threads[i], NULL);
all_threads_successful &= threads_args[i].completed;
+ transfered_bytes_result += threads_args[i].transfered_bytes;
}
free(threads);
free(threads_args);
- return all_threads_successful;
+ return all_threads_successful ? transfered_bytes_result : -1;
}
/*
- *
+ * Sync every file in destination directory to disk
*/
static void
catchup_sync_destination_files(const char* pgdata_path, fio_location location, parray *filelist, pgFile *pg_control_file)
@@ -541,8 +535,13 @@ catchup_sync_destination_files(const char* pgdata_path, fio_location location, p
{
pgFile *file = (pgFile *) parray_get(filelist, i);
- /* TODO: sync directory ? */
- if (S_ISDIR(file->mode))
+ /* TODO: sync directory ?
+ * - at first glance we can rely on fs journaling,
+ * which is enabled by default on most platforms
+ * - but PG itself is not relying on fs, its durable_sync
+ * includes directory sync
+ */
+ if (S_ISDIR(file->mode) || file->excluded)
continue;
Assert(file->external_dir_num == 0);
@@ -564,11 +563,50 @@ catchup_sync_destination_files(const char* pgdata_path, fio_location location, p
elog(INFO, "Files are synced, time elapsed: %s", pretty_time);
}
+/*
+ * Filter filelist helper function (used to process --exclude-path's)
+ * filelist -- parray of pgFile *, can't be NULL
+ * exclude_absolute_paths_list -- sorted parray of char * (absolute paths, starting with '/'), can be NULL
+ * exclude_relative_paths_list -- sorted parray of char * (relative paths), can be NULL
+ * logging_string -- helper parameter, used for generating verbose log messages ("Source" or "Destination")
+ */
+static void
+filter_filelist(parray *filelist, const char *pgdata,
+ parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list,
+ const char *logging_string)
+{
+ int i;
+
+ if (exclude_absolute_paths_list == NULL && exclude_relative_paths_list == NULL)
+ return;
+
+ for (i = 0; i < parray_num(filelist); ++i)
+ {
+ char full_path[MAXPGPATH];
+ pgFile *file = (pgFile *) parray_get(filelist, i);
+ join_path_components(full_path, pgdata, file->rel_path);
+
+ if (
+ (exclude_absolute_paths_list != NULL
+ && parray_bsearch(exclude_absolute_paths_list, full_path, pgPrefixCompareString)!= NULL
+ ) || (
+ exclude_relative_paths_list != NULL
+ && parray_bsearch(exclude_relative_paths_list, file->rel_path, pgPrefixCompareString)!= NULL)
+ )
+ {
+ elog(LOG, "%s file \"%s\" excluded with --exclude-path option", logging_string, full_path);
+ file->excluded = true;
+ }
+ }
+}
+
/*
* Entry point of pg_probackup CATCHUP subcommand.
+ * exclude_*_paths_list are parray's of char *
*/
int
-do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files)
+do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files,
+ parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list)
{
PGconn *source_conn = NULL;
PGNodeInfo source_node_info;
@@ -586,33 +624,27 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
/* for fancy reporting */
time_t start_time, end_time;
- char pretty_time[20];
- char pretty_bytes[20];
+ ssize_t transfered_datafiles_bytes = 0;
+ ssize_t transfered_walfiles_bytes = 0;
+ char pretty_source_bytes[20];
- source_conn = catchup_collect_info(&source_node_info, source_pgdata, dest_pgdata);
+ source_conn = catchup_init_state(&source_node_info, source_pgdata, dest_pgdata);
catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata);
- elog(LOG, "Database catchup start");
+ /* we need to sort --exclude-path's for future searching */
+ if (exclude_absolute_paths_list != NULL)
+ parray_qsort(exclude_absolute_paths_list, pgCompareString);
+ if (exclude_relative_paths_list != NULL)
+ parray_qsort(exclude_relative_paths_list, pgCompareString);
- {
- char label[1024];
- /* notify start of backup to PostgreSQL server */
- time2iso(label, lengthof(label), current.start_time, false);
- strncat(label, " with pg_probackup", lengthof(label) -
- strlen(" with pg_probackup"));
-
- /* Call pg_start_backup function in PostgreSQL connect */
- pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, source_conn);
- elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn));
- }
+ elog(LOG, "Database catchup start");
- //REVIEW I wonder, if we can move this piece above and call before pg_start backup()?
- //It seems to be a part of setup phase.
if (current.backup_mode != BACKUP_MODE_FULL)
{
dest_filelist = parray_new();
dir_list_file(dest_filelist, dest_pgdata,
true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST);
+ filter_filelist(dest_filelist, dest_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Destination");
// fill dest_redo.lsn and dest_redo.tli
get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo);
@@ -627,16 +659,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
*/
}
- //REVIEW I wonder, if we can move this piece above and call before pg_start backup()?
- //It seems to be a part of setup phase.
/*
+ * Make sure that sync point is within ptrack tracking range
* TODO: move to separate function to use in both backup.c and catchup.c
*/
if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
{
XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(source_conn, &source_node_info);
- // new ptrack is more robust and checks Start LSN
if (ptrack_lsn > dest_redo.lsn || ptrack_lsn == InvalidXLogRecPtr)
elog(ERROR, "LSN from ptrack_control in source %X/%X is greater than checkpoint LSN in destination %X/%X.\n"
"You can perform only FULL catchup.",
@@ -645,7 +675,19 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
(uint32) (dest_redo.lsn));
}
- /* Check that dest_redo.lsn is less than current.start_lsn */
+ {
+ char label[1024];
+ /* notify start of backup to PostgreSQL server */
+ time2iso(label, lengthof(label), current.start_time, false);
+ strncat(label, " with pg_probackup", lengthof(label) -
+ strlen(" with pg_probackup"));
+
+ /* Call pg_start_backup function in PostgreSQL connect */
+ pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, source_conn);
+ elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn));
+ }
+
+ /* Sanity: source cluster must be "in future" relatively to dest cluster */
if (current.backup_mode != BACKUP_MODE_FULL &&
dest_redo.lsn > current.start_lsn)
elog(ERROR, "Current START LSN %X/%X is lower than SYNC LSN %X/%X, "
@@ -657,7 +699,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR);
fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
- current.start_lsn, current.tli);
+ current.start_lsn, current.tli, false);
source_filelist = parray_new();
@@ -670,17 +712,16 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST);
//REVIEW FIXME. Let's fix that before release.
- // TODO filter pg_xlog/wal?
// TODO what if wal is not a dir (symlink to a dir)?
+ // - Currently backup/restore transform pg_wal symlink to directory
+ // so the problem is not only with catchup.
+ // if we want to make it right - we must provide the way
+ // for symlink remapping during restore and catchup.
+ // By default everything must be left as it is.
/* close ssh session in main thread */
fio_disconnect();
- //REVIEW Do we want to do similar calculation for dest?
- current.pgdata_bytes += calculate_datasize_of_filelist(source_filelist);
- pretty_size(current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes));
- elog(INFO, "Source PGDATA size: %s", pretty_bytes);
-
/*
* Sort pathname ascending. It is necessary to create intermediate
* directories sequentially.
@@ -694,8 +735,24 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
*/
parray_qsort(source_filelist, pgFileCompareRelPathWithExternal);
- /* Extract information about files in source_filelist parsing their names:*/
- parse_filelist_filenames(source_filelist, source_pgdata);
+ //REVIEW Do we want to do similar calculation for dest?
+ //REVIEW_ANSWER what for?
+ {
+ ssize_t source_bytes = 0;
+ char pretty_bytes[20];
+
+ source_bytes += calculate_datasize_of_filelist(source_filelist);
+
+ /* Extract information about files in source_filelist parsing their names:*/
+ parse_filelist_filenames(source_filelist, source_pgdata);
+ filter_filelist(source_filelist, source_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Source");
+
+ current.pgdata_bytes += calculate_datasize_of_filelist(source_filelist);
+
+ pretty_size(current.pgdata_bytes, pretty_source_bytes, lengthof(pretty_source_bytes));
+ pretty_size(source_bytes - current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes));
+ elog(INFO, "Source PGDATA size: %s (excluded %s)", pretty_source_bytes, pretty_bytes);
+ }
elog(LOG, "Start LSN (source): %X/%X, TLI: %X",
(uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn),
@@ -728,7 +785,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
* We iterate over source_filelist and for every directory with parent 'pg_tblspc'
* we must lookup this directory name in tablespace map.
* If we got a match, we treat this directory as tablespace.
- * It means that we create directory specified in tablespace_map and
+ * It means that we create directory specified in tablespace map and
* original directory created as symlink to it.
*/
for (i = 0; i < parray_num(source_filelist); i++)
@@ -736,7 +793,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
pgFile *file = (pgFile *) parray_get(source_filelist, i);
char parent_dir[MAXPGPATH];
- if (!S_ISDIR(file->mode))
+ if (!S_ISDIR(file->mode) || file->excluded)
continue;
/*
@@ -816,9 +873,22 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
source_pg_control_file = parray_remove(source_filelist, control_file_elem_index);
}
+ /* TODO before public release: must be more careful with pg_control.
+ * when running catchup or incremental restore
+ * cluster is actually in two states
+ * simultaneously - old and new, so
+ * it must contain both pg_control files
+ * describing those states: global/pg_control_old, global/pg_control_new
+ * 1. This approach will provide us with means of
+ * robust detection of previous failures and thus correct operation retrying (or forbidding).
+ * 2. We will have the ability of preventing instance from starting
+ * in the middle of our operations.
+ */
+
/*
* remove absent source files in dest (dropped tables, etc...)
* note: global/pg_control will also be deleted here
+ * mark dest files (that were excluded with source --exclude-path) also for exclusion
*/
if (current.backup_mode != BACKUP_MODE_FULL)
{
@@ -828,33 +898,33 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
{
bool redundant = true;
pgFile *file = (pgFile *) parray_get(dest_filelist, i);
+ pgFile **src_file = NULL;
//TODO optimize it and use some merge-like algorithm
//instead of bsearch for each file.
- if (parray_bsearch(source_filelist, file, pgFileCompareRelPathWithExternal))
+ src_file = (pgFile **) parray_bsearch(source_filelist, file, pgFileCompareRelPathWithExternal);
+
+ if (src_file!= NULL && !(*src_file)->excluded && file->excluded)
+ (*src_file)->excluded = true;
+
+ if (src_file!= NULL || file->excluded)
redundant = false;
- /* pg_filenode.map are always restored, because it's crc cannot be trusted */
+ /* pg_filenode.map is always copied, because its crc cannot be trusted */
Assert(file->external_dir_num == 0);
if (pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0)
redundant = true;
- //REVIEW This check seems unneded. Anyway we delete only redundant stuff below.
- /* do not delete the useful internal directories */
- if (S_ISDIR(file->mode) && !redundant)
- continue;
-
/* if file does not exists in destination list, then we can safely unlink it */
if (redundant)
{
char fullpath[MAXPGPATH];
join_path_components(fullpath, dest_pgdata, file->rel_path);
-
fio_delete(file->mode, fullpath, FIO_DB_HOST);
elog(VERBOSE, "Deleted file \"%s\"", fullpath);
- /* shrink pgdata list */
+ /* shrink dest pgdata list */
pgFileFree(file);
parray_remove(dest_filelist, i);
i--;
@@ -875,10 +945,11 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
/* run copy threads */
elog(INFO, "Start transferring data files");
time(&start_time);
- catchup_isok = catchup_multithreaded_copy(num_threads, &source_node_info,
+ transfered_datafiles_bytes = catchup_multithreaded_copy(num_threads, &source_node_info,
source_pgdata, dest_pgdata,
source_filelist, dest_filelist,
dest_redo.lsn, current.backup_mode);
+ catchup_isok = transfered_datafiles_bytes != -1;
/* at last copy control file */
if (catchup_isok)
@@ -889,17 +960,22 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
join_path_components(to_fullpath, dest_pgdata, source_pg_control_file->rel_path);
copy_pgcontrol_file(from_fullpath, FIO_DB_HOST,
to_fullpath, FIO_LOCAL_HOST, source_pg_control_file);
+ transfered_datafiles_bytes += source_pg_control_file->size;
}
- time(&end_time);
- pretty_time_interval(difftime(end_time, start_time),
+ if (!catchup_isok)
+ {
+ char pretty_time[20];
+ char pretty_transfered_data_bytes[20];
+
+ time(&end_time);
+ pretty_time_interval(difftime(end_time, start_time),
pretty_time, lengthof(pretty_time));
- if (catchup_isok)
- elog(INFO, "Data files are transferred, time elapsed: %s",
- pretty_time);
- else
- elog(ERROR, "Data files transferring failed, time elapsed: %s",
- pretty_time);
+ pretty_size(transfered_datafiles_bytes, pretty_transfered_data_bytes, lengthof(pretty_transfered_data_bytes));
+
+ elog(ERROR, "Catchup failed. Transferred: %s, time elapsed: %s",
+ pretty_transfered_data_bytes, pretty_time);
+ }
/* Notify end of backup */
{
@@ -912,17 +988,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
pg_silent_client_messages(source_conn);
- //REVIEW. Do we want to support pg 9.5? I suppose we never test it...
- //Maybe check it and error out early?
- /* Create restore point
- * Only if backup is from master.
- * For PG 9.5 create restore point only if pguser is superuser.
- */
- if (!current.from_replica &&
- !(source_node_info.server_version < 90600 &&
- !source_node_info.is_superuser)) //TODO: check correctness
- pg_create_restore_point(source_conn, current.start_time);
-
/* Execute pg_stop_backup using PostgreSQL connection */
pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, exclusive_backup, &stop_backup_query_text);
@@ -965,22 +1030,23 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
}
#endif
- if(wait_WAL_streaming_end(NULL))
- elog(ERROR, "WAL streaming failed");
+ /* wait for end of wal streaming and calculate wal size transferred */
+ {
+ parray *wal_files_list = NULL;
+ wal_files_list = parray_new();
- //REVIEW Please add a comment about these lsns. It is a crutial part of the algorithm.
- current.recovery_xid = stop_backup_result.snapshot_xid;
+ if (wait_WAL_streaming_end(wal_files_list))
+ elog(ERROR, "WAL streaming failed");
- elog(LOG, "Getting the Recovery Time from WAL");
+ for (i = 0; i < parray_num(wal_files_list); i++)
+ {
+ pgFile *file = (pgFile *) parray_get(wal_files_list, i);
+ transfered_walfiles_bytes += file->size;
+ }
- /* iterate over WAL from stop_backup lsn to start_backup lsn */
- if (!read_recovery_info(dest_xlog_path, current.tli,
- instance_config.xlog_seg_size,
- current.start_lsn, current.stop_lsn,
- ¤t.recovery_time))
- {
- elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp");
- current.recovery_time = stop_backup_result.invocation_time;
+ parray_walk(wal_files_list, pgFileFree);
+ parray_free(wal_files_list);
+ wal_files_list = NULL;
}
/*
@@ -994,15 +1060,33 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
/* close ssh session in main thread */
fio_disconnect();
- /* Sync all copied files unless '--no-sync' flag is used */
- if (catchup_isok)
+ /* fancy reporting */
{
- if (sync_dest_files)
- catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file);
- else
- elog(WARNING, "Files are not synced to disk");
+ char pretty_transfered_data_bytes[20];
+ char pretty_transfered_wal_bytes[20];
+ char pretty_time[20];
+
+ time(&end_time);
+ pretty_time_interval(difftime(end_time, start_time),
+ pretty_time, lengthof(pretty_time));
+ pretty_size(transfered_datafiles_bytes, pretty_transfered_data_bytes, lengthof(pretty_transfered_data_bytes));
+ pretty_size(transfered_walfiles_bytes, pretty_transfered_wal_bytes, lengthof(pretty_transfered_wal_bytes));
+
+ elog(INFO, "Databases synchronized. Transfered datafiles size: %s, transfered wal size: %s, time elapsed: %s",
+ pretty_transfered_data_bytes, pretty_transfered_wal_bytes, pretty_time);
+
+ if (current.backup_mode != BACKUP_MODE_FULL)
+ elog(INFO, "Catchup incremental ratio (less is better): %.f%% (%s/%s)",
+ ((float) transfered_datafiles_bytes / current.pgdata_bytes) * 100,
+ pretty_transfered_data_bytes, pretty_source_bytes);
}
+ /* Sync all copied files unless '--no-sync' flag is used */
+ if (sync_dest_files)
+ catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file);
+ else
+ elog(WARNING, "Files are not synced to disk");
+
/* Cleanup */
if (dest_filelist)
{
@@ -1013,8 +1097,5 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
parray_free(source_filelist);
pgFileFree(source_pg_control_file);
- //REVIEW: Are we going to do that before release?
- /* TODO: show the amount of transfered data in bytes and calculate incremental ratio */
-
return 0;
}
diff --git a/src/data.c b/src/data.c
index 49b696059..f02e3fd14 100644
--- a/src/data.c
+++ b/src/data.c
@@ -28,10 +28,10 @@
typedef struct DataPage
{
BackupPageHeader bph;
- char data[BLCKSZ];
+ char data[BLCKSZ];
} DataPage;
-static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph,
+static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader *bph,
pg_crc32 *crc, bool use_crc32c);
#ifdef HAVE_LIBZ
@@ -40,9 +40,9 @@ static int32
zlib_compress(void *dst, size_t dst_size, void const *src, size_t src_size,
int level)
{
- uLongf compressed_size = dst_size;
- int rc = compress2(dst, &compressed_size, src, src_size,
- level);
+ uLongf compressed_size = dst_size;
+ int rc = compress2(dst, &compressed_size, src, src_size,
+ level);
return rc == Z_OK ? compressed_size : rc;
}
@@ -51,8 +51,8 @@ zlib_compress(void *dst, size_t dst_size, void const *src, size_t src_size,
static int32
zlib_decompress(void *dst, size_t dst_size, void const *src, size_t src_size)
{
- uLongf dest_len = dst_size;
- int rc = uncompress(dst, &dest_len, src, src_size);
+ uLongf dest_len = dst_size;
+ int rc = uncompress(dst, &dest_len, src, src_size);
return rc == Z_OK ? dest_len : rc;
}
@@ -63,7 +63,7 @@ zlib_decompress(void *dst, size_t dst_size, void const *src, size_t src_size)
* written in the destination buffer, or -1 if compression fails.
*/
int32
-do_compress(void* dst, size_t dst_size, void const* src, size_t src_size,
+do_compress(void *dst, size_t dst_size, void const *src, size_t src_size,
CompressAlg alg, int level, const char **errormsg)
{
switch (alg)
@@ -73,13 +73,13 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size,
return -1;
#ifdef HAVE_LIBZ
case ZLIB_COMPRESS:
- {
- int32 ret;
- ret = zlib_compress(dst, dst_size, src, src_size, level);
- if (ret < Z_OK && errormsg)
- *errormsg = zError(ret);
- return ret;
- }
+ {
+ int32 ret;
+ ret = zlib_compress(dst, dst_size, src, src_size, level);
+ if (ret < Z_OK && errormsg)
+ *errormsg = zError(ret);
+ return ret;
+ }
#endif
case PGLZ_COMPRESS:
return pglz_compress(src, src_size, dst, PGLZ_strategy_always);
@@ -93,25 +93,25 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size,
* decompressed in the destination buffer, or -1 if decompression fails.
*/
int32
-do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size,
+do_decompress(void *dst, size_t dst_size, void const *src, size_t src_size,
CompressAlg alg, const char **errormsg)
{
switch (alg)
{
case NONE_COMPRESS:
case NOT_DEFINED_COMPRESS:
- if (errormsg)
+ if (errormsg)
*errormsg = "Invalid compression algorithm";
return -1;
#ifdef HAVE_LIBZ
case ZLIB_COMPRESS:
- {
- int32 ret;
- ret = zlib_decompress(dst, dst_size, src, src_size);
- if (ret < Z_OK && errormsg)
- *errormsg = zError(ret);
- return ret;
- }
+ {
+ int32 ret;
+ ret = zlib_decompress(dst, dst_size, src, src_size);
+ if (ret < Z_OK && errormsg)
+ *errormsg = zError(ret);
+ return ret;
+ }
#endif
case PGLZ_COMPRESS:
@@ -125,7 +125,6 @@ do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size,
return -1;
}
-
#define ZLIB_MAGIC 0x78
/*
@@ -162,7 +161,7 @@ page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version)
/* For zlib we can check page magic:
* https://stackoverflow.com/questions/9050260/what-does-a-zlib-header-look-like
*/
- if (alg == ZLIB_COMPRESS && *(char*)page != ZLIB_MAGIC)
+ if (alg == ZLIB_COMPRESS && *(char *)page != ZLIB_MAGIC)
{
return false;
}
@@ -281,8 +280,6 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
BackupMode backup_mode,
Page page, bool strict,
uint32 checksum_version,
- int ptrack_version_num,
- const char *ptrack_schema,
const char *from_fullpath,
PageState *page_st)
{
@@ -404,8 +401,7 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
blknum, from_fullpath,
file->exists_in_prev ? "true" : "false",
(uint32) (page_st->lsn >> 32), (uint32) page_st->lsn,
- (uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn
- );
+ (uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn);
return SkipCurrentPage;
}
@@ -422,7 +418,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
{
int compressed_size = 0;
size_t write_buffer_size = 0;
- char write_buffer[BLCKSZ*2]; /* compressed page may require more space than uncompressed */
+ char write_buffer[BLCKSZ*2]; /* compressed page may require more space than uncompressed */
BackupPageHeader* bph = (BackupPageHeader*)write_buffer;
const char *errormsg = NULL;
@@ -463,16 +459,13 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
return compressed_size;
}
-/* взята из compress_and_backup_page, но выпилена вся магия заголовков и компрессии, просто копирование 1-в-1 */
+/* Write page as-is. TODO: make it fastpath option in compress_and_backup_page() */
static int
-copy_page(pgFile *file, BlockNumber blknum,
- FILE *in, FILE *out, Page page,
- const char *to_fullpath)
+write_page(pgFile *file, FILE *out, Page page)
{
/* write data page */
if (fio_fwrite(out, page, BLCKSZ) != BLCKSZ)
- elog(ERROR, "File: \"%s\", cannot write at block %u: %s",
- to_fullpath, blknum, strerror(errno));
+ return -1;
file->write_size += BLCKSZ;
file->uncompressed_size += BLCKSZ;
@@ -492,13 +485,12 @@ void
backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
CompressAlg calg, int clevel, uint32 checksum_version,
- int ptrack_version_num, const char *ptrack_schema,
HeaderMap *hdr_map, bool is_merge)
{
int rc;
bool use_pagemap;
- char *errmsg = NULL;
- BlockNumber err_blknum = 0;
+ char *errmsg = NULL;
+ BlockNumber err_blknum = 0;
/* page headers */
BackupPageHeader2 *headers = NULL;
@@ -547,7 +539,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat
* Such files should be fully copied.
*/
- if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
+ if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
file->pagemap_isabsent || !file->exists_in_prev ||
!file->pagemap.bitmap)
use_pagemap = false;
@@ -557,7 +549,6 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat
/* Remote mode */
if (fio_is_remote(FIO_DB_HOST))
{
-
rc = fio_send_pages(to_fullpath, from_fullpath, file,
/* send prev backup START_LSN */
(backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
@@ -576,7 +567,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat
(backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
calg, clevel, checksum_version, use_pagemap,
- &headers, backup_mode, ptrack_version_num, ptrack_schema);
+ &headers, backup_mode);
}
/* check for errors */
@@ -646,30 +637,21 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat
}
/*
- * Backup data file in the from_root directory to the to_root directory with
- * same relative path. If prev_backup_start_lsn is not NULL, only pages with
+ * Catchup data file in the from_root directory to the to_root directory with
+ * same relative path. If sync_lsn is not InvalidXLogRecPtr, only pages with equal or
* higher lsn will be copied.
* Not just copy file, but read it block by block (use bitmap in case of
- * incremental backup), validate checksum, optionally compress and write to
- * backup with special header.
+ * incremental catchup), validate page checksum.
*/
void
catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
- XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
- CompressAlg calg, int clevel, uint32 checksum_version,
- int ptrack_version_num, const char *ptrack_schema,
- bool is_merge, size_t prev_size)
+ XLogRecPtr sync_lsn, BackupMode backup_mode,
+ uint32 checksum_version, size_t prev_size)
{
int rc;
bool use_pagemap;
char *errmsg = NULL;
BlockNumber err_blknum = 0;
- /* page headers */
- BackupPageHeader2 *headers = NULL;
-
- /* sanity */
- if (file->size % BLCKSZ != 0)
- elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, file->size);
/*
* Compute expected number of blocks in the file.
@@ -679,7 +661,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
file->n_blocks = file->size/BLCKSZ;
/*
- * Skip unchanged file only if it exists in previous backup.
+ * Skip unchanged file only if it exists in destination directory.
* This way we can correctly handle null-sized files which are
* not tracked by pagemap and thus always marked as unchanged.
*/
@@ -688,8 +670,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
file->exists_in_prev && file->size == prev_size && !file->pagemap_isabsent)
{
/*
- * There are no changed blocks since last backup. We want to make
- * incremental backup, so we should exit.
+ * There are no changed pages, so there is nothing to copy.
*/
file->write_size = BYTES_INVALID;
return;
@@ -699,16 +680,10 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
file->read_size = 0;
file->write_size = 0;
file->uncompressed_size = 0;
- INIT_FILE_CRC32(true, file->crc);
/*
- * Read each page, verify checksum and write it to backup.
- * If page map is empty or file is not present in previous backup
- * backup all pages of the relation.
- *
- * In PTRACK 1.x there was a problem
- * of data files with missing _ptrack map.
- * Such files should be fully copied.
+ * If page map is empty or file is not present in destination directory,
+ * then copy all pages of the relation.
*/
if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
@@ -726,29 +701,28 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
{
rc = fio_copy_pages(to_fullpath, from_fullpath, file,
/* send prev backup START_LSN */
- (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
- file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
- calg, clevel, checksum_version,
+ ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
+ file->exists_in_prev) ? sync_lsn : InvalidXLogRecPtr,
+ NONE_COMPRESS, 1, checksum_version,
/* send pagemap if any */
use_pagemap,
/* variables for error reporting */
- &err_blknum, &errmsg, &headers);
+ &err_blknum, &errmsg);
}
else
{
/* TODO: stop handling errors internally */
rc = copy_pages(to_fullpath, from_fullpath, file,
/* send prev backup START_LSN */
- (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
- file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
- checksum_version, use_pagemap,
- backup_mode, ptrack_version_num, ptrack_schema);
+ ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
+ file->exists_in_prev) ? sync_lsn : InvalidXLogRecPtr,
+ checksum_version, use_pagemap, backup_mode);
}
/* check for errors */
if (rc == FILE_MISSING)
{
- elog(is_merge ? ERROR : LOG, "File not found: \"%s\"", from_fullpath);
+ elog(LOG, "File not found: \"%s\"", from_fullpath);
file->write_size = FILE_NOT_FOUND;
goto cleanup;
}
@@ -784,11 +758,6 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
file->read_size = rc * BLCKSZ;
- /* refresh n_blocks for FULL and DELTA */
- if (backup_mode == BACKUP_MODE_FULL ||
- backup_mode == BACKUP_MODE_DIFF_DELTA)
- file->n_blocks = file->read_size / BLCKSZ;
-
/* Determine that file didn`t changed in case of incremental catchup */
if (backup_mode != BACKUP_MODE_FULL &&
file->exists_in_prev &&
@@ -799,13 +768,8 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
}
cleanup:
-
- /* finish CRC calculation */
- FIN_FILE_CRC32(true, file->crc);
-
pg_free(errmsg);
pg_free(file->pagemap.bitmap);
- pg_free(headers);
}
/*
@@ -816,9 +780,9 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
*/
void
backup_non_data_file(pgFile *file, pgFile *prev_file,
- const char *from_fullpath, const char *to_fullpath,
- BackupMode backup_mode, time_t parent_backup_time,
- bool missing_ok)
+ const char *from_fullpath, const char *to_fullpath,
+ BackupMode backup_mode, time_t parent_backup_time,
+ bool missing_ok)
{
/* special treatment for global/pg_control */
if (file->external_dir_num == 0 && strcmp(file->rel_path, XLOG_CONTROL_FILE) == 0)
@@ -891,7 +855,7 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out,
/* page headers */
BackupPageHeader2 *headers = NULL;
- pgBackup *backup = (pgBackup *) parray_get(parent_chain, backup_seq);
+ pgBackup *backup = (pgBackup *) parray_get(parent_chain, backup_seq);
if (use_bitmap)
backup_seq++;
@@ -899,7 +863,7 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out,
backup_seq--;
/* lookup file in intermediate backup */
- res_file = parray_bsearch(backup->files, dest_file, pgFileCompareRelPathWithExternal);
+ res_file = parray_bsearch(backup->files, dest_file, pgFileCompareRelPathWithExternal);
tmp_file = (res_file) ? *res_file : NULL;
/* Destination file is not exists yet at this moment */
@@ -951,13 +915,13 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out,
* copy the file from backup.
*/
total_write_len += restore_data_file_internal(in, out, tmp_file,
- parse_program_version(backup->program_version),
- from_fullpath, to_fullpath, dest_file->n_blocks,
- use_bitmap ? &(dest_file)->pagemap : NULL,
- checksum_map, backup->checksum_version,
- /* shiftmap can be used only if backup state precedes the shift */
- backup->stop_lsn <= shift_lsn ? lsn_map : NULL,
- headers);
+ parse_program_version(backup->program_version),
+ from_fullpath, to_fullpath, dest_file->n_blocks,
+ use_bitmap ? &(dest_file)->pagemap : NULL,
+ checksum_map, backup->checksum_version,
+ /* shiftmap can be used only if backup state precedes the shift */
+ backup->stop_lsn <= shift_lsn ? lsn_map : NULL,
+ headers);
if (fclose(in) != 0)
elog(ERROR, "Cannot close file \"%s\": %s", from_fullpath,
@@ -983,15 +947,15 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out,
*/
size_t
restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_version,
- const char *from_fullpath, const char *to_fullpath, int nblocks,
- datapagemap_t *map, PageState *checksum_map, int checksum_version,
- datapagemap_t *lsn_map, BackupPageHeader2 *headers)
+ const char *from_fullpath, const char *to_fullpath, int nblocks,
+ datapagemap_t *map, PageState *checksum_map, int checksum_version,
+ datapagemap_t *lsn_map, BackupPageHeader2 *headers)
{
BlockNumber blknum = 0;
- int n_hdr = -1;
- size_t write_len = 0;
- off_t cur_pos_out = 0;
- off_t cur_pos_in = 0;
+ int n_hdr = -1;
+ size_t write_len = 0;
+ off_t cur_pos_out = 0;
+ off_t cur_pos_in = 0;
/* should not be possible */
Assert(!(backup_version >= 20400 && file->n_headers <= 0));
@@ -1007,7 +971,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers
* but should never happen in case of blocks from FULL backup.
*/
if (fio_fseek(out, cur_pos_out) < 0)
- elog(ERROR, "Cannot seek block %u of \"%s\": %s",
+ elog(ERROR, "Cannot seek block %u of \"%s\": %s",
blknum, to_fullpath, strerror(errno));
for (;;)
@@ -1020,7 +984,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers
bool is_compressed = false;
/* incremental restore vars */
- uint16 page_crc = 0;
+ uint16 page_crc = 0;
XLogRecPtr page_lsn = InvalidXLogRecPtr;
/* check for interrupt */
@@ -1072,7 +1036,6 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers
* Now we have to deal with backward compatibility.
*/
read_len = MAXALIGN(compressed_size);
-
}
else
break;
@@ -1183,8 +1146,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers
* page_may_be_compressed() function.
*/
if (compressed_size != BLCKSZ
- || page_may_be_compressed(page.data, file->compress_alg,
- backup_version))
+ || page_may_be_compressed(page.data, file->compress_alg, backup_version))
{
is_compressed = true;
}
@@ -1244,10 +1206,10 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers
*/
void
restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file,
- const char *from_fullpath, const char *to_fullpath)
+ const char *from_fullpath, const char *to_fullpath)
{
- size_t read_len = 0;
- char *buf = pgut_malloc(STDIO_BUFSIZE); /* 64kB buffer */
+ size_t read_len = 0;
+ char *buf = pgut_malloc(STDIO_BUFSIZE); /* 64kB buffer */
/* copy content */
for (;;)
@@ -1310,7 +1272,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup,
tmp_backup = dest_backup->parent_backup_link;
while (tmp_backup)
{
- pgFile **res_file = NULL;
+ pgFile **res_file = NULL;
/* lookup file in intermediate backup */
res_file = parray_bsearch(tmp_backup->files, dest_file, pgFileCompareRelPathWithExternal);
@@ -1420,10 +1382,10 @@ backup_non_data_file_internal(const char *from_fullpath,
const char *to_fullpath, pgFile *file,
bool missing_ok)
{
- FILE *in = NULL;
- FILE *out = NULL;
- ssize_t read_len = 0;
- char *buf = NULL;
+ FILE *in = NULL;
+ FILE *out = NULL;
+ ssize_t read_len = 0;
+ char *buf = NULL;
INIT_FILE_CRC32(true, file->crc);
@@ -1553,7 +1515,7 @@ backup_non_data_file_internal(const char *from_fullpath,
*/
bool
create_empty_file(fio_location from_location, const char *to_root,
- fio_location to_location, pgFile *file)
+ fio_location to_location, pgFile *file)
{
char to_path[MAXPGPATH];
FILE *out;
@@ -1650,7 +1612,7 @@ check_data_file(ConnectionArgs *arguments, pgFile *file,
BlockNumber nblocks = 0;
int page_state;
char curr_page[BLCKSZ];
- bool is_valid = true;
+ bool is_valid = true;
in = fopen(from_fullpath, PG_BINARY_R);
if (in == NULL)
@@ -1686,7 +1648,7 @@ check_data_file(ConnectionArgs *arguments, pgFile *file,
page_state = prepare_page(file, InvalidXLogRecPtr,
blknum, in, BACKUP_MODE_FULL,
curr_page, false, checksum_version,
- 0, NULL, from_fullpath, &page_st);
+ from_fullpath, &page_st);
if (page_state == PageIsTruncated)
break;
@@ -1744,9 +1706,9 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn,
while (true)
{
int rc = 0;
- size_t len = 0;
+ size_t len = 0;
DataPage compressed_page; /* used as read buffer */
- int compressed_size = 0;
+ int compressed_size = 0;
DataPage page;
BlockNumber blknum = 0;
PageState page_st;
@@ -1834,7 +1796,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn,
|| page_may_be_compressed(compressed_page.data, file->compress_alg,
backup_version))
{
- int32 uncompressed_size = 0;
+ int32 uncompressed_size = 0;
const char *errormsg = NULL;
uncompressed_size = do_decompress(page.data, BLCKSZ,
@@ -1862,13 +1824,13 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn,
}
rc = validate_one_page(page.data,
- file->segno * RELSEG_SIZE + blknum,
- stop_lsn, &page_st, checksum_version);
+ file->segno * RELSEG_SIZE + blknum,
+ stop_lsn, &page_st, checksum_version);
}
else
rc = validate_one_page(compressed_page.data,
- file->segno * RELSEG_SIZE + blknum,
- stop_lsn, &page_st, checksum_version);
+ file->segno * RELSEG_SIZE + blknum,
+ stop_lsn, &page_st, checksum_version);
switch (rc)
{
@@ -1986,11 +1948,11 @@ datapagemap_t *
get_lsn_map(const char *fullpath, uint32 checksum_version,
int n_blocks, XLogRecPtr shift_lsn, BlockNumber segmentno)
{
- FILE *in = NULL;
- BlockNumber blknum = 0;
- char read_buffer[BLCKSZ];
- char in_buf[STDIO_BUFSIZE];
- datapagemap_t *lsn_map = NULL;
+ FILE *in = NULL;
+ BlockNumber blknum = 0;
+ char read_buffer[BLCKSZ];
+ char in_buf[STDIO_BUFSIZE];
+ datapagemap_t *lsn_map = NULL;
Assert(shift_lsn > 0);
@@ -2069,10 +2031,10 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph,
else if (read_len != 0 && feof(in))
elog(ERROR,
"Odd size page found at offset %lu of \"%s\"",
- ftell(in), fullpath);
+ ftello(in), fullpath);
else
elog(ERROR, "Cannot read header at offset %lu of \"%s\": %s",
- ftell(in), fullpath, strerror(errno));
+ ftello(in), fullpath, strerror(errno));
}
/* In older versions < 2.4.0, when crc for file was calculated, header was
@@ -2117,7 +2079,7 @@ int
send_pages(const char *to_fullpath, const char *from_fullpath,
pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel,
uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers,
- BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema)
+ BackupMode backup_mode)
{
FILE *in = NULL;
FILE *out = NULL;
@@ -2175,7 +2137,6 @@ send_pages(const char *to_fullpath, const char *from_fullpath,
int rc = prepare_page(file, prev_backup_start_lsn,
blknum, in, backup_mode, curr_page,
true, checksum_version,
- ptrack_version_num, ptrack_schema,
from_fullpath, &page_st);
if (rc == PageIsTruncated)
@@ -2254,17 +2215,19 @@ send_pages(const char *to_fullpath, const char *from_fullpath,
return n_blocks_read;
}
-/* copy local file (взята из send_pages, но используется простое копирование странички, без добавления заголовков и компрессии) */
+/*
+ * Copy local data file just as send_pages() does, but without adding page headers or compression
+ */
int
copy_pages(const char *to_fullpath, const char *from_fullpath,
- pgFile *file, XLogRecPtr sync_lsn,
- uint32 checksum_version, bool use_pagemap,
- BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema)
+ pgFile *file, XLogRecPtr sync_lsn,
+ uint32 checksum_version, bool use_pagemap,
+ BackupMode backup_mode)
{
FILE *in = NULL;
FILE *out = NULL;
- char curr_page[BLCKSZ];
- int n_blocks_read = 0;
+ char curr_page[BLCKSZ];
+ int n_blocks_read = 0;
BlockNumber blknum = 0;
datapagemap_iterator_t *iter = NULL;
@@ -2308,44 +2271,36 @@ copy_pages(const char *to_fullpath, const char *from_fullpath,
out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_BACKUP_HOST);
if (out == NULL)
elog(ERROR, "Cannot open destination file \"%s\": %s",
- to_fullpath, strerror(errno));
+ to_fullpath, strerror(errno));
/* update file permission */
- if (fio_chmod(to_fullpath, file->mode, FIO_BACKUP_HOST) == -1)
+ if (chmod(to_fullpath, file->mode) == -1)
elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath,
- strerror(errno));
+ strerror(errno));
- elog(VERBOSE, "ftruncate file \"%s\" to size %lu",
- to_fullpath, file->size);
- if (fio_ftruncate(out, file->size) == -1)
- elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
- to_fullpath, file->size, strerror(errno));
-
- if (!fio_is_remote_file(out))
- {
- out_buf = pgut_malloc(STDIO_BUFSIZE);
- setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE);
- }
+ /* Enable buffering for output file */
+ out_buf = pgut_malloc(STDIO_BUFSIZE);
+ setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE);
while (blknum < file->n_blocks)
{
PageState page_st;
int rc = prepare_page(file, sync_lsn,
- blknum, in, backup_mode, curr_page,
- true, checksum_version,
- ptrack_version_num, ptrack_schema,
- from_fullpath, &page_st);
+ blknum, in, backup_mode, curr_page,
+ true, checksum_version,
+ from_fullpath, &page_st);
if (rc == PageIsTruncated)
break;
else if (rc == PageIsOk)
{
- if (fio_fseek(out, blknum * BLCKSZ) < 0)
- {
- elog(ERROR, "Cannot seek block %u of \"%s\": %s",
- blknum, to_fullpath, strerror(errno));
- }
- copy_page(file, blknum, in, out, curr_page, to_fullpath);
+ if (fseek(out, blknum * BLCKSZ, SEEK_SET) != 0)
+ elog(ERROR, "Cannot seek to position %u in destination file \"%s\": %s",
+ blknum * BLCKSZ, to_fullpath, strerror(errno));
+
+ if (write_page(file, out, curr_page) != BLCKSZ)
+ elog(ERROR, "File: \"%s\", cannot write at block %u: %s",
+ to_fullpath, blknum, strerror(errno));
}
n_blocks_read++;
@@ -2361,13 +2316,36 @@ copy_pages(const char *to_fullpath, const char *from_fullpath,
blknum++;
}
+ /* truncate output file if required */
+ if (fseek(out, 0, SEEK_END) != 0)
+ elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s",
+ to_fullpath, strerror(errno));
+ {
+ long pos = ftell(out);
+
+ if (pos < 0)
+ elog(ERROR, "Cannot get position in destination file \"%s\": %s",
+ to_fullpath, strerror(errno));
+
+ if (pos != file->size)
+ {
+ if (fflush(out) != 0)
+ elog(ERROR, "Cannot flush destination file \"%s\": %s",
+ to_fullpath, strerror(errno));
+
+ if (ftruncate(fileno(out), file->size) == -1)
+ elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s",
+ to_fullpath, file->size, strerror(errno));
+ }
+ }
+
/* cleanup */
- if (in && fclose(in))
+ if (fclose(in))
elog(ERROR, "Cannot close the source file \"%s\": %s",
to_fullpath, strerror(errno));
- /* close local output file */
- if (out && fio_fclose(out))
+ /* close output file */
+ if (fclose(out))
elog(ERROR, "Cannot close the destination file \"%s\": %s",
to_fullpath, strerror(errno));
@@ -2503,19 +2481,19 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map,
/* when running merge we must write headers into temp map */
map_path = (is_merge) ? hdr_map->path_tmp : hdr_map->path;
- read_len = (file->n_headers+1) * sizeof(BackupPageHeader2);
+ read_len = (file->n_headers + 1) * sizeof(BackupPageHeader2);
/* calculate checksums */
INIT_FILE_CRC32(true, file->hdr_crc);
COMP_FILE_CRC32(true, file->hdr_crc, headers, read_len);
FIN_FILE_CRC32(true, file->hdr_crc);
- zheaders = pgut_malloc(read_len*2);
- memset(zheaders, 0, read_len*2);
+ zheaders = pgut_malloc(read_len * 2);
+ memset(zheaders, 0, read_len * 2);
/* compress headers */
- z_len = do_compress(zheaders, read_len*2, headers,
- read_len, ZLIB_COMPRESS, 1, &errormsg);
+ z_len = do_compress(zheaders, read_len * 2, headers,
+ read_len, ZLIB_COMPRESS, 1, &errormsg);
/* writing to header map must be serialized */
pthread_lock(&(hdr_map->mutex)); /* what if we crash while trying to obtain mutex? */
@@ -2559,7 +2537,7 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map,
if (fwrite(zheaders, 1, z_len, hdr_map->fp) != z_len)
elog(ERROR, "Cannot write to file \"%s\": %s", map_path, strerror(errno));
- file->hdr_size = z_len; /* save the length of compressed headers */
+ file->hdr_size = z_len; /* save the length of compressed headers */
hdr_map->offset += z_len; /* update current offset in map */
/* End critical section */
diff --git a/src/dir.c b/src/dir.c
index 473534c8b..bac583b4d 100644
--- a/src/dir.c
+++ b/src/dir.c
@@ -121,8 +121,6 @@ typedef struct TablespaceCreatedList
TablespaceCreatedListCell *tail;
} TablespaceCreatedList;
-static int pgCompareString(const void *str1, const void *str2);
-
static char dir_check_file(pgFile *file, bool backup_logs);
static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir,
@@ -224,6 +222,7 @@ pgFileInit(const char *rel_path)
// May be add?
// pg_atomic_clear_flag(file->lock);
+ file->excluded = false;
return file;
}
@@ -426,6 +425,26 @@ pgFileCompareName(const void *f1, const void *f2)
return strcmp(f1p->name, f2p->name);
}
+/* Compare pgFile->name with string in ascending order of ASCII code. */
+int
+pgFileCompareNameWithString(const void *f1, const void *f2)
+{
+ pgFile *f1p = *(pgFile **)f1;
+ char *f2s = *(char **)f2;
+
+ return strcmp(f1p->name, f2s);
+}
+
+/* Compare pgFile->rel_path with string in ascending order of ASCII code. */
+int
+pgFileCompareRelPathWithString(const void *f1, const void *f2)
+{
+ pgFile *f1p = *(pgFile **)f1;
+ char *f2s = *(char **)f2;
+
+ return strcmp(f1p->rel_path, f2s);
+}
+
/*
* Compare two pgFile with their relative path and external_dir_num in ascending
* order of ASСII code.
@@ -492,12 +511,26 @@ pgFileCompareSizeDesc(const void *f1, const void *f2)
return -1 * pgFileCompareSize(f1, f2);
}
-static int
+int
pgCompareString(const void *str1, const void *str2)
{
return strcmp(*(char **) str1, *(char **) str2);
}
+/*
+ * From bsearch(3): "The compar routine is expected to have two
+ * arguments which point to the key object and to an array member, in that order"
+ * But in practice the order is reversed, so we take strlen of the second string (the search key)
+ * This is checked by tests.catchup.CatchupTest.test_catchup_with_exclude_path
+ */
+int
+pgPrefixCompareString(const void *str1, const void *str2)
+{
+ const char *s1 = *(char **) str1;
+ const char *s2 = *(char **) str2;
+ return strncmp(s1, s2, strlen(s2));
+}
+
/* Compare two Oids */
int
pgCompareOid(const void *f1, const void *f2)
diff --git a/src/help.c b/src/help.c
index 921feaec0..1515359e4 100644
--- a/src/help.c
+++ b/src/help.c
@@ -124,7 +124,7 @@ help_pg_probackup(void)
printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [-C]\n"));
- printf(_(" [--stream [-S slot-name]] [--temp-slot]\n"));
+ printf(_(" [--stream [-S slot-name] [--temp-slot]]\n"));
printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n"));
printf(_(" [--no-validate] [--skip-block-validation]\n"));
printf(_(" [--external-dirs=external-directories-paths]\n"));
@@ -251,9 +251,10 @@ help_pg_probackup(void)
printf(_("\n %s catchup -b catchup-mode\n"), PROGRAM_NAME);
printf(_(" --source-pgdata=path_to_pgdata_on_remote_server\n"));
printf(_(" --destination-pgdata=path_to_local_dir\n"));
- printf(_(" [--stream [-S slot-name]] [--temp-slot]\n"));
+ printf(_(" [--stream [-S slot-name] [--temp-slot | --perm-slot]]\n"));
printf(_(" [-j num-threads]\n"));
printf(_(" [-T OLDDIR=NEWDIR]\n"));
+ printf(_(" [--exclude-path=path_prefix]\n"));
printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
printf(_(" [-w --no-password] [-W --password]\n"));
printf(_(" [--remote-proto] [--remote-host]\n"));
@@ -295,7 +296,7 @@ help_backup(void)
{
printf(_("\n%s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [-C]\n"));
- printf(_(" [--stream [-S slot-name] [--temp-slot]\n"));
+ printf(_(" [--stream [-S slot-name] [--temp-slot]]\n"));
printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n"));
printf(_(" [--no-validate] [--skip-block-validation]\n"));
printf(_(" [-E external-directories-paths]\n"));
@@ -1031,9 +1032,10 @@ help_catchup(void)
printf(_("\n%s catchup -b catchup-mode\n"), PROGRAM_NAME);
printf(_(" --source-pgdata=path_to_pgdata_on_remote_server\n"));
printf(_(" --destination-pgdata=path_to_local_dir\n"));
- printf(_(" [--stream [-S slot-name]] [--temp-slot]\n"));
+ printf(_(" [--stream [-S slot-name] [--temp-slot | --perm-slot]]\n"));
printf(_(" [-j num-threads]\n"));
printf(_(" [-T OLDDIR=NEWDIR]\n"));
+ printf(_(" [--exclude-path=path_prefix]\n"));
printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
printf(_(" [-w --no-password] [-W --password]\n"));
printf(_(" [--remote-proto] [--remote-host]\n"));
@@ -1045,11 +1047,15 @@ help_catchup(void)
printf(_(" --stream stream the transaction log (only supported mode)\n"));
printf(_(" -S, --slot=SLOTNAME replication slot to use\n"));
printf(_(" --temp-slot use temporary replication slot\n"));
+ printf(_(" -P, --perm-slot create permanent replication slot\n"));
printf(_(" -j, --threads=NUM number of parallel threads\n"));
printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"));
printf(_(" relocate the tablespace from directory OLDDIR to NEWDIR\n"));
+ printf(_(" -x, --exclude-path=path_prefix files with path_prefix (relative to pgdata) will be\n"));
+ printf(_(" excluded from catchup (can be used multiple times)\n"));
+ printf(_(" Dangerous option! Use at your own risk!\n"));
printf(_("\n Connection options:\n"));
printf(_(" -U, --pguser=USERNAME user name to connect as (default: current local user)\n"));
diff --git a/src/merge.c b/src/merge.c
index cd070fce4..ff39c2510 100644
--- a/src/merge.c
+++ b/src/merge.c
@@ -1256,7 +1256,7 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup,
backup_data_file(tmp_file, to_fullpath_tmp1, to_fullpath_tmp2,
InvalidXLogRecPtr, BACKUP_MODE_FULL,
dest_backup->compress_alg, dest_backup->compress_level,
- dest_backup->checksum_version, 0, NULL,
+ dest_backup->checksum_version,
&(full_backup->hdr_map), true);
/* drop restored temp file */
diff --git a/src/pg_probackup.c b/src/pg_probackup.c
index 00796be04..d629d838d 100644
--- a/src/pg_probackup.c
+++ b/src/pg_probackup.c
@@ -80,8 +80,9 @@ bool progress = false;
bool no_sync = false;
#if PG_VERSION_NUM >= 100000
char *replication_slot = NULL;
-#endif
bool temp_slot = false;
+#endif
+bool perm_slot = false;
/* backup options */
bool backup_logs = false;
@@ -118,6 +119,9 @@ bool skip_external_dirs = false;
/* array for datnames, provided via db-include and db-exclude */
static parray *datname_exclude_list = NULL;
static parray *datname_include_list = NULL;
+/* arrays for --exclude-path's */
+static parray *exclude_absolute_paths_list = NULL;
+static parray *exclude_relative_paths_list = NULL;
/* checkdb options */
bool need_amcheck = false;
@@ -176,6 +180,7 @@ static void compress_init(ProbackupSubcmd const subcmd);
static void opt_datname_exclude_list(ConfigOption *opt, const char *arg);
static void opt_datname_include_list(ConfigOption *opt, const char *arg);
+static void opt_exclude_path(ConfigOption *opt, const char *arg);
/*
* Short name should be non-printable ASCII character.
@@ -198,7 +203,10 @@ static ConfigOption cmd_options[] =
{ 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT },
{ 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMD_STRICT },
{ 's', 'S', "slot", &replication_slot, SOURCE_CMD_STRICT },
+#if PG_VERSION_NUM >= 100000
{ 'b', 181, "temp-slot", &temp_slot, SOURCE_CMD_STRICT },
+#endif
+ { 'b', 'P', "perm-slot", &perm_slot, SOURCE_CMD_STRICT },
{ 'b', 182, "delete-wal", &delete_wal, SOURCE_CMD_STRICT },
{ 'b', 183, "delete-expired", &delete_expired, SOURCE_CMD_STRICT },
{ 'b', 184, "merge-expired", &merge_expired, SOURCE_CMD_STRICT },
@@ -207,6 +215,7 @@ static ConfigOption cmd_options[] =
/* catchup options */
{ 's', 239, "source-pgdata", &catchup_source_pgdata, SOURCE_CMD_STRICT },
{ 's', 240, "destination-pgdata", &catchup_destination_pgdata, SOURCE_CMD_STRICT },
+ { 'f', 'x', "exclude-path", opt_exclude_path, SOURCE_CMD_STRICT },
/* restore options */
{ 's', 136, "recovery-target-time", &target_time, SOURCE_CMD_STRICT },
{ 's', 137, "recovery-target-xid", &target_xid, SOURCE_CMD_STRICT },
@@ -787,6 +796,17 @@ main(int argc, char *argv[])
elog(ERROR, "You cannot specify \"--no-validate\" option with the \"%s\" command",
get_subcmd_name(backup_subcmd));
+#if PG_VERSION_NUM >= 100000
+ if (temp_slot && perm_slot)
+ elog(ERROR, "You cannot specify \"--perm-slot\" option with the \"--temp-slot\" option");
+
+ /* if slot name was not provided for temp slot, use default slot name */
+ if (!replication_slot && temp_slot)
+ replication_slot = DEFAULT_TEMP_SLOT_NAME;
+#endif
+ if (!replication_slot && perm_slot)
+ replication_slot = DEFAULT_PERMANENT_SLOT_NAME;
+
if (num_threads < 1)
num_threads = 1;
@@ -825,7 +845,8 @@ main(int argc, char *argv[])
no_validate, no_sync, backup_logs);
}
case CATCHUP_CMD:
- return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync);
+ return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync,
+ exclude_absolute_paths_list, exclude_relative_paths_list);
case RESTORE_CMD:
return do_restore_or_validate(instanceState, current.backup_id,
recovery_target_options,
@@ -990,39 +1011,45 @@ compress_init(ProbackupSubcmd const subcmd)
}
}
-/* Construct array of datnames, provided by user via db-exclude option */
-void
-opt_datname_exclude_list(ConfigOption *opt, const char *arg)
+static void
+opt_parser_add_to_parray_helper(parray **list, const char *str)
{
- char *dbname = NULL;
+ char *elem = NULL;
- if (!datname_exclude_list)
- datname_exclude_list = parray_new();
+ if (*list == NULL)
+ *list = parray_new();
- dbname = pgut_malloc(strlen(arg) + 1);
+ elem = pgut_malloc(strlen(str) + 1);
+ strcpy(elem, str);
- /* TODO add sanity for database name */
- strcpy(dbname, arg);
+ parray_append(*list, elem);
+}
- parray_append(datname_exclude_list, dbname);
+/* Construct array of datnames, provided by user via db-exclude option */
+void
+opt_datname_exclude_list(ConfigOption *opt, const char *arg)
+{
+ /* TODO add sanity for database name */
+ opt_parser_add_to_parray_helper(&datname_exclude_list, arg);
}
/* Construct array of datnames, provided by user via db-include option */
void
opt_datname_include_list(ConfigOption *opt, const char *arg)
{
- char *dbname = NULL;
-
- if (!datname_include_list)
- datname_include_list = parray_new();
-
- dbname = pgut_malloc(strlen(arg) + 1);
-
- if (strcmp(dbname, "tempate0") == 0 ||
- strcmp(dbname, "tempate1") == 0)
+ if (strcmp(arg, "template0") == 0 ||
+ strcmp(arg, "template1") == 0)
elog(ERROR, "Databases 'template0' and 'template1' cannot be used for partial restore or validation");
- strcpy(dbname, arg);
+ opt_parser_add_to_parray_helper(&datname_include_list, arg);
+}
- parray_append(datname_include_list, dbname);
+/* Parse --exclude-path option */
+void
+opt_exclude_path(ConfigOption *opt, const char *arg)
+{
+ if (is_absolute_path(arg))
+ opt_parser_add_to_parray_helper(&exclude_absolute_paths_list, arg);
+ else
+ opt_parser_add_to_parray_helper(&exclude_relative_paths_list, arg);
}
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index 1cad526dd..19f6feff0 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -86,6 +86,10 @@ extern const char *PROGRAM_EMAIL;
#define HEADER_MAP "page_header_map"
#define HEADER_MAP_TMP "page_header_map_tmp"
+/* default replication slot names */
+#define DEFAULT_TEMP_SLOT_NAME "pg_probackup_slot"
+#define DEFAULT_PERMANENT_SLOT_NAME "pg_probackup_perm_slot"
+
/* Timeout defaults */
#define ARCHIVE_TIMEOUT_DEFAULT 300
#define REPLICA_TIMEOUT_DEFAULT 300
@@ -278,6 +282,7 @@ typedef struct pgFile
pg_crc32 hdr_crc; /* CRC value of header file: name_hdr */
pg_off_t hdr_off; /* offset in header map */
int hdr_size; /* length of headers */
+ bool excluded; /* excluded via --exclude-path option */
} pgFile;
typedef struct page_map_entry
@@ -771,11 +776,12 @@ extern bool stream_wal;
extern bool show_color;
extern bool progress;
extern bool is_archive_cmd; /* true for archive-{get,push} */
-#if PG_VERSION_NUM >= 100000
/* In pre-10 'replication_slot' is defined in receivelog.h */
extern char *replication_slot;
-#endif
+#if PG_VERSION_NUM >= 100000
extern bool temp_slot;
+#endif
+extern bool perm_slot;
/* backup options */
extern bool smooth_checkpoint;
@@ -842,7 +848,8 @@ extern void process_block_change(ForkNumber forknum, RelFileNode rnode,
BlockNumber blkno);
/* in catchup.c */
-extern int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files);
+extern int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files,
+ parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list);
/* in restore.c */
extern int do_restore_or_validate(InstanceState *instanceState,
@@ -1057,11 +1064,15 @@ extern pg_crc32 pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool miss
extern int pgFileMapComparePath(const void *f1, const void *f2);
extern int pgFileCompareName(const void *f1, const void *f2);
+extern int pgFileCompareNameWithString(const void *f1, const void *f2);
+extern int pgFileCompareRelPathWithString(const void *f1, const void *f2);
extern int pgFileCompareRelPathWithExternal(const void *f1, const void *f2);
extern int pgFileCompareRelPathWithExternalDesc(const void *f1, const void *f2);
extern int pgFileCompareLinked(const void *f1, const void *f2);
extern int pgFileCompareSize(const void *f1, const void *f2);
extern int pgFileCompareSizeDesc(const void *f1, const void *f2);
+extern int pgCompareString(const void *str1, const void *str2);
+extern int pgPrefixCompareString(const void *str1, const void *str2);
extern int pgCompareOid(const void *f1, const void *f2);
extern void pfilearray_clear_locks(parray *file_list);
@@ -1071,14 +1082,11 @@ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file,
extern void catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
- XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
- CompressAlg calg, int clevel, uint32 checksum_version,
- int ptrack_version_num, const char *ptrack_schema,
- bool is_merge, size_t prev_size);
+ XLogRecPtr sync_lsn, BackupMode backup_mode,
+ uint32 checksum_version, size_t prev_size);
extern void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
CompressAlg calg, int clevel, uint32 checksum_version,
- int ptrack_version_num, const char *ptrack_schema,
HeaderMap *hdr_map, bool missing_ok);
extern void backup_non_data_file(pgFile *file, pgFile *prev_file,
const char *from_fullpath, const char *to_fullpath,
@@ -1197,11 +1205,11 @@ extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32
extern int send_pages(const char *to_fullpath, const char *from_fullpath,
pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel,
uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers,
- BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema);
+ BackupMode backup_mode);
extern int copy_pages(const char *to_fullpath, const char *from_fullpath,
pgFile *file, XLogRecPtr prev_backup_start_lsn,
uint32 checksum_version, bool use_pagemap,
- BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema);
+ BackupMode backup_mode);
/* FIO */
extern void setMyLocation(ProbackupSubcmd const subcmd);
@@ -1212,8 +1220,7 @@ extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pg
BackupPageHeader2 **headers);
extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version,
- bool use_pagemap, BlockNumber *err_blknum, char **errormsg,
- BackupPageHeader2 **headers);
+ bool use_pagemap, BlockNumber *err_blknum, char **errormsg);
/* return codes for fio_send_pages */
extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg);
extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out,
@@ -1265,7 +1272,8 @@ datapagemap_print_debug(datapagemap_t *map);
extern XLogRecPtr stop_backup_lsn;
extern void start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path,
ConnectionOptions *conn_opt,
- XLogRecPtr startpos, TimeLineID starttli);
+ XLogRecPtr startpos, TimeLineID starttli,
+ bool is_backup);
extern int wait_WAL_streaming_end(parray *backup_files_list);
extern parray* parse_tli_history_buffer(char *history, TimeLineID tli);
diff --git a/src/ptrack.c b/src/ptrack.c
index 191f988a3..3f395b286 100644
--- a/src/ptrack.c
+++ b/src/ptrack.c
@@ -123,19 +123,24 @@ pg_is_ptrack_enabled(PGconn *backup_conn, int ptrack_version_num)
PGresult *res_db;
bool result = false;
- if (ptrack_version_num == 200)
+ if (ptrack_version_num > 200)
+ {
+ res_db = pgut_execute(backup_conn, "SHOW ptrack.map_size", 0, NULL);
+ result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0 &&
+ strcmp(PQgetvalue(res_db, 0, 0), "-1") != 0;
+ PQclear(res_db);
+ }
+ else if (ptrack_version_num == 200)
{
res_db = pgut_execute(backup_conn, "SHOW ptrack_map_size", 0, NULL);
result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0;
+ PQclear(res_db);
}
else
{
- res_db = pgut_execute(backup_conn, "SHOW ptrack.map_size", 0, NULL);
- result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0 &&
- strcmp(PQgetvalue(res_db, 0, 0), "-1") != 0;
+ result = false;
}
- PQclear(res_db);
return result;
}
diff --git a/src/stream.c b/src/stream.c
index 5912ff44b..570108cde 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -59,6 +59,7 @@ static pthread_t stream_thread;
static StreamThreadArg stream_thread_arg = {"", NULL, 1};
static parray *xlog_files_list = NULL;
+static bool do_crc = true;
static void IdentifySystem(StreamThreadArg *stream_thread_arg);
static int checkpoint_timeout(PGconn *backup_conn);
@@ -159,6 +160,56 @@ checkpoint_timeout(PGconn *backup_conn)
return val_int;
}
+/*
+ * CreateReplicationSlot_compat() -- wrapper for CreateReplicationSlot() used in StreamLog()
+ * src/bin/pg_basebackup/streamutil.c
+ * CreateReplicationSlot() has different signatures on different PG versions:
+ * PG 15
+ * bool
+ * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin,
+ * bool is_temporary, bool is_physical, bool reserve_wal,
+ * bool slot_exists_ok, bool two_phase)
+ * PG 11-14
+ * bool
+ * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin,
+ * bool is_temporary, bool is_physical, bool reserve_wal,
+ * bool slot_exists_ok)
+ * PG 9.5-10
+ * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin,
+ * bool is_physical, bool slot_exists_ok)
+ * NOTE: PG 9.6 and 10 support reserve_wal in
+ * pg_catalog.pg_create_physical_replication_slot(slot_name name [, immediately_reserve boolean])
+ * and
+ * CREATE_REPLICATION_SLOT slot_name { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin }
+ * replication protocol command, but CreateReplicationSlot() C function doesn't
+ */
+static bool
+CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *plugin,
+ bool is_temporary, bool is_physical,
+ bool slot_exists_ok)
+{
+#if PG_VERSION_NUM >= 150000
+ return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical,
+ /* reserve_wal = */ true, slot_exists_ok, /* two_phase = */ false);
+#elif PG_VERSION_NUM >= 110000
+ return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical,
+ /* reserve_wal = */ true, slot_exists_ok);
+#elif PG_VERSION_NUM >= 100000
+ /*
+ * PG-10 doesn't support creating temp_slot by calling CreateReplicationSlot(), but
+ * it will be created by setting StreamCtl.temp_slot later in StreamLog()
+ */
+ if (!is_temporary)
+ return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok);
+ else
+ return true;
+#else
+ /* these parameters not supported in PG < 10 */
+ Assert(!is_temporary);
+ return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok);
+#endif
+}
+
/*
* Start the log streaming
*/
@@ -177,31 +228,36 @@ StreamLog(void *arg)
/* Initialize timeout */
stream_stop_begin = 0;
+ /* Create repslot */
#if PG_VERSION_NUM >= 100000
- /* if slot name was not provided for temp slot, use default slot name */
- if (!replication_slot && temp_slot)
- replication_slot = "pg_probackup_slot";
-#endif
-
-
-#if PG_VERSION_NUM >= 150000
- /* Create temp repslot */
- if (temp_slot)
- CreateReplicationSlot(stream_arg->conn, replication_slot,
- NULL, temp_slot, true, true, false, false);
-#elif PG_VERSION_NUM >= 110000
- /* Create temp repslot */
- if (temp_slot)
- CreateReplicationSlot(stream_arg->conn, replication_slot,
- NULL, temp_slot, true, true, false);
+ if (temp_slot || perm_slot)
+ if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, temp_slot, true, false))
+#else
+ if (perm_slot)
+ if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, false, true, false))
#endif
+ {
+ interrupted = true;
+ elog(ERROR, "Couldn't create physical replication slot %s", replication_slot);
+ }
/*
* Start the replication
*/
- elog(LOG, "started streaming WAL at %X/%X (timeline %u)",
- (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos,
- stream_arg->starttli);
+ if (replication_slot)
+ elog(LOG, "started streaming WAL at %X/%X (timeline %u) using%s slot %s",
+ (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos,
+ stream_arg->starttli,
+#if PG_VERSION_NUM >= 100000
+ temp_slot ? " temporary" : "",
+#else
+ "",
+#endif
+ replication_slot);
+ else
+ elog(LOG, "started streaming WAL at %X/%X (timeline %u)",
+ (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos,
+ stream_arg->starttli);
#if PG_VERSION_NUM >= 90600
{
@@ -212,6 +268,11 @@ StreamLog(void *arg)
ctl.startpos = stream_arg->startpos;
ctl.timeline = stream_arg->starttli;
ctl.sysidentifier = NULL;
+ ctl.stream_stop = stop_streaming;
+ ctl.standby_message_timeout = standby_message_timeout;
+ ctl.partial_suffix = NULL;
+ ctl.synchronous = false;
+ ctl.mark_done = false;
#if PG_VERSION_NUM >= 100000
ctl.walmethod = CreateWalDirectoryMethod(
@@ -224,19 +285,14 @@ StreamLog(void *arg)
ctl.do_sync = false; /* We sync all files at the end of backup */
// ctl.mark_done /* for future use in s3 */
#if PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000
+ /* StreamCtl.temp_slot used only for PG-10, in PG>10, temp_slots are created by calling CreateReplicationSlot() */
ctl.temp_slot = temp_slot;
-#endif
-#else
+#endif /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 */
+#else /* PG_VERSION_NUM < 100000 */
ctl.basedir = (char *) stream_arg->basedir;
-#endif
-
- ctl.stream_stop = stop_streaming;
- ctl.standby_message_timeout = standby_message_timeout;
- ctl.partial_suffix = NULL;
- ctl.synchronous = false;
- ctl.mark_done = false;
+#endif /* PG_VERSION_NUM >= 100000 */
- if(ReceiveXlogStream(stream_arg->conn, &ctl) == false)
+ if (ReceiveXlogStream(stream_arg->conn, &ctl) == false)
{
interrupted = true;
elog(ERROR, "Problem in receivexlog");
@@ -244,38 +300,42 @@ StreamLog(void *arg)
#if PG_VERSION_NUM >= 100000
if (!ctl.walmethod->finish())
+ {
+ interrupted = true;
elog(ERROR, "Could not finish writing WAL files: %s",
strerror(errno));
-#endif
+ }
+#endif /* PG_VERSION_NUM >= 100000 */
}
-#else
- if(ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli,
+#else /* PG_VERSION_NUM < 90600 */
+ /* PG-9.5 */
+ if (ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli,
NULL, (char *) stream_arg->basedir, stop_streaming,
standby_message_timeout, NULL, false, false) == false)
{
interrupted = true;
elog(ERROR, "Problem in receivexlog");
}
-#endif
+#endif /* PG_VERSION_NUM >= 90600 */
- /* be paranoid and sort xlog_files_list,
- * so if stop_lsn segno is already in the list,
- * then list must be sorted to detect duplicates.
- */
- parray_qsort(xlog_files_list, pgFileCompareRelPathWithExternal);
+ /* be paranoid and sort xlog_files_list,
+ * so if stop_lsn segno is already in the list,
+ * then list must be sorted to detect duplicates.
+ */
+ parray_qsort(xlog_files_list, pgFileCompareRelPathWithExternal);
- /* Add the last segment to the list */
- add_walsegment_to_filelist(xlog_files_list, stream_arg->starttli,
+ /* Add the last segment to the list */
+ add_walsegment_to_filelist(xlog_files_list, stream_arg->starttli,
stop_stream_lsn, (char *) stream_arg->basedir,
instance_config.xlog_seg_size);
- /* append history file to walsegment filelist */
- add_history_file_to_filelist(xlog_files_list, stream_arg->starttli, (char *) stream_arg->basedir);
+ /* append history file to walsegment filelist */
+ add_history_file_to_filelist(xlog_files_list, stream_arg->starttli, (char *) stream_arg->basedir);
- /*
- * TODO: remove redundant WAL segments
- * walk pg_wal and remove files with segno greater that of stop_lsn`s segno +1
- */
+ /*
+ * TODO: remove redundant WAL segments
+ * walk pg_wal and remove files with segno greater that of stop_lsn`s segno +1
+ */
elog(LOG, "finished streaming WAL at %X/%X (timeline %u)",
(uint32) (stop_stream_lsn >> 32), (uint32) stop_stream_lsn, stream_arg->starttli);
@@ -569,8 +629,10 @@ parse_tli_history_buffer(char *history, TimeLineID tli)
*/
void
start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOptions *conn_opt,
- XLogRecPtr startpos, TimeLineID starttli)
+ XLogRecPtr startpos, TimeLineID starttli, bool is_backup)
{
+ /* calculate crc only when running backup, catchup has no need for it */
+ do_crc = is_backup;
/* How long we should wait for streaming end after pg_stop_backup */
stream_stop_timeout = checkpoint_timeout(backup_conn);
//TODO Add a comment about this calculation
@@ -654,15 +716,16 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos
if (existing_file)
{
- (*existing_file)->crc = pgFileGetCRC(wal_segment_fullpath, true, false);
+ if (do_crc)
+ (*existing_file)->crc = pgFileGetCRC(wal_segment_fullpath, true, false);
(*existing_file)->write_size = xlog_seg_size;
(*existing_file)->uncompressed_size = xlog_seg_size;
return;
}
- /* calculate crc */
- file->crc = pgFileGetCRC(wal_segment_fullpath, true, false);
+ if (do_crc)
+ file->crc = pgFileGetCRC(wal_segment_fullpath, true, false);
/* Should we recheck it using stat? */
file->write_size = xlog_seg_size;
@@ -692,7 +755,8 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir)
file = pgFileNew(fullpath, relpath, false, 0, FIO_BACKUP_HOST);
/* calculate crc */
- file->crc = pgFileGetCRC(fullpath, true, false);
+ if (do_crc)
+ file->crc = pgFileGetCRC(fullpath, true, false);
file->write_size = file->size;
file->uncompressed_size = file->size;
diff --git a/src/utils/configuration.h b/src/utils/configuration.h
index 3a5de4b83..2c6ea3eec 100644
--- a/src/utils/configuration.h
+++ b/src/utils/configuration.h
@@ -61,14 +61,14 @@ typedef char *(*option_get_fn) (ConfigOption *opt);
/*
* type:
- * b: bool (true)
- * B: bool (false)
+ * b: bool (true)
+ * B: bool (false)
* f: option_fn
- * i: 32bit signed integer
- * u: 32bit unsigned integer
- * I: 64bit signed integer
- * U: 64bit unsigned integer
- * s: string
+ * i: 32bit signed integer
+ * u: 32bit unsigned integer
+ * I: 64bit signed integer
+ * U: 64bit unsigned integer
+ * s: string
* t: time_t
*/
struct ConfigOption
diff --git a/src/utils/file.c b/src/utils/file.c
index b808d6293..f86e605cb 100644
--- a/src/utils/file.c
+++ b/src/utils/file.c
@@ -1963,8 +1963,7 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
int
fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version,
- bool use_pagemap, BlockNumber* err_blknum, char **errormsg,
- BackupPageHeader2 **headers)
+ bool use_pagemap, BlockNumber* err_blknum, char **errormsg)
{
FILE *out = NULL;
char *out_buf = NULL;
@@ -2092,9 +2091,9 @@ fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file,
/* receive headers if any */
if (hdr.size > 0)
{
- *headers = pgut_malloc(hdr.size);
- IO_CHECK(fio_read_all(fio_stdin, *headers, hdr.size), hdr.size);
- file->n_headers = (hdr.size / sizeof(BackupPageHeader2)) -1;
+ char *tmp = pgut_malloc(hdr.size);
+ IO_CHECK(fio_read_all(fio_stdin, tmp, hdr.size), hdr.size);
+ pg_free(tmp);
}
break;
diff --git a/tests/backup.py b/tests/backup.py
index 0bfd0c1b9..558c62de3 100644
--- a/tests/backup.py
+++ b/tests/backup.py
@@ -1421,8 +1421,10 @@ def test_basic_temp_slot_for_stream_backup(self):
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
- pg_options={
- 'max_wal_size': '40MB', 'default_transaction_read_only': 'on'})
+ pg_options={'max_wal_size': '40MB'})
+
+ if self.get_version(node) < self.version_to_num('10.0'):
+ return unittest.skip('You need PostgreSQL >= 10 for this test')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@@ -1434,11 +1436,6 @@ def test_basic_temp_slot_for_stream_backup(self):
backup_dir, 'node', node,
options=['--stream', '--temp-slot'])
- if self.get_version(node) < self.version_to_num('10.0'):
- return unittest.skip('You need PostgreSQL >= 10 for this test')
- else:
- pg_receivexlog_path = self.get_bin_path('pg_receivewal')
-
# FULL backup
self.backup_node(
backup_dir, 'node', node,
@@ -3274,7 +3271,7 @@ def test_basic_backup_default_transaction_read_only(self):
# FULL backup
self.backup_node(
backup_dir, 'node', node,
- options=['--stream', '--temp-slot'])
+ options=['--stream'])
# DELTA backup
self.backup_node(
diff --git a/tests/catchup.py b/tests/catchup.py
index 5df538e42..45d999629 100644
--- a/tests/catchup.py
+++ b/tests/catchup.py
@@ -1,4 +1,5 @@
import os
+from pathlib import Path
import signal
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
@@ -55,6 +56,7 @@ def test_basic_full_catchup(self):
# Cleanup
dst_pg.stop()
+ #self.assertEqual(1, 0, 'Stop test')
self.del_test_dir(module_name, self.fname)
def test_full_catchup_with_tablespace(self):
@@ -180,6 +182,7 @@ def test_basic_delta_catchup(self):
# Cleanup
dst_pg.stop()
+ #self.assertEqual(1, 0, 'Stop test')
self.del_test_dir(module_name, self.fname)
def test_basic_ptrack_catchup(self):
@@ -252,6 +255,7 @@ def test_basic_ptrack_catchup(self):
# Cleanup
dst_pg.stop()
+ #self.assertEqual(1, 0, 'Stop test')
self.del_test_dir(module_name, self.fname)
def test_tli_delta_catchup(self):
@@ -776,69 +780,6 @@ def test_same_db_id(self):
src_pg.stop()
self.del_test_dir(module_name, self.fname)
- def test_destination_dbstate(self):
- """
- Test that we detect that destination pg is not cleanly shutdowned
- """
- # preparation 1: source
- src_pg = self.make_simple_node(
- base_dir = os.path.join(module_name, self.fname, 'src'),
- set_replication = True,
- pg_options = { 'wal_log_hints': 'on' }
- )
- src_pg.slow_start()
-
- # preparation 2: destination
- dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
- self.catchup_node(
- backup_mode = 'FULL',
- source_pgdata = src_pg.data_dir,
- destination_node = dst_pg,
- options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
- )
-
- # try #1
- try:
- self.catchup_node(
- backup_mode = 'DELTA',
- source_pgdata = src_pg.data_dir,
- destination_node = dst_pg,
- options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
- )
- self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format(
- repr(self.output), self.cmd))
- except ProbackupException as e:
- self.assertIn(
- 'ERROR: Destination directory contains "backup_label" file',
- e.message,
- '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
-
- # try #2
- dst_options = {}
- dst_options['port'] = str(dst_pg.port)
- self.set_auto_conf(dst_pg, dst_options)
- dst_pg.slow_start()
- self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres")
- os.kill(dst_pg.pid, signal.SIGKILL)
- try:
- self.catchup_node(
- backup_mode = 'DELTA',
- source_pgdata = src_pg.data_dir,
- destination_node = dst_pg,
- options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
- )
- self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format(
- repr(self.output), self.cmd))
- except ProbackupException as e:
- self.assertIn(
- 'must be stopped cleanly',
- e.message,
- '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
-
- # Cleanup
- src_pg.stop()
- self.del_test_dir(module_name, self.fname)
-
def test_tli_destination_mismatch(self):
"""
Test that we detect TLI mismatch in destination
@@ -975,3 +916,517 @@ def test_tli_source_mismatch(self):
src_pg.stop()
fake_src_pg.stop()
self.del_test_dir(module_name, self.fname)
+
+#########################################
+# Test unclean destination
+#########################################
+ def test_unclean_delta_catchup(self):
+ """
+        Test that we correctly recover an uncleanly shut-down destination
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question(answer int)")
+
+ # preparation 2: destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # try #1
+ try:
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Destination directory contains "backup_label" file',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # try #2
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres")
+ os.kill(dst_pg.pid, signal.SIGKILL)
+
+ # preparation 3: make changes on master (source)
+ src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+ pgbench.wait()
+ src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+ # do delta catchup
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # 1st check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # run&recover catchup'ed instance
+ src_pg.stop()
+ self.set_replica(master = src_pg, replica = dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+
+ # 2nd check: run verification query
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ dst_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_unclean_ptrack_catchup(self):
+ """
+        Test that we correctly recover an uncleanly shut-down destination
+ """
+ if not self.ptrack:
+ return unittest.skip('Skipped because ptrack support is disabled')
+
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ ptrack_enable = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack")
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question(answer int)")
+
+ # preparation 2: destination
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # try #1
+ try:
+ self.catchup_node(
+ backup_mode = 'PTRACK',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.assertEqual(1, 0, "Expecting Error because destination pg is not cleanly shutdowned.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: Destination directory contains "backup_label" file',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # try #2
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start()
+ self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres")
+ os.kill(dst_pg.pid, signal.SIGKILL)
+
+ # preparation 3: make changes on master (source)
+ src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+ pgbench.wait()
+ src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+
+        # do ptrack catchup
+ self.catchup_node(
+ backup_mode = 'PTRACK',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+
+ # 1st check: compare data directories
+ self.compare_pgdata(
+ self.pgdata_content(src_pg.data_dir),
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # run&recover catchup'ed instance
+ src_pg.stop()
+ self.set_replica(master = src_pg, replica = dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+
+ # 2nd check: run verification query
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ dst_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+#########################################
+# Test replication slot logic
+#
+# -S, --slot=SLOTNAME replication slot to use
+# --temp-slot use temporary replication slot
+# -P --perm-slot create permanent replication slot
+# --primary-slot-name=SLOTNAME value for primary_slot_name parameter
+#
+# 1. if "--slot" is used - try to use already existing slot with given name
+# 2. if "--slot" and "--perm-slot" used - try to create permanent slot and use it.
+# 3. If the "--perm-slot" flag is used without the "--slot" option - use a generic slot name like "pg_probackup_perm_slot"
+# 4. If the "--perm-slot" flag is used and the permanent slot already exists - fail with error.
+# 5. "--perm-slot" and "--temp-slot" flags cannot be used together.
+#########################################
+ def test_catchup_with_replication_slot(self):
+ """
+ """
+ # preparation
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True
+ )
+ src_pg.slow_start()
+
+ # 1a. --slot option
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_1a'))
+ try:
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--slot=nonexistentslot_1a'
+ ]
+ )
+ self.assertEqual(1, 0, "Expecting Error because replication slot does not exist.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: replication slot "nonexistentslot_1a" does not exist',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # 1b. --slot option
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_1b'))
+ src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_1b')")
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--slot=existentslot_1b'
+ ]
+ )
+
+ # 2a. --slot --perm-slot
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_2a'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--slot=nonexistentslot_2a',
+ '--perm-slot'
+ ]
+ )
+
+ # 2b. and 4. --slot --perm-slot
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_2b'))
+ src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_2b')")
+ try:
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--slot=existentslot_2b',
+ '--perm-slot'
+ ]
+ )
+ self.assertEqual(1, 0, "Expecting Error because replication slot already exist.\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: replication slot "existentslot_2b" already exists',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ # 3. --perm-slot --slot
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_3'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--perm-slot'
+ ]
+ )
+ slot_name = src_pg.safe_psql(
+ "postgres",
+ "SELECT slot_name FROM pg_catalog.pg_replication_slots "
+ "WHERE slot_name NOT LIKE '%existentslot%' "
+ "AND slot_type = 'physical'"
+ ).decode('utf-8').rstrip()
+ self.assertEqual(slot_name, 'pg_probackup_perm_slot', 'Slot name mismatch')
+
+ # 5. --perm-slot --temp-slot (PG>=10)
+ if self.get_version(src_pg) >= self.version_to_num('10.0'):
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5'))
+ try:
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--perm-slot',
+ '--temp-slot'
+ ]
+ )
+ self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option',
+ e.message,
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+ #self.assertEqual(1, 0, 'Stop test')
+ self.del_test_dir(module_name, self.fname)
+
+#########################################
+# --exclude-path
+#########################################
+ def test_catchup_with_exclude_path(self):
+ """
+        various synthetic tests for --exclude-path option
+ """
+ # preparation
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True
+ )
+ src_pg.slow_start()
+
+ # test 1
+ os.mkdir(os.path.join(src_pg.data_dir, 'src_usefull_dir'))
+ with open(os.path.join(os.path.join(src_pg.data_dir, 'src_usefull_dir', 'src_garbage_file')), 'w') as f:
+ f.write('garbage')
+ f.flush()
+ f.close
+ os.mkdir(os.path.join(src_pg.data_dir, 'src_garbage_dir'))
+ with open(os.path.join(os.path.join(src_pg.data_dir, 'src_garbage_dir', 'src_garbage_file')), 'w') as f:
+ f.write('garbage')
+ f.flush()
+ f.close
+
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--exclude-path={0}'.format(os.path.join(src_pg.data_dir, 'src_usefull_dir', 'src_garbage_file')),
+ '-x', '{0}'.format(os.path.join(src_pg.data_dir, 'src_garbage_dir')),
+ ]
+ )
+
+ self.assertTrue(Path(os.path.join(dst_pg.data_dir, 'src_usefull_dir')).exists())
+ self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'src_usefull_dir', 'src_garbage_file')).exists())
+ self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'src_garbage_dir')).exists())
+
+ self.set_replica(src_pg, dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+ dst_pg.stop()
+
+ # test 2
+ os.mkdir(os.path.join(dst_pg.data_dir, 'dst_garbage_dir'))
+ os.mkdir(os.path.join(dst_pg.data_dir, 'dst_usefull_dir'))
+ with open(os.path.join(os.path.join(dst_pg.data_dir, 'dst_usefull_dir', 'dst_usefull_file')), 'w') as f:
+ f.write('gems')
+ f.flush()
+ f.close
+
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--exclude-path=src_usefull_dir/src_garbage_file',
+ '--exclude-path=src_garbage_dir',
+ '--exclude-path={0}'.format(os.path.join(dst_pg.data_dir, 'dst_usefull_dir')),
+ ]
+ )
+
+ self.assertTrue(Path(os.path.join(dst_pg.data_dir, 'src_usefull_dir')).exists())
+ self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'src_usefull_dir', 'src_garbage_file')).exists())
+ self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'src_garbage_dir')).exists())
+ self.assertFalse(Path(os.path.join(dst_pg.data_dir, 'dst_garbage_dir')).exists())
+ self.assertTrue(Path(os.path.join(dst_pg.data_dir, 'dst_usefull_dir', 'dst_usefull_file')).exists())
+
+ #self.assertEqual(1, 0, 'Stop test')
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_config_exclusion(self):
+ """
+ Test that catchup can preserve dest replication config
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql(
+ "postgres",
+ "CREATE TABLE ultimate_question(answer int)")
+
+ # preparation 2: make lagging behind replica
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.set_replica(src_pg, dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+ dst_pg.stop()
+
+ # preparation 3: make changes on master (source)
+ src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '2', '--no-vacuum'])
+ pgbench.wait()
+
+ # test 1: do delta catchup with relative exclusion paths
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--exclude-path=postgresql.conf',
+ '--exclude-path=postgresql.auto.conf',
+ '--exclude-path=recovery.conf',
+ ]
+ )
+
+ # run&recover catchup'ed instance
+ # don't set destination db port and recover options
+ dst_pg.slow_start(replica = True)
+
+ # check: run verification query
+ src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # preparation 4: make changes on master (source)
+ dst_pg.stop()
+ #src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '2', '--no-vacuum'])
+ pgbench.wait()
+
+ # test 2: do delta catchup with absolute source exclusion paths
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--exclude-path={0}/postgresql.conf'.format(src_pg.data_dir),
+ '--exclude-path={0}/postgresql.auto.conf'.format(src_pg.data_dir),
+ '--exclude-path={0}/recovery.conf'.format(src_pg.data_dir),
+ ]
+ )
+
+ # run&recover catchup'ed instance
+ # don't set destination db port and recover options
+ dst_pg.slow_start(replica = True)
+
+ # check: run verification query
+ src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(2*42)")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # preparation 5: make changes on master (source)
+ dst_pg.stop()
+ pgbench = src_pg.pgbench(options=['-T', '2', '--no-vacuum'])
+ pgbench.wait()
+
+ # test 3: do delta catchup with absolute destination exclusion paths
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = [
+ '-d', 'postgres', '-p', str(src_pg.port), '--stream',
+ '--exclude-path={0}/postgresql.conf'.format(dst_pg.data_dir),
+ '--exclude-path={0}/postgresql.auto.conf'.format(dst_pg.data_dir),
+ '--exclude-path={0}/recovery.conf'.format(dst_pg.data_dir),
+ ]
+ )
+
+ # run&recover catchup'ed instance
+ # don't set destination db port and recover options
+ dst_pg.slow_start(replica = True)
+
+ # check: run verification query
+ src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(3*42)")
+ src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question")
+ self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy')
+
+ # Cleanup
+ src_pg.stop()
+ dst_pg.stop()
+ #self.assertEqual(1, 0, 'Stop test')
+ self.del_test_dir(module_name, self.fname)
diff --git a/travis/Dockerfile.in b/travis/Dockerfile.in
index 3e451e24f..e6bbedb61 100644
--- a/travis/Dockerfile.in
+++ b/travis/Dockerfile.in
@@ -10,6 +10,7 @@ RUN python3 -m pip install virtualenv
# Environment
ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH}
+ENV PTRACK_PATCH_PG_VERSION=${PTRACK_PATCH_PG_VERSION}
ENV PGPROBACKUP_GDB=${PGPROBACKUP_GDB}
ENV LANG=C.UTF-8 PGHOME=/pg/testdir/pgbin
diff --git a/travis/make_dockerfile.sh b/travis/make_dockerfile.sh
index 2e8ccd5a3..fc2742cdb 100755
--- a/travis/make_dockerfile.sh
+++ b/travis/make_dockerfile.sh
@@ -14,6 +14,10 @@ if [ -z ${MODE+x} ]; then
MODE=basic
fi
+if [ -z ${PTRACK_PATCH_PG_VERSION+x} ]; then
+ PTRACK_PATCH_PG_VERSION=off
+fi
+
if [ -z ${PGPROBACKUP_GDB+x} ]; then
PGPROBACKUP_GDB=ON
fi
@@ -21,11 +25,13 @@ fi
echo PG_VERSION=${PG_VERSION}
echo PG_BRANCH=${PG_BRANCH}
echo MODE=${MODE}
+echo PTRACK_PATCH_PG_VERSION=${PTRACK_PATCH_PG_VERSION}
echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB}
sed \
-e 's/${PG_VERSION}/'${PG_VERSION}/g \
-e 's/${PG_BRANCH}/'${PG_BRANCH}/g \
-e 's/${MODE}/'${MODE}/g \
+ -e 's/${PTRACK_PATCH_PG_VERSION}/'${PTRACK_PATCH_PG_VERSION}/g \
-e 's/${PGPROBACKUP_GDB}/'${PGPROBACKUP_GDB}/g \
Dockerfile.in > Dockerfile
diff --git a/travis/run_tests.sh b/travis/run_tests.sh
index 488d8ee45..4a64fed80 100755
--- a/travis/run_tests.sh
+++ b/travis/run_tests.sh
@@ -32,9 +32,21 @@ PG_SRC=$PWD/postgres
echo "############### Getting Postgres sources:"
git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1
+# Clone ptrack
+if [ "$PTRACK_PATCH_PG_VERSION" != "off" ]; then
+ git clone https://github.com/postgrespro/ptrack.git -b master --depth=1
+ export PG_PROBACKUP_PTRACK=on
+else
+ export PG_PROBACKUP_PTRACK=off
+fi
+
+
# Compile and install Postgres
echo "############### Compiling Postgres:"
cd postgres # Go to postgres dir
+if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then
+ git apply -3 ../ptrack/patches/REL_${PTRACK_PATCH_PG_VERSION}_STABLE-ptrack-core.diff
+fi
CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests
make -s -j$(nproc) install
#make -s -j$(nproc) -C 'src/common' install
@@ -47,6 +59,11 @@ export PATH=$PGHOME/bin:$PATH
export LD_LIBRARY_PATH=$PGHOME/lib
export PG_CONFIG=$(which pg_config)
+if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then
+ echo "############### Compiling Ptrack:"
+ make USE_PGXS=1 -C ../ptrack install
+fi
+
# Get amcheck if missing
if [ ! -d "contrib/amcheck" ]; then
echo "############### Getting missing amcheck:"