summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPavan Deolasee2014-09-01 13:09:27 +0000
committerPavan Deolasee2014-09-01 13:09:27 +0000
commite17819396ac56006da5a5a0cff30c3215ed66554 (patch)
tree993ce457a4e5c93290cddcf405dc77d779a0a99b
parentcae66e77ac771fe3dc9aaaf6c2a003b8c1e3270f (diff)
Add pgxc_ctl contrib module. This was added later in PGXC tree, but we
cherry-picked and enhanced it to work with XL
-rw-r--r--contrib/Makefile1
-rw-r--r--contrib/pgxc_ctl/Makefile45
-rw-r--r--contrib/pgxc_ctl/bash_handler.c76
-rw-r--r--contrib/pgxc_ctl/bash_handler.h18
-rw-r--r--contrib/pgxc_ctl/config.c1129
-rw-r--r--contrib/pgxc_ctl/config.h46
-rw-r--r--contrib/pgxc_ctl/coord_cmd.c2185
-rw-r--r--contrib/pgxc_ctl/coord_cmd.h70
-rw-r--r--contrib/pgxc_ctl/coord_command.h1
-rw-r--r--contrib/pgxc_ctl/datanode_cmd.c1970
-rw-r--r--contrib/pgxc_ctl/datanode_cmd.h71
-rw-r--r--contrib/pgxc_ctl/do_command.c2493
-rw-r--r--contrib/pgxc_ctl/do_command.h16
-rw-r--r--contrib/pgxc_ctl/do_shell.c729
-rw-r--r--contrib/pgxc_ctl/do_shell.h106
-rw-r--r--contrib/pgxc_ctl/gtm_cmd.c1372
-rw-r--r--contrib/pgxc_ctl/gtm_cmd.h73
-rw-r--r--contrib/pgxc_ctl/gtm_util.c167
-rw-r--r--contrib/pgxc_ctl/gtm_util.h23
-rwxr-xr-xcontrib/pgxc_ctl/make_signature136
-rw-r--r--contrib/pgxc_ctl/mcxt.c77
-rw-r--r--contrib/pgxc_ctl/monitor.c469
-rw-r--r--contrib/pgxc_ctl/monitor.h18
-rwxr-xr-xcontrib/pgxc_ctl/pgxc_ctl.bash5361
-rw-r--r--contrib/pgxc_ctl/pgxc_ctl.c605
-rw-r--r--contrib/pgxc_ctl/pgxc_ctl.h58
-rw-r--r--contrib/pgxc_ctl/pgxc_ctl_bash.c671
-rwxr-xr-xcontrib/pgxc_ctl/pgxc_ctl_bash.org885
-rwxr-xr-xcontrib/pgxc_ctl/pgxc_ctl_bash_2302
-rwxr-xr-xcontrib/pgxc_ctl/pgxc_ctl_conf_part318
-rw-r--r--contrib/pgxc_ctl/pgxc_ctl_log.c333
-rw-r--r--contrib/pgxc_ctl/pgxc_ctl_log.h63
-rw-r--r--contrib/pgxc_ctl/signature.h15
-rw-r--r--contrib/pgxc_ctl/utils.c381
-rw-r--r--contrib/pgxc_ctl/utils.h48
-rw-r--r--contrib/pgxc_ctl/variables.c453
-rw-r--r--contrib/pgxc_ctl/variables.h76
-rw-r--r--contrib/pgxc_ctl/varnames.h148
38 files changed, 21008 insertions, 0 deletions
diff --git a/contrib/Makefile b/contrib/Makefile
index 61f61e6ecd..57d4045559 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -44,6 +44,7 @@ SUBDIRS = \
pgrowlocks \
pgstattuple \
pgxc_clean \
+ pgxc_ctl \
seg \
spi \
tablefunc \
diff --git a/contrib/pgxc_ctl/Makefile b/contrib/pgxc_ctl/Makefile
new file mode 100644
index 0000000000..6d4f6006f3
--- /dev/null
+++ b/contrib/pgxc_ctl/Makefile
@@ -0,0 +1,45 @@
#-------------------------------------------------------------------------
#
# Makefile for contrib/pgxc_ctl
#
# Portions Copyright (c) 2013 Postgres-XC Development Group
#
# $PostgreSQL$
#
#-------------------------------------------------------------------------

PGFILEDESC = "pgxc_ctl - Provide XC configuration and operation"
PGAPPICON = win32

PROGRAM= pgxc_ctl
OBJS= bash_handler.o config.o pgxc_ctl.o pgxc_ctl_bash.o variables.o pgxc_ctl_log.o do_command.o \
	utils.o do_shell.o gtm_cmd.o coord_cmd.o datanode_cmd.o gtm_util.o mcxt.o monitor.o


# Include GTM objects
gtm_builddir = $(top_builddir)/src/gtm
EX_OBJS = $(gtm_builddir)/common/assert.o \
	$(gtm_builddir)/client/libgtmclient.a \
	$(gtm_builddir)/common/gtm_serialize.o

PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
PG_LIBS = $(libpq_pgport) $(PTHREAD_LIBS) $(EX_OBJS)

ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
# Fix: was "contrib/pgxc_clean" -- a copy-paste from the pgxc_clean Makefile.
subdir = contrib/pgxc_ctl
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif

# pgxc_ctl_bash.c embeds the bash script and config prototype; it is
# regenerated by make_signature whenever its source parts change.
pgxc_ctl_bash.c: pgxc_ctl_conf_part pgxc_ctl_bash_2
	./make_signature
	touch pgxc_ctl_bash.c
	# NOTE(review): "pgxc_ctl_bash_conf_part" matches no file in this
	# directory; presumably "pgxc_ctl_conf_part" was intended -- confirm
	# against make_signature before changing.
	touch pgxc_ctl_bash_conf_part

clean_script:
	rm -f pgxc_ctl_bash.c
diff --git a/contrib/pgxc_ctl/bash_handler.c b/contrib/pgxc_ctl/bash_handler.c
new file mode 100644
index 0000000000..0c3a3187e6
--- /dev/null
+++ b/contrib/pgxc_ctl/bash_handler.c
@@ -0,0 +1,76 @@
+/*-------------------------------------------------------------------------
+ *
+ * bash_handler.c
+ *
+ * Bash script handler module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include "bash_handler.h"
+#include "config.h"
+#include "pgxc_ctl.h"
+#include "pgxc_ctl_log.h"
+
+extern char *pgxc_ctl_bash_script[];
+extern char *pgxc_ctl_conf_prototype[];
+
+/*
+ * Install bash script.
+ */
+void install_pgxc_ctl_bash(char *path)
+{
+ char cmd[1024];
+ FILE *pgxc_ctl_bash = fopen(path, "w");
+ int i;
+
+ elog(NOTICE, "Installing pgxc_ctl_bash script as %s.\n", path);
+ if (!pgxc_ctl_bash)
+ {
+ elog(ERROR, "ERROR: Could not open pgxc_ctl bash script, %s, %s\n", path, strerror(errno));
+ }
+ for (i=0; pgxc_ctl_conf_prototype[i]; i++)
+ fprintf(pgxc_ctl_bash, "%s\n", pgxc_ctl_conf_prototype[i]);
+ for (i=0; pgxc_ctl_bash_script[i]; i++)
+ fprintf(pgxc_ctl_bash, "%s\n", pgxc_ctl_bash_script[i]);
+ fclose(pgxc_ctl_bash);
+ sprintf(cmd, "chmod +x %s", path);
+ system(cmd);
+}
+
/*
 * Remove the installed bash script; silently does nothing when no path
 * was given.
 */
void uninstall_pgxc_ctl_bash(char *path)
{
    if (path == NULL)
        return;
    unlink(path);
}
+
+/*
+ * Run the bash script and read its output, which consists of variables needed to configure
+ * postgres-xc cluster in pgxc_ctl.
+ *
+ * Be careful that pgxc_ctl changes its working directory to pgxc home directory,
+ * typically $HOME/pgxc_ctl, which can be changed with pgxc_ctl options.
+ * See pgxc_ctl.c or pgxc_ctl document for details.
+ */
+void read_config_file(char *path, char *conf)
+{
+ FILE *vars;
+ char cmd[1024];
+
+ if (conf)
+ sprintf(cmd, "bash %s/pgxc_ctl_bash --configure %s print_values", path, conf);
+ else
+ sprintf(cmd, "bash %s/pgxc_ctl_bash print_values", path);
+ vars = popen(cmd, "r");
+ read_vars(vars);
+ fclose(vars);
+}
diff --git a/contrib/pgxc_ctl/bash_handler.h b/contrib/pgxc_ctl/bash_handler.h
new file mode 100644
index 0000000000..c16638b7a3
--- /dev/null
+++ b/contrib/pgxc_ctl/bash_handler.h
@@ -0,0 +1,18 @@
+/*-------------------------------------------------------------------------
+ *
+ * bash_handler.h
+ *
+ * Bash script handling module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef BASH_HANDLER_H
+#define BASH_HANDLER_H
+
+void install_pgxc_ctl_bash(char *path);
+void read_config_file(char *path, char *conf);
+void uninstall_pgxc_ctl_bash(char *path);
+
+#endif /* BASH_HANDLER_H */
diff --git a/contrib/pgxc_ctl/config.c b/contrib/pgxc_ctl/config.c
new file mode 100644
index 0000000000..2d09826dc1
--- /dev/null
+++ b/contrib/pgxc_ctl/config.c
@@ -0,0 +1,1129 @@
+/*-------------------------------------------------------------------------
+ *
+ * config.c
+ *
+ * Configuration module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
/*
 * This module handles pgxc_ctl configuration. This file includes
 * a parser for incoming variable information, which is installed into
 * the pgxc_ctl variable system.
 *
 * This module also checks if there are any conflicts in the resources
 * among different nodes.
 */
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+#include "variables.h"
+#include "varnames.h"
+#include "config.h"
+#include "pgxc_ctl_log.h"
+#include "pgxc_ctl.h"
+#include "utils.h"
+#include "do_shell.h"
+
+extern char *pgxc_ctl_conf_prototype[];
+
+static void addServer(char **name);
+static void verifyResource(void);
+
+enum Quote { UNQUOTED, SINGLEQUOTED, DOUBLEQUOTED };
+
+/*====================================================================
+ *
+ * Configuration parser
+ *
 * The following functions read, parse and construct variables.
+ *
+ * As you see pgxc_ctl_bash_script[] in pgxc_ctl_bash.c, each variable
+ * will be read in the form of
+ * varname value value ....
+ *
+ * Each variable is basically an array. Sometimes, only the first
+ * element is needed.
+ *
 * Please note that pgxc_ctl_bash.c is built by the make_signature script
 * using pgxc_ctl_bash_2 and pgxc_ctl_conf_part dynamically.
+ *
+ * You can change detailed behaviors of the script by editing these
+ * files. Be careful! Please maintain the interface to this module.
+ *
+ *===================================================================
+ */
/*
 * Extract the next token from "line", writing a pointer to the
 * NUL-terminated token into *token (NULL when the line is exhausted or a
 * comment starts).  The input buffer is modified in place.  Returns a
 * pointer to the rest of the line for the next call, or NULL at end of
 * input.  Tokens may be bare words, or strings enclosed in single or
 * double quotes; '#' starts a comment that runs to end of line.
 */
char *get_word(char *line, char **token)
{
    char closing = 0;   /* closing quote character; 0 means unquoted */

    *token = NULL;
    if (line == NULL)
        return NULL;

    /* Skip leading blanks. */
    while (*line == ' ' || *line == '\t')
        line++;

    if (*line == '\0')
        return NULL;

    if (*line == '"' || *line == '\'')
    {
        closing = *line;
        line++;
    }
    else if (*line == '#')
    {
        /* Comment: terminate the line here; no token. */
        *line = '\0';
        return NULL;
    }

    *token = line;

    if (closing)
    {
        /* Scan to the matching quote (or end of line). */
        while (*line && *line != '\n' && *line != closing)
            line++;
        if (*line == closing)
            *line++ = '\0';
    }
    else
    {
        /* Bare word: ends at blank, newline, comment or NUL. */
        while (*line && *line != ' ' && *line != '\t' && *line != '\n' && *line != '#')
            line++;
    }

    if (*line == '#')
    {
        /* Leave the caller positioned on the (now-terminated) comment. */
        *line = '\0';
        return line;
    }
    if (*line)
    {
        *line = '\0';
        return line + 1;
    }
    return line;
}
+
+/*
+ * Parse line and create/update a variable.
+ */
+static void parse_line(char *line)
+{
+ char *varname;
+ char *val;
+ pgxc_ctl_var *newv;
+
+ line = get_word(line, &varname);
+ if (!varname)
+ return;
+ if (!(newv = confirm_var(varname)))
+ return;
+ reset_value(newv);
+ while((line = get_word(line, &val)))
+ {
+ if (val)
+ {
+ add_val(newv, val);
+ }
+ }
+}
+
+/*
+ * Parse line and filter only pre-defined variables.
+ *
+ * This blocks any unknow variables to be build within pgxc_ctl structure.
+ */
+static void parse_line_select(char *line, char *selectThis[])
+{
+ char *varname;
+ char *val;
+ pgxc_ctl_var *newv;
+ int ii;
+
+ line = get_word(line, &varname);
+ if (!varname || varname[0] == '#')
+ return;
+ for (ii = 0; selectThis[ii]; ii++)
+ {
+ if (strcmp(varname, selectThis[ii]) == 0)
+ {
+ if (!(newv = confirm_var(varname)))
+ return;
+ while((line = get_word(line, &val)))
+ {
+ if (val)
+ add_val(newv, val);
+ }
+ }
+ }
+}
+
+/*
+ * Configuration file I/F
+ */
+void read_vars(FILE *conf)
+{
+ char line[MAXLINE+1];
+
+ while (fgets(line, MAXLINE, conf))
+ parse_line(line);
+}
+
+/*
+ * Configuration file I/F
+ */
+void read_selected_vars(FILE *conf, char *selectThis[])
+{
+ char line[MAXLINE+1];
+
+ while (fgets(line, MAXLINE, conf))
+ parse_line_select(line, selectThis);
+}
+
+/*
+ * Build the configuraiton file prototype.
+ */
+void install_conf_prototype(char *path)
+{
+ char cmd[MAXPATH+1];
+ FILE *pgxc_config_proto = fopen(path, "w");
+ int i;
+
+ if (!pgxc_config_proto)
+ {
+ elog(ERROR, "ERROR Could not open configuration prototype to %s. %s\n", path, strerror(errno));
+ return;
+ }
+ for (i = 0; pgxc_ctl_conf_prototype[i]; i++)
+ fprintf(pgxc_config_proto, "%s\n", pgxc_ctl_conf_prototype[i]);
+ fclose(pgxc_config_proto);
+ snprintf(cmd, MAXPATH, "chmod +x %s", path);
+ system(cmd);
+}
+
+/*
+ * Get all the servers --> VAR_allServers
+ */
+static void addServer(char **name)
+{
+ int ii, jj;
+ int flag;
+
+ confirm_var(VAR_allServers);
+
+ for (ii = 0; name[ii]; ii++)
+ {
+ flag = TRUE;
+ for (jj = 0; aval(VAR_allServers)[jj]; jj++)
+ {
+ if (strcmp(name[ii], aval(VAR_allServers)[jj]) != 0)
+ continue;
+ else
+ {
+ flag = FALSE;
+ break;
+ }
+ }
+ if (flag)
+ add_val(find_var(VAR_allServers), name[ii]);
+ }
+}
+
+/*
+ * Test each node and build target server list
+ */
+void makeServerList(void)
+{
+ /* Initialize */
+ reset_var(VAR_allServers);
+ /* GTM Master */
+ addServer(aval(VAR_gtmMasterServer));
+ /* GTM Slave */
+ if (isVarYes(VAR_gtmSlave))
+ addServer(aval(VAR_gtmSlaveServer));
+ /* GTM_Proxy */
+ if (isVarYes(VAR_gtmProxy))
+ addServer(aval(VAR_gtmProxyServers));
+ /* Coordinator Master */
+ if (find_var(VAR_coordMasterServers))
+ addServer(aval(VAR_coordMasterServers));
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ addServer(aval(VAR_coordSlaveServers));
+ /* Datanode Master */
+ addServer(aval(VAR_datanodeMasterServers));
+ /* Datanode Slave */
+ if (isVarYes(VAR_datanodeSlave))
+ addServer(aval(VAR_datanodeSlaveServers));
+ /* Should add secondary slaves */
+}
+
+
+/*
+ * Take care of "no" slaves and build server list. At present, we don't support
+ * Cascaded or multi slaves. This will be supported in the future.
+ *
+ * Please note that log archive backup site is not counted as this server list
+ * because such servers are not likely to provide XC node operation.
+ *
+ * Log archive backup will be implemented in the future.
+ */
+int is_none(char *s)
+{
+ if (s == NULL)
+ return TRUE;
+ if (strcmp(s, "none") == 0)
+ return TRUE;
+ if (strcmp(s, "N/A") == 0)
+ return TRUE;
+ return FALSE;
+}
+
+/*
+ * Remove gtm slave. Used at failover.
+ */
+static void emptyGtmSlave()
+{
+ reset_var_val(VAR_gtmSlaveServer, "none");
+ reset_var_val(VAR_gtmSlavePort, "0");
+ reset_var_val(VAR_gtmSlaveDir, "none");
+}
+
+/*
+ * Remove gtm proxies. Used when a node crashes.
+ * Because gtm_proxy is expected to be running at any target
+ * server, we don't have gtm_proxy slaves.
+ * We can just initialize gtm_proy using gtminit, configure and
+ * run it.
+ */
+static void emptyGtmProxies()
+{
+ int ii;
+
+ reset_var_val(VAR_gtmProxy, "n");
+ reset_var(VAR_gtmProxyServers);
+ reset_var(VAR_gtmProxyNames);
+ reset_var(VAR_gtmProxyPorts);
+ reset_var(VAR_gtmProxyDirs);
+ reset_var_val(VAR_gtmPxyExtraConfig, "none");
+ reset_var(VAR_gtmPxySpecificExtraConfig);
+ for (ii = 0; ii < arraySizeName(VAR_allServers); ii++)
+ {
+ add_val(find_var(VAR_gtmProxyServers), "none");
+ add_val(find_var(VAR_gtmProxyNames), "none");
+ add_val(find_var(VAR_gtmProxyPorts), "-1");
+ add_val(find_var(VAR_gtmProxyDirs), "none");
+ add_val(find_var(VAR_gtmPxyExtraConfig), "none");
+ }
+}
+
+/*
+ * Removes coordinator slaves from pgxc_ctl configuration.
+ * This is needed when a slave promotes and becomes a new
+ * master.
+ */
+static void emptyCoordSlaves()
+{
+ int ii;
+
+ reset_var_val(VAR_coordSlave, "n");
+ reset_var(VAR_coordSlaveServers);
+ reset_var(VAR_coordSlaveDirs);
+ reset_var(VAR_coordArchLogDirs);
+ for (ii = 0; ii < arraySizeName(VAR_coordNames); ii++)
+ {
+ add_val(find_var(VAR_coordSlaveServers), "none");
+ add_val(find_var(VAR_coordSlaveDirs), "none");
+ add_val(find_var(VAR_coordArchLogDirs), "none");
+ }
+}
+
+/*
+ * Removes datanode slave from pgxc_ctl configuration.
+ */
+static void emptyDatanodeSlaves()
+{
+ int ii;
+
+ reset_var_val(VAR_datanodeSlave, "n");
+ reset_var(VAR_datanodeSlaveServers);
+ reset_var(VAR_datanodeSlaveDirs);
+ reset_var(VAR_datanodeArchLogDirs);
+ for (ii = 0; ii < arraySizeName(VAR_datanodeSlaveServers); ii++)
+ {
+ add_val(find_var(VAR_datanodeSlaveServers), "none");
+ add_val(find_var(VAR_coordSlaveDirs), "none");
+ add_val(find_var(VAR_coordArchLogDirs), "none");
+ }
+}
+
+/*
+ * Scans initial configuration and set up "not configured" things.
+ *
+ * If, for example, gtm proxy is not configured,
+ * we set gtmProxy variable to "n".
+ *
+ * When gtmProxy varieble is already set to "n", remove gtm_proxy
+ * configuration information.
+ *
+ * Similar handling will be done for gtm slave, coordinator slaves
+ * and datanode slaves.
+ */
+void handle_no_slaves()
+{
+ int is_empty;
+ int ii;
+
+ /* GTM Slave */
+ if (!find_var(VAR_gtmSlave))
+ reset_var_val(VAR_gtmSlave, "n");
+ if (!isVarYes(VAR_gtmSlave))
+ emptyGtmSlave();
+ else
+ {
+ confirm_var(VAR_gtmSlaveServer);
+ if (!sval(VAR_gtmSlaveServer) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ emptyGtmSlave();
+ reset_var_val(VAR_gtmSlaveServer, "n");
+ }
+ }
+
+ /* GTM Proxy */
+ if (!find_var(VAR_gtmProxy))
+ reset_var_val(VAR_gtmProxy, "n");
+ if (!isVarYes(VAR_gtmProxy))
+ emptyGtmProxies();
+ else
+ {
+ is_empty = TRUE;
+ for (ii = 0; aval(VAR_gtmProxyServers)[ii]; ii++)
+ {
+ if (is_none(aval(VAR_gtmProxyServers)[ii]))
+ continue;
+ else
+ {
+ is_empty = FALSE;
+ break;
+ }
+ }
+ if (is_empty)
+ {
+ reset_var_val(VAR_gtmProxy, "n");
+ emptyGtmProxies();
+ }
+ }
+ /* Coordinator Slaves */
+ if (!find_var(VAR_coordSlave))
+ reset_var_val(VAR_coordSlave, "n");
+ if (!isVarYes(VAR_coordSlave))
+ emptyCoordSlaves();
+ else
+ {
+ is_empty = TRUE;
+ if (find_var(VAR_coordSlaveServers))
+ {
+ for (ii = 0; aval(VAR_coordSlaveServers)[ii]; ii++)
+ {
+ if (is_none(aval(VAR_coordSlaveServers)[ii]))
+ continue;
+ else
+ {
+ is_empty = FALSE;
+ break;
+ }
+ }
+ if (is_empty)
+ {
+ reset_var_val(VAR_coordSlave, "n");
+ emptyCoordSlaves();
+ }
+ }
+ else
+ {
+ elog(WARNING, "WARNING: coordSlaveServers variable not found where coordSlave is set to \"y\"\n");
+ reset_var_val(VAR_coordSlave, "n");
+ emptyCoordSlaves();
+ }
+ }
+ /* Datanode Slaves */
+ if (!find_var(VAR_datanodeSlave))
+ reset_var_val(VAR_datanodeSlave, "n");
+ if (!isVarYes(VAR_datanodeSlave))
+ emptyDatanodeSlaves();
+ else
+ {
+ is_empty = TRUE;
+ if (find_var(VAR_datanodeSlaveServers))
+ {
+ for (ii = 0; aval(VAR_datanodeSlaveServers)[ii]; ii++)
+ {
+ if (is_none(aval(VAR_datanodeSlaveServers)[ii]))
+ continue;
+ else
+ {
+ is_empty = FALSE;
+ break;
+ }
+ }
+ if (is_empty)
+ {
+ reset_var_val(VAR_datanodeSlave, "n");
+ emptyDatanodeSlaves();
+ }
+ }
+ else
+ {
+ elog(WARNING, "WARNING: datanodeSlaveServers variable not found where datanodeSlave is set to \"y\"\n");
+ reset_var_val(VAR_datanodeSlave, "n");
+ emptyDatanodeSlaves();
+ }
+ }
+}
+
+/*
+ * Check if there's no overlap in the resource --> Port, host and directory.
+ */
+static void reportMissingVar(char *name)
+{
+ elog(ERROR, "ERROR: %s is not configured.\n", name);
+}
+
+static int anyConfigErrors = FALSE;
+
+static void checkIfVarIsConfigured(char *name)
+{
+ if (!find_var(name) || !sval(name))
+ {
+ anyConfigErrors = TRUE;
+ reportMissingVar(name);
+ }
+}
+
/* Verify that every variable in the NULL-terminated list is configured. */
static void checkIfConfigured(char *names[])
{
    char **p;

    for (p = names; *p; p++)
        checkIfVarIsConfigured(*p);
}
+
+static void checkConfiguredAndSize(char *names[], char *msg)
+{
+ int ii;
+ int sz0;
+
+ for (ii = 0; names[ii]; ii++)
+ {
+ checkIfVarIsConfigured(names[ii]);
+ confirm_var(names[ii]);
+ }
+ sz0 = arraySizeName(names[0]);
+ for (ii = 1; names[ii]; ii++)
+ {
+ if (arraySizeName(names[ii]) != sz0)
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Number of elements in %s definitions are different %s and %s. Check your configuration\n", msg, names[0], names[ii]);
+ }
+ }
+}
+
/*
 * Check one prospective node (name/host/port/dir) against the whole
 * configuration.  Returns 1 on any name, port or directory conflict.
 */
int checkSpecificResourceConflict(char *name, char *host, int port, char *dir, int is_gtm)
{
    return (checkNameConflict(name, is_gtm) ||
            checkPortConflict(host, port) ||
            checkDirConflict(host, dir)) ? 1 : 0;
}
+/*
+ * Note that 1 will be returned when a conflict is found
+ */
+int checkNameConflict(char *name, int is_gtm)
+{
+ int ii;
+
+ /*
+ * GTM Master
+ */
+ if (!is_gtm && strcasecmp(name, sval(VAR_gtmName)) == 0)
+ return 1;
+ /*
+ * GTM Proxy
+ */
+ if (isVarYes(VAR_gtmProxy))
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ if (strcasecmp(name, aval(VAR_gtmProxyNames)[ii]) == 0)
+ return 1;
+ /*
+ * Coordinator
+ */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if (strcasecmp(name, aval(VAR_coordNames)[ii]) == 0)
+ return 1;
+ /*
+ * Datanode
+ */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if (strcasecmp(name, aval(VAR_datanodeNames)[ii]) == 0)
+ return 1;
+ return 0;
+}
+
/*
 * Check whether (host, port) is already claimed by any configured node.
 * Note that 1 will be returned when a conflict is found.
 */
int checkPortConflict(char *host, int port)
{
    int ii;

    /* GTM Master */
    if ((strcasecmp(host, sval(VAR_gtmMasterServer)) == 0) && (atoi(sval(VAR_gtmMasterPort)) == port))
        return 1;
    /* GTM Slave */
    if (isVarYes(VAR_gtmSlave) && (strcasecmp(host, sval(VAR_gtmSlaveServer)) == 0) && (atoi(sval(VAR_gtmSlavePort)) == port))
        return 1;
    /* GTM Proxy */
    if (isVarYes(VAR_gtmProxy))
        for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
            if ((strcasecmp(host, aval(VAR_gtmProxyServers)[ii]) == 0) && (atoi(aval(VAR_gtmProxyPorts)[ii]) == port))
                return 1;
    /* Coordinator Master: both the listen port and the pooler port count */
    for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
        if ((strcasecmp(host, aval(VAR_coordMasterServers)[ii]) == 0) &&
            ((atoi(aval(VAR_coordPorts)[ii]) == port) || (atoi(aval(VAR_poolerPorts)[ii])) == port))
            return 1;
    /* Coordinator Slave.
     * NOTE(review): compared against the master's port (VAR_coordPorts);
     * presumably a slave reuses its master's port number on a different
     * host -- confirm. */
    if (isVarYes(VAR_coordSlave))
        for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
            if (doesExist(VAR_coordSlaveServers, ii) && !is_none(aval(VAR_coordSlaveServers)[ii]) &&
                (strcasecmp(host, aval(VAR_coordSlaveServers)[ii]) == 0) && (atoi(aval(VAR_coordPorts)[ii]) == port))
                return 1;
    /* Datanode Master */
    for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
        if ((strcasecmp(host, aval(VAR_datanodeMasterServers)[ii]) == 0) && (atoi(aval(VAR_datanodePorts)[ii]) == port))
            return 1;
    /* Datanode Slave: same port as the corresponding master (see note above) */
    if (isVarYes(VAR_datanodeSlave))
        for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
            if (doesExist(VAR_datanodeSlaveServers, ii) && !is_none(aval(VAR_datanodeSlaveServers)[ii]) &&
                (strcasecmp(host, aval(VAR_datanodeSlaveServers)[ii]) == 0) && (atoi(aval(VAR_datanodePorts)[ii]) == port))
                return 1;
    return 0;
}
+
+int checkDirConflict(char *host, char *dir)
+{
+ int ii;
+
+ /* GTM Master */
+ if ((strcasecmp(host, sval(VAR_gtmMasterServer)) == 0) && (strcmp(dir, sval(VAR_gtmMasterDir)) == 0))
+ return 1;
+ /* GTM Slave */
+ if (isVarYes(VAR_gtmSlave) && (strcasecmp(host, sval(VAR_gtmSlaveServer)) == 0) && (strcmp(dir, sval(VAR_gtmSlaveDir)) == 0))
+ return 1;
+ /* GTM Proxy */
+ if (isVarYes(VAR_gtmProxy))
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_gtmProxyServers)[ii]) == 0) && (strcmp(dir, aval(VAR_gtmProxyDirs)[ii]) == 0))
+ return 1;
+ /* Coordinator Master */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_coordMasterServers)[ii]) == 0) && (strcmp(dir, aval(VAR_coordMasterDirs)[ii]) == 0))
+ return 1;
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_coordSlaveServers)[ii]) == 0) && (strcmp(dir, aval(VAR_coordSlaveDirs)[ii]) == 0))
+ return 1;
+ /* Datanode Master */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_datanodeMasterServers)[ii]) == 0) && (strcmp(dir, aval(VAR_datanodeMasterDirs)[ii]) == 0))
+ return 1;
+ /* Datanode Slave */
+ if (isVarYes(VAR_datanodeSlave))
+ if (doesExist(VAR_datanodeSlaveServers, ii) && doesExist(VAR_datanodeSlaveDirs, ii) &&
+ (strcasecmp(host, aval(VAR_datanodeSlaveServers)[ii]) == 0) && (strcmp(dir, aval(VAR_datanodeSlaveDirs)[ii]) == 0))
+ return 1;
+ return 0;
+}
+
/*
 * Check if there's any conflict among src and dest, checks duplicate in names, servers, ports and directories.
 *
 * The rules are:
 *
 * 1) Each node (gtm, gtm_proxy, coordinator, datanode) must have unique name.
 *
 * 2) A port, in a given host, must be owned (listed to) only by single node.
 *
 * 3) A directory, in a given host, must be owned (used) only by single node.
 *
 * All src/dest arguments are variable NAMES (dereferenced via aval()), not
 * values; any may be NULL when the node type lacks that attribute.  Missing
 * array elements are filled in with "none"/"-1" placeholders as a side
 * effect.  destOnly skips the src-internal check; checkName controls name
 * uniqueness enforcement.  Errors are reported via elog() and recorded in
 * anyConfigErrors; the scan continues so all conflicts get reported.
 */
static void checkResourceConflict(char *srcNames, char *srcServers, char *srcPorts, char *srcPoolers, char *srcDirs,
                                  char *destNames, char *destServers, char *destPorts, char *destPoolers, char *destDirs,
                                  int destOnly, int checkName)
{
    int ii, jj;

    if (!srcNames || !find_var(srcNames))
    {
        /* No source specified */
        return;
    }
    if (!destOnly)
    {
        /* Check conflict among the source first */
        for (ii = 0; aval(srcNames)[ii]; ii++)
        {
            if (is_none(aval(srcNames)[ii]))
                continue;
            /* Pooler and the port in the same name.
             * NOTE(review): srcPorts is dereferenced whenever srcPoolers is
             * set -- callers always pass ports together with poolers;
             * confirm before adding a caller that does not. */
            if (srcPoolers && (atoi(aval(srcPorts)[ii]) == atoi(aval(srcPoolers)[ii])))
            {
                if (atoi(aval(srcPorts)[ii]) > 0)
                {
                    anyConfigErrors = TRUE;
                    elog(ERROR, "ERROR: Conflict in between port and pooler within %s variable.\n", srcNames);
                }
            }
            /* Backfill any missing elements so the comparisons below are safe. */
            if (checkName && srcNames && !doesExist(srcNames, ii))
                assign_arrayEl(srcNames, ii, "none", NULL);
            if (srcServers && !doesExist(srcServers, ii))
                assign_arrayEl(srcServers, ii, "none", NULL);
            if (srcPoolers && !doesExist(srcPoolers, ii))
                assign_arrayEl(srcPoolers, ii, "-1", "-1");
            if (srcPorts && !doesExist(srcPorts, ii))
                assign_arrayEl(srcPorts, ii, "-1", "-1");
            if (srcDirs && !doesExist(srcDirs, ii))
                assign_arrayEl(srcDirs, ii, "none", NULL);
            /* Compare element ii against every later element jj. */
            for (jj = ii+1; aval(srcNames)[jj]; jj++)
            {
                /* Name conflict */
                if (checkName && srcNames && !doesExist(srcNames, jj))
                    assign_arrayEl(srcNames, jj, "none", NULL);
                if (checkName && srcNames && (strcmp(aval(srcNames)[ii], aval(srcNames)[jj]) == 0))
                {
                    anyConfigErrors = TRUE;
                    elog(ERROR, "ERROR: Conflict in resource name within %s variable.\n", srcNames);
                }
                if (srcServers && is_none(aval(srcServers)[ii]))
                    continue;
                if (srcServers && !doesExist(srcServers, jj))
                    assign_arrayEl(srcServers, jj, "none", NULL);
                /* Port/dir conflicts only matter on the same server. */
                if (srcServers && strcmp(aval(srcServers)[ii], aval(srcServers)[jj]) == 0)
                {
                    /* Ports and Poolers */
                    if (srcPorts && !doesExist(srcPorts, jj))
                        assign_arrayEl(srcPorts, jj, "-1", "-1");
                    if (srcPoolers && !doesExist(srcPoolers, jj))
                        assign_arrayEl(srcPoolers, jj, "-1", "-1");
                    if((srcPorts && (atoi(aval(srcPorts)[ii]) > 0) && (atoi(aval(srcPorts)[ii]) == atoi(aval(srcPorts)[jj]))) ||
                       (srcPorts && srcPoolers && (atoi(aval(srcPorts)[ii]) > 0) && (atoi(aval(srcPorts)[ii]) == atoi(aval(srcPoolers)[jj]))) ||
                       (srcPoolers && (atoi(aval(srcPoolers)[ii]) > 0) && (atoi(aval(srcPoolers)[ii]) == atoi(aval(srcPoolers)[jj]))))
                    {
                        anyConfigErrors = TRUE;
                        elog(ERROR, "ERROR: Conflict in port and pooler numbers within %s variable.\n", srcNames);
                    }
                    /* Directories */
                    if (srcDirs && !doesExist(srcDirs, jj))
                        assign_arrayEl(srcDirs, jj, "none", NULL);
                    if (srcDirs && strcmp(aval(srcDirs)[ii], aval(srcDirs)[jj]) == 0)
                    {
                        if (!is_none(aval(srcDirs)[ii]))
                        {
                            anyConfigErrors = TRUE;
                            elog(ERROR, "ERROR: Conflict in directories within %s variable.\n", srcNames);
                        }
                    }
                }
            }
        }
    }
    /* Check between src and destination */
    if (destNames)
    {
        for (ii = 0; aval(srcNames)[ii]; ii++)
        {
            if (is_none(aval(srcNames)[ii]))
                continue;
            for (jj = 0; aval(destNames)[jj]; jj++)
            {
                /* Resource names */
                if (checkName && (strcmp(aval(srcNames)[ii], aval(destNames)[jj]) == 0))
                {
                    anyConfigErrors = TRUE;
                    elog(ERROR, "ERROR: Conflict in names between %s and %s variable.\n", srcNames, destNames);
                }
                if (destServers && !doesExist(destServers, jj))
                    assign_arrayEl(destServers, jj, "none", NULL);
                /* Port/dir conflicts only matter on the same server. */
                if (srcServers && destServers && (strcmp(aval(srcServers)[ii], aval(destServers)[jj]) == 0) && !is_none(aval(srcServers)[ii]))
                {
                    /* Ports and poolers */
                    if (destPorts && !doesExist(destPorts, jj))
                        assign_arrayEl(destPorts, jj, "-1", "-1");
                    if (destPoolers && !doesExist(destPoolers, jj))
                        assign_arrayEl(destPoolers, jj, "-1", "-1");
                    if ((srcPorts && destPorts && (atoi(aval(srcPorts)[ii]) == atoi(aval(destPorts)[jj])) && (atoi(aval(srcPorts)[ii]) > 0)) ||
                        (destPoolers && srcPorts && (destPoolers && (atoi(aval(srcPorts)[ii]) == atoi(aval(destPoolers)[jj]))) && (atoi(aval(srcPorts)[ii]) > 0)) ||
                        (srcPoolers && destPorts && (atoi(aval(srcPoolers)[ii]) == atoi(aval(destPorts)[jj])) && (atoi(aval(srcPoolers)[ii]) > 0)) ||
                        (srcPoolers && destPoolers && (atoi(aval(srcPoolers)[ii]) == atoi(aval(destPoolers)[jj])) && (atoi(aval(srcPoolers)[ii]) > 0)))
                    {
                        anyConfigErrors = TRUE;
                        elog(ERROR, "ERROR: Conflict in port/pooler in %s and %s variable.\n", srcNames, destNames);
                    }
                    /* Dir Names */
                    if (srcDirs && destDirs && !is_none(aval(srcDirs)[ii]) && (strcmp(aval(srcDirs)[ii], aval(destDirs)[jj]) == 0))
                    {
                        anyConfigErrors = TRUE;
                        elog(ERROR, "ERROR: Conflict in directory names in %s and %s variable.\n", srcNames, destNames);
                    }
                }
            }
        }
    }
}
+
+/*
+ * Check if each node resource is configured properly
+ * Again, finding an error will not make the program stop.
+ */
+static void verifyResource(void)
+{
+ char *GtmVars[] = {VAR_gtmName,
+ VAR_gtmMasterServer,
+ VAR_gtmMasterPort,
+ VAR_gtmMasterDir,
+ NULL};
+ char *GtmSlaveVars[] = {VAR_gtmSlaveServer,
+ VAR_gtmSlavePort,
+ VAR_gtmSlaveDir,
+ NULL};
+ char *gtmProxyVars[] = {VAR_gtmProxyNames,
+ VAR_gtmProxyServers,
+ VAR_gtmProxyPorts,
+ VAR_gtmProxyDirs,
+ NULL};
+ char *coordMasterVars[] = {VAR_coordNames,
+ VAR_coordPorts,
+ VAR_poolerPorts,
+ VAR_coordMasterServers,
+ VAR_coordMasterDirs,
+ VAR_coordMaxWALSenders,
+ NULL};
+ char *coordSlaveVars[] = {VAR_coordNames,
+ VAR_coordSlaveServers,
+ VAR_coordSlaveDirs,
+ VAR_coordArchLogDirs,
+ NULL};
+#if 0
+ /*
+ * Please note that at present, pgxc_ctl supports only synchronous replication
+ * between {coordinator|datanode} master and server.
+ *
+ * Start/stop operation of the master and failover operation is affected by this
+ * settings. Will be improved soon.
+ */
+ char *coordSlaveSVars[] = {VAR_coordSlaveSync, NULL}; /* For extension */
+#endif
+ char *datanodeMasterVars[] = {VAR_datanodeNames,
+ VAR_datanodePorts,
+#ifdef XCP
+ VAR_datanodePoolerPorts,
+#endif
+ VAR_datanodeMasterServers,
+ VAR_datanodeMasterDirs,
+ VAR_datanodeMaxWALSenders,
+ NULL};
+ char *datanodeSlaveVars[] = {VAR_datanodeNames,
+ VAR_datanodeSlaveServers,
+ VAR_datanodeSlaveDirs,
+ VAR_datanodeArchLogDirs,
+ NULL};
+#if 0
+ char *datanodeSlaveSVars[] = {VAR_datanodeSlaveSync, NULL}; /* For extension, see above */
+#endif
+
+ /*
+ * -------------- Fundamental check -------------------
+ */
+ anyConfigErrors = FALSE;
+ /* GTM */
+ checkIfConfigured(GtmVars);
+ /* GTM slave */
+ if (isVarYes(VAR_gtmSlave))
+ checkIfConfigured(GtmSlaveVars);
+ /* GTM proxy */
+ if (isVarYes(VAR_gtmProxy))
+ checkConfiguredAndSize(gtmProxyVars, "GTM Proxy");
+ /* Coordinator Master */
+ checkIfConfigured(coordMasterVars);
+ checkConfiguredAndSize(coordMasterVars, "coordinator master");
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ {
+#if 0
+ checkIfConfigured(coordSlaveSVars); /* For extension, see above */
+
+#endif
+ checkConfiguredAndSize(coordSlaveVars, "coordinator slave");
+ }
+ /* Datanode Master */
+ checkConfiguredAndSize(datanodeMasterVars, "datanode master");
+ /* Datanode Slave */
+ if (sval(VAR_datanodeSlave) && strcmp(sval(VAR_datanodeSlave), "y") == 0)
+ {
+#if 0
+ checkIfConfigured(datanodeSlaveSVars); /* For extension, see above */
+#endif
+ checkConfiguredAndSize(datanodeSlaveVars, "datanode slave");
+ }
+ if (anyConfigErrors)
+ elog(ERROR, "ERROR: Found fundamental configuration error.\n");
+ /*
+ * --------------- Resource Conflict Check ---------------------
+ */
+ /*
+ * GTM Master and others ----------------
+ */
+ anyConfigErrors = FALSE;
+ /* GTM and GTM slave */
+ if (isVarYes(VAR_gtmSlave))
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_gtmName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir, TRUE, FALSE);
+ /* GTM and GTM Proxy, if any */
+ if (isVarYes(VAR_gtmProxy))
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs, TRUE, TRUE);
+ /* GTM and coordinator masters */
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ TRUE, TRUE);
+ /* GTM and coordinator slaves, if any */
+ if (isVarYes(VAR_coordSlave))
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_coordNames, VAR_coordSlaveServers, VAR_coordPorts, NULL, VAR_coordSlaveDirs, TRUE, TRUE);
+ /* GTM and datanode masters */
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs, TRUE, TRUE);
+ /* GTM and datanode slaves, if any */
+ if(isVarYes(VAR_datanodeSlave))
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ /*
+ * GTM slave and others ------------
+ */
+ if (isVarYes(VAR_gtmSlave))
+ {
+ /* GTM slave and GTM Proxy, if any */
+ if (isVarYes(VAR_gtmProxy))
+ checkResourceConflict(VAR_gtmName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ TRUE, TRUE);
+ /* GTM slave and coordinator masters */
+ checkResourceConflict(VAR_gtmName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ TRUE, TRUE);
+ /* GTM slave and coordinator slaves, if any */
+ if (isVarYes(VAR_coordSlave))
+ checkResourceConflict(VAR_gtmName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_coordNames, VAR_coordSlaveServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordSlaveDirs,
+ TRUE, TRUE);
+ /* GTM slave and datanode masters */
+ checkResourceConflict(VAR_gtmName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ TRUE, TRUE);
+ /* GTM slave and datanode slave, if any */
+ if (isVarYes(VAR_datanodeSlave))
+ checkResourceConflict(VAR_gtmName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_datanodeNames, VAR_datanodeSlaveServers, VAR_datanodePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ }
+ /*
+ * GTM proxy and others ---------
+ */
+ if (isVarYes(VAR_gtmProxy))
+ {
+ /* GTM proxy and coordinator masters */
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ FALSE, TRUE);
+ /* GTM proxy and coordinator slaves, if any */
+ if (sval(VAR_coordSlave) && (strcmp(sval(VAR_coordSlave), "y") == 0))
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_coordNames, VAR_coordSlaveServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordSlaveDirs,
+ TRUE, TRUE);
+ /* GTM proxy and datanode masters */
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ TRUE, TRUE);
+ /* GTM proxy and datanode slave, if any */
+ if (sval(VAR_datanodeSlave) && (strcmp(sval(VAR_datanodeSlave), "y") == 0))
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers, VAR_datanodePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ }
+ /*
+ * Coordinator Masters and others
+ */
+ /* Coordinator master and coordinator slaves, if any */
+ if (isVarYes(VAR_coordSlave))
+ checkResourceConflict(VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ VAR_coordNames, VAR_coordSlaveServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordSlaveDirs,
+ TRUE, FALSE);
+ /* Coordinator masters and datanode masters */
+ checkResourceConflict(VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ FALSE, TRUE);
+ /* Coordinator masters and datanode slave, if any */
+ if (isVarYes(VAR_datanodeSlave))
+ checkResourceConflict(VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers, VAR_datanodePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ /*
+ * Coordinator slaves and others
+ */
+ if (isVarYes(VAR_coordSlave))
+ {
+ /* Coordinator slave and datanode masters */
+ checkResourceConflict(VAR_coordNames, VAR_coordSlaveServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordSlaveDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers, VAR_datanodePorts, NULL, VAR_datanodeSlaveDirs,
+ FALSE, TRUE);
+ /* Coordinator slave and datanode slave, if any */
+ if (isVarYes(VAR_datanodeSlave))
+ checkResourceConflict(VAR_coordNames, VAR_coordSlaveServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordSlaveDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers, VAR_datanodePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ }
+ /*
+ * Datanode masters and others ---
+ */
+ /* Datanode master self */
+ checkResourceConflict(VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ NULL, NULL, NULL, NULL, NULL,
+ FALSE, TRUE);
+ /* Datanode master and datanode slave, if any */
+ if (sval(VAR_datanodeSlave) && (strcmp(sval(VAR_datanodeSlave), "y") == 0))
+ checkResourceConflict(VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers, VAR_datanodePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, FALSE);
+ if (anyConfigErrors)
+ {
+ elog(ERROR, "ERROR: Found conflicts among resources. Exiting.\n");
+ exit(1);
+ }
+}
+
+/*
+ * Check that the minimum set of components is configured: GTM master,
+ * coordinator masters and datanode masters.  Missing pieces are all
+ * reported (elog continues past ERROR here) so the user sees every
+ * problem at once, then slave handling, resource verification and the
+ * server list are set up.
+ */
+void check_configuration(void)
+{
+    /*
+     * See if mandatory configuration is defined. Will continue if error is detected
+     * to check all the errors at a time.
+     */
+    /* GTM Master */
+    if (!find_var(VAR_gtmName) || !find_var(VAR_gtmMasterServer) || !find_var(VAR_gtmMasterPort) || !find_var(VAR_gtmMasterDir))
+        elog(ERROR, "ERROR: GTM master configuration is missing. gtmName, gtmMasterServer, gtmMasterPort or gtmMasterDir\n");
+    /* Coordinator Master */
+    if (!find_var(VAR_coordNames) || !find_var(VAR_coordPorts) || !find_var(VAR_poolerPorts) ||
+        !find_var(VAR_coordMasterServers) || !find_var(VAR_coordMasterDirs))
+        elog(ERROR, "ERROR: Coordinator master configuration is missing. coordNames, coordPorts, poolerPorts, coordMasterServers or coordMasterDirs\n");
+    /* Datanode Master */
+#ifdef XCP
+    if (!find_var(VAR_datanodeNames) || !find_var(VAR_datanodePorts) || !find_var(VAR_datanodeMasterServers) ||
+#else
+    if (!find_var(VAR_datanodeNames) || !find_var(VAR_datanodePorts) || !find_var(VAR_datanodePoolerPorts) || !find_var(VAR_datanodeMasterServers) ||
+#endif
+
+        !find_var(VAR_datanodeMasterDirs))
+#ifdef XCP
+        /* XCP builds do not check datanodePoolerPorts, so don't mention it */
+        elog(ERROR, "ERROR: Datanode master configuration is missing. datanodeNames, datanodePorts, datanodeMasterServers or datanodeMasterDirs\n");
+#else
+        elog(ERROR, "ERROR: Datanode master configuration is missing. datanodeNames, datanodePorts, datanodePoolerPorts, datanodeMasterServers or datanodeMasterDirs\n");
+#endif
+    handle_no_slaves();
+    verifyResource();
+    makeServerList();
+}
+
+/*
+ * Backup configuration files to a remote site as specified.
+ *
+ * Returns 2 when configuration backup is disabled or incompletely
+ * configured; otherwise the status of the scp command that copies the
+ * configuration file to configBackupHost:configBackupDir/configBackupFile.
+ */
+int backup_configuration(void)
+{
+    /* isVarYes() is NULL-safe, unlike the bare strcasecmp(sval(...)) used before */
+    if (!isVarYes(VAR_configBackup) || is_none(sval(VAR_configBackupHost)) ||
+        is_none(sval(VAR_configBackupDir)) || is_none(sval(VAR_configBackupFile)))
+        return (2);
+    return(doImmediate(NULL, NULL, "scp %s %s@%s:%s/%s",
+                       pgxc_ctl_config_path,
+                       sval(VAR_pgxcUser), sval(VAR_configBackupHost),
+                       sval(VAR_configBackupDir), sval(VAR_configBackupFile)));
+}
+
+/*
+ * Classify a node name: GTM, GTM proxy, coordinator, datanode or plain
+ * server name, checked in that order.  Returns NodeType_UNDEF when the
+ * name matches nothing.
+ */
+NodeType getNodeType(char *nodeName)
+{
+    int idx;
+
+    if (strcmp(nodeName, sval(VAR_gtmName)) == 0)
+        return NodeType_GTM;
+    for (idx = 0; aval(VAR_gtmProxyNames)[idx]; idx++)
+    {
+        if (strcmp(nodeName, aval(VAR_gtmProxyNames)[idx]) == 0)
+            return NodeType_GTM_PROXY;
+    }
+    for (idx = 0; aval(VAR_coordNames)[idx]; idx++)
+    {
+        if (strcmp(nodeName, aval(VAR_coordNames)[idx]) == 0)
+            return NodeType_COORDINATOR;
+    }
+    for (idx = 0; aval(VAR_datanodeNames)[idx]; idx++)
+    {
+        if (strcmp(nodeName, aval(VAR_datanodeNames)[idx]) == 0)
+            return NodeType_DATANODE;
+    }
+    for (idx = 0; aval(VAR_allServers)[idx]; idx++)
+    {
+        if (strcmp(nodeName, aval(VAR_allServers)[idx]) == 0)
+            return NodeType_SERVER;
+    }
+    return NodeType_UNDEF;
+}
+
+/*
+ * Return the first non-negative max_wal_senders value configured for a
+ * coordinator (isCoord != 0) or datanode master, or 0 when none is set.
+ */
+int getDefaultWalSender(int isCoord)
+{
+    int ii;
+
+    char *names = isCoord ? VAR_coordNames : VAR_datanodeNames;
+    char *walSender = isCoord ? VAR_coordMaxWALSenders : VAR_datanodeMaxWALSenders;
+
+    for (ii = 0; aval(names)[ii]; ii++)
+    {
+        /*
+         * Guard the walSender array as well: it may be shorter than the
+         * name list, and atoi(NULL) would crash.
+         */
+        if (doesExist(names, ii) && !is_none(aval(names)[ii]) &&
+            doesExist(walSender, ii) && !is_none(aval(walSender)[ii]) &&
+            (atoi(aval(walSender)[ii]) >= 0))
+            return atoi(aval(walSender)[ii]);
+    }
+    return 0;
+}
diff --git a/contrib/pgxc_ctl/config.h b/contrib/pgxc_ctl/config.h
new file mode 100644
index 0000000000..039f953138
--- /dev/null
+++ b/contrib/pgxc_ctl/config.h
@@ -0,0 +1,46 @@
+/*-------------------------------------------------------------------------
+ *
+ * config.h
+ *
+ * Configuration module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#include <stdio.h>
+#include <string.h>
+
+/* Kind of component a node name resolves to; see getNodeType(). */
+typedef enum NodeType {
+    NodeType_UNDEF = 0,        /* name matched nothing */
+    NodeType_GTM,
+    NodeType_GTM_PROXY,
+    NodeType_COORDINATOR,
+    NodeType_DATANODE,
+    NodeType_SERVER} NodeType;
+
+void read_vars(FILE *conf);
+void install_conf_prototype(char *path);
+void check_configuration(void);
+void read_selected_vars(FILE *conf, char *selectThis[]);
+char *get_word(char *line, char **token);
+int is_none(char *s);
+int backup_configuration(void);
+NodeType getNodeType(char *nodeName);
+int checkSpecificResourceConflict(char *name, char *host, int port, char *dir, int is_gtm);
+int checkNameConflict(char *name, int is_gtm);
+int checkPortConflict(char *host, int port);
+int checkDirConflict(char *host, char *dir);
+void makeServerList(void);
+int getDefaultWalSender(int isCoord);
+
+/*
+ * isVarYes() is NULL-safe; DEBUG() and VERBOSE() are defined in terms of
+ * it so an unset variable reads as "no" instead of passing NULL to
+ * strcasecmp().
+ */
+#define isVarYes(x) ((sval(x) != NULL) && (strcasecmp(sval(x), "y") == 0))
+#define DEBUG() isVarYes(VAR_debug)
+#define VERBOSE() isVarYes(VAR_verbose)
+
+void handle_no_slaves(void);
+
+#endif /* CONFIG_H */
diff --git a/contrib/pgxc_ctl/coord_cmd.c b/contrib/pgxc_ctl/coord_cmd.c
new file mode 100644
index 0000000000..d1511cc5fa
--- /dev/null
+++ b/contrib/pgxc_ctl/coord_cmd.c
@@ -0,0 +1,2185 @@
+/*-------------------------------------------------------------------------
+ *
+ * coord_cmd.c
+ *
+ * Coordinator command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "do_command.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+#include "coord_cmd.h"
+#include "gtm_util.h"
+
+
+static int failover_oneCoordinator(int coordIdx);
+static int configure_datanodes(char **nodeList);
+static cmd_t *prepare_configureDataNode(char *nodeName);
+
+static char date[MAXTOKEN+1];
+
+/*
+ *======================================================================
+ *
+ * Coordinator commands
+ *
+ *=====================================================================
+ */
+/*
+ * Initialize every configured coordinator master.
+ */
+int init_coordinator_master_all(void)
+{
+    elog(NOTICE, "Initialize all the coordinator masters.\n");
+    return init_coordinator_master(aval(VAR_coordNames));
+}
+
+/*
+ * Build the command chain that initializes one coordinator master:
+ * wipe and initdb the data directory, append configuration to
+ * postgresql.conf (GTM/pooler settings plus user-supplied extra files),
+ * set up WAL archiving toward the slave when log shipping is
+ * configured, and append pg_hba.conf entries.
+ *
+ * Returns NULL (after logging) when nodeName is not a coordinator, the
+ * master is already running, or a local stdin file cannot be prepared.
+ */
+cmd_t *prepare_initCoordinatorMaster(char *nodeName)
+{
+    cmd_t *cmd, *cmdInitdb, *cmdPgConf, *cmdWalArchDir, *cmdWalArch, *cmdPgHba;
+    int jj, kk, gtmPxyIdx;
+    char **confFiles = NULL;
+    FILE *f;
+    char localStdin[MAXPATH+1];
+    char *gtmHost, *gtmPort;
+    char timestamp[MAXTOKEN+1];
+
+    /* Reset coordinator master directory and run initdb */
+    if ((jj = coordIdx(nodeName)) < 0)
+    {
+        elog(ERROR, "ERROR: Node %s is not a coordinator.\n", nodeName);
+        return(NULL);
+    }
+    if(pingNode(aval(VAR_coordMasterServers)[jj], aval(VAR_coordPorts)[jj]) == 0)
+    {
+        elog(ERROR, "ERROR: target coordinator master %s is running now. Skip initialization.\n",
+             nodeName);
+        return(NULL);
+    }
+    cmd = cmdInitdb = initCmd(aval(VAR_coordMasterServers)[jj]);
+    snprintf(newCommand(cmdInitdb), MAXLINE,
+             "rm -rf %s;"
+             "mkdir -p %s;"
+             "initdb --nodename %s -D %s",
+             aval(VAR_coordMasterDirs)[jj],
+             aval(VAR_coordMasterDirs)[jj],
+             nodeName,
+             aval(VAR_coordMasterDirs)[jj]);
+
+    /* Update postgresql.conf */
+
+    /* Resolve the GTM endpoint: prefer a GTM proxy on the same server */
+    gtmPxyIdx = getEffectiveGtmProxyIdxFromServerName(aval(VAR_coordMasterServers)[jj]);
+    gtmHost = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyServers)[gtmPxyIdx] : sval(VAR_gtmMasterServer);
+    gtmPort = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyPorts)[gtmPxyIdx] : sval(VAR_gtmMasterPort);
+    appendCmdEl(cmdInitdb, (cmdPgConf = initCmd(aval(VAR_coordMasterServers)[jj])));
+    snprintf(newCommand(cmdPgConf), MAXLINE,
+             "cat >> %s/postgresql.conf", aval(VAR_coordMasterDirs)[jj]);
+    if (!is_none(sval(VAR_coordExtraConfig)))
+        AddMember(confFiles, sval(VAR_coordExtraConfig));
+    if (!is_none(aval(VAR_coordSpecificExtraConfig)[jj]))
+        AddMember(confFiles, aval(VAR_coordSpecificExtraConfig)[jj]);
+    if ((f = prepareLocalStdin((cmdPgConf->localStdin = Malloc(MAXPATH+1)), MAXPATH, confFiles)) == NULL)
+    {
+        CleanArray(confFiles);        /* was leaked on this error path */
+        cleanCmd(cmd);
+        return(NULL);
+    }
+    /* From configuration variables */
+    fprintf(f,
+            "#===========================================\n"
+            "# Added at initialization. %s\n"
+            "port = %d\n"
+            "pooler_port = %s\n"
+            "gtm_host = '%s'\n"
+            "gtm_port = %s\n"
+            "# End of Addition\n",
+            timeStampString(timestamp, MAXTOKEN),
+            atoi(aval(VAR_coordPorts)[jj]),
+            aval(VAR_poolerPorts)[jj],
+            gtmHost, gtmPort);
+    fclose(f);
+    CleanArray(confFiles);
+
+    /* Log Shipping */
+
+    if (isVarYes(VAR_coordSlave) && !is_none(aval(VAR_coordSlaveServers)[jj]))
+    {
+        /* Build WAL archive target directory */
+        appendCmdEl(cmdInitdb, (cmdWalArchDir = initCmd(aval(VAR_coordSlaveServers)[jj])));
+        snprintf(newCommand(cmdWalArchDir), MAXLINE,
+                 "rm -rf %s;mkdir -p %s; chmod 0700 %s",
+                 aval(VAR_coordArchLogDirs)[jj], aval(VAR_coordArchLogDirs)[jj],
+                 aval(VAR_coordArchLogDirs)[jj]);
+        /* Build master's postgresql.conf */
+        appendCmdEl(cmdInitdb, (cmdWalArch = initCmd(aval(VAR_coordMasterServers)[jj])));
+        if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+        {
+            cleanCmd(cmd);
+            return(NULL);
+        }
+        fprintf(f,
+                "#========================================\n"
+                "# Addition for log shipping, %s\n"
+                "wal_level = hot_standby\n"
+                "archive_mode = on\n"
+                "archive_command = 'rsync %%p %s@%s:%s/%%f'\n"
+                "max_wal_senders = %s\n"
+                "# End of Addition\n",
+                timeStampString(timestamp, MAXTOKEN),    /* was MAXPATH: timestamp is only MAXTOKEN+1 bytes */
+                sval(VAR_pgxcUser), aval(VAR_coordSlaveServers)[jj], aval(VAR_coordArchLogDirs)[jj],
+                aval(VAR_coordMaxWALSenders)[jj]);
+        fclose(f);
+        cmdWalArch->localStdin = Strdup(localStdin);
+        snprintf(newCommand(cmdWalArch), MAXLINE,
+                 "cat >> %s/postgresql.conf",
+                 aval(VAR_coordMasterDirs)[jj]);
+    }
+
+    /* pg_hba.conf */
+
+    appendCmdEl(cmdInitdb, (cmdPgHba = initCmd(aval(VAR_coordMasterServers)[jj])));
+    if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+    {
+        cleanCmd(cmd);
+        return(NULL);
+    }
+    fprintf(f,
+            "#=================================================\n"
+            "# Addition at initialization, %s\n",
+            timeStampString(timestamp, MAXTOKEN));
+    if (!is_none(sval(VAR_coordExtraPgHba)))
+        AddMember(confFiles, sval(VAR_coordExtraPgHba));
+    if (!is_none(aval(VAR_coordSpecificExtraPgHba)[jj]))
+        AddMember(confFiles, aval(VAR_coordSpecificExtraPgHba)[jj]);
+    appendFiles(f, confFiles);
+    CleanArray(confFiles);
+    for (kk = 0; aval(VAR_coordPgHbaEntries)[kk]; kk++)
+    {
+        fprintf(f,"host all %s %s trust\n", sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+        /* Allow replication connections only when a slave is configured */
+        if (isVarYes(VAR_coordSlave))
+            if (!is_none(aval(VAR_coordSlaveServers)[jj]))
+                fprintf(f, "host replication %s %s trust\n",
+                        sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+    }
+    fprintf(f, "# End of addition\n");
+    fclose(f);
+    cmdPgHba->localStdin = Strdup(localStdin);
+    snprintf(newCommand(cmdPgHba), MAXLINE,
+             "cat >> %s/pg_hba.conf", aval(VAR_coordMasterDirs)[jj]);
+
+    /*
+     * Now prepare statements to create/alter nodes.
+     */
+    return(cmd);
+}
+
+/*
+ * Initialize the listed coordinator masters and report the overall
+ * result of the batched commands.
+ */
+int init_coordinator_master(char **nodeList)
+{
+    char **targets = makeActualNodeList(nodeList);
+    cmdList_t *batch = initCmdList();
+    cmd_t *nodeCmd;
+    int status;
+    int ii;
+
+    /* Build directory and run initdb on every target */
+    for (ii = 0; targets[ii]; ii++)
+    {
+        elog(NOTICE, "Initialize coordinator master %s.\n", targets[ii]);
+        if ((nodeCmd = prepare_initCoordinatorMaster(targets[ii])))
+            addCmd(batch, nodeCmd);
+    }
+    status = doCmdList(batch);
+    cleanCmdList(batch);
+    CleanArray(targets);
+    elog(NOTICE, "Done.\n");
+    return(status);
+}
+
+/*
+ * Initialize every configured coordinator slave.
+ */
+int init_coordinator_slave_all(void)
+{
+    elog(NOTICE, "Initialize all the coordinator slaves.\n");
+    return init_coordinator_slave(aval(VAR_coordNames));
+}
+
+/*
+ * Build the command chain that initializes one coordinator slave:
+ * (re)create the slave work directory, start the master if it is not
+ * running, take a base backup from it, then write recovery.conf and
+ * postgresql.conf for the standby.
+ *
+ * Returns NULL (after logging) when nodeName is not a coordinator, no
+ * slave is configured for it, or a local stdin file cannot be prepared.
+ */
+cmd_t *prepare_initCoordinatorSlave(char *nodeName)
+{
+    cmd_t *cmd, *cmdBuildDir, *cmdStartMaster, *cmdBaseBkup, *cmdRecoveryConf, *cmdPgConf;
+    int idx;
+    FILE *f;
+    char localStdin[MAXPATH+1];
+    char timestamp[MAXTOKEN+1];
+
+    if ((idx = coordIdx(nodeName)) < 0)
+    {
+        elog(ERROR, "ERROR: %s is not a coordinator.\n", nodeName);
+        return(NULL);
+    }
+    if (is_none(aval(VAR_coordSlaveServers)[idx]))
+    {
+        elog(ERROR, "ERROR: Slave of the coordinator %s is not configured.\n", nodeName);
+        return(NULL);
+    }
+
+    /* Build work directory */
+    cmd = cmdBuildDir = initCmd(aval(VAR_coordSlaveServers)[idx]);
+    snprintf(newCommand(cmdBuildDir), MAXLINE,
+             "rm -rf %s;mkdir -p %s;chmod 0700 %s",
+             aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordSlaveDirs)[idx]);
+    /*
+     * Check if the master is running --> May not need change if we have watchdog. This case, we need
+     * a master which can handle the request. So GTM should be running. We can test all of them by
+     * single 'select 1' command.
+     */
+    if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) != 0)
+    {
+        /* Master is not running. Must start it first */
+        appendCmdEl(cmdBuildDir, (cmdStartMaster = initCmd(aval(VAR_coordMasterServers)[idx])));
+        snprintf(newCommand(cmdStartMaster), MAXLINE,
+                 "pg_ctl start -Z coordinator -D %s -o -i",
+                 aval(VAR_coordMasterDirs)[idx]);
+    }
+    /*
+     * Obtain base backup of the master
+     */
+    appendCmdEl(cmdBuildDir, (cmdBaseBkup = initCmd(aval(VAR_coordSlaveServers)[idx])));
+    snprintf(newCommand(cmdBaseBkup), MAXLINE,
+             "pg_basebackup -p %s -h %s -D %s -x",
+             aval(VAR_coordPorts)[idx], aval(VAR_coordMasterServers)[idx], aval(VAR_coordSlaveDirs)[idx]);
+
+    /* Configure recovery.conf file at the slave */
+    appendCmdEl(cmdBuildDir, (cmdRecoveryConf = initCmd(aval(VAR_coordSlaveServers)[idx])));
+    if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+    {
+        cleanCmd(cmd);
+        return(NULL);
+    }
+    fprintf(f,
+            "#==========================================\n"
+            "# Added to initialize the slave, %s\n"
+            "standby_mode = on\n"
+            "primary_conninfo = 'host = %s port = %s "
+            "user = %s application_name = %s'\n"
+            "restore_command = 'cp %s/%%f %%p'\n"
+            "archive_cleanup_command = 'pg_archivecleanup %s %%r'\n"
+            "# End of addition\n",
+            timeStampString(timestamp, MAXTOKEN), aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx],
+            sval(VAR_pgxcOwner), aval(VAR_coordNames)[idx],
+            aval(VAR_coordArchLogDirs)[idx], aval(VAR_coordArchLogDirs)[idx]);
+    fclose(f);
+    cmdRecoveryConf->localStdin = Strdup(localStdin);
+    /* NOTE(review): the trailing \n inside this command string is unlike
+     * the other "cat >>" commands in this file; presumably harmless to
+     * the shell, but confirm it is intentional. */
+    snprintf(newCommand(cmdRecoveryConf), MAXLINE,
+             "cat >> %s/recovery.conf\n", aval(VAR_coordSlaveDirs)[idx]);
+
+    /* Configure postgresql.conf at the slave */
+    appendCmdEl(cmdBuildDir, (cmdPgConf = initCmd(aval(VAR_coordSlaveServers)[idx])));
+    if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+    {
+        cleanCmd(cmd);
+        return(NULL);
+    }
+    /*
+     * NOTE(review): this sets hot_standby = on together with
+     * wal_level = minimal; PostgreSQL normally requires a higher
+     * wal_level on a standby — confirm these values are deliberate
+     * (e.g. only meant to take effect after promotion).
+     */
+    fprintf(f,
+            "#==========================================\n"
+            "# Added to initialize the slave, %s\n"
+            "hot_standby = on\n"
+            "port = %s\n"
+            "wal_level = minimal\n"
+            "archive_mode = off\n"
+            "archive_command = ''\n"
+            "max_wal_senders = 0\n"
+            "# End of Addition\n",
+            timeStampString(timestamp, MAXTOKEN), aval(VAR_coordPorts)[idx]);
+    fclose(f);
+    cmdPgConf->localStdin = Strdup(localStdin);
+    snprintf(newCommand(cmdPgConf), MAXLINE,
+             "cat >> %s/postgresql.conf", aval(VAR_coordSlaveDirs)[idx]);
+    return(cmd);
+}
+
+
+/*
+ * Initialize the listed coordinator slaves: build the work directory,
+ * take a base backup from the master (starting it if needed) and write
+ * recovery.conf/postgresql.conf.  Fails with rc 1 when slaves are not
+ * configured at all.
+ */
+int init_coordinator_slave(char **nodeList)
+{
+    char **actualNodeList;
+    int ii;
+    cmdList_t *cmdList;
+    int rc;
+    cmd_t *cmd;
+
+    if (!isVarYes(VAR_coordSlave))
+    {
+        elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+        return(1);
+    }
+    actualNodeList = makeActualNodeList(nodeList);
+    cmdList = initCmdList();
+    /*
+     * First step: initialize work directory and run the master if necessary
+     */
+    for (ii = 0; actualNodeList[ii]; ii++)
+    {
+        /* message typo fixed (was "Initializa") */
+        elog(INFO, "Initialize the coordinator slave %s.\n", actualNodeList[ii]);
+        if ((cmd = prepare_initCoordinatorSlave(actualNodeList[ii])))
+            addCmd(cmdList, cmd);
+    }
+    rc = doCmdList(cmdList);
+    cleanCmdList(cmdList);
+    CleanArray(actualNodeList);
+    elog(INFO, "Done.\n");
+    return(rc);
+}
+
+/*
+ * Configure nodes in each coordinator -------------------------------------------
+ *
+ * Issues CREATE NODE/ALTER NODE through psql.
+ *
+ * Please note that CREATE/ALTER/DROP NODE are handled only locally. You have to
+ * visit all the coordinators.
+ */
+/*
+ * Register all cluster nodes on every coordinator and every datanode.
+ * Returns non-zero when either pass reports an error (previously the
+ * coordinator pass's return code was silently discarded).
+ */
+int configure_nodes_all(void)
+{
+    int rcCoord = configure_nodes(aval(VAR_coordNames));
+    int rcDatanode = configure_datanodes(aval(VAR_datanodeNames));
+
+    return (rcCoord != 0) ? rcCoord : rcDatanode;
+}
+
+/*
+ * Issue CREATE NODE/ALTER NODE through psql on each listed coordinator
+ * so its local catalog knows every coordinator and datanode.
+ */
+int configure_nodes(char **nodeList)
+{
+    char **targets = makeActualNodeList(nodeList);
+    cmdList_t *batch = initCmdList();
+    cmd_t *nodeCmd;
+    int status;
+    int ii;
+
+    for (ii = 0; targets[ii]; ii++)
+    {
+        if ((nodeCmd = prepare_configureNode(targets[ii])))
+            addCmd(batch, nodeCmd);
+    }
+    status = doCmdList(batch);
+    cleanCmdList(batch);
+    CleanArray(targets);
+    elog(INFO, "Done.\n");
+    return(status);
+}
+
+/*
+ * Issue CREATE NODE/ALTER NODE (via EXECUTE DIRECT through the first
+ * coordinator) for each listed datanode.
+ */
+static int configure_datanodes(char **nodeList)
+{
+    char **actualNodeList;
+    int ii;
+    cmdList_t *cmdList;
+    cmd_t *cmd;
+    int rc;
+
+    actualNodeList = makeActualNodeList(nodeList);
+    cmdList = initCmdList();    /* was called twice, leaking the first list */
+    for (ii = 0; actualNodeList[ii]; ii++)
+    {
+        if ((cmd = prepare_configureDataNode(actualNodeList[ii])))
+            addCmd(cmdList, cmd);
+    }
+    rc = doCmdList(cmdList);
+    cleanCmdList(cmdList);
+    CleanArray(actualNodeList);
+    elog(INFO, "Done.\n");
+    return(rc);
+}
+
+/*
+ * Build the psql command that registers every coordinator and datanode
+ * in the catalog of coordinator nodeName: CREATE NODE for remote nodes,
+ * ALTER NODE for the coordinator's own entry.  The SQL script is fed in
+ * through the command's local stdin file.
+ *
+ * Returns NULL when nodeName is not a coordinator, when its master
+ * server is "none", or when the stdin file cannot be prepared.
+ */
+cmd_t *prepare_configureNode(char *nodeName)
+{
+    cmd_t *cmd;
+    int ii;
+    int idx;
+    FILE *f;
+
+    if ((idx = coordIdx(nodeName)) < 0)
+    {
+        elog(ERROR, "ERROR: %s is not a coordinator.\n", nodeName);
+        return NULL;
+    }
+    if (is_none(aval(VAR_coordMasterServers)[idx]))
+        return NULL;
+    cmd = initCmd(NULL);    /* NULL host: psql is run locally */
+    snprintf(newCommand(cmd), MAXLINE,
+             "psql -p %d -h %s -a %s %s",
+             atoi(aval(VAR_coordPorts)[idx]),
+             aval(VAR_coordMasterServers)[idx],
+             sval(VAR_defaultDatabase),
+             sval(VAR_pgxcOwner));
+    if ((f = prepareLocalStdin(newFilename(cmd->localStdin), MAXPATH, NULL)) == NULL)
+    {
+        cleanCmd(cmd);
+        return NULL;
+    }
+    /* Setup coordinators */
+    for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+    {
+        int targetIdx;
+        if (is_none(aval(VAR_coordNames)[ii]))
+            continue;
+        if ((targetIdx = coordIdx(aval(VAR_coordNames)[ii])) < 0)
+        {
+            elog(ERROR, "ERROR: internal error. Could not get coordinator idex for %s\n", aval(VAR_coordNames)[ii]);
+            continue;
+        }
+        /*
+         * NOTE(review): the body below indexes the server/port arrays with
+         * ii rather than the targetIdx just computed; this is only
+         * equivalent if coordIdx(aval(VAR_coordNames)[ii]) == ii always
+         * holds — confirm.
+         */
+        if (!is_none(aval(VAR_coordMasterServers)[ii]))
+        {
+            if (idx != targetIdx)
+                /* Register outside coordinator */
+                fprintf(f, "CREATE NODE %s WITH (TYPE='coordinator', HOST='%s', PORT=%d);\n",
+                        aval(VAR_coordNames)[ii],
+                        aval(VAR_coordMasterServers)[ii],
+                        atoi(aval(VAR_coordPorts)[ii]));
+            else
+                /* Update myself */
+                fprintf(f, "ALTER NODE %s WITH (HOST='%s', PORT=%d);\n",
+                        aval(VAR_coordNames)[ii],
+                        aval(VAR_coordMasterServers)[ii],
+                        atoi(aval(VAR_coordPorts)[ii]));
+        }
+    }
+    /* Setup datanodes */
+    for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+    {
+        int dnIdx;
+
+        /*
+         * NOTE(review): dnIdx is resolved before "none" entries are
+         * skipped below; if a name slot is "none", datanodeIdx() may fail
+         * and this bails out instead of skipping — verify intent.
+         */
+        if ((dnIdx = datanodeIdx(aval(VAR_datanodeNames)[ii])) < 0)
+        {
+            elog(ERROR, "ERROR: inernal error. Could not get datanode index for %s.\n", aval(VAR_datanodeNames)[ii]);
+            fclose(f);
+            cleanCmd(cmd);
+            return NULL;
+        }
+        if (is_none(aval(VAR_datanodeNames)[ii]) || is_none(aval(VAR_datanodeMasterServers)[dnIdx]))
+            continue;
+        if (sval(VAR_primaryDatanode) && (strcmp(sval(VAR_primaryDatanode), aval(VAR_datanodeNames)[dnIdx]) == 0))
+        {
+            /* Primary Node */
+            if (strcmp(aval(VAR_coordMasterServers)[idx], aval(VAR_datanodeMasterServers)[dnIdx]) == 0)
+                /* Primary and preferred node (co-located with this coordinator) */
+                fprintf(f, "CREATE NODE %s WITH (TYPE='datanode', HOST='%s', PORT=%d, PRIMARY, PREFERRED);\n",
+                        aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                        atoi(aval(VAR_datanodePorts)[dnIdx]));
+            else
+                /* Primary but not preferred node */
+                fprintf(f, "CREATE NODE %s WITH (TYPE='datanode', HOST='%s', PORT=%d, PRIMARY);\n",
+                        aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                        atoi(aval(VAR_datanodePorts)[dnIdx]));
+        }
+        else
+        {
+            /* Non-primary node */
+            if (strcmp(aval(VAR_coordMasterServers)[idx], aval(VAR_datanodeMasterServers)[dnIdx]) == 0)
+                /* Preferred node */
+                fprintf(f, "CREATE NODE %s WITH (TYPE='datanode', HOST='%s', PORT=%d, PREFERRED);\n",
+                        aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                        atoi(aval(VAR_datanodePorts)[dnIdx]));
+            else
+                /* non-Preferred node */
+                fprintf(f, "CREATE NODE %s WITH (TYPE='datanode', HOST='%s', PORT=%d);\n",
+                        aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                        atoi(aval(VAR_datanodePorts)[dnIdx]));
+        }
+    }
+    fclose(f);
+    return(cmd);
+}
+
+/*
+ * Build the psql command that registers every coordinator and datanode
+ * in the catalog of datanode nodeName.  Datanodes are not contacted
+ * directly: the SQL is sent through the first coordinator using
+ * EXECUTE DIRECT.  The datanode's own entry uses ALTER NODE, remote
+ * entries use CREATE NODE.
+ */
+static cmd_t *prepare_configureDataNode(char *nodeName)
+{
+    cmd_t *cmd;
+    int ii;
+    int jj;
+    int idx;
+    FILE *f;
+    bool is_preferred;
+
+    if ((idx = datanodeIdx(nodeName)) < 0)
+    {
+        elog(ERROR, "ERROR: %s is not a datanode.\n", nodeName);
+        return NULL;
+    }
+    if (is_none(aval(VAR_datanodeMasterServers)[idx]))
+        return NULL;
+    cmd = initCmd(NULL);
+    /* We use one of the coordinators to send queries to datanodes */
+    snprintf(newCommand(cmd), MAXLINE,
+             "psql -p %d -h %s -a %s %s",
+             atoi(aval(VAR_coordPorts)[0]),
+             aval(VAR_coordMasterServers)[0],
+             sval(VAR_defaultDatabase),
+             sval(VAR_pgxcOwner));
+    if ((f = prepareLocalStdin(newFilename(cmd->localStdin), MAXPATH, NULL)) == NULL)
+    {
+        cleanCmd(cmd);
+        return NULL;
+    }
+    /* Setup coordinators */
+    for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+    {
+        int targetIdx;
+        if (is_none(aval(VAR_coordNames)[ii]))
+            continue;
+        if ((targetIdx = coordIdx(aval(VAR_coordNames)[ii])) < 0)
+        {
+            elog(ERROR, "ERROR: internal error. Could not get coordinator idex for %s\n", aval(VAR_coordNames)[ii]);
+            continue;
+        }
+        if (!is_none(aval(VAR_coordMasterServers)[ii]))
+        {
+            /* Register outside coordinator */
+            fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''coordinator'', HOST=''%s'', PORT=%d)';\n",
+                    aval(VAR_datanodeNames)[idx],
+                    aval(VAR_coordNames)[ii],
+                    aval(VAR_coordMasterServers)[ii],
+                    atoi(aval(VAR_coordPorts)[ii]));
+        }
+    }
+    /* Setup datanodes */
+    for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+    {
+        int dnIdx;
+
+        if ((dnIdx = datanodeIdx(aval(VAR_datanodeNames)[ii])) < 0)
+        {
+            elog(ERROR, "ERROR: inernal error. Could not get datanode index for %s.\n", aval(VAR_datanodeNames)[ii]);
+            fclose(f);
+            cleanCmd(cmd);
+            return NULL;
+        }
+        if (is_none(aval(VAR_datanodeNames)[ii]) || is_none(aval(VAR_datanodeMasterServers)[dnIdx]))
+            continue;
+
+        /* See if this data node is on the same host as a coordinator;
+         * if so it will be registered as PREFERRED below. */
+        is_preferred = false;
+        for (jj = 0; aval(VAR_coordNames)[jj]; jj++)
+        {
+            if (strcmp(aval(VAR_coordMasterServers)[jj], aval(VAR_datanodeMasterServers)[dnIdx]) == 0)
+            {
+                is_preferred = true;
+                break;
+            }
+        }
+
+        /* CREATE NODE for other datanodes, ALTER NODE for this datanode's
+         * own entry (idx == dnIdx), with PRIMARY/PREFERRED as computed. */
+        if (sval(VAR_primaryDatanode) && (strcmp(sval(VAR_primaryDatanode), aval(VAR_datanodeNames)[dnIdx]) == 0))
+        {
+            if (idx != dnIdx)
+            {
+                /* Primary Node */
+                if (is_preferred)
+                {
+                    /* Primary and preferred node */
+                    fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PRIMARY, PREFERRED)';\n",
+                            aval(VAR_datanodeNames)[idx],
+                            aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                            atoi(aval(VAR_datanodePorts)[dnIdx]));
+                }
+                else
+                    /* Primary but not preferred node */
+                    fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PRIMARY)';\n",
+                            aval(VAR_datanodeNames)[idx],
+                            aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                            atoi(aval(VAR_datanodePorts)[dnIdx]));
+            }
+            else
+                /* Primary Node */
+                if (is_preferred)
+                    /* Primary and preferred node */
+                    fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PRIMARY, PREFERRED)';\n",
+                            aval(VAR_datanodeNames)[idx],
+                            aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                            atoi(aval(VAR_datanodePorts)[dnIdx]));
+                else
+                    /* Primary but not preferred node */
+                    fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PRIMARY)';\n",
+                            aval(VAR_datanodeNames)[idx],
+                            aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                            atoi(aval(VAR_datanodePorts)[dnIdx]));
+        }
+        else
+        {
+            if (idx != dnIdx)
+            {
+                /* Non-primary node */
+                if (is_preferred)
+                    /* Preferred node */
+                    fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PREFERRED)';\n",
+                            aval(VAR_datanodeNames)[idx],
+                            aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                            atoi(aval(VAR_datanodePorts)[dnIdx]));
+                else
+                    /* non-Preferred node */
+                    fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d)';\n",
+                            aval(VAR_datanodeNames)[idx],
+                            aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                            atoi(aval(VAR_datanodePorts)[dnIdx]));
+            }
+            else
+            {
+                /* Non-primary node */
+                if (is_preferred)
+                    /* Preferred node */
+                    fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PREFERRED)';\n",
+                            aval(VAR_datanodeNames)[idx],
+                            aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                            atoi(aval(VAR_datanodePorts)[dnIdx]));
+                else
+                    /* non-Preferred node */
+                    fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d)';\n",
+                            aval(VAR_datanodeNames)[idx],
+                            aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+                            atoi(aval(VAR_datanodePorts)[dnIdx]));
+            }
+        }
+    }
+    fclose(f);
+    return(cmd);
+}
+
+
+/*
+ * Kill coordinator masters -------------------------------------------------------------
+ *
+ * It is not recommended to kill them in such a manner. This is just for an emergency.
+ * You should try to stop the component with the "stop" command instead.
+ */
+
+/*
+ * Forcibly kill every configured coordinator master.
+ */
+int kill_coordinator_master_all(void)
+{
+    elog(INFO, "Killing all the coordinator masters.\n");
+    return kill_coordinator_master(aval(VAR_coordNames));
+}
+
+/*
+ * Build the command that forcibly kills the master of the given coordinator
+ * and removes its UNIX-domain socket files.
+ *
+ * Returns NULL (with a warning) when nodeName is not a configured coordinator.
+ * When the postmaster pid cannot be determined, falls back to "killall" on
+ * every postgres process owned by the pgxc user on that host.
+ */
+cmd_t * prepare_killCoordinatorMaster(char *nodeName)
+{
+	int coordIndex = coordIdx(nodeName);
+	pid_t postmasterPid;
+	cmd_t *killCmd;
+
+	if (coordIndex < 0)
+	{
+		elog(WARNING, "WARNING: node %s is not a coordinator.\n", nodeName);
+		return(NULL);
+	}
+	killCmd = initCmd(aval(VAR_coordMasterServers)[coordIndex]);
+	postmasterPid = get_postmaster_pid(aval(VAR_coordMasterServers)[coordIndex],
+									   aval(VAR_coordMasterDirs)[coordIndex]);
+	if (postmasterPid > 0)
+	{
+		/* Kill the postmaster together with all of its child processes. */
+		char *childPids = getChPidList(aval(VAR_coordMasterServers)[coordIndex], postmasterPid);
+
+		snprintf(newCommand(killCmd), MAXLINE,
+				 "kill -9 %d %s; rm -f /tmp/.s.'*'%d'*'",
+				 postmasterPid, childPids, atoi(aval(VAR_coordPorts)[coordIndex]));
+		freeAndReset(childPids);
+	}
+	else
+	{
+		/* No postmaster pid available: kill every postgres of the owner. */
+		snprintf(newCommand(killCmd), MAXLINE,
+				 "killall -u %s -9 postgres; rm -f /tmp/.s.'*'%d'*'",
+				 sval(VAR_pgxcUser), atoi(aval(VAR_coordPorts)[coordIndex]));
+	}
+	return killCmd;
+}
+
+/*
+ * Forcibly kill the coordinator masters named in nodeList.
+ * Returns the aggregate result of running the kill commands.
+ */
+int kill_coordinator_master(char **nodeList)
+{
+	char **targets = makeActualNodeList(nodeList);
+	cmdList_t *cmds = initCmdList();
+	int i;
+	int result;
+
+	for (i = 0; targets[i]; i++)
+	{
+		cmd_t *killCmd;
+
+		elog(INFO, "Killing coordinator master %s.\n", targets[i]);
+		killCmd = prepare_killCoordinatorMaster(targets[i]);
+		if (killCmd)
+			addCmd(cmds, killCmd);
+	}
+	result = doCmdList(cmds);
+	cleanCmdList(cmds);
+	CleanArray(targets);
+	return(result);
+}
+
+/*
+ * Kill coordinator slaves --------------------------------------------------------------
+ *
+ * It is not recommended to kill them in such a manner. This is just for emergencies.
+ * You should try to stop the component with the "stop" command instead.
+ */
+/*
+ * Forcibly kill every configured coordinator slave.
+ *
+ * Fix: the INFO message misspelled "coordinator" as "cooridinator".
+ */
+int kill_coordinator_slave_all(void)
+{
+	elog(INFO, "Killing all the coordinator slaves.\n");
+	return(kill_coordinator_slave(aval(VAR_coordNames)));
+}
+
+/*
+ * Build the command chain that forcibly kills the slave of the given
+ * coordinator (when its postmaster pid can be found) and, in any case,
+ * removes the slave's UNIX-domain socket files.
+ *
+ * Returns NULL (with a warning) when nodeName is not a configured coordinator.
+ */
+cmd_t *prepare_killCoordinatorSlave(char *nodeName)
+{
+	int coordIndex = coordIdx(nodeName);
+	pid_t postmasterPid;
+	cmd_t *head = NULL;
+	cmd_t *rmSockCmd;
+
+	if (coordIndex < 0)
+	{
+		elog(WARNING, "WARNING: %s is not a coordinator.\n", nodeName);
+		return(NULL);
+	}
+	postmasterPid = get_postmaster_pid(aval(VAR_coordSlaveServers)[coordIndex],
+									   aval(VAR_coordSlaveDirs)[coordIndex]);
+	if (postmasterPid > 0)
+	{
+		/* Kill the postmaster and all of its children. */
+		char *childPids = getChPidList(aval(VAR_coordSlaveServers)[coordIndex], postmasterPid);
+
+		head = initCmd(aval(VAR_coordSlaveServers)[coordIndex]);
+		snprintf(newCommand(head), MAXLINE,
+				 "kill -9 %d %s",
+				 postmasterPid, childPids);
+		freeAndReset(childPids);
+	}
+	/* Socket cleanup runs whether or not a kill command was built. */
+	rmSockCmd = initCmd(aval(VAR_coordSlaveServers)[coordIndex]);
+	if (head == NULL)
+		head = rmSockCmd;
+	else
+		appendCmdEl(head, rmSockCmd);
+	snprintf(newCommand(rmSockCmd), MAXLINE,
+			 "rm -f /tmp/.s.'*'%d'*'", atoi(aval(VAR_coordPorts)[coordIndex]));
+	return(head);
+}
+
+/*
+ * Forcibly kill the coordinator slaves named in nodeList.
+ *
+ * Returns 1 immediately when no coordinator slaves are configured,
+ * otherwise the aggregate result of the kill commands.
+ *
+ * Fix: the per-node INFO message misspelled "coordinator" as "coordinatlr".
+ */
+int kill_coordinator_slave(char **nodeList)
+{
+	char **actualNodeList;
+	int ii;
+	cmdList_t *cmdList;
+	cmd_t *cmd;
+	int rc;
+
+	if (!isVarYes(VAR_coordSlave))
+	{
+		elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+		return(1);
+	}
+	cmdList = initCmdList();
+	actualNodeList = makeActualNodeList(nodeList);
+	for (ii = 0; actualNodeList[ii]; ii++)
+	{
+		elog(INFO, "Killing coordinator slave %s.\n", actualNodeList[ii]);
+		if ((cmd = prepare_killCoordinatorSlave(actualNodeList[ii])))
+			addCmd(cmdList, cmd);
+	}
+	rc = doCmdList(cmdList);
+	cleanCmdList(cmdList);
+	CleanArray(actualNodeList);
+	elog(INFO, "Done.\n");
+	return (rc);
+}
+
+/*
+ * Build the command that wipes the given coordinator master's data
+ * directory (recreating it empty with mode 0700) and removes its
+ * coordinator- and pooler-port socket files.
+ *
+ * Returns NULL when nodeName is not a coordinator or has no master server.
+ */
+cmd_t *prepare_cleanCoordinatorMaster(char *nodeName)
+{
+	int coordIndex = coordIdx(nodeName);
+	cmd_t *cleanCmd;
+
+	if (coordIndex < 0)
+		return NULL;
+	if (is_none(aval(VAR_coordMasterServers)[coordIndex]))
+		return NULL;
+	cleanCmd = initCmd(aval(VAR_coordMasterServers)[coordIndex]);
+	snprintf(newCommand(cleanCmd), MAXLINE,
+			 "rm -rf %s;mkdir -p %s;chmod 0700 %s; rm -f /tmp/.s.*%d*; rm -f /tmp/.s.*%d*",
+			 aval(VAR_coordMasterDirs)[coordIndex],
+			 aval(VAR_coordMasterDirs)[coordIndex],
+			 aval(VAR_coordMasterDirs)[coordIndex],
+			 atoi(aval(VAR_coordPorts)[coordIndex]),
+			 atoi(aval(VAR_poolerPorts)[coordIndex]));
+	return cleanCmd;
+}
+
+/*
+ * Cleanup coordinator master resources -- directory and socket.
+ */
+/*
+ * Clean the resources (data directory and socket files) of the coordinator
+ * masters named in nodeList.  Unknown names only produce a warning.
+ */
+int clean_coordinator_master(char **nodeList)
+{
+	char **targets = makeActualNodeList(nodeList);
+	cmdList_t *cmds = initCmdList();
+	int i;
+	int result;
+
+	for (i = 0; targets[i]; i++)
+	{
+		cmd_t *cleanCmd;
+
+		elog(INFO, "Clean coordinator master %s resources.\n", targets[i]);
+		cleanCmd = prepare_cleanCoordinatorMaster(targets[i]);
+		if (cleanCmd == NULL)
+			elog(WARNING, "WARNING: coordinator master %s not found.\n", targets[i]);
+		else
+			addCmd(cmds, cleanCmd);
+	}
+	result = doCmdList(cmds);
+	cleanCmdList(cmds);
+	CleanArray(targets);
+	elog(INFO, "Done.\n");
+	return (result);
+}
+
+/* Clean the resources of every configured coordinator master. */
+int clean_coordinator_master_all(void)
+{
+	int rc;
+
+	elog(INFO, "Cleaning all the coordinator masters resources.\n");
+	rc = clean_coordinator_master(aval(VAR_coordNames));
+	return rc;
+}
+
+/*
+ * Cleanup coordinator slave resources -- directory and the socket.
+ */
+/*
+ * Build the command that wipes the given coordinator slave's data directory
+ * (recreating it empty with mode 0700) and removes its socket files.
+ *
+ * Returns NULL when nodeName is not a coordinator or no slave is configured
+ * for it.
+ *
+ * Fix: the command was built with initCmd(aval(VAR_coordMasterServers)[idx]),
+ * i.e. it ran on the MASTER host, while the directories being removed
+ * (VAR_coordSlaveDirs) live on the SLAVE host.  It must target
+ * VAR_coordSlaveServers, matching prepare_killCoordinatorSlave.
+ */
+cmd_t *prepare_cleanCoordinatorSlave(char *nodeName)
+{
+	cmd_t *cmd;
+	int idx;
+
+	if ((idx = coordIdx(nodeName)) < 0)
+	{
+		elog(ERROR, "ERROR: %s is not a coordinator.\n", nodeName);
+		return NULL;
+	}
+	if (!doesExist(VAR_coordSlaveServers, idx) || is_none(aval(VAR_coordSlaveServers)[idx]))
+		return NULL;
+	/* Run the cleanup on the slave's own host, where its directory lives. */
+	cmd = initCmd(aval(VAR_coordSlaveServers)[idx]);
+	snprintf(newCommand(cmd), MAXLINE,
+			 "rm -rf %s;mkdir -p %s;chmod 0700 %s; rm -f /tmp/.s.*%d*; rm -f /tmp/.s.*%d*",
+			 aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordSlaveDirs)[idx],
+			 atoi(aval(VAR_coordPorts)[idx]), atoi(aval(VAR_poolerPorts)[idx]));
+	return cmd;
+}
+
+/*
+ * Clean the resources (data directory and socket files) of the coordinator
+ * slaves named in nodeList.  Nodes without a configured slave only produce
+ * a warning.
+ */
+int clean_coordinator_slave(char **nodeList)
+{
+	char **targets = makeActualNodeList(nodeList);
+	cmdList_t *cmds = initCmdList();
+	int i;
+	int result;
+
+	for (i = 0; targets[i]; i++)
+	{
+		cmd_t *cleanCmd;
+
+		elog(INFO, "Clean coordinator slave %s resources.\n", targets[i]);
+		cleanCmd = prepare_cleanCoordinatorSlave(targets[i]);
+		if (cleanCmd == NULL)
+			elog(WARNING, "WARNING: coordinator slave %s not found.\n", targets[i]);
+		else
+			addCmd(cmds, cleanCmd);
+	}
+	result = doCmdList(cmds);
+	cleanCmdList(cmds);
+	CleanArray(targets);
+	elog(INFO, "Done.\n");
+	return (result);
+}
+
+/*
+ * Clean the resources of every configured coordinator slave.
+ *
+ * Fix: the INFO message misspelled "coordinator" as "cooridnator".
+ */
+int clean_coordinator_slave_all(void)
+{
+	elog(INFO, "Cleaning all the coordinator slaves resources.\n");
+	return(clean_coordinator_slave(aval(VAR_coordNames)));
+}
+
+/*------------------------------------------------------------------------
+ *
+ * Add command
+ *
+ *-----------------------------------------------------------------------*/
+/*
+ * Add a new coordinator master to a running cluster.
+ *
+ * name/host/port/pooler/dir describe the new node.  Returns 0 on success,
+ * 1 on any validation or I/O failure.
+ *
+ * Sequence: validate (all coordinators running, no name/port/dir conflict,
+ * consistent array sizes) -> extend the in-memory configuration arrays and
+ * rewrite/backup the pgxc_ctl configuration file -> initdb the new node and
+ * write its postgresql.conf/pg_hba.conf -> take the cluster-wide DDL lock
+ * via pgxc_lock_for_backup() on coordinator 0 -> pg_dumpall the schema and
+ * restore it into the new node started in restoremode -> start the node
+ * normally -> issue CREATE NODE on all other coordinators and (via EXECUTE
+ * DIRECT) on all datanodes -> release the lock and ALTER NODE on the new
+ * coordinator itself.  The order matters: the DDL lock must be held across
+ * dump, restore and node registration.
+ *
+ * NOTE(review): `date` is not declared locally — presumably a file-scope
+ * buffer shared with the other functions here; confirm.
+ * NOTE(review): the banner written to the configuration file says "GTM
+ * slave addition" — looks like a copy-paste slip, but it is runtime output
+ * so it is left untouched here.
+ */
+int add_coordinatorMaster(char *name, char *host, int port, int pooler, char *dir)
+{
+	FILE *f, *lockf;
+	int size, idx;
+	char port_s[MAXTOKEN+1];
+	char pooler_s[MAXTOKEN+1];
+	int gtmPxyIdx;
+	char *gtmHost;
+	char *gtmPort;
+	char pgdumpall_out[MAXPATH+1];
+	char **nodelist = NULL;
+	int ii, jj;
+	char **confFiles = NULL;
+
+	/* Check if all the coordinator masters are running */
+	if (!check_AllCoordRunning())
+	{
+		elog(ERROR, "ERROR: Some of the coordinator masters are not running. Cannot add one.\n");
+		return 1;
+	}
+	/* Check if there's no conflict with the current configuration */
+	if (checkNameConflict(name, FALSE))
+	{
+		elog(ERROR, "ERROR: Node name %s duplicate.\n", name);
+		return 1;
+	}
+	if (checkPortConflict(host, port) || checkPortConflict(host, pooler))
+	{
+		elog(ERROR, "ERROR: port numbrer (%d) or pooler port (%d) at host %s conflicts.\n", port, pooler, host);
+		return 1;
+	}
+	if (checkDirConflict(host, dir))
+	{
+		elog(ERROR, "ERROR: directory \"%s\" conflicts at host %s.\n", dir, host);
+		return 1;
+	}
+	/*
+	 * Check if coordinator masgter configuration is consistent
+	 */
+	idx = size = arraySizeName(VAR_coordNames);
+	if ((arraySizeName(VAR_coordPorts) != size) ||
+		(arraySizeName(VAR_poolerPorts) != size) ||
+		(arraySizeName(VAR_coordMasterServers) != size) ||
+		(arraySizeName(VAR_coordMasterDirs) != size) ||
+		(arraySizeName(VAR_coordMaxWALSenders) != size) ||
+		(arraySizeName(VAR_coordSpecificExtraConfig) != size) ||
+		(arraySizeName(VAR_coordSpecificExtraPgHba) != size))
+	{
+		elog(ERROR, "ERROR: sorry found some inconflicts in coordinator master configuration.");
+		return 1;
+	}
+	/*
+	 * Now reconfigure
+	 */
+	/*
+	 * 000 We need another way to configure specific pg_hba.conf and max_wal_senders.
+	 */
+	/* The new node is appended at index `idx` (== current array size). */
+	snprintf(port_s, MAXTOKEN, "%d", port);
+	snprintf(pooler_s, MAXTOKEN, "%d", pooler);
+	assign_arrayEl(VAR_coordNames, idx, name, NULL);
+	assign_arrayEl(VAR_coordMasterServers, idx, host, NULL);
+	assign_arrayEl(VAR_coordPorts, idx, port_s, "-1");
+	assign_arrayEl(VAR_poolerPorts, idx, pooler_s, NULL);
+	assign_arrayEl(VAR_coordMasterDirs, idx, dir, NULL);
+	/* Copies entry 0's max_wal_senders for the new node. */
+	assign_arrayEl(VAR_coordMaxWALSenders, idx, aval(VAR_coordMaxWALSenders)[0], "-1");	/* Could be vulnerable */
+	assign_arrayEl(VAR_coordSlaveServers, idx, "none", NULL);
+	assign_arrayEl(VAR_coordSlaveDirs, idx, "none", NULL);
+	assign_arrayEl(VAR_coordArchLogDirs, idx, "none", NULL);
+	assign_arrayEl(VAR_coordSpecificExtraConfig, idx, "none", NULL);
+	assign_arrayEl(VAR_coordSpecificExtraPgHba, idx, "none", NULL);
+	handle_no_slaves();
+	/*
+	 * Update the configuration file and backup it
+	 */
+	/*
+	 * Take care of extra conf file
+	 */
+	if (doesExist(VAR_coordExtraConfig, 0) && !is_none(sval(VAR_coordExtraConfig)))
+		AddMember(confFiles, sval(VAR_coordExtraConfig));
+	if (doesExist(VAR_coordSpecificExtraConfig, idx) && !is_none(aval(VAR_coordSpecificExtraConfig)[idx]))
+		AddMember(confFiles, aval(VAR_coordSpecificExtraConfig)[idx]);
+	/*
+	 * Main part
+	 */
+	if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+	{
+		/* Should it be panic? */
+		elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+		return 1;
+	}
+	fprintf(f,
+			"#===================================================\n"
+			"# pgxc configuration file updated due to GTM slave addition\n"
+			"# %s\n",
+			timeStampString(date, MAXTOKEN+1));
+	fprintAval(f, VAR_coordNames);
+	fprintAval(f, VAR_coordMasterServers);
+	fprintAval(f, VAR_coordPorts);
+	fprintAval(f, VAR_poolerPorts);
+	fprintAval(f, VAR_coordMasterDirs);
+	fprintAval(f, VAR_coordMaxWALSenders);
+	fprintSval(f, VAR_coordSlave);
+	fprintAval(f, VAR_coordSlaveServers);
+	fprintAval(f, VAR_coordSlaveDirs);
+	fprintAval(f, VAR_coordArchLogDirs);
+	fprintAval(f, VAR_coordSpecificExtraConfig);
+	fprintAval(f, VAR_coordSpecificExtraPgHba);
+	fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+	fclose(f);
+	backup_configuration();
+
+	/* Now add the master */
+
+	/*
+	 * Use the GTM proxy co-located with the new host if one exists;
+	 * otherwise fall back to the GTM master.
+	 * NOTE(review): the `> 0` test skips a proxy at index 0 — confirm
+	 * whether index 0 is a valid proxy slot.
+	 */
+	gtmPxyIdx = getEffectiveGtmProxyIdxFromServerName(host);
+	gtmHost = (gtmPxyIdx > 0) ? aval(VAR_gtmProxyServers)[gtmPxyIdx] : sval(VAR_gtmMasterServer);
+	gtmPort = (gtmPxyIdx > 0) ? aval(VAR_gtmProxyPorts)[gtmPxyIdx] : sval(VAR_gtmMasterPort);
+
+	/* initdb */
+	doImmediate(host, NULL, "initdb -D %s --nodename %s", dir, name);
+
+	/* Edit configurations */
+	if ((f = pgxc_popen_w(host, "cat >> %s/postgresql.conf", dir)))
+	{
+		appendFiles(f, confFiles);
+		fprintf(f,
+				"#===========================================\n"
+				"# Added at initialization. %s\n"
+				"port = %d\n"
+				"pooler_port = %d\n"
+				"gtm_host = '%s'\n"
+				"gtm_port = %d\n"
+				"# End of Additon\n",
+				timeStampString(date, MAXTOKEN+1),
+				port, pooler, gtmHost, atoi(gtmPort));
+		fclose(f);
+	}
+	CleanArray(confFiles);
+	jj = coordIdx(name);
+	if ((f = pgxc_popen_w(host, "cat >> %s/pg_hba.conf", dir)))
+	{
+		int kk;
+		for (kk = 0; aval(VAR_coordPgHbaEntries)[kk]; kk++)
+		{
+			fprintf(f,"host all %s %s trust\n",	sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+			if (isVarYes(VAR_coordSlave))
+				if (!is_none(aval(VAR_coordSlaveServers)[jj]))
+					fprintf(f, "host replication %s %s trust\n",
+							sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+		}
+		fprintf(f, "# End of addition\n");
+		fclose(f);
+	}
+
+	/* Lock ddl */
+	/* The psql session stays open so the backup lock is held until the end. */
+	if ((lockf = pgxc_popen_wRaw("psql -h %s -p %s %s", aval(VAR_coordMasterServers)[0], aval(VAR_coordPorts)[0], sval(VAR_defaultDatabase))) == NULL)
+	{
+		elog(ERROR, "ERROR: could not open psql command, %s\n", strerror(errno));
+		return 1;
+	}
+	fprintf(lockf, "select pgxc_lock_for_backup();\n");	/* Keep open until the end of the addition. */
+	fflush(lockf);
+
+	/* pg_dumpall */
+	createLocalFileName(GENERAL, pgdumpall_out, MAXPATH);
+	doImmediateRaw("pg_dumpall -p %s -h %s -s --include-nodes --dump-nodes --file=%s",
+				   aval(VAR_coordPorts)[0], aval(VAR_coordMasterServers)[0], pgdumpall_out);
+
+	/* Start the new coordinator */
+	doImmediate(host, NULL, "pg_ctl start -Z restoremode -D %s -o -i", dir);
+
+	/* Allow the new coordinator to start up by sleeping for a couple of seconds */
+	pg_usleep(2000000L);
+
+	/* Restore the backup */
+	doImmediateRaw("psql -h %s -p %d -d %s -f %s", host, port, sval(VAR_defaultDatabase), pgdumpall_out);
+	doImmediateRaw("rm -f %s", pgdumpall_out);
+
+	/* Quit the new coordinator */
+	doImmediate(host, NULL, "pg_ctl stop -Z restoremode -D %s", dir);
+
+	/* Start the new coordinator with --coordinator option */
+	AddMember(nodelist, name);
+	start_coordinator_master(nodelist);
+	CleanArray(nodelist);
+
+	/* Issue CREATE NODE on coordinators */
+	for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+	{
+		if (!is_none(aval(VAR_coordNames)[ii]) && strcmp(aval(VAR_coordNames)[ii], name) != 0)
+		{
+			if ((f = pgxc_popen_wRaw("psql -h %s -p %d %s", aval(VAR_coordMasterServers)[ii], atoi(aval(VAR_coordPorts)[ii]), sval(VAR_defaultDatabase))) == NULL)
+			{
+				elog(ERROR, "ERROR: cannot connect to the coordinator master %s.\n", aval(VAR_coordNames)[ii]);
+				continue;
+			}
+			fprintf(f, "CREATE NODE %s WITH (TYPE = 'coordinator', host='%s', PORT=%d);\n", name, host, port);
+			fprintf(f, "\\q\n");
+			fclose(f);
+		}
+	}
+	/* Issue CREATE NODE on datanodes */
+	/* Routed through coordinator 0 using EXECUTE DIRECT. */
+	for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+	{
+		if (!is_none(aval(VAR_datanodeNames)[ii]))
+		{
+			if ((f = pgxc_popen_wRaw("psql -h %s -p %d %s", aval(VAR_coordMasterServers)[0], atoi(aval(VAR_coordPorts)[0]), sval(VAR_defaultDatabase))) == NULL)
+			{
+				elog(ERROR, "ERROR: cannot connect to the coordinator master %s.\n", aval(VAR_coordNames)[ii]);
+				continue;
+			}
+			fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE = ''coordinator'', host=''%s'', PORT=%d)';\n", aval(VAR_datanodeNames)[ii], name, host, port);
+			fprintf(f, "\\q\n");
+			fclose(f);
+		}
+	}
+	/* Quit DDL lokkup session */
+	fprintf(lockf, "\\q\n");
+	fclose(lockf);
+	/* Finally fix up the new coordinator's own catalog entry for itself. */
+	if ((f = pgxc_popen_wRaw("psql -h %s -p %d %s", host, port, sval(VAR_defaultDatabase))) == NULL)
+		elog(ERROR, "ERROR: cannot connect to the coordinator master %s.\n", name);
+	else
+	{
+		fprintf(f, "ALTER NODE %s WITH (host='%s', PORT=%d);\n", name, host, port);
+		fprintf(f, "\\q\n");
+		fclose(f);
+	}
+	return 0;
+}
+
+/*
+ * Add a slave for the existing coordinator master `name`.
+ *
+ * host/dir/archDir give the slave's server, data directory and WAL archive
+ * directory.  Returns 0 on success, 1 on any validation or I/O failure.
+ *
+ * Sequence: validate (known coordinator, no slave yet, no dir/port clash,
+ * master running) -> create the slave's directories -> reconfigure the
+ * master for WAL archiving and replication -> record the slave in the
+ * pgxc_ctl configuration and back it up -> stop/start the master (fast
+ * shutdown, see comment below) -> pg_basebackup the master into the slave
+ * directory -> write the slave's postgresql.conf and recovery.conf ->
+ * start the slave.
+ *
+ * NOTE(review): `date` is not declared locally — presumably a file-scope
+ * buffer; confirm.
+ */
+int add_coordinatorSlave(char *name, char *host, char *dir, char *archDir)
+{
+	int idx;
+	FILE *f;
+
+	/* Check if the name is valid coordinator */
+	if ((idx = coordIdx(name)) < 0)
+	{
+		elog(ERROR, "ERROR: Specified coordiantor %s is not configured.\n", name);
+		return 1;
+	}
+	/* Check if the coordinator slave is not configred */
+	if (isVarYes(VAR_coordSlave) && doesExist(VAR_coordSlaveServers, idx) && !is_none(aval(VAR_coordSlaveServers)[idx]))
+	{
+		elog(ERROR, "ERROR: Slave for the coordinator %s has already been condigired.\n", name);
+		return 1;
+	}
+	/* Check if the resource does not conflict */
+	if (strcmp(dir, archDir) == 0)
+	{
+		elog(ERROR, "ERROR: working directory is the same as WAL archive directory.\n");
+		return 1;
+	}
+	/*
+	 * We don't check the name conflict here because acquiring valid coordiinator index means that
+	 * there's no name conflict.
+	 */
+	/* The slave reuses the master's port, hence the port check on the slave host. */
+	if (checkPortConflict(host, atoi(aval(VAR_coordPorts)[idx])))
+	{
+		elog(ERROR, "ERROR: the port %s has already been used in the host %s.\n",  aval(VAR_coordPorts)[idx], host);
+		return 1;
+	}
+	if (checkDirConflict(host, dir) || checkDirConflict(host, archDir))
+	{
+		elog(ERROR, "ERROR: directory %s or %s has already been used by other node.\n", dir, archDir);
+		return 1;
+	}
+	/* Check if the coordinator master is running */
+	if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) != 0)
+	{
+		elog(ERROR, "ERROR: Coordinator master %s is not running.\n", name);
+		return 1;
+	}
+	/* Prepare the resources (directories) */
+	doImmediate(host, NULL, "rm -rf %s; mkdir -p %s;chmod 0700 %s", dir, dir, dir);
+	doImmediate(host, NULL, "rm -rf %s; mkdir -p %s;chmod 0700 %s", archDir, archDir, archDir);
+	/* Reconfigure the master with WAL archive */
+	/* Update the configuration and backup the configuration file */
+	if ((f = pgxc_popen_w(aval(VAR_coordMasterServers)[idx], "cat >> %s/postgresql.conf", aval(VAR_coordMasterDirs)[idx])) == NULL)
+	{
+		elog(ERROR, "ERROR: Cannot open coordnator master's configuration file, %s/postgresql.conf, %s\n",
+			 aval(VAR_coordMasterDirs)[idx], strerror(errno));
+		return 1;
+	}
+	/* Master ships WAL to the slave's archive directory via rsync. */
+	fprintf(f,
+			"#========================================\n"
+			"# Addition for log shipping, %s\n"
+			"wal_level = hot_standby\n"
+			"archive_mode = on\n"
+			"archive_command = 'rsync %%p %s@%s:%s/%%f'\n"
+			"max_wal_senders = %d\n"
+			"# End of Addition\n",
+			timeStampString(date, MAXPATH),
+			sval(VAR_pgxcUser), host, archDir,
+			getDefaultWalSender(TRUE));
+	fclose(f);
+	/* pg_hba.conf for replication */
+	if ((f = pgxc_popen_w(aval(VAR_coordMasterServers)[idx], "cat >> %s/pg_hba.conf", aval(VAR_coordMasterDirs)[idx])) == NULL)
+	{
+		elog(ERROR, "ERROR: Cannot open coordinator master's pg_hba.conf file, %s/pg_hba.conf, %s\n",
+			 aval(VAR_coordMasterDirs)[idx], strerror(errno));
+		return 1;
+	}
+	fprintf(f,
+			"#================================================\n"
+			"# Additional entry by adding the slave, %s\n"
+			"host replication %s %s/32 trust\n"
+			"# End of addition ===============================\n",
+			timeStampString(date, MAXPATH),
+			sval(VAR_pgxcOwner), getIpAddress(host));
+	fclose(f);
+	/* Reconfigure pgxc_ctl configuration with the new slave */
+	/* Need an API to expand the array to desired size */
+	if ((extendVar(VAR_coordSlaveServers, idx, "none") != 0) ||
+		(extendVar(VAR_coordSlaveDirs, idx, "none")  != 0) ||
+		(extendVar(VAR_coordArchLogDirs, idx, "none") != 0))
+	{
+		elog(PANIC, "PANIC: Internal error, inconsitent coordinator information\n");
+		return 1;
+	}
+	if (!isVarYes(VAR_coordSlave))
+		assign_sval(VAR_coordSlave, "y");
+	assign_arrayEl(VAR_coordSlaveServers, idx, host, NULL);
+	assign_arrayEl(VAR_coordSlaveDirs, idx, dir, NULL);
+	assign_arrayEl(VAR_coordArchLogDirs, idx, archDir, NULL);
+	/* Update the configuration file and backup it */
+	if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+	{
+		/* Should it be panic? */
+		elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+		return 1;
+	}
+	fprintf(f,
+			"#===================================================\n"
+			"# pgxc configuration file updated due to coordinator slave addition\n"
+			"# %s\n",
+			timeStampString(date, MAXTOKEN+1));
+	fprintSval(f, VAR_coordSlave);
+	fprintAval(f, VAR_coordSlaveServers);
+	fprintAval(f, VAR_coordArchLogDirs);
+	fprintAval(f, VAR_coordSlaveDirs);
+	fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+	fclose(f);
+	backup_configuration();
+
+	/* Restart the master */
+	/*
+	 * It's not a good idea to use "restart" here because some connection from other coordinators
+	 * may be alive.   They are posessed by the pooler and we have to reload the pool to release them,
+	 * which aborts all the transactions.
+	 *
+	 * Beacse we need to issue pgxc_pool_reload() at all the coordinators, we need to give up all the
+	 * transactions in the whole cluster.
+	 *
+	 * It is much better to shutdow the target coordinator master fast because it does not affect
+	 * transactions this coordinator is not involved.
+	 */
+	doImmediate(aval(VAR_coordMasterServers)[idx], NULL,
+				"pg_ctl stop -Z coordinator -D %s -m fast", aval(VAR_coordMasterDirs)[idx]);
+	doImmediate(aval(VAR_coordMasterServers)[idx], NULL,
+				"pg_ctl start -Z coordinator -D %s", aval(VAR_coordMasterDirs)[idx]);
+	/* pg_basebackup */
+	doImmediate(host, NULL, "pg_basebackup -p %s -h %s -D %s -x",
+				aval(VAR_coordPorts)[idx], aval(VAR_coordMasterServers)[idx], dir);
+	/* Update the slave configuration with hot standby and port */
+	if ((f = pgxc_popen_w(host, "cat >> %s/postgresql.conf", dir)) == NULL)
+	{
+		elog(ERROR, "ERROR: Cannot open the new slave's postgresql.conf, %s\n", strerror(errno));
+		return 1;
+	}
+	/*
+	 * NOTE(review): hot_standby = on combined with wal_level = minimal looks
+	 * inconsistent (hot standby normally requires a higher wal_level on the
+	 * primary it replays from) — confirm this is intended for this fork.
+	 */
+	fprintf(f,
+			"#==========================================\n"
+			"# Added to initialize the slave, %s\n"
+			"hot_standby = on\n"
+			"port = %d\n"
+			"wal_level = minimal\n"		/* WAL level --- minimal.   No cascade slave so far. */
+			"archive_mode = off\n"		/* No archive mode */
+			"archive_command = ''\n"	/* No archive mode */
+			"max_wal_senders = 0\n"		/* Minimum WAL senders */
+			"# End of Addition\n",
+			timeStampString(date, MAXTOKEN), atoi(aval(VAR_coordPorts)[idx]));
+	fclose(f);
+	/* Update the slave recovery.conf */
+	if ((f = pgxc_popen_w(host, "cat >> %s/recovery.conf", dir)) == NULL)
+	{
+		elog(ERROR, "ERROR: Cannot open the slave's recovery.conf, %s\n", strerror(errno));
+		return 1;
+	}
+	fprintf(f,
+			"#==========================================\n"
+			"# Added to add the slave, %s\n"
+			"standby_mode = on\n"
+			"primary_conninfo = 'host = %s port = %s "
+			"user = %s application_name = %s'\n"
+			"restore_command = 'cp %s/%%f %%p'\n"
+			"archive_cleanup_command = 'pg_archivecleanup %s %%r'\n"
+			"# End of addition\n",
+			timeStampString(date, MAXTOKEN), aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx],
+			sval(VAR_pgxcOwner), aval(VAR_coordNames)[idx],
+			aval(VAR_coordArchLogDirs)[idx], aval(VAR_coordArchLogDirs)[idx]);
+	fclose(f);
+
+	/* Start the slave */
+	doImmediate(host, NULL, "pg_ctl start -Z coordinator -D %s", dir);
+	return 0;
+}
+
+
+/*------------------------------------------------------------------------
+ *
+ * Remove command
+ *
+ *-----------------------------------------------------------------------*/
+/*
+ * Remove coordinator master `name` from the cluster.
+ *
+ * clean_opt non-zero additionally removes the node's data directory.
+ * Returns 0 on success, 1 on failure.
+ *
+ * The procedure (documented at length below): its slave is removed first
+ * if configured, DROP NODE is issued on every other coordinator and (via
+ * EXECUTE DIRECT through a surviving coordinator) on every datanode, the
+ * node itself is stopped, and the pgxc_ctl configuration is rewritten and
+ * backed up with the slot reset to "none".
+ */
+int remove_coordinatorMaster(char *name, int clean_opt)
+{
+	/*
+
+	Removing an existing coordinator
+	==========================
+
+	Assume a two coordinator cluster, COORD_1 & COORD_2
+	Suppose we want to remove COORD2 for any reason.
+
+	1. Stop the coordinator to be removed.
+	   In our example we need to stop COORD_2.
+
+	2. Connect to any of the coordinators except the one to be removed.
+	   In our example assuming COORD_1 is running on port 5432,
+	   the following command would connect to COORD_1
+
+	   psql postgres -p 5432
+
+	3. Drop the coordinator to be removed.
+	   For example to drop coordinator COORD_2
+
+	   DROP NODE COORD_2;
+
+	4. Update the connection information cached in pool.
+
+	   SELECT pgxc_pool_reload();
+
+	COORD_2 is now removed from the cluster & COORD_1 would work as if COORD_2 never existed.
+
+	CAUTION : If COORD_2 is still running and clients are connected to it, any queries issued would create inconsistencies in the cluster.
+
+	Please note that there is no need to block DDLs because either way DDLs will fail after step 1 and before step 4.
+
+	*/
+
+	int idx;
+	int ii;
+	FILE *f;
+	char **namelist = NULL;
+	char date[MAXTOKEN+1];
+
+	/* Check if the coordinator is configured */
+	if ((idx = coordIdx(name)) < 0)
+	{
+		elog(ERROR, "ERROR: Coordinator %s is not configured.\n", name);
+		return 1;
+	}
+	/* Check if all the other coordinators are running */
+	for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+	{
+		if ((ii != idx) && !is_none(aval(VAR_coordNames)[ii]) && (pingNode(aval(VAR_coordMasterServers)[ii], aval(VAR_coordPorts)[ii]) != 0))
+		{
+			elog(ERROR, "ERROR: Coordinator master %s is not running.\n", aval(VAR_coordNames)[ii]);
+			return 1;
+		}
+	}
+	/* Check if there's a slave configured */
+	if (doesExist(VAR_coordSlaveServers, idx) && !is_none(aval(VAR_coordSlaveServers)[idx]))
+		remove_coordinatorSlave(name, clean_opt);
+#if 0
+	/* Stop the coordinator master if running */
+	if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+	{
+		AddMember(namelist, name);
+		stop_coordinator_master(namelist, "fast");
+		CleanArray(namelist);
+	}
+	/* Cleanup the coordinator master resource if specified */
+	if (clean_opt)
+		doImmediate(aval(VAR_coordMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_coordMasterDirs)[idx]);
+#endif
+	/* Issue "drop node" at all the other coordinators */
+	for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+	{
+		if ((ii != idx) && doesExist(VAR_coordNames, ii) && !is_none(aval(VAR_coordNames)[ii]))
+		{
+			f = pgxc_popen_wRaw("psql -p %d -h %s %s", atoi(aval(VAR_coordPorts)[ii]), aval(VAR_coordMasterServers)[ii], sval(VAR_defaultDatabase));
+			if (f == NULL)
+			{
+				elog(ERROR, "ERROR: cannot begin psql for the coordinator master %s\n", aval(VAR_coordNames)[ii]);
+				continue;
+			}
+			fprintf(f, "DROP NODE %s;\n", name);
+			fprintf(f, "\\q");
+			fclose(f);
+		}
+	}
+	/* Issue "drop node" at all the datanodes */
+	for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+	{
+		if (doesExist(VAR_datanodeNames, ii) && !is_none(aval(VAR_datanodeNames)[ii]))
+		{
+			int coord_idx;
+
+			/*
+			 * Route EXECUTE DIRECT through a coordinator other than the one
+			 * being removed.
+			 * NOTE(review): this assumes the coordinator at index 1 (or 0)
+			 * exists and is not "none" — confirm for clusters whose surviving
+			 * coordinator sits at a higher index.
+			 */
+			if (idx == 0)
+				coord_idx = 1;
+			else
+				coord_idx = 0;
+
+			f = pgxc_popen_wRaw("psql -p %d -h %s %s", atoi(aval(VAR_coordPorts)[coord_idx]), aval(VAR_coordMasterServers)[coord_idx], sval(VAR_defaultDatabase));
+			if (f == NULL)
+			{
+				elog(ERROR, "ERROR: cannot begin psql for the coordinator master %s\n", aval(VAR_coordNames)[coord_idx]);
+				continue;
+			}
+			fprintf(f, "EXECUTE DIRECT ON (%s) 'DROP NODE %s';\n",
+					aval(VAR_datanodeNames)[ii], name);
+			fprintf(f, "\\q");
+			fclose(f);
+		}
+	}
+#if 1
+	/* Stop the coordinator master if running */
+	if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+	{
+		AddMember(namelist, name);
+		stop_coordinator_master(namelist, "fast");
+		CleanArray(namelist);
+	}
+	/* Cleanup the coordinator master resource if specified */
+	if (clean_opt)
+		doImmediate(aval(VAR_coordMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_coordMasterDirs)[idx]);
+#endif
+	/* Update configuration and backup --> should cleanup "none" entries here */
+	assign_arrayEl(VAR_coordNames, idx, "none", NULL);
+	assign_arrayEl(VAR_coordMasterDirs, idx, "none", NULL);
+	assign_arrayEl(VAR_coordPorts, idx, "-1", "-1");
+	assign_arrayEl(VAR_poolerPorts, idx, "-1", "-1");
+	assign_arrayEl(VAR_coordMasterServers, idx, "none", NULL);
+	assign_arrayEl(VAR_coordMaxWALSenders, idx, "0", "0");
+	assign_arrayEl(VAR_coordSlaveServers, idx, "none", NULL);
+	assign_arrayEl(VAR_coordSlaveDirs, idx, "none", NULL);
+	assign_arrayEl(VAR_coordArchLogDirs, idx, "none", NULL);
+	assign_arrayEl(VAR_coordSpecificExtraConfig, idx, "none", NULL);
+	handle_no_slaves();
+	/*
+	 * Write config files
+	 */
+	if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+	{
+		/* Should it be panic? */
+		elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+		return 1;
+	}
+	fprintf(f,
+			"#================================================================\n"
+			"# pgxc configuration file updated due to coodinator master removal\n"
+			"# %s\n",
+			timeStampString(date, MAXTOKEN+1));
+	fprintSval(f, VAR_coordSlave);
+	fprintAval(f, VAR_coordNames);
+	fprintAval(f, VAR_coordMasterDirs);
+	fprintAval(f, VAR_coordPorts);
+	fprintAval(f, VAR_poolerPorts);
+	fprintAval(f, VAR_coordMasterServers);
+	fprintAval(f, VAR_coordMaxWALSenders);
+	fprintAval(f, VAR_coordSlaveServers);
+	fprintAval(f, VAR_coordSlaveDirs);
+	fprintAval(f, VAR_coordArchLogDirs);
+	fprintAval(f, VAR_coordSpecificExtraConfig);
+	fclose(f);
+	backup_configuration();
+	return 0;
+}
+
+/*
+ * Remove the slave of coordinator `name`.
+ *
+ * clean_opt non-zero additionally cleans the slave's resources via
+ * clean_coordinator_slave().  Returns 0 on success, 1 on failure.
+ *
+ * Stops the slave if it answers, disables archiving/replication settings
+ * on the master and restarts it, then resets the slave's slots in the
+ * pgxc_ctl configuration and backs the file up.
+ *
+ * NOTE(review): `date` used by timeStampString() below is not declared
+ * locally — presumably a file-scope buffer; confirm.
+ */
+int remove_coordinatorSlave(char *name, int clean_opt)
+{
+	int idx;
+	char **nodelist = NULL;
+	FILE *f;
+
+	if (!isVarYes(VAR_coordSlave))
+	{
+		elog(ERROR, "ERROR: coordinator slave is not configured.\n");
+		return 1;
+	}
+	idx = coordIdx(name);
+	if (idx < 0)
+	{
+		elog(ERROR, "ERROR: coordinator %s is not configured.\n", name);
+		return 1;
+	}
+	if (!doesExist(VAR_coordSlaveServers, idx) || is_none(aval(VAR_coordSlaveServers)[idx]))
+	{
+		elog(ERROR, "ERROR: coordinator slave %s is not configured.\n", name);
+		return 1;
+	}
+	AddMember(nodelist, name);
+	/* The slave listens on the same port as its master (see add_coordinatorSlave). */
+	if (pingNode(aval(VAR_coordSlaveServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+		stop_coordinator_slave(nodelist, "immediate");
+	{
+		/* Inner scope: this `f` shadows the outer one on purpose. */
+		FILE *f;
+		if ((f = pgxc_popen_w(aval(VAR_coordMasterServers)[idx], "cat >> %s/postgresql.conf", aval(VAR_coordMasterDirs)[idx])) == NULL)
+		{
+			elog(ERROR, "ERROR: cannot open %s/postgresql.conf at %s, %s\n", aval(VAR_coordMasterDirs)[idx], aval(VAR_coordMasterServers)[idx], strerror(errno));
+			return 1;
+		}
+		/* Turn off everything the slave needed on the master side. */
+		fprintf(f,
+				"#=======================================\n"
+				"# Updated to remove the slave %s\n"
+				"archive_mode = off\n"
+				"synchronous_standby_names = ''\n"
+				"archive_command = ''\n"
+				"max_wal_senders = 0\n"
+				"wal_level = minimal\n"
+				"# End of the update\n",
+				timeStampString(date, MAXTOKEN));
+		fclose(f);
+	}
+	doImmediate(aval(VAR_coordMasterServers)[idx], NULL, "pg_ctl restart -Z coordinator -D %s", aval(VAR_coordMasterDirs)[idx]);
+	if (clean_opt)
+		clean_coordinator_slave(nodelist);
+	/*
+	 * Maintain variables
+	 */
+	assign_arrayEl(VAR_coordSlaveServers, idx, "none", NULL);
+	assign_arrayEl(VAR_coordSlaveDirs, idx, "none", NULL);
+	assign_arrayEl(VAR_coordArchLogDirs, idx, "none", NULL);
+	handle_no_slaves();
+	/*
+	 * Maintain configuration file
+	 */
+	if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+	{
+		/* Should it be panic? */
+		elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+		return 1;
+	}
+	fprintf(f,
+			"#================================================================\n"
+			"# pgxc configuration file updated due to coodinator slave removal\n"
+			"# %s\n",
+			timeStampString(date, MAXTOKEN));
+	fprintSval(f, VAR_coordSlave);
+	fprintAval(f, VAR_coordSlaveServers);
+	fprintAval(f, VAR_coordSlaveDirs);
+	fprintAval(f, VAR_coordArchLogDirs);
+	fclose(f);
+	backup_configuration();
+	CleanArray(nodelist);
+	return 0;
+
+}
+
+
+
+/*
+ * Start coordinator master ---------------------------------------------
+ */
+/* Start every configured coordinator master. */
+int start_coordinator_master_all(void)
+{
+	int rc;
+
+	elog(INFO, "Starting coordinator master.\n");
+	rc = start_coordinator_master(aval(VAR_coordNames));
+	return rc;
+}
+
+/*
+ * Build the pg_ctl command that starts the given coordinator master.
+ *
+ * Returns NULL (with a log message) when nodeName is not a coordinator or
+ * when the master already answers a ping (pingNode() == 0 means running).
+ *
+ * Fix: the already-running message was garbled ("Skip initilialization") —
+ * misspelled and talking about initialization although this function starts
+ * the node.
+ */
+cmd_t *prepare_startCoordinatorMaster(char *nodeName)
+{
+	cmd_t *cmd = NULL, *cmdPgCtl;
+	int idx;
+
+	if ((idx = coordIdx(nodeName)) < 0)
+	{
+		elog(WARNING, "WARNING: %s is not a coordinator, skipping.\n", nodeName);
+		return(NULL);
+	}
+	/*
+	 * Check if the coordinator is running
+	 */
+	if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+	{
+		elog(ERROR, "ERROR: target coordinator master %s is already running now. Skip starting it.\n",
+			 nodeName);
+		return(NULL);
+	}
+	cmd = cmdPgCtl = initCmd(aval(VAR_coordMasterServers)[idx]);
+	snprintf(newCommand(cmdPgCtl), MAXLINE,
+			 "pg_ctl start -Z coordinator -D %s -o -i",
+			 aval(VAR_coordMasterDirs)[idx]);
+	return(cmd);
+}
+
+/*
+ * Start the coordinator masters named in nodeList.
+ * Returns the aggregate result of running the start commands.
+ */
+int start_coordinator_master(char **nodeList)
+{
+	char **targets = makeActualNodeList(nodeList);
+	cmdList_t *cmds = initCmdList();
+	int i;
+	int result;
+
+	for (i = 0; targets[i]; i++)
+	{
+		cmd_t *startCmd;
+
+		elog(INFO, "Starting coordinator master %s\n", targets[i]);
+		startCmd = prepare_startCoordinatorMaster(targets[i]);
+		if (startCmd)
+			addCmd(cmds, startCmd);
+	}
+	result = doCmdList(cmds);
+	cleanCmdList(cmds);
+	CleanArray(targets);
+	elog(INFO, "Done.\n");
+	return(result);
+}
+
+/*
+ * Start coordinator slaves ----------------------------------------
+ */
+/* Start every configured coordinator slave. */
+int start_coordinator_slave_all(void)
+{
+	int rc;
+
+	elog(INFO, "Starting all the coordinator slaves.\n");
+	rc = start_coordinator_slave(aval(VAR_coordNames));
+	return rc;
+}
+
+/*
+ * Build the command chain that starts the given coordinator's slave and
+ * switches its master to synchronous replication.
+ *
+ * The chain is: (1) pg_ctl start of the slave, (2) append
+ * synchronous_commit/synchronous_standby_names to the master's
+ * postgresql.conf (delivered through the command's localStdin file),
+ * (3) pg_ctl reload on the master.
+ *
+ * Returns NULL when nodeName is not a coordinator, when the master is not
+ * running (the slave cannot sync without it), or when the local stdin file
+ * cannot be prepared (the partially built chain is freed via cleanCmd).
+ */
+cmd_t *prepare_startCoordinatorSlave(char *nodeName)
+{
+	int idx;
+	FILE *f;
+	char timestamp[MAXTOKEN+1];
+	cmd_t *cmd = NULL, *cmdPgCtlStart, *cmdPgConfMaster, *cmdMasterReload;
+
+	if ((idx = coordIdx(nodeName)) < 0)
+	{
+		elog(WARNING, "WARNING: %s is not a coordinator, skipping.\n", nodeName);
+		return(NULL);
+	}
+	/*
+	 * Check if the coordinator is running
+	 */
+	if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) != 0)
+	{
+		elog(ERROR, "ERROR: Coordinator Master %s is not runnig now.   Cannot start the slave.\n",
+			 aval(VAR_coordNames)[idx]);
+		return(NULL);
+	}
+	cmd = cmdPgCtlStart = initCmd(aval(VAR_coordSlaveServers)[idx]);
+	snprintf(newCommand(cmdPgCtlStart), MAXLINE,
+			 "pg_ctl start -Z coordinator -D %s -o -i",
+			 aval(VAR_coordSlaveDirs)[idx]);
+
+	/* Postgresql.conf at the Master */
+
+	appendCmdEl(cmdPgCtlStart, (cmdPgConfMaster = initCmd(aval(VAR_coordMasterServers)[idx])));
+	snprintf(newCommand(cmdPgConfMaster), MAXLINE,
+			 "cat >> %s/postgresql.conf", aval(VAR_coordMasterDirs)[idx]);
+	if ((f = prepareLocalStdin(newFilename(cmdPgConfMaster->localStdin), MAXPATH, NULL)) == NULL)
+	{
+		/* Could not stage the config snippet; free the whole chain. */
+		cleanCmd(cmd);
+		return(NULL);
+	}
+	/* The slave's node name doubles as its application_name for sync rep. */
+	fprintf(f,
+			"#==========================================================\n"
+			"# Added to start the slave in sync. mode, %s\n"
+			"synchronous_commit = on\n"
+			"synchronous_standby_names = '%s'\n"
+			"# End of the addition\n",
+			timeStampString(timestamp, MAXTOKEN),
+			aval(VAR_coordNames)[idx]);
+	fclose(f);
+
+	/* Reload postgresql.conf change */
+	appendCmdEl(cmdPgCtlStart, (cmdMasterReload = initCmd(aval(VAR_coordMasterServers)[idx])));
+	snprintf(newCommand(cmdMasterReload), MAXLINE,
+			 "pg_ctl reload -Z coordinator -D %s",
+			 aval(VAR_coordMasterDirs)[idx]);
+	return(cmd);
+}
+
+/*
+ * Start the slaves of the listed coordinators.
+ * Returns 1 when coordinator slaves are not configured at all,
+ * otherwise the doCmdList() result code.
+ */
+int start_coordinator_slave(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+ return(1);
+ }
+ cmdList = initCmdList();
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Starting coordinator slave %s.\n", actualNodeList[ii]);
+ /* prepare_* returns NULL for unknown nodes or when the master is down */
+ if ((cmd = prepare_startCoordinatorSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done\n");
+ return(rc);
+}
+
+/*
+ * Stop coordinator masters ---------------------------------------------------
+ */
+/* Does not check if immediate is valid here */
+/* Stop every coordinator master; "immediate" is passed through unchecked. */
+int stop_coordinator_master_all(char *immediate)
+{
+ int rc;
+
+ elog(INFO, "Stopping all the coordinator masters.\n");
+ rc = stop_coordinator_master(aval(VAR_coordNames), immediate);
+ return rc;
+}
+
+/*
+ * Build the command that stops the given coordinator master.
+ * "immediate" is a pg_ctl shutdown mode string; validity is not checked
+ * here.  Returns NULL when nodeName is not a coordinator.
+ */
+cmd_t *prepare_stopCoordinatorMaster(char *nodeName, char *immediate)
+{
+ int idx;
+ cmd_t *cmd;
+
+ idx = coordIdx(nodeName);
+ if (idx < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a coordinator.\n", nodeName);
+ return(NULL);
+ }
+ cmd = initCmd(aval(VAR_coordMasterServers)[idx]);
+ if (immediate == NULL)
+ snprintf(newCommand(cmd), MAXLINE,
+ "pg_ctl stop -Z coordinator -D %s",
+ aval(VAR_coordMasterDirs)[idx]);
+ else
+ snprintf(newCommand(cmd), MAXLINE,
+ "pg_ctl stop -Z coordinator -D %s -m %s",
+ aval(VAR_coordMasterDirs)[idx], immediate);
+ return(cmd);
+}
+
+
+/* Does not check if immediate is valid here. */
+/*
+ * Stop the listed coordinator masters.
+ * When no shutdown mode is given, default to FAST.  The mode string is
+ * not validated here.
+ */
+int stop_coordinator_master(char **nodeList, char *immediate)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ int rc;
+
+ if (immediate == NULL)
+ immediate = FAST;
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ cmd_t *cmd;
+ elog(INFO, "Stopping coordinator master %s.\n", actualNodeList[ii]);
+ /* prepare_* returns NULL when the name is not a coordinator */
+ if ((cmd = prepare_stopCoordinatorMaster(actualNodeList[ii], immediate)))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+
+/*
+ * Stop coordinator slaves ----------------------------------------------------
+ */
+/* Stop the slaves of all configured coordinators. */
+int stop_coordinator_slave_all(char *immediate)
+{
+ int rc;
+
+ elog(INFO, "Stopping all the coordinator slaves.\n");
+ rc = stop_coordinator_slave(aval(VAR_coordNames), immediate);
+ return rc;
+}
+
+/*
+ * Build the command chain that stops the slave of the given coordinator.
+ * If the master is running, first clear synchronous_standby_names on it
+ * (switch to asynchronous mode) so it does not block once the slave is
+ * gone, then stop the slave with pg_ctl.
+ * "immediate" is a pg_ctl shutdown mode string; not validated here.
+ */
+cmd_t *prepare_stopCoordinatorSlave(char *nodeName, char *immediate)
+{
+ int idx;
+ cmd_t *cmd = NULL, *cmdMasterReload, *cmdPgCtlStop;
+ FILE *f;
+ char localStdin[MAXPATH+1];
+ char timestamp[MAXTOKEN+1];
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a coordinator.\n", nodeName);
+ return(NULL);
+ }
+ if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+ {
+ /* Master is running. Need to switch log shipping to asynchronous mode. */
+ cmd = cmdMasterReload = initCmd(aval(VAR_coordMasterServers)[idx]);
+ if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ /* Fixed conf comment: "trun off the slave" typo; %s is the timestamp. */
+ fprintf(f,
+ "#=======================================\n"
+ "# Updated to turn off the slave, %s\n"
+ "synchronous_standby_names = ''\n"
+ "# End of the update\n",
+ timeStampString(timestamp, MAXTOKEN));
+ fclose(f);
+ snprintf(newCommand(cmdMasterReload), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_coordMasterDirs)[idx]);
+ cmdMasterReload->localStdin = Strdup(localStdin);
+ }
+ /* Chain pg_ctl stop after the master update, or run it on its own. */
+ if (cmd)
+ appendCmdEl(cmdMasterReload, (cmdPgCtlStop = initCmd(aval(VAR_coordSlaveServers)[idx])));
+ else
+ cmd = cmdPgCtlStop = initCmd(aval(VAR_coordSlaveServers)[idx]);
+ if (immediate)
+ snprintf(newCommand(cmdPgCtlStop), MAXLINE,
+ "pg_ctl stop -Z coordinator -D %s -m %s",
+ aval(VAR_coordSlaveDirs)[idx], immediate);
+ else
+ snprintf(newCommand(cmdPgCtlStop), MAXLINE,
+ "pg_ctl stop -Z coordinator -D %s",
+ aval(VAR_coordSlaveDirs)[idx]);
+ return(cmd);
+}
+
+
+/*
+ * Stop the listed coordinator slaves.
+ * Defaults the shutdown mode to FAST (now consistent with
+ * stop_coordinator_master, which used the FAST macro while this function
+ * used a bare "fast" literal).
+ */
+int stop_coordinator_slave(char **nodeList, char *immediate)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+ return(1);
+ }
+ if (immediate == NULL)
+ immediate = FAST;
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Stopping the coordinator slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_stopCoordinatorSlave(actualNodeList[ii], immediate)))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Failover coordinator ---------------------------------------------------------
+ */
+/*
+ * Fail over each listed coordinator to its configured slave.
+ * Returns 2 when no slaves are configured, a negative fatal code as soon
+ * as one failover aborts, otherwise the worst per-node status.
+ * Fixed: "coordiantors" typo and a leak of actualNodeList on both exits.
+ */
+int failover_coordinator(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int rc = 0;
+
+ elog(INFO, "Failover coordinators.\n");
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+ return(2);
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ int idx;
+ int rc_local;
+
+ elog(INFO, "Failover the coordinator %s.\n", actualNodeList[ii]);
+ if ((idx = coordIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator. Skipping.\n", actualNodeList[ii]);
+ continue;
+ }
+ if (is_none(aval(VAR_coordSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: slave of the coordinator %s is not configured. Skipping\n",
+ actualNodeList[ii]);
+ continue;
+ }
+ rc_local = failover_oneCoordinator(idx);
+ /* Negative means fatal: stop immediately.  Otherwise keep the worst rc. */
+ if (rc_local < 0)
+ {
+ CleanArray(actualNodeList);
+ return(rc_local);
+ }
+ else
+ if (rc_local > rc)
+ rc = rc_local;
+ }
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Fail over one coordinator (by index) to its slave: unregister the old
+ * master from GTM, promote the slave, point it at the effective
+ * gtm/gtm_proxy, restart it, persist the new topology in the pgxc_ctl
+ * configuration file, and finally ALTER NODE on every other running
+ * coordinator.  Returns the worst child exit status, or -1 on fatal error.
+ */
+static int failover_oneCoordinator(int coordIdx)
+{
+ int rc = 0;
+ int rc_local;
+ int jj;
+ int gtmPxyIdx;
+ char *gtmHost;
+ char *gtmPort;
+ FILE *f;
+ char timestamp[MAXTOKEN+1];
+
+#define checkRc() do{if(WEXITSTATUS(rc_local) > rc) rc = WEXITSTATUS(rc_local);}while(0)
+
+ /*
+ * Determine the target gtm.  Fixed: the gtm proxy arrays must be
+ * indexed with gtmPxyIdx -- they parallel the gtm proxy list, not the
+ * coordinator list, so indexing them with coordIdx read wrong (or
+ * out-of-bounds) entries.
+ */
+ gtmPxyIdx= getEffectiveGtmProxyIdxFromServerName(aval(VAR_coordSlaveServers)[coordIdx]);
+ gtmHost = (gtmPxyIdx < 0) ? sval(VAR_gtmMasterServer) : aval(VAR_gtmProxyServers)[gtmPxyIdx];
+ gtmPort = (gtmPxyIdx < 0) ? sval(VAR_gtmMasterPort) : aval(VAR_gtmProxyPorts)[gtmPxyIdx];
+ if (gtmPxyIdx >= 0)
+ elog(NOTICE, "Failover coordinator %s using gtm %s\n",
+ aval(VAR_coordNames)[coordIdx], aval(VAR_gtmProxyNames)[gtmPxyIdx]);
+ else
+ elog(NOTICE, "Failover coordinator %s using GTM itself\n",
+ aval(VAR_coordNames)[coordIdx]);
+
+ /* Unregister the coordinator from GTM */
+ unregister_coordinator(aval(VAR_coordNames)[coordIdx]);
+
+ /* Promote the slave */
+ rc_local = doImmediate(aval(VAR_coordSlaveServers)[coordIdx], NULL,
+ "pg_ctl promote -Z coordinator -D %s",
+ aval(VAR_coordSlaveDirs)[coordIdx]);
+ checkRc();
+
+ /* Reconfigure new coordinator master with new gtm_proxy or gtm */
+
+ if ((f = pgxc_popen_w(aval(VAR_coordSlaveServers)[coordIdx],
+ "cat >> %s/postgresql.conf",
+ aval(VAR_coordSlaveDirs)[coordIdx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Could not prepare to update postgresql.conf, %s", strerror(errno));
+ return(-1);
+ }
+ fprintf(f,
+ "#=================================================\n"
+ "# Added to promote, %s\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n"
+ "# End of addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ gtmHost, gtmPort);
+ fclose(f);
+
+ /* Restart coord Slave Server */
+ rc_local = doImmediate(aval(VAR_coordSlaveServers)[coordIdx], NULL,
+ "pg_ctl restart -Z coordinator -D %s -w -o -i; sleep 1",
+ aval(VAR_coordSlaveDirs)[coordIdx]);
+ checkRc();
+
+ /* Update the configuration variables: the slave becomes the new master. */
+ var_assign(&(aval(VAR_coordMasterServers)[coordIdx]), Strdup(aval(VAR_coordSlaveServers)[coordIdx]));
+ var_assign(&(aval(VAR_coordSlaveServers)[coordIdx]), Strdup("none"));
+ var_assign(&(aval(VAR_coordMasterDirs)[coordIdx]), Strdup(aval(VAR_coordSlaveDirs)[coordIdx]));
+ var_assign(&(aval(VAR_coordSlaveDirs)[coordIdx]), Strdup("none"));
+
+ /* Append the new topology to the pgxc_ctl configuration file. */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ elog(ERROR, "ERROR: Failed to open configuration file %s, %s\n", pgxc_ctl_config_path, strerror(errno));
+ return(-1);
+ }
+ fprintf(f,
+ "#=====================================================\n"
+ "# Updated due to the coordinator failover, %s, %s\n"
+ "coordMasterServers=( %s )\n"
+ "coordMasterDirs=( %s )\n"
+ "coordSlaveServers=( %s )\n"
+ "coordSlaveDirs=( %s )\n"
+ "# End of the update\n",
+ aval(VAR_coordNames)[coordIdx], timeStampString(timestamp, MAXTOKEN),
+ listValue(VAR_coordMasterServers),
+ listValue(VAR_coordMasterDirs),
+ listValue(VAR_coordSlaveServers),
+ listValue(VAR_coordSlaveDirs));
+ fclose(f);
+
+ /* Backup the configuration file */
+ if (isVarYes(VAR_configBackup))
+ {
+ rc_local = doConfigBackup();
+ checkRc();
+ }
+
+ /*
+ * Reconfigure the other coordinators with the new coordinator master
+ */
+ for (jj = 0; aval(VAR_coordNames)[jj]; jj++)
+ {
+ if (is_none(aval(VAR_coordMasterServers)[jj]))
+ continue;
+
+ if (pingNode(aval(VAR_coordMasterServers)[jj], aval(VAR_coordPorts)[jj]) != 0)
+ {
+ elog(ERROR, "Coordinator %s is not running. Skip reconfiguration for this coordinator.\n",
+ aval(VAR_coordNames)[jj]);
+ continue;
+ }
+ if ((f = pgxc_popen_wRaw("psql -p %s -h %s %s %s",
+ aval(VAR_coordPorts)[jj],
+ aval(VAR_coordMasterServers)[jj],
+ sval(VAR_defaultDatabase),
+ sval(VAR_pgxcOwner)))
+ == NULL)
+ {
+ elog(ERROR, "ERROR: failed to start psql for coordinator %s, %s\n", aval(VAR_coordNames)[jj], strerror(errno));
+ continue;
+ }
+ fprintf(f,
+ "ALTER NODE %s WITH (HOST='%s', PORT=%s);\n"
+ "select pgxc_pool_reload();\n"
+ "\\q\n",
+ aval(VAR_coordNames)[coordIdx], aval(VAR_coordMasterServers)[coordIdx], aval(VAR_coordPorts)[coordIdx]);
+ fclose(f);
+ }
+ return(rc);
+
+# undef checkRc
+}
+
+/*
+ * Show coordinator configuration
+ */
+/*
+ * Show the configuration of each listed coordinator master and, when
+ * slaves are configured, of its slave.  Unknown names are ignored.
+ */
+int show_config_coordMasterSlaveMulti(char **nodeList)
+{
+ int ii;
+
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ int idx = coordIdx(nodeList[ii]);
+
+ if (idx < 0)
+ continue;
+ show_config_coordMaster(TRUE, idx, aval(VAR_coordMasterServers)[idx]);
+ if (isVarYes(VAR_coordSlave))
+ show_config_coordSlave(TRUE, idx, aval(VAR_coordSlaveServers)[idx]);
+ }
+ unlockLogFile();
+ return 0;
+}
+
+/* Show the configuration of each listed coordinator master. */
+int show_config_coordMasterMulti(char **nodeList)
+{
+ int ii;
+
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ int idx = coordIdx(nodeList[ii]);
+
+ if (idx >= 0)
+ show_config_coordMaster(TRUE, idx, aval(VAR_coordMasterServers)[idx]);
+ }
+ unlockLogFile();
+ return 0;
+}
+
+/*
+ * Show the configuration of each listed coordinator slave.
+ * Returns 1 when no coordinator slaves are configured.
+ */
+int show_config_coordSlaveMulti(char **nodeList)
+{
+ int ii;
+
+ if (!isVarYes(VAR_coordSlave))
+ return(1);
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ int idx = coordIdx(nodeList[ii]);
+
+ if (idx >= 0)
+ show_config_coordSlave(TRUE, idx, aval(VAR_coordSlaveServers)[idx]);
+ }
+ unlockLogFile();
+ return 0;
+}
+
+/*
+ * Print the configuration of one coordinator master.
+ * flag: prefix the output with a "Coordinator Master:" label.
+ * hostname: when non-NULL, include "host: <hostname>".
+ * Fixed: strncat was called with MAXLINE as its size argument, but that
+ * argument is the number of chars to append, not the destination size --
+ * pass the remaining space so outBuf cannot overflow.
+ */
+int show_config_coordMaster(int flag, int idx, char *hostname)
+{
+ int ii;
+ char outBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ outBuf[0] = 0;
+ if (flag)
+ strncat(outBuf, "Coordinator Master: ", MAXLINE - strlen(outBuf));
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(outBuf, editBuf, MAXLINE - strlen(outBuf));
+ }
+ if (flag || hostname)
+ strncat(outBuf, "\n", MAXLINE - strlen(outBuf));
+ lockLogFile();
+ if (outBuf[0])
+ elog(NOTICE, "%s", outBuf);
+ elog(NOTICE, " Nodename: '%s', port: %s, pooler port: %s\n",
+ aval(VAR_coordNames)[idx], aval(VAR_coordPorts)[idx], aval(VAR_poolerPorts)[idx]);
+ elog(NOTICE, " MaxWalSenders: %s, Dir: '%s'\n",
+ aval(VAR_coordMaxWALSenders)[idx], aval(VAR_coordMasterDirs)[idx]);
+ elog(NOTICE, " ExtraConfig: '%s', Specific Extra Config: '%s'\n",
+ sval(VAR_coordExtraConfig), aval(VAR_coordSpecificExtraConfig)[idx]);
+ strncpy(outBuf, " pg_hba entries ( ", MAXLINE);
+ for (ii = 0; aval(VAR_coordPgHbaEntries)[ii]; ii++)
+ {
+ snprintf(editBuf, MAXPATH, "'%s' ", aval(VAR_coordPgHbaEntries)[ii]);
+ strncat(outBuf, editBuf, MAXLINE - strlen(outBuf));
+ }
+ elog(NOTICE, "%s)\n", outBuf);
+ elog(NOTICE, " Extra pg_hba: '%s', Specific Extra pg_hba: '%s'\n",
+ sval(VAR_coordExtraPgHba), aval(VAR_coordSpecificExtraPgHba)[idx]);
+ unlockLogFile();
+ return 0;
+}
+
+/*
+ * Print the configuration of one coordinator slave.
+ * flag: prefix the output with a "Coordinator Slave:" label.
+ * hostname: when non-NULL, include "host: <hostname>".
+ * Fixed: strncat size argument now reflects the remaining space in
+ * outBuf, not the total buffer size (see show_config_coordMaster).
+ */
+int show_config_coordSlave(int flag, int idx, char *hostname)
+{
+ char outBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ outBuf[0] = 0;
+ if (flag)
+ strncat(outBuf, "Coordinator Slave: ", MAXLINE - strlen(outBuf));
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(outBuf, editBuf, MAXLINE - strlen(outBuf));
+ }
+ if (flag || hostname)
+ strncat(outBuf, "\n", MAXLINE - strlen(outBuf));
+ lockLogFile();
+ if (outBuf[0])
+ elog(NOTICE, "%s", outBuf);
+ elog(NOTICE," Nodename: '%s', port: %s, pooler port: %s\n",
+ aval(VAR_coordNames)[idx], aval(VAR_coordPorts)[idx], aval(VAR_poolerPorts)[idx]);
+ elog(NOTICE, " Dir: '%s', Archive Log Dir: '%s'\n",
+ aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordArchLogDirs)[idx]);
+ unlockLogFile();
+ return 0;
+}
+
+
+/*
+ * Checks if all the coordinators are running
+ *
+ * Returns FALSE if any of them are not running.
+ */
+/*
+ * Check that every configured coordinator master (except entries set to
+ * "none") answers a ping.  Returns FALSE as soon as one does not.
+ */
+int check_AllCoordRunning(void)
+{
+ int ii;
+
+ for (ii = 0; aval(VAR_coordMasterServers)[ii]; ii++)
+ {
+ char *server = aval(VAR_coordMasterServers)[ii];
+
+ if (is_none(server))
+ continue;
+ if (pingNode(server, aval(VAR_coordPorts)[ii]) != 0)
+ return FALSE;
+ }
+ return TRUE;
+}
diff --git a/contrib/pgxc_ctl/coord_cmd.h b/contrib/pgxc_ctl/coord_cmd.h
new file mode 100644
index 0000000000..673e2c82f5
--- /dev/null
+++ b/contrib/pgxc_ctl/coord_cmd.h
@@ -0,0 +1,70 @@
+/*-------------------------------------------------------------------------
+ *
+ * coord_cmd.h
+ *
+ * Coordinator command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef COORD_CMD_H
+#define COORD_CMD_H
+
+#include "utils.h"
+
+extern int init_coordinator_master(char **nodeList);
+extern int init_coordinator_slave(char **nodeList);
+extern int init_coordinator_master_all(void);
+extern int init_coordinator_slave_all(void);
+extern cmd_t *prepare_initCoordinatorMaster(char *nodeName);
+extern cmd_t *prepare_initCoordinatorSlave(char *nodeName);
+
+extern int configure_nodes(char **nodeList);
+extern int configure_nodes_all(void);
+extern cmd_t *prepare_configureNode(char *nodeName);
+
+extern int kill_coordinator_master(char **nodeList);
+extern int kill_coordinator_master_all(void);
+extern int kill_coordinator_slave(char **nodeList);
+extern int kill_coordinator_slave_all(void);
+extern cmd_t *prepare_killCoordinatorMaster(char *nodeName);
+extern cmd_t *prepare_killCoordinatorSlave(char *nodeName);
+
+extern int clean_coordinator_master(char **nodeList);
+extern int clean_coordinator_master_all(void);
+extern int clean_coordinator_slave(char **nodeList);
+extern int clean_coordinator_slave_all(void);
+extern cmd_t *prepare_cleanCoordinatorMaster(char *nodeName);
+extern cmd_t *prepare_cleanCoordinatorSlave(char *nodeName);
+
+extern int start_coordinator_master(char **nodeList);
+extern int start_coordinator_master_all(void);
+extern int start_coordinator_slave(char **nodeList);
+extern int start_coordinator_slave_all(void);
+extern cmd_t *prepare_startCoordinatorMaster(char *nodeName);
+extern cmd_t *prepare_startCoordinatorSlave(char *nodeName);
+
+extern int stop_coordinator_master(char **nodeList, char *immediate);
+extern int stop_coordinator_master_all(char *immediate);
+extern int stop_coordinator_slave(char **nodeList, char *immediate);
+extern int stop_coordinator_slave_all(char *immediate);
+extern cmd_t *prepare_stopCoordinatorMaster(char *nodeName, char *immediate);
+extern cmd_t *prepare_stopCoordinatorSlave(char *nodeName, char *immediate);
+
+extern int add_coordinatorMaster(char *name, char *host, int port, int pooler, char *dir);
+extern int add_coordinatorSlave(char *name, char *host, char *dir, char *archDir);
+extern int remove_coordinatorMaster(char *name, int clean_opt);
+extern int remove_coordinatorSlave(char *name, int clean_opt);
+
+extern int failover_coordinator(char **nodeList);
+
+extern int show_config_coordMasterSlaveMulti(char **nodeList);
+extern int show_config_coordMasterMulti(char **nodeList);
+extern int show_config_coordSlaveMulti(char **nodeList);
+extern int show_config_coordMaster(int flag, int idx, char *hostname);
+extern int show_config_coordSlave(int flag, int idx, char *hostname);
+extern int check_AllCoordRunning(void);
+
+
+#endif /* COORD_CMD_H */
diff --git a/contrib/pgxc_ctl/coord_command.h b/contrib/pgxc_ctl/coord_command.h
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/contrib/pgxc_ctl/coord_command.h
@@ -0,0 +1 @@
+
diff --git a/contrib/pgxc_ctl/datanode_cmd.c b/contrib/pgxc_ctl/datanode_cmd.c
new file mode 100644
index 0000000000..0b5da0204c
--- /dev/null
+++ b/contrib/pgxc_ctl/datanode_cmd.c
@@ -0,0 +1,1970 @@
+/*-------------------------------------------------------------------------
+ *
+ * datanode_cmd.c
+ *
+ * Datanode command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "do_command.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+#include "datanode_cmd.h"
+#include "gtm_util.h"
+#include "coord_cmd.h"
+
+static char date[MAXTOKEN+1];
+
+/*
+ *======================================================================
+ *
+ * Datanode stuff
+ *
+ *=====================================================================
+ */
+static int failover_oneDatanode(int datanodeIdx);
+
+/*
+ * Initialize datanode master ------------------------------------
+ */
+/* Initialize every configured datanode master. */
+int init_datanode_master_all(void)
+{
+ int rc;
+
+ elog(NOTICE, "Initialize all the datanode masters.\n");
+ rc = init_datanode_master(aval(VAR_datanodeNames));
+ return rc;
+}
+
+/*
+ * Build the command chain that initializes one datanode master:
+ * initdb, postgresql.conf (logging, ports, gtm connection), optional
+ * log-shipping setup when a slave is configured, and pg_hba.conf.
+ * Returns NULL when nodeName is not a datanode or local stdin setup fails.
+ */
+cmd_t *prepare_initDatanodeMaster(char *nodeName)
+{
+ int idx;
+ int jj;
+ cmd_t *cmd, *cmdInitdb, *cmdPgConf, *cmdPgHba;
+ char *gtmHost;
+ char *gtmPort;
+ int gtmIdx;
+ char **fileList = NULL;
+ FILE *f;
+ char timeStamp[MAXTOKEN+1];
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ return(NULL);
+ /* Build each coordinator's initialize command */
+ cmd = cmdInitdb = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ snprintf(newCommand(cmdInitdb), MAXLINE,
+ "rm -rf %s; mkdir -p %s; initdb --nodename %s -D %s",
+ aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx],
+ aval(VAR_datanodeNames)[idx], aval(VAR_datanodeMasterDirs)[idx]);
+
+ /* Initialize postgresql.conf */
+ appendCmdEl(cmdInitdb, (cmdPgConf = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdPgConf), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin((cmdPgConf->localStdin = Malloc(MAXPATH+1)), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#===========================================\n"
+ "# Added at initialization. %s\n"
+ "log_destination = 'stderr'\n"
+ "logging_collector = on\n"
+ "log_directory = 'pg_log'\n"
+ "listen_addresses = '*'\n"
+ "max_connections = 100\n",
+ timeStampString(timeStamp, MAXTOKEN));
+ /* Append extra config files: common first, then node-specific. */
+ if (!is_none(sval(VAR_datanodeExtraConfig)))
+ AddMember(fileList, sval(VAR_datanodeExtraConfig));
+ if (!is_none(aval(VAR_datanodeSpecificExtraConfig)[idx]))
+ AddMember(fileList, aval(VAR_datanodeSpecificExtraConfig)[idx]);
+ appendFiles(f, fileList);
+ CleanArray(fileList);
+ /* NOTE(review): freeAndReset() right after CleanArray() looks redundant;
+ * confirm CleanArray() semantics before removing either call. */
+ freeAndReset(fileList);
+ /* Use the gtm proxy on this server when one exists, else the gtm master. */
+ gtmIdx = getEffectiveGtmProxyIdxFromServerName(aval(VAR_datanodeMasterServers)[idx]);
+ gtmHost = (gtmIdx < 0) ? sval(VAR_gtmMasterServer) : aval(VAR_gtmProxyServers)[gtmIdx];
+ gtmPort = (gtmIdx < 0) ? sval(VAR_gtmMasterPort) : aval(VAR_gtmProxyPorts)[gtmIdx];
+ fprintf(f,
+ "port = %s\n"
+#ifdef XCP
+ "pooler_port = %s\n"
+#endif
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n",
+ aval(VAR_datanodePorts)[idx],
+#ifdef XCP
+ aval(VAR_datanodePoolerPorts)[idx],
+#endif
+ gtmHost, gtmPort);
+ fclose(f);
+
+ /* Additional Initialization for log_shipping */
+ if (isVarYes(VAR_datanodeSlave) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ cmd_t *cmd_cleanDir, *cmd_PgConf;
+ /* This coordinator has a slave */
+
+ /* Build archive log target */
+ appendCmdEl(cmdInitdb, (cmd_cleanDir = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ snprintf(newCommand(cmd_cleanDir), MAXLINE,
+ "rm -rf %s;mkdir -p %s; chmod 0700 %s",
+ aval(VAR_datanodeArchLogDirs)[idx], aval(VAR_datanodeArchLogDirs)[idx],
+ aval(VAR_datanodeArchLogDirs)[idx]);
+
+ /* postgresql.conf */
+ appendCmdEl(cmdInitdb, (cmd_PgConf = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmd_PgConf), MAXLINE,
+ "cat >> %s/postgresql.conf", aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmd_PgConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ /* Ship WAL to the slave's archive dir over rsync. */
+ fprintf(f,
+ "wal_level = hot_standby\n"
+ "archive_mode = on\n"
+ "archive_command = 'rsync %%p %s@%s:%s/%%f'\n"
+ "max_wal_senders = %s\n"
+ "# End of Addition\n",
+ sval(VAR_pgxcUser), aval(VAR_datanodeSlaveServers)[idx], aval(VAR_datanodeArchLogDirs)[idx],
+ aval(VAR_datanodeMaxWALSenders)[idx]);
+ fclose(f);
+ }
+ else
+ {
+ cmd_t *cmd_PgConf;
+ appendCmdEl(cmdInitdb, (cmd_PgConf = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmd_PgConf), MAXLINE,
+ "cat >> %s/postgresql.conf", aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmd_PgConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f, "# End of Addition\n");
+ fclose(f);
+ }
+
+ /* pg_hba.conf */
+ appendCmdEl(cmdInitdb, (cmdPgHba = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdPgHba), MAXLINE,
+ "cat >> %s/pg_hba.conf", aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdPgHba->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#=================================================\n"
+ "# Addition at initialization, %s\n",
+ timeStampString(timeStamp, MAXTOKEN));
+ if (!is_none(sval(VAR_datanodeExtraPgHba)))
+ AddMember(fileList, sval(VAR_datanodeExtraPgHba));
+ if (!is_none(aval(VAR_datanodeSpecificExtraPgHba)[idx]))
+ AddMember(fileList, aval(VAR_datanodeSpecificExtraPgHba)[idx]);
+ appendFiles(f, fileList);
+ CleanArray(fileList);
+ for (jj = 0; aval(VAR_datanodePgHbaEntries)[jj]; jj++)
+ {
+ fprintf(f,
+ "host all %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[jj]);
+ /* Allow replication connections when this datanode has a slave. */
+ if (isVarYes(VAR_datanodeSlave))
+ if (!is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ fprintf(f,
+ "host replication %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[jj]);
+ }
+ fprintf(f, "# End of additon\n");
+ fclose(f);
+ return(cmd);
+}
+
+
+/*
+ * Initialize the listed datanode masters.
+ * nodeList is a NULL-terminated array of node names, expanded by
+ * makeActualNodeList().  Returns the doCmdList() result code.
+ */
+int init_datanode_master(char **nodeList)
+{
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for(ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Initialize the datanode master %s.\n", actualNodeList[ii]);
+ /* prepare_* returns NULL when the name is not a datanode */
+ if ((cmd = prepare_initDatanodeMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Initialize datanode slave ----------------------------------------------------
+ */
+/* Initialize the slaves of all configured datanodes. */
+int init_datanode_slave_all(void)
+{
+ int rc;
+
+ elog(INFO, "Initialize all the datanode slaves.\n");
+ rc = init_datanode_slave(aval(VAR_datanodeNames));
+ return rc;
+}
+
+/*
+ * Build the command chain that initializes the slave of the given datanode:
+ * rebuild the slave directory, take a base backup of the master (starting
+ * the master temporarily if it is down), then write recovery.conf and
+ * postgresql.conf for hot standby, and stop the master again if we
+ * started it.  Returns NULL when nodeName is not a datanode or local
+ * stdin setup fails.
+ */
+cmd_t *prepare_initDatanodeSlave(char *nodeName)
+{
+ cmd_t *cmd, *cmdBuildDir, *cmdStartMaster, *cmdBaseBkup, *cmdRecovConf, *cmdPgConf, *cmdStopMaster;
+ FILE *f;
+ int idx;
+ int startMaster;
+ char timestamp[MAXTOKEN+1];
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: specified node %s is not datanode. skipping.\n", nodeName);
+ return(NULL);
+ }
+ startMaster = FALSE;
+ /* Check if the datanode master is running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) != 0)
+ startMaster = TRUE;
+
+ /* Build slave's directory -1- */
+ cmd = cmdBuildDir = initCmd(aval(VAR_datanodeSlaveServers)[idx]);
+ snprintf(newCommand(cmdBuildDir), MAXLINE,
+ "rm -rf %s;mkdir -p %s; chmod 0700 %s",
+ aval(VAR_datanodeSlaveDirs)[idx], aval(VAR_datanodeSlaveDirs)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]);
+
+ /* Start datanode master if it is not running -2- */
+ if (startMaster)
+ {
+ appendCmdEl(cmdBuildDir, (cmdStartMaster = prepare_startDatanodeMaster(nodeName)));
+ }
+
+ /* Obtain base backup of the master */
+ appendCmdEl(cmdBuildDir, (cmdBaseBkup = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ snprintf(newCommand(cmdBaseBkup), MAXLINE,
+ "pg_basebackup -p %s -h %s -D %s -x",
+ aval(VAR_datanodePorts)[idx], aval(VAR_datanodeMasterServers)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]);
+
+ /* Configure recovery.conf of the slave */
+ appendCmdEl(cmdBuildDir, (cmdRecovConf = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ snprintf(newCommand(cmdRecovConf), MAXLINE,
+ "cat >> %s/recovery.conf",
+ aval(VAR_datanodeSlaveDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdRecovConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ /* Fixed: "port = %sd" wrote a stray 'd' into primary_conninfo,
+ * producing an invalid port value (e.g. "5432d"). */
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to initialize the slave, %s\n"
+ "standby_mode = on\n"
+ "primary_conninfo = 'host = %s port = %s user = %s application_name = %s'\n"
+ "restore_command = 'cp %s/%%f %%p'\n"
+ "archive_cleanup_command = 'pg_archivecleanup %s %%r'\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx],
+ sval(VAR_pgxcOwner), aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeArchLogDirs)[idx],
+ aval(VAR_datanodeArchLogDirs)[idx]);
+ fclose(f);
+
+ /* Configure slave's postgresql.conf */
+ appendCmdEl(cmdBuildDir, (cmdPgConf = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ /* Fixed: size was MAXPATH; command buffers are MAXLINE like all siblings. */
+ snprintf(newCommand(cmdPgConf), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeSlaveDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdPgConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to startup the slave, %s\n"
+ "hot_standby = on\n"
+ "port = %s\n"
+ "# End of addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_datanodePorts)[idx]);
+ fclose(f);
+
+ /* Stop datanode master if we started it above */
+ if (startMaster == TRUE)
+ appendCmdEl(cmdBuildDir, (cmdStopMaster = prepare_stopDatanodeMaster(aval(VAR_datanodeNames)[idx], FAST)));
+ return(cmd);
+}
+
+/*
+ * Initialize the slaves of the listed datanodes.
+ * Returns 1 when datanode slaves are not configured at all,
+ * otherwise the doCmdList() result code.
+ */
+int init_datanode_slave(char **nodeList)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datanode slave is not configured.\n");
+ return 1;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Initialize datanode slave %s\n", actualNodeList[ii]);
+ /* prepare_* returns NULL when the name is not a datanode */
+ if ((cmd = prepare_initDatanodeSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Start datanode master --------------------------------------------------
+ */
+/* Start every configured datanode master. */
+int start_datanode_master_all(void)
+{
+ int rc;
+
+ elog(INFO, "Starting all the datanode masters.\n");
+ rc = start_datanode_master(aval(VAR_datanodeNames));
+ return rc;
+}
+
+/*
+ * Build the command that starts the given datanode master.
+ * Returns NULL when nodeName is not a datanode or the node is already
+ * running.  Fixed: removed a cleanCmd() call on a pointer that was
+ * provably still NULL at that point (dead code at best, a crash at
+ * worst if cleanCmd() does not tolerate NULL).
+ */
+cmd_t *prepare_startDatanodeMaster(char *nodeName)
+{
+ cmd_t *cmdStartDatanodeMaster = NULL;
+ int idx;
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a datanode, skipping\n", nodeName);
+ return(NULL);
+ }
+ /* Check if the target is running; pingNode() == 0 means it answered. */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
+ {
+ elog(WARNING, "WARNING: datanode master %s is running now. Skipping.\n",
+ aval(VAR_datanodeNames)[idx]);
+ return(NULL);
+ }
+ cmdStartDatanodeMaster = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ snprintf(newCommand(cmdStartDatanodeMaster), MAXLINE,
+ "pg_ctl start -Z datanode -D %s -o -i", aval(VAR_datanodeMasterDirs)[idx]);
+ return(cmdStartDatanodeMaster);
+}
+
+/*
+ * Start the listed datanode masters.
+ * nodeList is a NULL-terminated array of node names, expanded by
+ * makeActualNodeList().  Returns the doCmdList() result code.
+ */
+int start_datanode_master(char **nodeList)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+
+ actualNodeList = makeActualNodeList(nodeList);
+
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Starting datanode master %s.\n", actualNodeList[ii]);
+ /* prepare_* returns NULL for unknown or already-running nodes */
+ if ((cmd = prepare_startDatanodeMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Start datanode slave --------------------------------------------------
+ */
+/* Start the slaves of all configured datanodes. */
+int start_datanode_slave_all(void)
+{
+ int rc;
+
+ elog(INFO, "Starting all the datanode slaves.\n");
+ rc = start_datanode_slave(aval(VAR_datanodeNames));
+ return rc;
+}
+
+/*
+ * Build the command chain that starts the slave of the given datanode
+ * and appends synchronous-replication settings to the master's
+ * postgresql.conf.  Returns NULL when nodeName is not a datanode or its
+ * master is not running.
+ * NOTE(review): unlike prepare_startCoordinatorSlave, no "pg_ctl reload"
+ * is issued on the master after the conf change -- confirm intended.
+ */
+cmd_t *prepare_startDatanodeSlave(char *nodeName)
+{
+ cmd_t *cmd, *cmdStartDatanodeSlave, *cmdMasterToSyncMode;
+ FILE *f;
+ int idx;
+ char timestamp[MAXTOKEN+1];
+
+ /* If the node really a datanode? */
+ if((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: node %s is not a datanode. Skipping\n", nodeName);
+ return(NULL);
+ }
+ /* Check if the datanode master is running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) != 0)
+ {
+ elog(WARNING, "WARNING: master of the datanode %s is not running. Skipping\n", nodeName);
+ return(NULL);
+ }
+
+ cmd = cmdStartDatanodeSlave = initCmd(aval(VAR_datanodeSlaveServers)[idx]);
+ snprintf(newCommand(cmdStartDatanodeSlave), MAXLINE,
+ "pg_ctl start -Z datanode -D %s",
+ aval(VAR_datanodeSlaveDirs)[idx]);
+
+ /* Change the master to synchronous mode */
+ appendCmdEl(cmdStartDatanodeSlave, (cmdMasterToSyncMode = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdMasterToSyncMode), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdMasterToSyncMode->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#==========================================================\n"
+ "# Added to start the slave in sync. mode, %s\n"
+ "synchronous_commit = on\n"
+ "synchronous_standby_names = '%s'\n"
+ "# End of the addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_datanodeNames)[idx]);
+ fclose(f);
+ return(cmd);
+}
+
+int start_datanode_slave(char **nodeList)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ char **actualNodeList;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datanode slave is not configured.\n");
+ return 1;
+ }
+ cmdList = initCmdList();
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ cmd_t *cmd;
+
+ elog(INFO, "Starting datanode slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_startDatanodeSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+
+/*
+ * Stop datanode master ------------------------------------------------
+ */
+cmd_t *prepare_stopDatanodeMaster(char *nodeName, char *immediate)
+{
+ cmd_t *cmdStopDatanodeMaster;
+ int idx;
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a datande. Skipping\n", nodeName);
+ return(NULL);
+ }
+ cmdStopDatanodeMaster = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ if (immediate)
+ snprintf(newCommand(cmdStopDatanodeMaster), MAXLINE,
+ "pg_ctl stop -Z datanode -D %s -m %s",
+ aval(VAR_datanodeMasterDirs)[idx], immediate);
+ else
+ snprintf(newCommand(cmdStopDatanodeMaster), MAXLINE,
+ "pg_ctl stop -Z datanode -D %s",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ return(cmdStopDatanodeMaster);
+}
+
+
+int stop_datanode_master_all(char *immediate)
+{
+ elog(INFO, "Stopping all the datanode masters.\n");
+ return(stop_datanode_master(aval(VAR_datanodeNames), immediate));
+}
+
+
+int stop_datanode_master(char **nodeList, char *immediate)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for(ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Stopping datanode master %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_stopDatanodeMaster(actualNodeList[ii], immediate)))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+
+/*
+ * Stop datanode slave --------------------------------------------------------
+ */
+cmd_t *prepare_stopDatanodeSlave(char *nodeName, char *immediate)
+{
+ int idx;
+ cmd_t *cmd, *cmdMasterToAsyncMode, *cmdStopSlave;
+ FILE *f;
+ char timestamp[MAXTOKEN+1];
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "%s is not a datanode. Skipping\n", nodeName);
+ return(NULL);
+ }
+ if (!doesExist(VAR_datanodeSlaveServers, idx) || is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(WARNING, "datanode %s does not have a slave. Skipping.\n", nodeName);
+ return(NULL);
+ }
+ /* Set the master to asynchronous mode */
+ cmd = cmdMasterToAsyncMode = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ snprintf(newCommand(cmdMasterToAsyncMode), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdMasterToAsyncMode->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#=======================================\n"
+ "# Updated to trun off the slave %s\n"
+ "synchronous_standby_names = ''\n"
+ "# End of the update\n",
+ timeStampString(timestamp, MAXTOKEN));
+ fclose(f);
+
+ /* Reload new config file if the master is running */
+ /* The next step might need improvement. When GTM is dead, the following may
+ * fail even though the master is running.
+ */
+ if (pingNode(aval(VAR_datanodeSlaveServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
+ {
+ cmd_t *cmdReloadMaster;
+
+ appendCmdEl(cmdMasterToAsyncMode, (cmdReloadMaster = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdReloadMaster), MAXLINE,
+ "pg_ctl reload -Z datanode -D %s",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ }
+
+ /* Stop the slave */
+ appendCmdEl(cmdMasterToAsyncMode, (cmdStopSlave = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ if (immediate)
+ snprintf(newCommand(cmdStopSlave), MAXLINE,
+ "pg_ctl stop -Z datanode -D %s -m %s", aval(VAR_datanodeSlaveDirs)[idx], immediate);
+ else
+ snprintf(newCommand(cmdStopSlave), MAXLINE,
+ "pg_ctl stop -Z datanode -D %s", aval(VAR_datanodeSlaveDirs)[idx]);
+ return(cmd);
+}
+
+
+int stop_datanode_slave_all(char *immediate)
+{
+ elog(INFO, "Stopping all the datanode slaves.\n");
+ return(stop_datanode_slave(aval(VAR_datanodeNames), immediate));
+}
+
+int stop_datanode_slave(char **nodeList, char *immediate)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "Datanode slave is not configured. Returning.\n");
+ return 1;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Stopping datanode slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_stopDatanodeSlave(actualNodeList[ii], immediate)))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Failover datanode ---------------------------------------------------------
+ */
+int failover_datanode(char **nodeList)
+{
+ int ii;
+ char **actualNodeList;
+ int rc = 0;
+
+ elog(INFO, "Failover specified datanodes.\n");
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datnaode slave is not configured.\n");
+ return 1;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ int idx;
+ int rc_local;
+
+ elog(INFO, "Failover the datanode %s.\n", actualNodeList[ii]);
+ if ((idx = datanodeIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode. Skipping.\n", actualNodeList[ii]);
+ continue;
+ }
+ if (is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: slave of the datanode %s is not configured. Skipping\n",
+ actualNodeList[ii]);
+ continue;
+ }
+ rc_local = failover_oneDatanode(idx);
+ if (rc_local < 0)
+ return(rc_local);
+ else
+ if (rc_local > rc)
+ rc = rc_local;
+ }
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
/*
 * Promote the slave of one datanode (identified by configuration
 * index) to become the new master.
 *
 * Order-sensitive sequence:
 *   1. Choose the GTM endpoint for the promoted node: a GTM proxy if
 *      one is effective on the slave's server, otherwise the GTM
 *      master itself.
 *   2. Unregister the (failed) master from GTM.
 *   3. "pg_ctl promote" the slave.
 *   4. Append gtm_host/gtm_port to the promoted node's
 *      postgresql.conf, then restart it.
 *   5. Swap master/slave entries in the in-memory variables, append
 *      the new values to the pgxc_ctl configuration file, and back the
 *      file up when configured to do so.
 *   6. On every running coordinator, ALTER NODE to the new host/port
 *      and pgxc_pool_reload() so pooled connections are refreshed.
 *
 * Returns the worst command exit status observed (0 on full success),
 * or -1 when a configuration file cannot be opened for writing.
 */
static int failover_oneDatanode(int datanodeIdx)
{
	int rc = 0;
	int rc_local;
	int jj;
	char *gtmHost;
	char *gtmPort;
	int gtmPxyIdx;
	FILE *f;
	char timestamp[MAXTOKEN+1];

/* Fold the latest command's exit status into the worst-so-far rc */
# define checkRc() do{if(WEXITSTATUS(rc_local) > rc) rc = WEXITSTATUS(rc_local);}while(0)

	/*
	 * Determine the target GTM
	 */
	gtmPxyIdx = getEffectiveGtmProxyIdxFromServerName(aval(VAR_datanodeSlaveServers)[datanodeIdx]);
	gtmHost = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyServers)[gtmPxyIdx] : sval(VAR_gtmMasterServer);
	gtmPort = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyPorts)[gtmPxyIdx] : sval(VAR_gtmMasterPort);
	if (gtmPxyIdx >= 0)
		elog(NOTICE, "Failover datanode %s using gtm %s\n",
			 aval(VAR_datanodeNames)[datanodeIdx], aval(VAR_gtmProxyNames)[gtmPxyIdx]);
	else
		elog(NOTICE, "Filover database %s using GTM itself\n",
			 aval(VAR_datanodeNames)[datanodeIdx]);

	/* Unregister the datanode */
	unregister_datanode(aval(VAR_datanodeNames)[datanodeIdx]);

	/* Promote the slave */
	rc_local = doImmediate(aval(VAR_datanodeSlaveServers)[datanodeIdx], NULL,
						   "pg_ctl promote -Z datanode -D %s",
						   aval(VAR_datanodeSlaveDirs)[datanodeIdx]);
	checkRc();

	/* Reconfigure new datanode master with new gtm_proxy or gtm */
	if ((f = pgxc_popen_w(aval(VAR_datanodeSlaveServers)[datanodeIdx],
						  "cat >> %s/postgresql.conf",
						  aval(VAR_datanodeSlaveDirs)[datanodeIdx])) == NULL)
	{
		elog(ERROR, "ERROR: Could not prepare to update postgresql.conf, %s", strerror(errno));
		return(-1);
	}
	fprintf(f,
			"#=================================================\n"
			"# Added to promote, %s\n"
			"gtm_host = '%s'\n"
			"gtm_port = %s\n"
			"# End of addition\n",
			timeStampString(timestamp, MAXTOKEN),
			gtmHost, gtmPort);
	fclose(f);

	/* Restart datanode slave (as the new master) */
	rc_local = doImmediate(aval(VAR_datanodeSlaveServers)[datanodeIdx], NULL,
						   "pg_ctl restart -w -Z datanode -D %s -o -i; sleep 1",
						   aval(VAR_datanodeSlaveDirs)[datanodeIdx]);
	checkRc();
	/*
	 * Update the configuration variable
	 * (the slave becomes the master; the slave slots are set to "none")
	 */
	var_assign(&(aval(VAR_datanodeMasterServers)[datanodeIdx]), Strdup(aval(VAR_datanodeSlaveServers)[datanodeIdx]));
	var_assign(&(aval(VAR_datanodeSlaveServers)[datanodeIdx]), Strdup("none"));
	var_assign(&(aval(VAR_datanodeMasterDirs)[datanodeIdx]), Strdup(aval(VAR_datanodeSlaveDirs)[datanodeIdx]));
	var_assign(&(aval(VAR_datanodeSlaveDirs)[datanodeIdx]), Strdup("none"));

	/*
	 * Update the configuration file
	 */
	if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
	{
		elog(ERROR, "ERROR: Failed to open configuration file %s, %s\n", pgxc_ctl_config_path, strerror(errno));
		return(-1);
	}
	fprintf(f,
			"#=====================================================\n"
			"# Updated due to the datanode failover, %s, %s\n"
			"datanodeMasterServers=( %s )\n"
			"datanodeMasterDirs=( %s )\n"
			"datanodeSlaveServers=( %s )\n"
			"datanodeSlaveDirs=( %s )\n"
			"# End of the update\n",
			aval(VAR_datanodeNames)[datanodeIdx], timeStampString(timestamp, MAXTOKEN),
			listValue(VAR_datanodeMasterServers),
			listValue(VAR_datanodeMasterDirs),
			listValue(VAR_datanodeSlaveServers),
			listValue(VAR_datanodeSlaveDirs));
	fclose(f);

	/* Backup the configuration file */
	if (isVarYes(VAR_configBackup))
	{
		rc_local = doConfigBackup();
		checkRc();
	}

	/*
	 * Reconfigure coordinators with new datanode
	 */
	for (jj = 0; aval(VAR_coordNames)[jj]; jj++)
	{
		if (is_none(aval(VAR_coordMasterServers)[jj]))
			continue;

		/* Skip coordinators that are down; they are NOT retried later */
		if (pingNode(aval(VAR_coordMasterServers)[jj], aval(VAR_coordPorts)[jj]) != 0)
		{
			elog(ERROR, "Coordinator %s is not running. Skip reconfiguration for this coordinator.\n",
				 aval(VAR_coordNames)[jj]);
			continue;
		}
		if ((f = pgxc_popen_wRaw("psql -p %d -h %s %s %s",
								 atoi(aval(VAR_coordPorts)[jj]),
								 aval(VAR_coordMasterServers)[jj],
								 sval(VAR_defaultDatabase),
								 sval(VAR_pgxcOwner)))
			== NULL)
		{
			elog(ERROR, "ERROR: failed to start psql for coordinator %s, %s\n", aval(VAR_coordNames)[jj], strerror(errno));
			continue;
		}
		fprintf(f,
				"ALTER NODE %s WITH (HOST='%s', PORT=%s);\n"
				"select pgxc_pool_reload();\n"
				"\\q\n",
				aval(VAR_datanodeNames)[datanodeIdx], aval(VAR_datanodeMasterServers)[datanodeIdx], aval(VAR_datanodePorts)[datanodeIdx]);
		fclose(f);
	}
	return rc;

# undef checkRc

}
+
+/*------------------------------------------------------------------------
+ *
+ * Add command
+ *
+ *-----------------------------------------------------------------------*/
/*
 * Add a new datanode master to a running cluster.
 *
 * Order-sensitive sequence:
 *   1. Sanity checks: all nodes running, no name/port/directory
 *      conflicts, configuration arrays consistent.
 *   2. Register the new node in the in-memory configuration and append
 *      the updated arrays to the pgxc_ctl configuration file (backed
 *      up afterwards).
 *   3. initdb the new node and write its postgresql.conf/pg_hba.conf.
 *   4. Take the cluster-wide DDL lock (pgxc_lock_for_backup) through a
 *      psql session that is kept open until the end, then pg_dumpall
 *      the schema from an existing datanode (or globals only).
 *   5. Start the new node in restore mode, restore the dump, stop it.
 *   6. Start it normally and issue CREATE/ALTER NODE on every
 *      coordinator and datanode.
 */
#ifdef XCP
int add_datanodeMaster(char *name, char *host, int port, int pooler, char *dir, char *restore_dname)
#else
int add_datanodeMaster(char *name, char *host, int port, char *dir, char *restore_dname)
#endif
{
	FILE *f, *lockf;
	int size, idx;
	char port_s[MAXTOKEN+1];
#ifdef XCP
	char pooler_s[MAXTOKEN+1];
#endif
	int gtmPxyIdx;
	char *gtmHost;
	char *gtmPort;
	char pgdumpall_out[MAXPATH+1];
	char **nodelist = NULL;
	int ii, jj, restore_dnode_idx;
	char **confFiles = NULL;
	char *only_globals = "-g";

	/* Check if all the datanodes are running */
	/* NOTE(review): check_AllDatanodeRunning() checks datanodes but the
	 * message below says "coordinator masters" -- looks like a copy-paste
	 * leftover; confirm which is intended. */
	if (!check_AllDatanodeRunning())
	{
		elog(ERROR, "ERROR: Some of the coordinator masters are not running. Cannot add new one.\n");
		return 1;
	}
	/* Check if there's no conflict with the current configuration */
	if (checkNameConflict(name, FALSE))
	{
		elog(ERROR, "ERROR: Node name %s duplicate.\n", name);
		return 1;
	}
#ifdef XCP
	if (checkPortConflict(host, port) || checkPortConflict(host, pooler))
	{
		elog(ERROR, "ERROR: port numbrer (%d) or pooler port (%d) at host %s conflicts.\n", port, pooler, host);
		return 1;
	}
#else
	if (checkPortConflict(host, port))
	{
		elog(ERROR, "ERROR: port numbrer (%d) at host %s conflicts.\n", port, host);
		return 1;
	}
#endif
	if (checkDirConflict(host, dir))
	{
		elog(ERROR, "ERROR: directory \"%s\" conflicts at host %s.\n", dir, host);
		return 1;
	}
	/*
	 * Check if datanode master configuration is consistent
	 * (all per-datanode arrays must have the same length)
	 */
	idx = size = arraySizeName(VAR_datanodeNames);
	if ((arraySizeName(VAR_datanodePorts) != size) ||
#ifdef XCP
		(arraySizeName(VAR_datanodePoolerPorts) != size) ||
#endif
		(arraySizeName(VAR_datanodeMasterServers) != size) ||
		(arraySizeName(VAR_datanodeMasterDirs) != size) ||
		(arraySizeName(VAR_datanodeMaxWALSenders) != size) ||
		(arraySizeName(VAR_datanodeSpecificExtraConfig) != size) ||
		(arraySizeName(VAR_datanodeSpecificExtraPgHba) != size))
	{
		elog(ERROR, "ERROR: sorry found some inconflicts in datanode master configuration.\n");
		return 1;
	}
	/*
	 * Now reconfigure
	 */
	/*
	 * 000 We need another way to configure specific pg_hba.conf and max_wal_senders.
	 */
	snprintf(port_s, MAXTOKEN, "%d", port);
#ifdef XCP
	snprintf(pooler_s, MAXTOKEN, "%d", pooler);
#endif
	/* The new node is appended at index idx (== previous array size) */
	assign_arrayEl(VAR_datanodeNames, idx, name, NULL);
	assign_arrayEl(VAR_datanodeMasterServers, idx, host, NULL);
	assign_arrayEl(VAR_datanodePorts, idx, port_s, "-1");
#ifdef XCP
	assign_arrayEl(VAR_datanodePoolerPorts, idx, pooler_s, "-1");
#endif
	assign_arrayEl(VAR_datanodeMasterDirs, idx, dir, NULL);
	assign_arrayEl(VAR_datanodeMaxWALSenders, idx, aval(VAR_datanodeMaxWALSenders)[0], NULL);	/* Could be vulnerable */
	assign_arrayEl(VAR_datanodeSlaveServers, idx, "none", NULL);
	assign_arrayEl(VAR_datanodeSlaveDirs, idx, "none", NULL);
	assign_arrayEl(VAR_datanodeArchLogDirs, idx, "none", NULL);
	assign_arrayEl(VAR_datanodeSpecificExtraConfig, idx, "none", NULL);
	assign_arrayEl(VAR_datanodeSpecificExtraPgHba, idx, "none", NULL);
	/*
	 * Update the configuration file and backup it
	 */
	/*
	 * Take care of extra conf file
	 */
	/* NOTE(review): the doesExist() test is on VAR_datanodeExtraConfig but
	 * the is_none() test reads VAR_coordExtraConfig -- presumably both
	 * should be the datanode variable; confirm. */
	if (doesExist(VAR_datanodeExtraConfig, 0) && !is_none(sval(VAR_coordExtraConfig)))
		AddMember(confFiles, sval(VAR_datanodeExtraConfig));
	if (doesExist(VAR_datanodeSpecificExtraConfig, idx) && !is_none(aval(VAR_datanodeSpecificExtraConfig)[idx]))
		AddMember(confFiles, aval(VAR_datanodeSpecificExtraConfig)[idx]);
	/*
	 * Main part
	 */
	if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
	{
		/* Should it be panic? */
		elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
		return 1;
	}
	fprintf(f,
			"#===================================================\n"
			"# pgxc configuration file updated due to datanode master addition\n"
			"# %s\n",
			timeStampString(date, MAXTOKEN+1));
	fprintAval(f, VAR_datanodeNames);
	fprintAval(f, VAR_datanodeMasterServers);
	fprintAval(f, VAR_datanodePorts);
#ifdef XCP
	fprintAval(f, VAR_datanodePoolerPorts);
#endif
	fprintAval(f, VAR_datanodeMasterDirs);
	fprintAval(f, VAR_datanodeMaxWALSenders);
	fprintAval(f, VAR_datanodeSlaveServers);
	fprintAval(f, VAR_datanodeSlaveDirs);
	fprintAval(f, VAR_datanodeArchLogDirs);
	fprintAval(f, VAR_datanodeSpecificExtraConfig);
	fprintAval(f, VAR_datanodeSpecificExtraPgHba);
	fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
	fclose(f);
	backup_configuration();

	/* Now add the master */

	/* NOTE(review): the ">" tests below ignore a proxy at index 0;
	 * failover_oneDatanode() uses ">= 0" for the same decision -- this
	 * looks like an off-by-one; confirm.  Also the lookup is by node
	 * name, while failover looks up by server name. */
	gtmPxyIdx = getEffectiveGtmProxyIdxFromServerName(name);
	gtmHost = (gtmPxyIdx > 0) ? aval(VAR_gtmProxyServers)[gtmPxyIdx] : sval(VAR_gtmMasterServer);
	gtmPort = (gtmPxyIdx > 0) ? aval(VAR_gtmProxyPorts)[gtmPxyIdx] : sval(VAR_gtmMasterPort);

	/* initdb */
	doImmediate(host, NULL, "initdb -D %s --nodename %s", dir, name);

	/* Edit configurations */
	if ((f = pgxc_popen_w(host, "cat >> %s/postgresql.conf", dir)))
	{
		appendFiles(f, confFiles);
		fprintf(f,
				"#===========================================\n"
				"# Added at initialization. %s\n"
				"port = %d\n"
#ifdef XCP
				"pooler_port = %d\n"
#endif
				"gtm_host = '%s'\n"
				"gtm_port = %s\n"
				"# End of Additon\n",
				timeStampString(date, MAXTOKEN+1),
#ifdef XCP
				port, pooler, gtmHost, gtmPort);
#else
				port, gtmHost, gtmPort);
#endif
		fclose(f);
	}
	CleanArray(confFiles);
	jj = datanodeIdx(name);
	if ((f = pgxc_popen_w(host, "cat >> %s/pg_hba.conf", dir)))
	{
		int kk;
		for (kk = 0; aval(VAR_datanodePgHbaEntries)[kk]; kk++)
		{
			fprintf(f,"host all %s %s trust\n", sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[kk]);
			if (isVarYes(VAR_datanodeSlave))
				if (!is_none(aval(VAR_datanodeSlaveServers)[jj]))
					fprintf(f, "host replication %s %s trust\n",
							sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[kk]);
		}
		fprintf(f, "# End of addition\n");
		fclose(f);
	}

	/* Locate the datanode to restore from, if one was specified */
	restore_dnode_idx = -1;
	for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
	{
		if (!is_none(aval(VAR_datanodeNames)[ii]))
		{
			if (strcmp(aval(VAR_datanodeNames)[ii], restore_dname) == 0)
				restore_dnode_idx = ii;
		}
	}
	if (strcmp("none", restore_dname) != 0 && restore_dnode_idx == -1)
	{
		elog(ERROR, "ERROR: improper datanode specified to restore from, %s\n", restore_dname);
		return 1;
	}

	/* Default to node 0 and dump only globals (-g); with an explicit
	 * restore node, dump its full schema instead. */
	if (restore_dnode_idx == -1)
	{
		restore_dnode_idx = 0;
	}
	else
		only_globals= " ";


	/* Lock ddl */
	if ((lockf = pgxc_popen_wRaw("psql -h %s -p %d %s", aval(VAR_datanodeMasterServers)[restore_dnode_idx], atoi(aval(VAR_datanodePorts)[restore_dnode_idx]), sval(VAR_defaultDatabase))) == NULL)
	{
		elog(ERROR, "ERROR: could not open psql command, %s\n", strerror(errno));
		return 1;
	}
	fprintf(lockf, "select pgxc_lock_for_backup();\n");	/* Keep open until the end of the addition. */
	fflush(lockf);

	/* pg_dumpall */
	createLocalFileName(GENERAL, pgdumpall_out, MAXPATH);
	doImmediateRaw("pg_dumpall -p %s -h %s -s --include-nodes --dump-nodes %s >%s",
				   aval(VAR_datanodePorts)[restore_dnode_idx],
				   aval(VAR_datanodeMasterServers)[restore_dnode_idx],
				   only_globals,
				   pgdumpall_out);

	/* Start the new datanode */
	doImmediate(host, NULL, "pg_ctl start -Z restoremode -D %s -o -i", dir);

	/* Allow the new datanode to start up by sleeping for a couple of seconds */
	pg_usleep(2000000L);

	/* Restore the backup */
	doImmediateRaw("psql -h %s -p %d -d %s -f %s", host, port, sval(VAR_defaultDatabase), pgdumpall_out);
	doImmediateRaw("rm -f %s", pgdumpall_out);

	/* Quit the new datanode */
	doImmediate(host, NULL, "pg_ctl stop -Z restoremode -D %s", dir);

	/* Start the new datanode with --datanode option */
	AddMember(nodelist, name);
	start_datanode_master(nodelist);
	CleanArray(nodelist);

	/* Issue CREATE NODE on coordinators */
	for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
	{
		if (!is_none(aval(VAR_coordNames)[ii]))
		{
			if ((f = pgxc_popen_wRaw("psql -h %s -p %s %s", aval(VAR_coordMasterServers)[ii], aval(VAR_coordPorts)[ii], sval(VAR_defaultDatabase))) == NULL)
			{
				elog(ERROR, "ERROR: cannot connect to the datanode master %s.\n", aval(VAR_coordNames)[ii]);
				continue;
			}
			fprintf(f, "CREATE NODE %s WITH (TYPE = 'datanode', host='%s', PORT=%d);\n", name, host, port);
			fprintf(f, "\\q\n");
			fclose(f);
		}
	}

	/* Issue CREATE NODE on datanodes (via EXECUTE DIRECT through the
	 * first coordinator; the new node itself gets ALTER NODE instead) */
	for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
	{
		if (!is_none(aval(VAR_datanodeNames)[ii]))
		{
			if ((f = pgxc_popen_wRaw("psql -h %s -p %s %s", aval(VAR_coordMasterServers)[0], aval(VAR_coordPorts)[0], sval(VAR_defaultDatabase))) == NULL)
			{
				elog(ERROR, "ERROR: cannot connect to the coordinator %s.\n", aval(VAR_coordNames)[0]);
				continue;
			}
			if (strcmp(aval(VAR_datanodeNames)[ii], name) != 0)
				fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE = ''datanode'', host=''%s'', PORT=%d)';\n", aval(VAR_datanodeNames)[ii], name, host, port);
			else
				fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE = ''datanode'', host=''%s'', PORT=%d)';\n", aval(VAR_datanodeNames)[ii], name, host, port);
			fprintf(f, "\\q\n");
			fclose(f);
		}
	}

	/* Quit DDL lookup session (releases pgxc_lock_for_backup) */
	fprintf(lockf, "\\q\n");
	fclose(lockf);
	return 0;

}
+
+
+int add_datanodeSlave(char *name, char *host, char *dir, char *archDir)
+{
+ int idx;
+ FILE *f;
+
+ /* Check if the name is valid datanode */
+ if ((idx = datanodeIdx(name)) < 0)
+ {
+ elog(ERROR, "ERROR: Specified datanodeiantor %s is not configured.\n", name);
+ return 1;
+ }
+ /* Check if the datanode slave is not configred */
+ if (isVarYes(VAR_datanodeSlave) && doesExist(VAR_datanodeSlaveServers, idx) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: Slave for the datanode %s has already been condigired.\n", name);
+ return 1;
+ }
+ /* Check if the resource does not conflict */
+ if (strcmp(dir, archDir) == 0)
+ {
+ elog(ERROR, "ERROR: working directory is the same as WAL archive directory.\n");
+ return 1;
+ }
+ /*
+ * We dont check the name conflict here because acquiring datanode index means that
+ * there's no name conflict.
+ */
+ if (checkPortConflict(host, atoi(aval(VAR_datanodePorts)[idx])))
+ {
+ elog(ERROR, "ERROR: the port %s has already been used in the host %s.\n", aval(VAR_datanodePorts)[idx], host);
+ return 1;
+ }
+ if (checkDirConflict(host, dir) || checkDirConflict(host, archDir))
+ {
+ elog(ERROR, "ERROR: directory %s or %s has already been used by other node.\n", dir, archDir);
+ return 1;
+ }
+ /* Check if the datanode master is running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) != 0)
+ {
+ elog(ERROR, "ERROR: Datanode master %s is not running.\n", name);
+ return 1;
+ }
+ /* Prepare the resources (directories) */
+ doImmediate(host, NULL, "rm -rf %s; mkdir -p %s;chmod 0700 %s", dir, dir, dir);
+ doImmediate(host, NULL, "rm -rf %s; mkdir -p %s;chmod 0700 %s", archDir, archDir, archDir);
+ /* Reconfigure the master with WAL archive */
+ /* Update the configuration and backup the configuration file */
+ if ((f = pgxc_popen_w(aval(VAR_datanodeMasterServers)[idx], "cat >> %s/postgresql.conf", aval(VAR_datanodeMasterDirs)[idx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open datanodenator master's configuration file, %s/postgresql.conf",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ return 1;
+ }
+ fprintf(f,
+ "#========================================\n"
+ "# Addition for log shipping, %s\n"
+ "wal_level = hot_standby\n"
+ "archive_mode = on\n"
+ "archive_command = 'rsync %%p %s@%s:%s/%%f'\n"
+ "max_wal_senders = %d\n"
+ "# End of Addition\n",
+ timeStampString(date, MAXPATH),
+ sval(VAR_pgxcUser), host, archDir,
+ getDefaultWalSender(FALSE));
+ fclose(f);
+ /* pg_hba.conf for replication */
+ if ((f = pgxc_popen_w(aval(VAR_datanodeMasterServers)[idx], "cat >> %s/pg_hba.conf", aval(VAR_datanodeMasterDirs)[idx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open datanode master's pg_hba.conf file, %s/pg_hba.conf, %s\n",
+ aval(VAR_datanodeMasterDirs)[idx], strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#================================================\n"
+ "# Additional entry by adding the slave, %s\n"
+ "host replication %s %s/32 trust\n"
+ "# End of addition ===============================\n",
+ timeStampString(date, MAXPATH),
+ sval(VAR_pgxcOwner), getIpAddress(host));
+ fclose(f);
+ /* Reconfigure pgxc_ctl configuration with the new slave */
+#if 0
+ /* Need an API to expand the array to desired size */
+ if ((extendVar(VAR_datanodeSlaveServers, idx, "none") != 0) ||
+ (extendVar(VAR_datanodeSlaveDirs, idx, "none") != 0) ||
+ (extendVar(VAR_datanodeArchLogDirs, idx, "none") != 0))
+ {
+ elog(PANIC, "PANIC: Internal error, inconsitent datanode information\n");
+ return 1;
+ }
+#endif
+ if (!isVarYes(VAR_datanodeSlave))
+ assign_sval(VAR_datanodeSlave, "y");
+ assign_arrayEl(VAR_datanodeSlaveServers, idx, host, NULL);
+ assign_arrayEl(VAR_datanodeSlaveDirs, idx, dir, NULL);
+ assign_arrayEl(VAR_datanodeArchLogDirs, idx, archDir, NULL);
+ /* Update the configuration file and backup it */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to datanode slave addition\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_datanodeSlave);
+ fprintAval(f, VAR_datanodeSlaveServers);
+ fprintAval(f, VAR_datanodeArchLogDirs);
+ fprintAval(f, VAR_datanodeSlaveDirs);
+ fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+
+ /* Restart the master */
+ /*
+ * It's not a good idea to use "restart" here because some connection from other coordinators
+ * may be alive. They are posessed by the pooler and we have to reload the pool to release them,
+ * which aborts all the transactions.
+ *
+ * Beacse we need to issue pgxc_pool_reload() at all the coordinators, we need to give up all the
+ * transactions in the whole cluster.
+ *
+ * It is much better to shutdow the target coordinator master fast because it does not affect
+ * transactions this coordinator is not involved.
+ */
+ doImmediate(aval(VAR_coordMasterServers)[idx], NULL,
+ "pg_ctl stop -Z datanode -D %s -m fast", aval(VAR_datanodeMasterDirs)[idx]);
+ doImmediate(aval(VAR_coordMasterServers)[idx], NULL,
+ "pg_ctl start -Z datanode -D %s", aval(VAR_datanodeMasterDirs)[idx]);
+ /* pg_basebackup */
+ doImmediate(host, NULL, "pg_basebackup -p %s -h %s -D %s -x",
+ aval(VAR_datanodePorts)[idx], aval(VAR_datanodeMasterServers)[idx], dir);
+ /* Update the slave configuration with hot standby and port */
+ if ((f = pgxc_popen_w(host, "cat >> %s/postgresql.conf", dir)) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open the new slave's postgresql.conf, %s\n", strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to initialize the slave, %s\n"
+ "hot_standby = on\n"
+ "port = %s\n"
+#ifdef XCP
+ "pooler_port = %s\n"
+#endif
+ "wal_level = minimal\n" /* WAL level --- minimal. No cascade slave so far. */
+ "archive_mode = off\n" /* No archive mode */
+ "archive_command = ''\n" /* No archive mode */
+ "max_wal_senders = 0\n" /* Minimum WAL senders */
+ "# End of Addition\n",
+#ifdef XCP
+ timeStampString(date, MAXTOKEN), aval(VAR_datanodePorts)[idx], aval(VAR_datanodePoolerPorts)[idx]);
+#else
+ timeStampString(date, MAXTOKEN), aval(VAR_datanodePorts)[idx]);
+#endif
+ fclose(f);
+ /* Update the slave recovery.conf */
+ if ((f = pgxc_popen_w(host, "cat >> %s/recovery.conf", dir)) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open the slave's recovery.conf, %s\n", strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to add the slave, %s\n"
+ "standby_mode = on\n"
+ "primary_conninfo = 'host = %s port = %s "
+ "user = %s application_name = %s'\n"
+ "restore_command = 'cp %s/%%f %%p'\n"
+ "archive_cleanup_command = 'pg_archivecleanup %s %%r'\n"
+ "# End of addition\n",
+ timeStampString(date, MAXTOKEN), aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx],
+ sval(VAR_pgxcOwner), aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeArchLogDirs)[idx], aval(VAR_datanodeArchLogDirs)[idx]);
+ fclose(f);
+ /* Start the slave */
+ doImmediate(host, NULL, "pg_ctl start -Z datanode -D %s", dir);
+ return 0;
+}
+
+
+/*------------------------------------------------------------------------
+ *
+ * Remove command
+ *
+ *-----------------------------------------------------------------------*/
+int remove_datanodeMaster(char *name, int clean_opt)
+{
+ /*
+ 1. Transfer the data from the datanode to be removed to the rest of the datanodes for all the tables in all the databases.
+ For example to shift data of the table rr_abc to the
+ rest of the nodes we can use command
+
+ ALTER TABLE rr_abc DELETE NODE (DATA_NODE_3);
+
+ This step is not included in remove_datanodeMaster() function.
+
+ 2. Confirm that there is no data left on the datanode to be removed.
+ For example to confirm that there is no data left on DATA_NODE_3
+
+ select c.pcrelid from pgxc_class c, pgxc_node n where
+ n.node_name = 'DATA_NODE_3' and n.oid = ANY (c.nodeoids);
+
+ This step is not included in this function either.
+
+ 3. Stop the datanode server to be removed.
+ Now any SELECTs that involve the datanode to be removed would start failing
+ and DMLs have already been blocked, so essentially the cluster would work
+ only partially.
+
+ If datanode slave is also configured, we need to remove it first.
+
+ 4. Connect to any of the coordinators.
+ In our example assuming COORD_1 is running on port 5432,
+ the following command would connect to COORD_1
+
+ psql postgres -p 5432
+
+ 5. Drop the datanode to be removed.
+ For example to drop datanode DATA_NODE_3 use command
+
+ DROP NODE DATA_NODE_3;
+
+ 6. Update the connection information cached in pool.
+
+ SELECT pgxc_pool_reload();
+
+ 7. Repeat steps 4,5 & 6 for all the coordinators in the cluster.
+ */
+
+ int idx;
+ int ii;
+ FILE *f;
+ char **namelist = NULL;
+ char date[MAXTOKEN+1];
+
+ /* Check if the datanodeinator is configured */
+ if ((idx = datanodeIdx(name)) < 0)
+ {
+ elog(ERROR, "ERROR: Coordinator %s is not configured.\n", name);
+ return 1;
+ }
+ /* Check if all the other datanodeinators are running */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if ((ii != idx) && !is_none(aval(VAR_datanodeNames)[ii]) && (pingNode(aval(VAR_datanodeMasterServers)[ii], aval(VAR_datanodePorts)[ii]) != 0))
+ {
+ elog(ERROR, "ERROR: Datanode master %s is not running.\n", aval(VAR_datanodeNames)[ii]);
+ return 1;
+ }
+ }
+ /* Check if there's a slave configured */
+ if (doesExist(VAR_datanodeSlaveServers, idx) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ remove_datanodeSlave(name, clean_opt);
+#if 0
+ /* Stop the datanodeinator master if running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
+ {
+ AddMember(namelist, name);
+ stop_datanode_master(namelist, "fast");
+ CleanArray(namelist);
+ }
+ /* Cleanup the datanodeinator master resource if specified */
+ if (clean_opt)
+ doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_datanodeMasterDirs)[idx]);
+#endif
+ /* Issue "drop node" at all the other datanodeinators */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if (doesExist(VAR_coordNames, ii) && !is_none(aval(VAR_coordNames)[ii]))
+ {
+ f = pgxc_popen_wRaw("psql -p %d -h %s %s", atoi(aval(VAR_coordPorts)[ii]), aval(VAR_coordMasterServers)[ii], sval(VAR_defaultDatabase));
+ if (f == NULL)
+ {
+ elog(ERROR, "ERROR: cannot begin psql for the coordinator master %s\n", aval(VAR_coordNames)[ii]);
+ continue;
+ }
+ fprintf(f, "DROP NODE %s;\n", name);
+ fprintf(f, "\\q");
+ fclose(f);
+ }
+ }
+ /* Issue DROP NODE on datanodes */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_datanodeNames)[ii]) &&
+ strcmp(aval(VAR_datanodeNames)[ii], name) != 0)
+ {
+ if ((f = pgxc_popen_wRaw("psql -h %s -p %s %s", aval(VAR_coordMasterServers)[0], aval(VAR_coordPorts)[0], sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: cannot connect to the coordinator %s.\n", aval(VAR_coordNames)[0]);
+ continue;
+ }
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'DROP NODE %s';\n", aval(VAR_datanodeNames)[ii], name);
+ fprintf(f, "\\q\n");
+ fclose(f);
+ }
+ }
+#if 1
+ /* Stop the datanode master if running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
+ {
+ AddMember(namelist, name);
+ stop_datanode_master(namelist, "fast");
+ CleanArray(namelist);
+ }
+ /* Cleanup the datanode master resource if specified */
+ if (clean_opt)
+ doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_datanodeMasterDirs)[idx]);
+#endif
+ /* Update configuration and backup --> should cleanup "none" entries here */
+ assign_arrayEl(VAR_datanodeNames, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodeMasterDirs, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodePorts, idx, "-1", "-1");
+#ifdef XCP
+ assign_arrayEl(VAR_datanodePoolerPorts, idx, "-1", "-1");
+#endif
+ assign_arrayEl(VAR_datanodeMasterServers, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodeMaxWALSenders, idx, "0", "0");
+ assign_arrayEl(VAR_datanodeSlaveServers, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodeSlaveDirs, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodeArchLogDirs, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodeSpecificExtraConfig, idx, "none", NULL);
+ handle_no_slaves();
+ /*
+ * Write config files
+ */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#================================================================\n"
+ "# pgxc configuration file updated due to coodinator master removal\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_datanodeSlave);
+ fprintAval(f, VAR_datanodeNames);
+ fprintAval(f, VAR_datanodeMasterDirs);
+ fprintAval(f, VAR_datanodePorts);
+#ifdef XCP
+ fprintAval(f, VAR_datanodePoolerPorts);
+#endif
+ fprintAval(f, VAR_datanodeMasterServers);
+ fprintAval(f, VAR_datanodeMaxWALSenders);
+ fprintAval(f, VAR_datanodeSlaveServers);
+ fprintAval(f, VAR_datanodeSlaveDirs);
+ fprintAval(f, VAR_datanodeArchLogDirs);
+ fprintAval(f, VAR_datanodeSpecificExtraConfig);
+ fclose(f);
+ backup_configuration();
+ return 0;
+}
+
+/*
+ * Remove the slave of datanode "name": stop it, unconfigure replication on
+ * its master, update the in-memory variables and the configuration file.
+ * When clean_opt is set, the slave's resources (directory, sockets) are
+ * removed as well.  Returns 0 on success, 1 on error.
+ */
+int remove_datanodeSlave(char *name, int clean_opt)
+{
+    int idx;
+    char **nodelist = NULL;
+    FILE *f;
+
+    /* A slave can only be removed when slaves are configured at all. */
+    if (!isVarYes(VAR_datanodeSlave))
+    {
+        elog(ERROR, "ERROR: datanode slave is not configured.\n");
+        return 1;
+    }
+    idx = datanodeIdx(name);
+    if (idx < 0)
+    {
+        elog(ERROR, "ERROR: datanode %s is not configured.\n", name);
+        return 1;
+    }
+    if (!doesExist(VAR_datanodeSlaveServers, idx) || is_none(aval(VAR_datanodeSlaveServers)[idx]))
+    {
+        elog(ERROR, "ERROR: datanode slave %s is not configured.\n", name);
+        return 1;
+    }
+    AddMember(nodelist, name);
+    /* Stop the slave first if it still responds to ping. */
+    if (pingNode(aval(VAR_datanodeSlaveServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
+        stop_datanode_slave(nodelist, "immediate");
+    /*
+     * Turn replication off on the master by appending overriding settings
+     * to its postgresql.conf.  (The previous revision shadowed 'f' with an
+     * inner declaration and leaked 'nodelist' on this error path.)
+     */
+    if ((f = pgxc_popen_w(aval(VAR_datanodeMasterServers)[idx], "cat >> %s/postgresql.conf", aval(VAR_datanodeMasterDirs)[idx])) == NULL)
+    {
+        elog(ERROR, "ERROR: cannot open %s/postgresql.conf at %s, %s\n", aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterServers)[idx], strerror(errno));
+        CleanArray(nodelist);
+        return 1;
+    }
+    fprintf(f,
+            "#=======================================\n"
+            "# Updated to remove the slave %s\n"
+            "archive_mode = off\n"
+            "synchronous_standby_names = ''\n"
+            "archive_command = ''\n"
+            "max_wal_senders = 0\n"
+            "wal_level = minimal\n"
+            "# End of the update\n",
+            timeStampString(date, MAXTOKEN));
+    fclose(f);
+    /* Restart the master so the new settings take effect. */
+    doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL, "pg_ctl restart -Z datanode -D %s", aval(VAR_datanodeMasterDirs)[idx]);
+
+    if (clean_opt)
+        clean_datanode_slave(nodelist);
+    /*
+     * Maintain variables
+     */
+    assign_arrayEl(VAR_datanodeSlaveServers, idx, "none", NULL);
+    assign_arrayEl(VAR_datanodeSlaveDirs, idx, "none", NULL);
+    assign_arrayEl(VAR_datanodeArchLogDirs, idx, "none", NULL);
+    handle_no_slaves();
+    /*
+     * Maintain configuration file
+     */
+    if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+    {
+        /* Should it be panic? */
+        elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+        CleanArray(nodelist);
+        return 1;
+    }
+    /* Fixed: the banner used to say "coodinator slave removal". */
+    fprintf(f,
+            "#================================================================\n"
+            "# pgxc configuration file updated due to datanode slave removal\n"
+            "# %s\n",
+            timeStampString(date, MAXTOKEN+1));
+    fprintSval(f, VAR_datanodeSlave);
+    fprintAval(f, VAR_datanodeSlaveServers);
+    fprintAval(f, VAR_datanodeSlaveDirs);
+    fprintAval(f, VAR_datanodeArchLogDirs);
+    fclose(f);
+    backup_configuration();
+    CleanArray(nodelist);
+    return 0;
+}
+
+/*
+ * Clean datanode master resources -- directory and port -----------------------------
+ *
+ * Returns a command that recreates an empty data directory on the master
+ * server, or NULL when nodeName is not a datanode.
+ */
+cmd_t *prepare_cleanDatanodeMaster(char *nodeName)
+{
+    cmd_t *cmd;
+    int idx;
+
+    if ((idx = datanodeIdx(nodeName)) < 0)
+    {
+        elog(ERROR, "ERROR: %s is not a datanode\n", nodeName);
+        return(NULL);
+    }
+    cmd = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+    /*
+     * With XCP, stale pooler sockets in /tmp are removed as well.
+     * (The non-XCP format string previously had three %s conversions but
+     * only two arguments -- undefined behavior -- and a stray '*' appended
+     * to the chmod target.)
+     */
+    snprintf(newCommand(cmd), MAXLINE,
+#ifdef XCP
+             "rm -rf %s; mkdir -p %s; chmod 0700 %s; rm -f /tmp/.s.*%d*",
+             aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx], atoi(aval(VAR_datanodePoolerPorts)[idx]));
+#else
+             "rm -rf %s; mkdir -p %s; chmod 0700 %s",
+             aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx]);
+#endif
+    return(cmd);
+}
+
+/* Apply the master-resource cleanup to every configured datanode. */
+int clean_datanode_master_all(void)
+{
+    elog(INFO, "Cleaning all the datanode master resources.\n");
+    return clean_datanode_master(aval(VAR_datanodeNames));
+}
+
+/*
+ * Clean master resources (data directory, pooler sockets) for each datanode
+ * in nodeList.  Commands run in parallel via doCmdList(); returns its rc.
+ */
+int clean_datanode_master(char **nodeList)
+{
+    char **actualNodeList;
+    cmdList_t *cmdList;
+    cmd_t *cmd;
+    int ii;
+    int rc;
+
+    cmdList = initCmdList();
+    actualNodeList = makeActualNodeList(nodeList);
+    for (ii = 0; actualNodeList[ii]; ii++)
+    {
+        /* Fixed typo in user-visible message ("maseter"). */
+        elog(INFO, "Cleaning datanode %s master resources.\n", actualNodeList[ii]);
+        if ((cmd = prepare_cleanDatanodeMaster(actualNodeList[ii])))
+            addCmd(cmdList, cmd);
+    }
+    rc = doCmdList(cmdList);
+    cleanCmdList(cmdList);
+    CleanArray(actualNodeList);
+    return(rc);
+}
+
+/*
+ * Cleanup datanode slave resources -- directory and the socket ------------------
+ *
+ * Builds the cleanup command for the slave of the named datanode, or
+ * returns NULL when the name is unknown or no slave is configured.
+ */
+cmd_t *prepare_cleanDatanodeSlave(char *nodeName)
+{
+    int idx = datanodeIdx(nodeName);
+    cmd_t *cmd;
+
+    if (idx < 0)
+    {
+        elog(ERROR, "ERROR: %s is not a datanode\n", nodeName);
+        return NULL;
+    }
+    /* Nothing to clean when this datanode has no slave. */
+    if (!doesExist(VAR_datanodeSlaveServers, idx) || is_none(aval(VAR_datanodeSlaveServers)[idx]))
+        return NULL;
+    cmd = initCmd(aval(VAR_datanodeSlaveServers)[idx]);
+    snprintf(newCommand(cmd), MAXLINE,
+             "rm -rf %s; mkdir -p %s; chmod 0700 %s",
+             aval(VAR_datanodeSlaveDirs)[idx],
+             aval(VAR_datanodeSlaveDirs)[idx],
+             aval(VAR_datanodeSlaveDirs)[idx]);
+    return cmd;
+}
+
+/* Apply the slave-resource cleanup to every configured datanode. */
+int clean_datanode_slave_all(void)
+{
+    /* Fixed typo in user-visible message ("resouces"). */
+    elog(INFO, "Cleaning all the datanode slave resources.\n");
+    return(clean_datanode_slave(aval(VAR_datanodeNames)));
+}
+
+/*
+ * Clean slave resources (directory) for each datanode in nodeList.
+ * Nodes without a configured slave are reported and skipped.
+ */
+int clean_datanode_slave(char **nodeList)
+{
+    char **actual;
+    cmdList_t *cmds;
+    int ii;
+    int rc;
+
+    cmds = initCmdList();
+    actual = makeActualNodeList(nodeList);
+    for (ii = 0; actual[ii]; ii++)
+    {
+        cmd_t *cmd;
+
+        elog(INFO, "Cleaning datanode %s slave resources.\n", actual[ii]);
+        cmd = prepare_cleanDatanodeSlave(actual[ii]);
+        if (cmd == NULL)
+        {
+            elog(WARNING, "WARNING: datanode slave %s not found.\n", actual[ii]);
+            continue;
+        }
+        addCmd(cmds, cmd);
+    }
+    rc = doCmdList(cmds);
+    cleanCmdList(cmds);
+    CleanArray(actual);
+    return rc;
+}
+
+/*
+ * Show configuration of datanodes -------------------------------------------------
+ *
+ * Print the master configuration of datanode 'idx' to the log.
+ * 'flag' adds a "Datanode Master:" title, 'hostname' (may be NULL) adds
+ * the host to the header line.  Always returns 0.
+ */
+int show_config_datanodeMaster(int flag, int idx, char *hostname)
+{
+    int ii;
+    char outBuf[MAXLINE+1];
+    char editBuf[MAXPATH+1];
+
+    /* Build the optional header line: title and/or host name. */
+    outBuf[0] = 0;
+    if (flag)
+        strncat(outBuf, "Datanode Master: ", MAXLINE);
+    if (hostname)
+    {
+        snprintf(editBuf, MAXPATH, "host: %s", hostname);
+        strncat(outBuf, editBuf, MAXLINE);
+    }
+    if (flag || hostname)
+        strncat(outBuf, "\n", MAXLINE);
+    /* Keep the multi-line report contiguous in the log. */
+    lockLogFile();
+    if (outBuf[0])
+        elog(NOTICE, "%s", outBuf);
+#ifdef XCP
+    /* NOTE(review): this reads VAR_poolerPorts while the datanode removal
+     * code in this file uses VAR_datanodePoolerPorts -- confirm which array
+     * holds the datanode pooler ports. */
+    elog(NOTICE, " Nodename: '%s', port: %s, pooler port %s\n",
+         aval(VAR_datanodeNames)[idx], aval(VAR_datanodePorts)[idx], aval(VAR_poolerPorts)[idx]);
+#else
+    elog(NOTICE, " Nodename: '%s', port: %s\n",
+         aval(VAR_datanodeNames)[idx], aval(VAR_datanodePorts)[idx]);
+#endif
+    elog(NOTICE, " MaxWALSenders: %s, Dir: '%s'\n",
+         aval(VAR_datanodeMaxWALSenders)[idx], aval(VAR_datanodeMasterDirs)[idx]);
+    elog(NOTICE, " ExtraConfig: '%s', Specific Extra Config: '%s'\n",
+         sval(VAR_datanodeExtraConfig), aval(VAR_datanodeSpecificExtraConfig)[idx]);
+    /* Collect all pg_hba entries onto a single report line. */
+    strncpy(outBuf, " pg_hba entries ( ", MAXLINE);
+    for (ii = 0; aval(VAR_datanodePgHbaEntries)[ii]; ii++)
+    {
+        snprintf(editBuf, MAXPATH, "'%s' ", aval(VAR_datanodePgHbaEntries)[ii]);
+        strncat(outBuf, editBuf, MAXLINE);
+    }
+    elog(NOTICE, "%s)\n", outBuf);
+    elog(NOTICE, " Extra pg_hba: '%s', Specific Extra pg_hba: '%s'\n",
+         sval(VAR_datanodeExtraPgHba), aval(VAR_datanodeSpecificExtraPgHba)[idx]);
+    unlockLogFile();
+    return 0;
+}
+
+/*
+ * Print the slave configuration of datanode 'idx' to the log.
+ * 'flag' adds a "Datanode Slave:" title, 'hostname' (may be NULL) adds
+ * the host to the header line.  Always returns 0.
+ */
+int show_config_datanodeSlave(int flag, int idx, char *hostname)
+{
+    char outBuf[MAXLINE+1];
+    char editBuf[MAXPATH+1];
+
+    /* Build the optional header line: title and/or host name. */
+    outBuf[0] = 0;
+    if (flag)
+        strncat(outBuf, "Datanode Slave: ", MAXLINE);
+    if (hostname)
+    {
+        snprintf(editBuf, MAXPATH, "host: %s", hostname);
+        strncat(outBuf, editBuf, MAXLINE);
+    }
+    if (flag || hostname)
+        strncat(outBuf, "\n", MAXLINE);
+    /* Keep the multi-line report contiguous in the log. */
+    lockLogFile();
+    if (outBuf[0])
+        elog(NOTICE, "%s", outBuf);
+#ifdef XCP
+    /* NOTE(review): reads VAR_poolerPorts -- see the matching note in
+     * show_config_datanodeMaster; confirm vs VAR_datanodePoolerPorts. */
+    elog(NOTICE, " Nodename: '%s', port: %s, pooler port: %s\n",
+         aval(VAR_datanodeNames)[idx], aval(VAR_datanodePorts)[idx], aval(VAR_poolerPorts)[idx]);
+#else
+    elog(NOTICE, " Nodename: '%s', port: %s\n",
+         aval(VAR_datanodeNames)[idx], aval(VAR_datanodePorts)[idx]);
+#endif
+    elog(NOTICE," Dir: '%s', Archive Log Dir: '%s'\n",
+         aval(VAR_datanodeSlaveDirs)[idx], aval(VAR_datanodeArchLogDirs)[idx]);
+    unlockLogFile();
+    return 0;
+}
+
+/*
+ * Report master (and, when configured, slave) settings for every datanode
+ * named in nodeList.  Unknown names are warned about and skipped.
+ */
+int show_config_datanodeMasterSlaveMulti(char **nodeList)
+{
+    int ii;
+
+    lockLogFile();
+    for (ii = 0; nodeList[ii]; ii++)
+    {
+        int idx = datanodeIdx(nodeList[ii]);
+
+        if (idx < 0)
+        {
+            elog(WARNING, "WARNING: %s is not a datanode, skipping.\n", nodeList[ii]);
+            continue;
+        }
+        show_config_datanodeMaster(TRUE, idx, aval(VAR_datanodeMasterServers)[idx]);
+        if (isVarYes(VAR_datanodeSlave))
+            show_config_datanodeSlave(TRUE, idx, aval(VAR_datanodeSlaveServers)[idx]);
+    }
+    unlockLogFile();
+    return 0;
+}
+
+/*
+ * Report master settings for every datanode named in nodeList.
+ * Unknown names are warned about and skipped.
+ */
+int show_config_datanodeMasterMulti(char **nodeList)
+{
+    int ii;
+
+    lockLogFile();
+    for (ii = 0; nodeList[ii]; ii++)
+    {
+        int idx = datanodeIdx(nodeList[ii]);
+
+        if (idx < 0)
+        {
+            elog(WARNING, "WARNING: %s is not a datanode. skipping\n", nodeList[ii]);
+            continue;
+        }
+        show_config_datanodeMaster(TRUE, idx, aval(VAR_datanodeMasterServers)[idx]);
+    }
+    unlockLogFile();
+    return 0;
+}
+
+/*
+ * Report slave settings for every datanode named in nodeList.
+ * Fails when slaves are not configured at all; unknown names are skipped.
+ */
+int show_config_datanodeSlaveMulti(char **nodeList)
+{
+    int ii;
+
+    if (!isVarYes(VAR_datanodeSlave))
+    {
+        elog(ERROR, "ERROR: datanode slave is not configured.\n");
+        return 1;
+    }
+    lockLogFile();
+    for (ii = 0; nodeList[ii]; ii++)
+    {
+        int idx = datanodeIdx(nodeList[ii]);
+
+        if (idx < 0)
+        {
+            elog(WARNING, "WARNING: %s is not a datanode, skipping.\n", nodeList[ii]);
+            continue;
+        }
+        show_config_datanodeSlave(TRUE, idx, aval(VAR_datanodeSlaveServers)[idx]);
+    }
+    unlockLogFile();
+    return(0);
+}
+
+/*
+ * Kill datanode master ---------------------------------------------------------------
+ *
+ * Normally, you should not kill masters in such a manner. It is intended
+ * only for emergencies.
+ *
+ * Builds a command that kills the master's postmaster process tree on its
+ * server and removes its socket files, or NULL when nodeName is "none" or
+ * not a datanode.
+ */
+cmd_t *prepare_killDatanodeMaster(char *nodeName)
+{
+    pid_t postmasterPid;
+    int dnIndex;
+    cmd_t *cmd = NULL;
+
+    if (is_none(nodeName))
+        return(NULL);
+    if ((dnIndex = datanodeIdx(nodeName)) < 0)
+    {
+        elog(WARNING, "WARNING: \"%s\" is not a datanode name\n", nodeName);
+        return(NULL);
+    }
+    cmd = initCmd(aval(VAR_datanodeMasterServers)[dnIndex]);
+    if ((postmasterPid = get_postmaster_pid(aval(VAR_datanodeMasterServers)[dnIndex], aval(VAR_datanodeMasterDirs)[dnIndex])) > 0)
+    {
+        /* Kill the known postmaster and its children by pid. */
+        char *pidList = getChPidList(aval(VAR_datanodeMasterServers)[dnIndex], postmasterPid);
+
+        snprintf(newCommand(cmd), MAXLINE,
+                 "kill -9 %d %s;" /* Kill the postmaster and all its children */
+                 "rm -rf /tmp/.s.'*'%d'*'", /* Remove the socket */
+                 postmasterPid,
+                 pidList,
+                 atoi(aval(VAR_datanodePorts)[dnIndex]));
+        freeAndReset(pidList);
+    }
+    else
+        /* No pid file found: fall back to killing every postgres process
+         * owned by the pgxc user on that server. */
+        snprintf(newCommand(cmd), MAXLINE,
+                 "killall -u %s -9 postgres;" /* Kill the postmaster and all its children */
+                 "rm -rf /tmp/.s.'*'%d'*'", /* Remove the socket */
+                 sval(VAR_pgxcUser), atoi(aval(VAR_datanodePorts)[dnIndex]));
+    return(cmd);
+}
+
+/* Forcefully kill every datanode master in the configuration. */
+int kill_datanode_master_all(void)
+{
+    return kill_datanode_master(aval(VAR_datanodeNames));
+}
+
+/*
+ * Forcefully kill the masters of the datanodes in nodeList.
+ * Returns the result of doCmdList().
+ */
+int kill_datanode_master(char **nodeList)
+{
+    int ii;
+    int rc;
+    char **actualNodeList;
+    cmdList_t *cmdList;
+    cmd_t *cmd;
+
+    actualNodeList = makeActualNodeList(nodeList);
+    cmdList = initCmdList();
+    for (ii = 0; actualNodeList[ii]; ii++)
+    {
+        if ((cmd = prepare_killDatanodeMaster(actualNodeList[ii])))
+            addCmd(cmdList, cmd);
+    }
+    /*
+     * Every other function in this module uses initCmdList()'s result
+     * unchecked, so the old "if (cmdList)" guard was dead code and made
+     * the cleanup of actualNodeList conditional.  Run and clean up
+     * unconditionally.
+     */
+    rc = doCmdList(cmdList);
+    cleanCmdList(cmdList);
+    CleanArray(actualNodeList);
+    return(rc);
+}
+
+/*
+ * Kill datanode slaves -----------------------------------------------------
+ *
+ * You should not kill datanodes in such a manner. It is intended only for
+ * emergencies.  You should try to stop them gracefully.
+ *
+ * Builds a command that kills the slave's postmaster process tree on its
+ * server and removes its socket files; returns NULL when nodeName is
+ * "none", not a datanode, or has no configured slave.
+ */
+cmd_t *prepare_killDatanodeSlave(char *nodeName)
+{
+    pid_t postmasterPid;
+    int dnIndex;
+    cmd_t *cmd;
+
+    if (is_none(nodeName))
+        return(NULL);
+    if ((dnIndex = datanodeIdx(nodeName)) < 0)
+    {
+        elog(WARNING, "WARNING: \"%s\" is not a datanode name, skipping.\n", nodeName);
+        return(NULL);
+    }
+    if (!doesExist(VAR_datanodeSlaveServers, dnIndex) || is_none(aval(VAR_datanodeSlaveServers)[dnIndex]))
+    {
+        elog(WARNING, "WARNING: datanode slave %s is not found.\n", nodeName);
+        return NULL;
+    }
+    cmd = initCmd(aval(VAR_datanodeSlaveServers)[dnIndex]);
+    postmasterPid = get_postmaster_pid(aval(VAR_datanodeSlaveServers)[dnIndex], aval(VAR_datanodeSlaveDirs)[dnIndex]);
+    if (postmasterPid == -1)
+    {
+        /* No postmaster pid found */
+        elog(WARNING, "WARNING: pid for datanode slave \"%s\" slave was not found. Remove socket only.\n", nodeName);
+        snprintf(newCommand(cmd), MAXLINE,
+                 "rm -rf /tmp/.s.'*'%s'*'", /* Remove the socket */
+                 aval(VAR_datanodePorts)[dnIndex]);
+    }
+    else
+    {
+        /* Kill the known postmaster and its children by pid. */
+        char *pidList = getChPidList(aval(VAR_datanodeSlaveServers)[dnIndex], postmasterPid);
+
+        snprintf(newCommand(cmd), MAXLINE,
+                 "kill -9 %d %s;" /* Kill the postmaster and all its children */
+                 "rm -rf /tmp/.s.'*'%d'*'", /* Remove the socket */
+                 postmasterPid,
+                 pidList,
+                 atoi(aval(VAR_datanodePorts)[dnIndex]));
+        freeAndReset(pidList);
+    }
+    return(cmd);
+}
+
+/* Forcefully kill every datanode slave in the configuration. */
+int kill_datanode_slave_all(void)
+{
+    return kill_datanode_slave(aval(VAR_datanodeNames));
+}
+
+/*
+ * Forcefully kill the slaves of the datanodes in nodeList.
+ * Returns 1 when slaves are not configured, otherwise doCmdList()'s rc.
+ */
+int kill_datanode_slave(char **nodeList)
+{
+    int ii;
+    int rc;
+    char **actualNodeList;
+    cmdList_t *cmdList;
+    cmd_t *cmd;
+
+    /*
+     * Validate the configuration before allocating anything: the previous
+     * revision leaked cmdList and actualNodeList on this error path.
+     */
+    if (!isVarYes(VAR_datanodeSlave))
+    {
+        elog(ERROR, "ERROR: Datanode slave is not configured.\n");
+        return 1;
+    }
+    cmdList = initCmdList();
+    actualNodeList = makeActualNodeList(nodeList);
+    for (ii = 0; actualNodeList[ii]; ii++)
+    {
+        if ((cmd = prepare_killDatanodeSlave(actualNodeList[ii])))
+            addCmd(cmdList, cmd);
+    }
+    rc = doCmdList(cmdList);
+    cleanCmdList(cmdList);
+    CleanArray(actualNodeList);        /* was leaked in the previous revision */
+    return(rc);
+}
+
+/*
+ * Checks if all the datanode masters are running.
+ *
+ * Returns FALSE if any of them are not running.
+ */
+int check_AllDatanodeRunning(void)
+{
+    int ii;
+
+    for (ii = 0; aval(VAR_datanodeMasterServers)[ii]; ii++)
+    {
+        /* "none" entries are removed/unconfigured slots; skip them. */
+        if (!is_none(aval(VAR_datanodeMasterServers)[ii]))
+            if (pingNode(aval(VAR_datanodeMasterServers)[ii], aval(VAR_datanodePorts)[ii]) != 0)
+                return FALSE;
+    }
+    return TRUE;
+}
+
+
diff --git a/contrib/pgxc_ctl/datanode_cmd.h b/contrib/pgxc_ctl/datanode_cmd.h
new file mode 100644
index 0000000000..d8e58dfe36
--- /dev/null
+++ b/contrib/pgxc_ctl/datanode_cmd.h
@@ -0,0 +1,71 @@
+/*-------------------------------------------------------------------------
+ *
+ * datanode_cmd.h
+ *
+ * Datanode command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef DATANODE_CMD_H
+#define DATANODE_CMD_H
+
+#include "utils.h"
+
+/* Initialize datanode master/slave data directories. */
+extern int init_datanode_master(char **nodeList);
+extern int init_datanode_master_all(void);
+extern int init_datanode_slave(char **nodeList);
+extern int init_datanode_slave_all(void);
+extern cmd_t *prepare_initDatanodeMaster(char *nodeName);
+extern cmd_t *prepare_initDatanodeSlave(char *nodeName);
+
+
+/* Start datanode masters/slaves. */
+extern int start_datanode_master(char **nodeList);
+extern int start_datanode_master_all(void);
+extern int start_datanode_slave(char **nodeList);
+extern int start_datanode_slave_all(void);
+extern cmd_t *prepare_startDatanodeMaster(char *nodeName);
+extern cmd_t *prepare_startDatanodeSlave(char *nodeName);
+
+/* Stop datanodes; 'immediate' selects the shutdown mode string. */
+extern int stop_datanode_master(char **nodeList, char *immediate);
+extern int stop_datanode_master_all(char *immediate);
+extern int stop_datanode_slave(char **nodeList, char *immediate);
+extern int stop_datanode_slave_all(char *immediate);
+extern cmd_t *prepare_stopDatanodeSlave(char *nodeName, char *immediate);
+extern cmd_t *prepare_stopDatanodeMaster(char *nodeName, char *immediate);
+
+/* Promote slaves of the listed datanodes. */
+extern int failover_datanode(char **nodeList);
+
+/* Emergency kill (kill -9) of datanode processes. */
+extern int kill_datanode_master(char **nodeList);
+extern int kill_datanode_master_all(void);
+extern int kill_datanode_slave(char **nodeList);
+extern int kill_datanode_slave_all(void);
+extern cmd_t *prepare_killDatanodeMaster(char *nodeName);
+extern cmd_t *prepare_killDatanodeSlave(char *nodeName);
+
+/* Remove datanode resources (data directories, sockets). */
+extern int clean_datanode_master(char **nodeList);
+extern int clean_datanode_master_all(void);
+extern int clean_datanode_slave(char **nodeList);
+extern int clean_datanode_slave_all(void);
+extern cmd_t *prepare_cleanDatanodeMaster(char *nodeName);
+extern cmd_t *prepare_cleanDatanodeSlave(char *nodeName);
+
+/* Add/remove datanodes to/from a running configuration. */
+#ifdef XCP
+extern int add_datanodeMaster(char *name, char *host, int port, int pooler, char *dir, char *restore_dname);
+#else
+extern int add_datanodeMaster(char *name, char *host, int port, char *dir, char *restore_dname);
+#endif
+extern int add_datanodeSlave(char *name, char *host, char *dir, char *archDir);
+extern int remove_datanodeMaster(char *name, int clean_opt);
+extern int remove_datanodeSlave(char *name, int clean_opt);
+
+/* Print datanode configuration to the log. */
+extern int show_config_datanodeMasterSlaveMulti(char **nodeList);
+extern int show_config_datanodeMasterMulti(char **nodeList);
+extern int show_config_datanodeSlaveMulti(char **nodeList);
+extern int show_config_datanodeMaster(int flag, int idx, char *hostname);
+extern int show_config_datanodeSlave(int flag, int idx, char *hostname);
+
+extern int check_AllDatanodeRunning(void);
+
+#endif /* DATANODE_CMD_H */
diff --git a/contrib/pgxc_ctl/do_command.c b/contrib/pgxc_ctl/do_command.c
new file mode 100644
index 0000000000..d7382de8d5
--- /dev/null
+++ b/contrib/pgxc_ctl/do_command.c
@@ -0,0 +1,2493 @@
+/*-------------------------------------------------------------------------
+ *
+ * do_command.c
+ *
+ * Main command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This file provides a frontend module to pgxc_ctl operation.
+ */
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "do_command.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+#include "gtm_cmd.h"
+#include "coord_cmd.h"
+#include "datanode_cmd.h"
+#include "gtm_util.h"
+#include "monitor.h"
+
+extern char *pgxc_ctl_conf_prototype[];
+
+#define Exit(c) exit(myWEXITSTATUS(c))
+#define GetToken() (line = get_word(line, &token))
+#define TestToken(word) ((token != NULL) && (strcasecmp(token, word) == 0))
+#define testToken(word) ((token != NULL) && (strcmp(token, word) == 0))
+
+static void kill_something(char *token);
+static void do_deploy(char *line);
+static void deploy_xc(char **hostlist);
+static void show_config_something(char *nodeName);
+static void show_config_something_multi(char **nodeList);
+extern void show_config_hostList(char **hostList);
+static void show_config_host(char *hostname);
+static void show_basicConfig(void);
+static void show_config_servers(char **hostList);
+static void do_clean_command(char *line);
+static void do_start_command(char *line);
+static void start_all(void);
+static void do_stop_command(char *line);
+static void stop_all(char *immediate);
+static int show_Resource(char *datanodeName, char *databasename, char *username);
+
+/* Placeholder: the echo command is not implemented yet; prints a marker only. */
+static void do_echo_command(char * line)
+{
+    printf("do_echo_command\n");
+}
+
+/*
+ * Write the built-in prototype configuration to 'Path', or to the path in
+ * the configFile variable when Path is NULL.  Errors are reported via elog.
+ */
+static void do_prepareConfFile(char *Path)
+{
+    char *path = NULL;
+    FILE *conf;
+    int ii;
+
+
+    /* Resolve the target path: explicit argument wins over $configFile. */
+    if (Path)
+        path = Path;
+    else
+    {
+        if (find_var(VAR_configFile) && sval(VAR_configFile))
+            path = sval(VAR_configFile);
+        else
+        {
+            elog(ERROR, "ERROR: Configuration file path was not specified.\n");
+            return;
+        }
+    }
+    conf = fopen(path, "w");
+    if (conf == NULL)
+    {
+        elog(ERROR, "ERROR: Could not open the configuration file \"%s\", %s.\n", path, strerror(errno));
+        return;
+    }
+    for (ii = 0; pgxc_ctl_conf_prototype[ii]; ii++)
+    {
+        fprintf(conf, "%s\n", pgxc_ctl_conf_prototype[ii]);
+    }
+    /*
+     * fclose() flushes the stream; report a failure instead of silently
+     * leaving a truncated configuration file behind.
+     */
+    if (fclose(conf) != 0)
+        elog(ERROR, "ERROR: Could not write the configuration file \"%s\", %s.\n", path, strerror(errno));
+    return;
+}
+
+/*
+ * Deploy pgxc binaries
+ */
+
+/*
+ * Handle "deploy all" (every server in the configuration) or
+ * "deploy host ..." (explicit host list).  The rest of the command line is
+ * tokenized via GetToken().
+ */
+static void do_deploy(char *line)
+{
+    char *token;
+    char **hostlist = NULL;
+
+    if (GetToken() == NULL)
+    {
+        elog(ERROR, "ERROR: Please specify option for deploy command.\n");
+        return;
+    }
+    if (TestToken("all"))
+    {
+#ifdef XCP
+        elog(NOTICE, "Deploying Postgres-XL components to all the target servers.\n");
+#else
+        elog(NOTICE, "Deploying Postgres-XC materials to all the target servers.\n");
+#endif
+        deploy_xc(aval(VAR_allServers));
+    }
+    else
+    {
+#ifdef XCP
+        elog(NOTICE, "Deploying Postgres-XL components.\n");
+#else
+        elog(NOTICE, "Deploying Postgres-XC materials.\n");
+#endif
+        /*
+         * Please note that the following code does not check if the specified host
+         * appears in the configuration file.
+         * We should deploy xc binary to targets not in the current configuration
+         * to add gtm slave, gtm_proxy, coordinator/datanode master/slave online.
+         */
+        do {
+            AddMember(hostlist, token);
+        } while(GetToken());
+        deploy_xc(hostlist);
+        CleanArray(hostlist);
+    }
+}
+
+/*
+ * Copy the local pgxc installation (bin/include/lib/share) to each host in
+ * hostlist: build one local tarball, then per host (in background) recreate
+ * the install directory, scp the tarball over, and extract it.
+ */
+static void deploy_xc(char **hostlist)
+{
+    char tarFile[MAXPATH+1];
+    cmdList_t *cmdList;
+    int ii;
+
+    /* Build tarball --> need to do foreground */
+    elog(NOTICE, "Prepare tarball to deploy ... \n");
+    snprintf(tarFile, MAXPATH, "%d.tgz", getpid());
+    /* NOTE(review): with tar's C option the first argument is the directory
+     * to change into (pgxcInstallDir) and the f argument is
+     * localTmpDir/tarFile -- confirm this pairing against the target tar. */
+    doImmediate(NULL, NULL, "tar czCf %s %s/%s bin include lib share",
+                sval(VAR_pgxcInstallDir),
+                sval(VAR_localTmpDir), tarFile);
+
+    /* Backgroud jobs */
+
+    cmdList = initCmdList();
+    /* Build install dir */
+    for (ii = 0; hostlist[ii]; ii++)
+    {
+        cmd_t *cmd;
+        cmd_t *cmdScp;
+        cmd_t *cmdTarExtract;
+
+        elog(NOTICE, "Deploying to the server %s.\n", hostlist[ii]);
+        /* Build target directory */
+        addCmd(cmdList, (cmd = initCmd(hostlist[ii])));
+        snprintf(newCommand(cmd), MAXLINE,
+                 "rm -rf %s/bin %s/include %s/lib %s/share; mkdir -p %s",
+                 sval(VAR_pgxcInstallDir),
+                 sval(VAR_pgxcInstallDir),
+                 sval(VAR_pgxcInstallDir),
+                 sval(VAR_pgxcInstallDir),
+                 sval(VAR_pgxcInstallDir));
+        /* SCP tarball */
+        appendCmdEl(cmd, (cmdScp = initCmd(NULL)));
+        snprintf(newCommand(cmdScp), MAXLINE,
+                 "scp %s/%s %s@%s:%s",
+                 sval(VAR_localTmpDir), tarFile, sval(VAR_pgxcUser), hostlist[ii], sval(VAR_tmpDir));
+        /* Extract Tarball and remove it */
+        appendCmdEl(cmd, (cmdTarExtract = initCmd(hostlist[ii])));
+        snprintf(newCommand(cmdTarExtract), MAXLINE,
+                 "tar xzCf %s %s/%s; rm %s/%s",
+                 sval(VAR_pgxcInstallDir),
+                 sval(VAR_tmpDir), tarFile,
+                 sval(VAR_tmpDir), tarFile);
+    }
+    doCmdList(cmdList);
+    cleanCmdList(cmdList);
+    /* NOTE(review): the tarball was created under VAR_localTmpDir but this
+     * cleanup removes it from VAR_tmpDir -- confirm both point at the same
+     * local directory, otherwise the local tarball is never removed. */
+    doImmediate(NULL, NULL, "rm -f %s/%s",
+                sval(VAR_tmpDir), tarFile);
+    elog(NOTICE, "Deployment done.\n");
+}
+
+/*
+ * Handle "set varname value ...": reset the named variable and replace its
+ * values with the remaining tokens, then print and log the result.
+ */
+static void do_set(char *line)
+{
+
+    char *token;
+    char *varname;
+    pgxc_ctl_var *var;
+
+    if (GetToken() == NULL)
+    {
+        elog(ERROR, "ERROR: No variable name was given\n");
+        return;
+    }
+    /* NOTE(review): varname is Strdup'ed and never freed in this function;
+     * whether confirm_var() takes ownership of the string is not visible
+     * here -- confirm before adding a free(). */
+    varname = Strdup(token);
+    var = confirm_var(varname);
+    reset_value(var);
+    while(GetToken())
+    {
+        add_val(var, token);
+    }
+    print_var(varname);
+    log_var(varname);
+    return;
+}
+
+/*
+ * Failover command ... failover gtm
+ * failover coordinator nodename
+ * failover datanode nodename
+ * failover nodename
+ *
+ * Promotes the configured slave(s) of the named component(s).  Nodes
+ * without a configured slave are reported and skipped.
+ */
+static void do_failover_command(char *line)
+{
+    char *token;
+    int idx;
+
+    if (GetToken() == NULL)
+    {
+        elog(ERROR, "ERROR: Please specify failover command option.\n");
+        return;
+    }
+    else if (TestToken("gtm"))
+    {
+        if (isVarYes(VAR_gtmSlave) && !is_none(sval(VAR_gtmSlaveServer)))
+            failover_gtm();
+        else
+            elog(ERROR, "ERROR: no gtm slave is configured.\n");
+        return;
+    }
+    else if (TestToken("coordinator"))
+    {
+        if (!isVarYes(VAR_coordSlave))
+            elog(ERROR, "ERROR: coordinator slave is not configured.\n");
+        else if (!GetToken())
+            elog(ERROR, "ERROR: please specify failover coordinator command option.\n");
+        else
+        {
+            char **nodeList = NULL;
+
+            /* Collect only coordinators that actually have a slave. */
+            do
+            {
+                if ((idx = coordIdx(token)) < 0)
+                    elog(ERROR, "ERROR: %s is not a coordinator\n", token);
+                else if (is_none(aval(VAR_coordSlaveServers)[idx]))
+                    elog(ERROR, "ERROR: slave for the coordinator %s is not configured.\n", token);
+                else
+                    AddMember(nodeList, token);
+            } while(GetToken());
+            if (nodeList)
+                failover_coordinator(nodeList);
+            CleanArray(nodeList);
+        }
+        return;
+    }
+    else if (TestToken("datanode"))
+    {
+        /* Fixed typos in the two error messages below ("configired",
+         * trailing comma instead of a period). */
+        if (!isVarYes(VAR_datanodeSlave))
+            elog(ERROR, "ERROR: datanode slave is not configured.\n");
+        else if (!GetToken())
+            elog(ERROR, "ERROR: please specify failover datanode command option.\n");
+        else
+        {
+            char **nodeList = NULL;
+
+            /* Collect only datanodes that actually have a slave. */
+            do
+            {
+                if ((idx = datanodeIdx(token)) < 0)
+                    elog(ERROR, "ERROR: %s is not a datanode.\n", token);
+                else if (is_none(aval(VAR_datanodeSlaveServers)[idx]))
+                    elog(ERROR, "ERROR: slave for the datanode %s is not configured.\n", token);
+                else
+                    AddMember(nodeList, token);
+            } while(GetToken());
+            if (nodeList)
+                failover_datanode(nodeList);
+            CleanArray(nodeList);
+        }
+    }
+    else
+        elog(ERROR, "ERROR: invalid failover command option %s.\n", token);
+}
+
+/*
+ * Reconnect command ... reconnect gtm_proxy [all | nodename ... ]
+ *
+ * Reconnects the named gtm proxies (or all of them) to the current GTM.
+ */
+static void do_reconnect_command(char *line)
+{
+    char *token;
+
+    /* Fixed typo in the error message ("specifiy"). */
+    if (GetToken() == NULL)
+        elog(ERROR, "ERROR: Please specify option to reconnect command.\n");
+    else if (TestToken("gtm_proxy"))
+    {
+        if (!isVarYes(VAR_gtmProxy))
+            elog(ERROR, "ERROR: gtm proxy is not configured.\n");
+        else if ((GetToken() == NULL) || TestToken("all"))
+            reconnect_gtm_proxy_all();
+        else
+        {
+            char **nodeList = NULL;
+            int idx;
+            do
+            {
+                if ((idx = gtmProxyIdx(token)) < 0)
+                    elog(ERROR, "ERROR: %s is not gtm_proxy.\n", token);
+                else
+                    AddMember(nodeList, token);
+            } while(GetToken());
+            if (nodeList)
+                reconnect_gtm_proxy(nodeList);
+            CleanArray(nodeList);
+        }
+    }
+    else
+        elog(ERROR, "ERROR: invalid option %s for reconnect command.\n", token);
+    return;
+}
+
+
+
+/*
+ * Kill command ... kill nodename, kill all,
+ * kill gtm [master|slave|all],
+ * kill gtm_proxy [nodename|all] ...
+ * kill coordinator [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ * kill datanode [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ */
+static void do_kill_command(char *line)
+{
+    char *token;
+
+    /* Fixed typos in two messages below ("specifiy", "additonal"). */
+    if (GetToken() == NULL)
+        elog(ERROR, "ERROR: Please specify option to kill command\n");
+    else if (TestToken("gtm"))
+    {
+        if ((GetToken() == NULL) || TestToken("all"))
+        {
+            kill_gtm_master();
+            if (isVarYes(VAR_gtmSlave))
+                kill_gtm_slave();
+        }
+        else if (TestToken("master"))
+            kill_gtm_master();
+        else if (TestToken("slave"))
+        {
+            if (isVarYes(VAR_gtmSlave))
+                kill_gtm_slave();
+            else
+                elog(ERROR, "ERROR: GTM slave is not configured.\n");
+        }
+        else
+            elog(ERROR, "ERROR: input value \"%s\" is invalid.\n", token);
+        return;
+    }
+    else if (TestToken("gtm_proxy"))
+    {
+        if (GetToken() == NULL)
+            elog(ERROR, "ERROR: Please specify additional option to kill gtm_proxies\n");
+        else if (TestToken("all"))
+            kill_gtm_proxy(aval(VAR_gtmProxyNames));
+        else
+        {
+            char **nodeList = Malloc0(sizeof(char*));
+            do {
+                AddMember(nodeList, token);
+            } while(GetToken());
+            kill_gtm_proxy(nodeList);
+            clean_array(nodeList);
+        }
+        return;
+    }
+    else if (TestToken("coordinator"))
+    {
+        if ((GetToken() == NULL) || TestToken("all"))
+        {
+            kill_coordinator_master(aval(VAR_coordNames));
+            if (isVarYes(VAR_coordSlave))
+                kill_coordinator_slave(aval(VAR_coordNames));
+        }
+        /*
+         * BUGFIX: this was a plain "if", so after the "all" case above the
+         * trailing else-branch also ran, trying to kill a node literally
+         * named "all" (or an empty token).  Now an else-if chain, matching
+         * the datanode branch below.
+         */
+        else if (TestToken("master"))
+        {
+            if ((GetToken() == NULL) || (TestToken("all")))
+                kill_coordinator_master(aval(VAR_coordNames));
+            else
+            {
+                char **nodeList = Malloc0(sizeof(char *));
+                do {
+                    AddMember(nodeList, token);
+                } while (GetToken());
+                kill_coordinator_master(nodeList);
+                clean_array(nodeList);
+            }
+        }
+        else if (TestToken("slave"))
+        {
+            if ((GetToken() == NULL) || (TestToken("all")))
+                kill_coordinator_slave(aval(VAR_coordNames));
+            else
+            {
+                char **nodeList = Malloc0(sizeof(char *));
+                do {
+                    AddMember(nodeList, token);
+                } while (GetToken());
+                kill_coordinator_slave(nodeList);
+                clean_array(nodeList);
+            }
+        }
+        else
+        {
+            char **nodeList = Malloc0(sizeof(char *));
+            do {
+                AddMember(nodeList, token);
+            } while (GetToken());
+            kill_coordinator_master(nodeList);
+            if (isVarYes(VAR_coordSlave))
+                kill_coordinator_slave(nodeList);
+            clean_array(nodeList);
+        }
+        return;
+    }
+    else if (TestToken("datanode"))
+    {
+        if ((GetToken() == NULL) || (TestToken("all")))
+        {
+            kill_datanode_master(aval(VAR_datanodeNames));
+            if (isVarYes(VAR_datanodeSlave))
+                /* BUGFIX: was aval(VAR_coordNames) -- passed coordinator
+                 * names when killing the datanode slaves. */
+                kill_datanode_slave(aval(VAR_datanodeNames));
+        }
+        else if (TestToken("master"))
+        {
+            if ((GetToken() == NULL) || (TestToken("all")))
+                kill_datanode_master(aval(VAR_datanodeNames));
+            else
+            {
+                char **nodeList = Malloc0(sizeof(char *));
+                do{
+                    AddMember(nodeList, token);
+                } while (GetToken());
+                kill_datanode_master(nodeList);
+                clean_array(nodeList);
+            }
+        }
+        else if (TestToken("slave"))
+        {
+            if ((GetToken() == NULL) || (TestToken("all")))
+                kill_datanode_slave(aval(VAR_datanodeNames));
+            else
+            {
+                char **nodeList = Malloc0(sizeof(char *));
+                do {
+                    AddMember(nodeList, token);
+                } while (GetToken());
+                kill_datanode_slave(nodeList);
+                clean_array(nodeList);
+            }
+        }
+        else
+        {
+            char **nodeList = Malloc0(sizeof(char *));
+            do {
+                AddMember(nodeList, token);
+            } while (GetToken());
+            kill_datanode_master(nodeList);
+            if (isVarYes(VAR_datanodeSlave))
+                kill_datanode_slave(nodeList);
+            clean_array(nodeList);
+        }
+    }
+    else if (TestToken("all"))
+    {
+        /* Kill everything: slaves before masters, GTM last. */
+        if(isVarYes(VAR_datanodeSlave))
+            kill_datanode_slave(aval(VAR_datanodeNames));
+        kill_datanode_master(aval(VAR_datanodeNames));
+        if (isVarYes(VAR_coordSlave))
+            kill_coordinator_slave(aval(VAR_coordNames));
+        kill_coordinator_master(aval(VAR_coordNames));
+        if (isVarYes(VAR_gtmProxy))
+            kill_gtm_proxy(aval(VAR_gtmProxyNames));
+        if (isVarYes(VAR_gtmSlave))
+            kill_gtm_slave();
+        kill_gtm_master();
+    }
+    else
+    {
+        /* Bare node names: let kill_something() resolve each one. */
+        do {
+            kill_something(token);
+        } while (GetToken());
+    }
+    return;
+}
+
+
+/*
+ * Initialize and start the whole cluster in dependency order:
+ * GTM master, GTM slave, GTM proxies, coordinators (masters then slaves),
+ * datanodes (masters then slaves), and finally cross-register all nodes.
+ */
+static void init_all(void)
+{
+    init_gtm_master();
+    start_gtm_master();
+    if (isVarYes(VAR_gtmSlave))
+    {
+        init_gtm_slave();
+        start_gtm_slave();
+    }
+    if (isVarYes(VAR_gtmProxy))
+    {
+        init_gtm_proxy_all();
+        start_gtm_proxy_all();
+    }
+    init_coordinator_master_all();
+    start_coordinator_master_all();
+    if (isVarYes(VAR_coordSlave))
+    {
+        init_coordinator_slave_all();
+        start_coordinator_slave_all();
+    }
+    init_datanode_master_all();
+    start_datanode_master_all();
+    if (isVarYes(VAR_datanodeSlave))
+    {
+        init_datanode_slave_all();
+        start_datanode_slave_all();
+    }
+    /* Register every node on every other node (CREATE NODE etc.). */
+    configure_nodes_all();
+}
+
+
+/*
+ * Init command ... init all
+ * init gtm [master|slave|all],
+ * init gtm_proxy [all| nodename ...]
+ * init coordinator [all | master [all | nodename ... ]| slave [all | nodename ... ]| nodename ... ]
+ * init datanode [all | master [all | nodename ...] | slave [all | nodename ... ] | nodename ... ]
+ */
+static void do_init_command(char *line)
+{
+ char *token;
+
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify option to init command.\n");
+ else if (TestToken("all"))
+ init_all();
+ else if (TestToken("gtm"))
+ {
+ if (!GetToken() || (TestToken("all")))
+ {
+ init_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ init_gtm_slave();
+ }
+ else if (TestToken("master"))
+ init_gtm_master();
+ else if (TestToken("slave"))
+ init_gtm_slave();
+ else
+ elog(ERROR, "ERROR: please specify master, slave or all for init gtm command.\n");
+ }
+ else if (TestToken("gtm_proxy"))
+ if (!GetToken() || TestToken("all"))
+ init_gtm_proxy_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while(GetToken());
+ init_gtm_proxy(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("coordinator"))
+ if (!GetToken() || TestToken("all"))
+ {
+ init_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ init_coordinator_slave_all();
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ init_coordinator_master_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while(GetToken());
+ init_coordinator_master(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ init_coordinator_slave_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while(GetToken());
+ init_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ init_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ init_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("datanode"))
+ if (!GetToken() || TestToken("all"))
+ {
+ init_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ init_datanode_slave_all();
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ init_datanode_master_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ init_datanode_master(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ init_datanode_slave_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ init_datanode_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ init_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ init_datanode_slave(nodeList);
+ }
+ else
+ elog(ERROR, "ERROR: invalid option for init command.\n");
+ return;
+}
+
+/*
+ * Start command ... start nodename, start all,
+ * start gtm [master|slave|all],
+ * start gtm_proxy [nodename|all] ...
+ * start coordinator [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ * start datanode [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ */
/*
 * Start every configured component, in dependency order: GTM master,
 * GTM slave and GTM proxies (when configured), then coordinator masters
 * and slaves, then datanode masters and slaves.  Components are assumed
 * to be already initialized (see init_all()).
 */
static void start_all(void)
{
	start_gtm_master();
	if (isVarYes(VAR_gtmSlave))
		start_gtm_slave();
	if (isVarYes(VAR_gtmProxy))
		start_gtm_proxy_all();
	start_coordinator_master_all();
	if (isVarYes(VAR_coordSlave))
		start_coordinator_slave_all();
	start_datanode_master_all();
	if (isVarYes(VAR_datanodeSlave))
		start_datanode_slave_all();
}
+
+static void do_start_command(char *line)
+{
+ char *token;
+
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify option to start command.\n");
+ else if (TestToken("all"))
+ start_all();
+ else if (TestToken("gtm"))
+ {
+ if (!GetToken() || (TestToken("all")))
+ {
+ start_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ start_gtm_slave();
+ }
+ else if (TestToken("master"))
+ start_gtm_master();
+ else if (TestToken("slave"))
+ start_gtm_slave();
+ else
+ elog(ERROR, "ERROR: please specify master, slave or all for start gtm command.\n");
+ }
+ else if (TestToken("gtm_proxy"))
+ if (!GetToken() || TestToken("all"))
+ start_gtm_proxy_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ start_gtm_proxy(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("coordinator"))
+ if (!GetToken() || TestToken("all"))
+ {
+ start_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ start_coordinator_slave_all();
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ start_coordinator_master_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_coordinator_master(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ start_coordinator_slave_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ start_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("datanode"))
+ if (!GetToken() || TestToken("all"))
+ {
+ start_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ start_datanode_slave_all();
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ start_datanode_master_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_datanode_master(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ start_datanode_slave_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_datanode_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ start_datanode_slave(nodeList);
+ }
+ else
+ elog(ERROR, "ERROR: invalid option for start command.\n");
+ return;
+}
+
+/*
+ * Stop command ... stop nodename, start all,
+ * stop gtm [master|slave|all],
+ * stop gtm_proxy [nodename|all] ...
+ * stop coordinator [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ * stop datanode [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ *
+ * Can insert -m immediate option at any place.
+ */
/*
 * Stop every configured component in reverse dependency order (the
 * opposite of start_all()): coordinator slaves, coordinator masters,
 * datanode slaves, datanode masters, GTM proxies, GTM slave, and GTM
 * master last.
 *
 * "immediate" is the pg_ctl shutdown mode string ("smart", "fast" or
 * "immediate"), or NULL for the default; it is only passed to the
 * coordinator/datanode stop routines — GTM components ignore it.
 */
static void stop_all(char *immediate)
{
	if (isVarYes(VAR_coordSlave))
		stop_coordinator_slave_all(immediate);
	stop_coordinator_master_all(immediate);
	if (isVarYes(VAR_datanodeSlave))
		stop_datanode_slave_all(immediate);
	stop_datanode_master_all(immediate);
	if (isVarYes(VAR_gtmProxy))
		stop_gtm_proxy_all();
	if (isVarYes(VAR_gtmSlave))
		stop_gtm_slave();
	stop_gtm_master();
}
+
+
/*
 * Read the next token into "var" as a Strdup'ed copy the caller must
 * freeAndReset(); if no token is left, log "msg" and return from the
 * *calling* (void) function.  Note the early return skips any cleanup of
 * previously assigned variables in the caller.
 */
#define GetAndSet(var, msg) do{if(!GetToken()){elog(ERROR, msg); return;} var=Strdup(token);}while(0)
+/*
+ * Add command
+ */
+static void do_add_command(char *line)
+{
+ char *token;
+ char *name;
+ char *host;
+ char *port;
+ char *pooler;
+ char *dir;
+ char *archDir;
+ char *dnode;
+
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: Specify options for add command.\n");
+ return;
+ }
+ if (TestToken("gtm"))
+ {
+ /*
+ * add gtm slave name host port dir
+ */
+
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: Specify option for add gtm command.\n");
+ return;
+ }
+ if (!TestToken("slave"))
+ {
+ elog(ERROR, "ERROR: you can specify only slave to add gtm command. %s is invalid.\n", token);
+ return;
+ }
+ GetAndSet(name, "ERROR: please specify the name of gtm slave\n");
+ GetAndSet(host, "ERROR: please specify the host name for gtm slave\n");
+ GetAndSet(port, "ERROR: please specify the port number for gtm slave\n");
+ GetAndSet(dir, "ERROR: please specify the working director for gtm slave\n");
+ add_gtmSlave(name, host, atoi(port), dir);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+ freeAndReset(dir);
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ /*
+ * Add gtm_proxy name host port dir
+ */
+ GetAndSet(name, "ERROR: please specify the name of gtm_proxy\n");
+ GetAndSet(host, "ERROR: please specify the host name for gtm_proxy\n");
+ GetAndSet(port, "ERROR: please specify the port number for gtm_proxy\n");
+ GetAndSet(dir, "ERROR: please specify the working director for gtm_proxy\n");
+ add_gtmProxy(name, host, atoi(port), dir);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+ freeAndReset(dir);
+ }
+ else if (TestToken("coordinator"))
+ {
+ /*
+ * Add coordinator master name host port pooler dir
+ * Add coordinator slave name host dir
+ */
+ if (!GetToken() || (!TestToken("master") && !TestToken("slave")))
+ {
+ elog(ERROR, "ERROR: please speify master or slave.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of the coordinator master\n");
+ GetAndSet(host, "ERROR: please specify the host for the coordinator masetr\n");
+ GetAndSet(port, "ERROR: please specify the port number for the coordinator master\n");
+ GetAndSet(pooler, "ERROR: please specify the pooler port number for the coordinator master.\n");
+ GetAndSet(dir, "ERROR: please specify the working director for the coordinator master\n");
+ add_coordinatorMaster(name, host, atoi(port), atoi(pooler), dir);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+ freeAndReset(pooler);
+ freeAndReset(dir);
+ }
+ else
+ {
+ GetAndSet(name, "ERROR: please specify the name of the coordinator slave\n");
+ GetAndSet(host, "ERROR: please specify the host for the coordinator slave\n");
+ GetAndSet(dir, "ERROR: please specify the working director for coordinator slave\n");
+ GetAndSet(archDir, "ERROR: please specify WAL archive directory for coordinator slave\n");
+ add_coordinatorSlave(name, host, dir, archDir);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(dir);
+ }
+ }
+ else if (TestToken("datanode"))
+ {
+ if (!GetToken() || (!TestToken("master") && !TestToken("slave")))
+ {
+ elog(ERROR, "ERROR: please speify master or slave.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of the datanode master\n");
+ GetAndSet(host, "ERROR: please specify the host for the datanode masetr\n");
+ GetAndSet(port, "ERROR: please specify the port number for the datanode master\n");
+#ifdef XCP
+ GetAndSet(pooler, "ERROR: please specify the pooler port number for the datanode master.\n");
+#endif
+ GetAndSet(dir, "ERROR: please specify the working director for the datanode master\n");
+ GetAndSet(dnode, "ERROR: please specify name of existing datanode of which this will be a copy of. Specify 'none' for a bare datanode\n");
+#ifdef XCP
+ add_datanodeMaster(name, host, atoi(port), atoi(pooler), dir, dnode);
+#else
+ add_datanodeMaster(name, host, atoi(port), dir, dnode);
+#endif
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+#ifdef XCP
+ freeAndReset(pooler);
+#endif
+ freeAndReset(dir);
+ }
+ else
+ {
+ GetAndSet(name, "ERROR: please specify the name of the datanode slave\n");
+ GetAndSet(host, "ERROR: please specify the host for the datanode slave\n");
+ GetAndSet(dir, "ERROR: please specify the working director for datanode slave\n");
+ GetAndSet(archDir, "ERROR: please specify WAL archive directory for datanode slave\n");
+ add_datanodeSlave(name, host, dir, archDir);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(dir);
+ }
+ }
+ return;
+}
+
+static void do_remove_command(char *line)
+{
+ char *token;
+ char *name;
+ bool clean_opt = FALSE;
+
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: Please specify gtm, gtm_master, coordinator or datanode after add command.\n");
+ return;
+ }
+ if (TestToken("gtm"))
+ {
+ if (!GetToken() || !TestToken("slave"))
+ {
+ elog(ERROR, "ERROR: Please speciy slave to add gtm command\n");
+ return;
+ }
+ if (GetToken() && TestToken("clean"))
+ clean_opt = TRUE;
+ remove_gtmSlave(clean_opt);
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ GetAndSet(name, "ERROR: please specify gtm proxy name to remove.\n");
+ if (TestToken("clean"))
+ {
+ clean_opt = TRUE;
+ freeAndReset(name);
+ GetAndSet(name, "ERROR: please specify gtm proxy name to remove.\n");
+ }
+ remove_gtmProxy(name, clean_opt );
+ freeAndReset(name);
+ }
+ else if (TestToken("coordinator"))
+ {
+ if (!GetToken() || (!TestToken("master") && !TestToken("slave")))
+ {
+ elog(ERROR, "ERROR: please speify master or slave.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of the coordinator master\n");
+ if (TestToken("clean"))
+ {
+ clean_opt = TRUE;
+ freeAndReset(name);
+ GetAndSet(name, "ERROR: please specify the name of the coordinator master\n");
+ }
+ remove_coordinatorMaster(name, clean_opt);
+ freeAndReset(name);
+ }
+ else
+ {
+ GetAndSet(name, "ERROR: please specify the name of the coordinator slave\n");
+ if (TestToken("clean"))
+ {
+ clean_opt = TRUE;
+ freeAndReset(name);
+ GetAndSet(name, "ERROR: please specify the name of the coordinator master\n");
+ }
+ remove_coordinatorSlave(name, clean_opt);
+ freeAndReset(name);
+ }
+ }
+ else if (TestToken("datanode"))
+ {
+ if (!GetToken() || (!TestToken("master") && !TestToken("slave")))
+ {
+ elog(ERROR, "ERROR: please speify master or slave.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of the datanode master\n");
+ if (TestToken("clean"))
+ {
+ clean_opt = TRUE;
+ freeAndReset(name);
+ GetAndSet(name, "ERROR: please specify the name of the coordinator master\n");
+ }
+ remove_datanodeMaster(name, clean_opt);
+ freeAndReset(name);
+ }
+ else
+ {
+ GetAndSet(name, "ERROR: please specify the name of the datanode slave\n");
+ if (TestToken("clean"))
+ {
+ clean_opt = TRUE;
+ freeAndReset(name);
+ GetAndSet(name, "ERROR: please specify the name of the coordinator master\n");
+ }
+ remove_datanodeSlave(name, clean_opt);
+ freeAndReset(name);
+ }
+ }
+ else
+ elog(ERROR, "ERROR:Add command argument %s is invalid.\n", token);
+ return;
+}
+
+
+
+
+
+
+
+
/* Last "-m" shutdown-mode value ("smart", "fast" or "immediate"), or NULL. */
static char *m_Option;
+
+static char *handle_m_option(char *line, char **m_option)
+{
+ char *token;
+
+ freeAndReset(m_Option);
+ if (GetToken() == NULL)
+ return(line);
+ else if (TestToken("immediate"))
+ m_Option = Strdup("immediate");
+ else if (TestToken("fast"))
+ m_Option = Strdup("fast");
+ else if (TestToken("smart"))
+ m_Option = Strdup("smart");
+ else
+ elog(ERROR, "ERROR: specify smart, fast or immediate for -m option value.\n");
+ return(line);
+}
+
+
+
+static void do_stop_command(char *line)
+{
+ char *token;
+
+ freeAndReset(m_Option);
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify option to stop command.\n");
+ else if (testToken("-m"))
+ {
+ line = handle_m_option(line, &m_Option);
+ GetToken();
+ }
+ if (TestToken("all"))
+ {
+ if (GetToken() && TestToken("-m"))
+ handle_m_option(line, &m_Option);
+ stop_all(m_Option);
+ }
+ else if (TestToken("gtm"))
+ {
+ if (m_Option)
+ elog(WARNING, "-m option is not available with gtm. Ignoring.\n");
+ if (!GetToken() || (TestToken("all")))
+ {
+ stop_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ stop_gtm_slave();
+ }
+ else if (TestToken("master"))
+ stop_gtm_master();
+ else if (TestToken("slave"))
+ stop_gtm_slave();
+ else
+ elog(ERROR, "ERROR: please specify master, slave or all for stop gtm command.\n");
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ if (m_Option)
+ elog(WARNING, "-m option is not available with gtm_prxy. Ignoring.\n");
+ if (!GetToken() || TestToken("all"))
+ stop_gtm_proxy_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ stop_gtm_proxy(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("coordinator"))
+ if (!GetToken() || TestToken("all"))
+ {
+ stop_coordinator_master_all(m_Option);
+ if (isVarYes(VAR_coordSlave))
+ stop_coordinator_slave_all(m_Option);
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ stop_coordinator_master_all(m_Option);
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_coordinator_master(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ stop_coordinator_slave_all(m_Option);
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_coordinator_slave(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_coordinator_master(nodeList, m_Option);
+ if (isVarYes(VAR_coordSlave))
+ stop_coordinator_slave(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else if (TestToken("datanode"))
+ if (!GetToken() || TestToken("all"))
+ {
+ stop_datanode_master_all(m_Option);
+ if (isVarYes(VAR_datanodeSlave))
+ stop_datanode_slave_all(m_Option);
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ stop_datanode_master_all(m_Option);
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_datanode_master(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ stop_datanode_slave_all(m_Option);
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_datanode_slave(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_datanode_master(nodeList, m_Option);
+ if (isVarYes(VAR_datanodeSlave))
+ stop_datanode_slave(nodeList, m_Option);
+ }
+ else
+ elog(ERROR, "ERROR: invalid option for stop command.\n");
+ return;
+}
+
+/*
+ * Test staff
+ */
/*
 * "test" command: exercise the shell-execution machinery with verbosity
 * temporarily raised to DEBUG3.  Forms:
 *   test ssh host cmd...            -- run cmd on host
 *   test ssh-stdin host file cmd... -- same, feeding local file as stdin
 *   test local cmd...               -- run cmd locally
 *   test local-stdin file cmd...    -- same, feeding local file as stdin
 */
static void do_test(char *line)
{
	char *token;
	int logLevel;
	int printLevel;

	/* Raise verbosity, remembering the previous levels for restore below. */
	logLevel = setLogMsgLevel(DEBUG3);
	printLevel = setPrintMsgLevel(DEBUG3);

	GetToken();
	if (TestToken("ssh"))
	{
		cmdList_t *cmdList;
		cmd_t *cmd;

		/* NOTE(review): GetToken() result is not checked here; "test ssh"
		 * with no host would pass a NULL token to Strdup -- confirm. */
		GetToken();
		cmdList = initCmdList();
		cmd = Malloc0(sizeof(cmd_t));
		cmd->host = Strdup(token);
		cmd->command = Strdup(line);
		cmd->localStdin = NULL;
		addCmd(cmdList, cmd);
		elog(INFO, "INFO: Testing ssh %s \"%s\"\n", token, line);
		doCmdList(cmdList);
		cleanCmdList(cmdList);
	}
	else if (TestToken("ssh-stdin"))
	{
		cmdList_t *cmdList;
		cmd_t *cmd;

		cmdList = initCmdList();
		cmd = Malloc0(sizeof(cmd_t));
		/* First token: target host; second: local file to feed as stdin. */
		GetToken();
		cmd->host = Strdup(token);
		GetToken();
		cmd->localStdin = Strdup(token);
		cmd->command = Strdup(line);
		addCmd(cmdList, cmd);
		elog(INFO, "Testing ssh %s \"%s\" < %s\n", cmd->host, cmd->command, cmd->localStdin);
		doCmdList(cmdList);
		cleanCmdList(cmdList);
	}
	else if (TestToken("local"))
	{
		cmdList_t *cmdList;
		cmd_t *cmd;

		/* initCmd(NULL): NULL host means run on the local machine. */
		cmdList = initCmdList();
		addCmd(cmdList, (cmd = initCmd(NULL)));
		cmd->command = Strdup(line);
		elog(INFO, "Testing local, \"%s\"\n", cmd->command);
		doCmdList(cmdList);
		cleanCmdList(cmdList);
	}
	else if (TestToken("local-stdin"))
	{
		cmdList_t *cmdList;
		cmd_t *cmd;

		cmdList = initCmdList();
		addCmd(cmdList, (cmd = initCmd(NULL)));
		GetToken();
		cmd->localStdin = Strdup(token);
		cmd->command = Strdup(line);
		elog(INFO, "Testing local-stdin, \"%s\"\n", cmd->command);
		doCmdList(cmdList);
		cleanCmdList(cmdList);
	}
	/* Restore the caller's verbosity. */
	setLogMsgLevel(logLevel);
	setPrintMsgLevel(printLevel);
}
+
+
+/* ==================================================================
+ *
+ * Staff specified by "node name", not node type
+ *
+ * ==================================================================
+ */
+static void kill_something(char *nodeName)
+{
+ char *nodeList[2];
+
+ nodeList[1] = NULL;
+ switch(getNodeType(nodeName))
+ {
+ case NodeType_UNDEF:
+ elog(ERROR, "ERROR: Could not find name \"%s\" in any node type.\n", nodeName);
+ return;
+ case NodeType_GTM:
+ elog(ERROR, "ERROR: Issue kill gtm command to kill gtm master/slave\n");
+ return;
+ case NodeType_GTM_PROXY:
+ nodeList[0] = nodeName;
+ kill_gtm_proxy(nodeList);
+ return;
+ case NodeType_COORDINATOR:
+ nodeList[0] = nodeName;
+ kill_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ kill_coordinator_slave(nodeList);
+ return;
+ case NodeType_DATANODE:
+ nodeList[0] = nodeName;
+ kill_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ kill_datanode_slave(nodeList);
+ return;
+ default:
+ elog(ERROR, "ERROR: internal error. Should not come here!\n");
+ return;
+ }
+}
+
/* Show the configuration of each name in a NULL-terminated list. */
static void show_config_something_multi(char **nodeList)
{
	char **node;

	for (node = nodeList; *node; node++)
		show_config_something(*node);
}
+
+static void show_config_something(char *nodeName)
+{
+ int idx;
+
+ switch(getNodeType(nodeName))
+ {
+ case NodeType_UNDEF:
+ elog(ERROR, "ERROR: Could not find name \"%s\" in any node type.\n", nodeName);
+ return;
+ case NodeType_GTM:
+ show_config_gtmMaster(TRUE, sval(VAR_gtmMasterServer));
+ if (isVarYes(VAR_gtmSlave))
+ show_config_gtmSlave(TRUE, sval(VAR_gtmSlaveServer));
+ return;
+ case NodeType_GTM_PROXY:
+ idx = gtmProxyIdx(nodeName);
+ show_config_gtmProxy(TRUE, idx, aval(VAR_gtmProxyServers)[idx]);
+ return;
+ case NodeType_COORDINATOR:
+ idx = coordIdx(nodeName);
+ show_config_coordMaster(TRUE, idx, aval(VAR_coordMasterServers)[idx]);
+ if (isVarYes(VAR_coordSlave))
+ show_config_coordSlave(TRUE, idx, aval(VAR_coordSlaveServers)[idx]);
+ return;
+ case NodeType_DATANODE:
+ idx = datanodeIdx(nodeName);
+ show_config_datanodeMaster(TRUE, idx, aval(VAR_datanodeMasterServers)[idx]);
+ if (isVarYes(VAR_datanodeSlave))
+ show_config_datanodeSlave(TRUE, idx, aval(VAR_datanodeSlaveServers)[idx]);
+ return;
+ case NodeType_SERVER:
+ {
+ char *hostList[2];
+ hostList[0] = nodeName;
+ hostList[1] = NULL;
+ show_config_servers(hostList);
+ return;
+ }
+ default:
+ elog(ERROR, "ERROR: internal error. Should not come here!\n");
+ return;
+ }
+}
+
+
+
+/* ========================================================================================
+ *
+ * Configuration staff
+ *
+ * ========================================================================================
+ */
/* Show the per-host configuration for each entry, skipping "none". */
static void show_config_servers(char **hostList)
{
	char **host;

	for (host = hostList; *host; host++)
	{
		if (is_none(*host))
			continue;
		show_config_host(*host);
	}
	return;
}
+
+/*
+ * show {config|configuration} [all | name .... | gtm [master|slave|all] | gtm_proxy [all | name ...] |
+ * coordinator [all | master | slave | name ... ] |
+ * host name .... ]
+ * With no option, will print common configuartion parameters and exit.
+ *
+ */
+static void show_basicConfig(void)
+{
+#ifdef XCP
+ elog(NOTICE, "========= Postgres-XL configuration Common Info ========================\n");
+ elog(NOTICE, "=== Overall ===\n");
+ elog(NOTICE, "Postgres-XL owner: %s\n", sval(VAR_pgxcOwner));
+ elog(NOTICE, "Postgres-XL user: %s\n", sval(VAR_pgxcUser));
+ elog(NOTICE, "Postgres-XL install directory: %s\n", sval(VAR_pgxcInstallDir));
+ elog(NOTICE, "pgxc_ctl home: %s\n", pgxc_ctl_home);
+ elog(NOTICE, "pgxc_ctl configuration file: %s\n", pgxc_ctl_config_path);
+ elog(NOTICE, "pgxc_ctl tmpDir: %s\n", sval(VAR_tmpDir));
+ elog(NOTICE, "pgxc_ctl localTempDir: %s\n", sval(VAR_localTmpDir));
+ elog(NOTICE, "pgxc_ctl log file: %s\n", logFileName);
+ elog(NOTICE, "pgxc_ctl configBackup: %s\n", isVarYes(VAR_configBackup) ? "y" : "n");
+ elog(NOTICE, "pgxc_ctl configBackupHost: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupHost) : "none");
+ elog(NOTICE, "pgxc_ctl configBackupFile: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupFile) : "none");
+ elog(NOTICE, "========= Postgres-XL configuration End Common Info ===================\n");
+#else
+ elog(NOTICE, "========= Postgres-XC configuration Common Info ========================\n");
+ elog(NOTICE, "=== Overall ===\n");
+ elog(NOTICE, "Postgres-XC owner: %s\n", sval(VAR_pgxcOwner));
+ elog(NOTICE, "Postgres-XC user: %s\n", sval(VAR_pgxcUser));
+ elog(NOTICE, "Postgres-XC install directory: %s\n", sval(VAR_pgxcInstallDir));
+ elog(NOTICE, "pgxc_ctl home: %s\n", pgxc_ctl_home);
+ elog(NOTICE, "pgxc_ctl configuration file: %s\n", pgxc_ctl_config_path);
+ elog(NOTICE, "pgxc_ctl tmpDir: %s\n", sval(VAR_tmpDir));
+ elog(NOTICE, "pgxc_ctl localTempDir: %s\n", sval(VAR_localTmpDir));
+ elog(NOTICE, "pgxc_ctl log file: %s\n", logFileName);
+ elog(NOTICE, "pgxc_ctl configBackup: %s\n", isVarYes(VAR_configBackup) ? "y" : "n");
+ elog(NOTICE, "pgxc_ctl configBackupHost: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupHost) : "none");
+ elog(NOTICE, "pgxc_ctl configBackupFile: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupFile) : "none");
+ elog(NOTICE, "========= Postgres-XC configuration End Common Info ===================\n");
+#endif
+}
+
+
+static void show_configuration(char *line)
+{
+ char *token;
+
+ GetToken();
+ if (line == NULL)
+ elog(ERROR, "ERROR: No configuration option is specified. Retruning.\n");
+ else if (TestToken("basic"))
+ show_basicConfig();
+ else if (TestToken("all"))
+ {
+ show_basicConfig();
+ show_config_servers(aval(VAR_allServers));
+ }
+ else if (TestToken("basic"))
+ {
+ show_basicConfig();
+ }
+ else if (TestToken("host"))
+ {
+ char **hostList = Malloc0(sizeof(char *));
+ do {
+ AddMember(hostList, token);
+ } while(GetToken());
+ if (hostList[0])
+ show_config_servers(hostList);
+ clean_array(hostList);
+ }
+ else if (TestToken("gtm"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ {
+ show_config_gtmMaster(TRUE, sval(VAR_gtmMasterServer));
+ if (isVarYes(VAR_gtmSlave))
+ show_config_gtmSlave(TRUE, sval(VAR_gtmSlaveServer));
+ }
+ else if (TestToken("master"))
+ show_config_gtmMaster(TRUE, sval(VAR_gtmMasterServer));
+ else if (TestToken("slave"))
+ {
+ if (isVarYes(VAR_gtmSlave))
+ show_config_gtmSlave(TRUE, sval(VAR_gtmSlaveServer));
+ else
+ elog(NOTICE, "NOTICE: gtm slave is not configured.\n");
+ }
+ else
+ elog(ERROR, "ERROR: invalid option %s for 'show config gtm' command.\n", token);
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ elog(ERROR, "ERROR: gtm proxies are not configured.\n");
+ }
+ else if ((GetToken() == NULL) || (TestToken("all")))
+ show_config_gtmProxies(aval(VAR_gtmProxyNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do{
+ int idx;
+ idx = gtmProxyIdx(token);
+ if (idx < 0)
+ elog(ERROR, "ERROR: Specified name %s is not GTM Proxy.\n", token);
+ else
+ AddMember(nodeList, token);
+ } while(GetToken());
+ show_config_gtmProxies(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("coordinator"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ show_config_coordMasterSlaveMulti(aval(VAR_coordNames));
+ else if (TestToken("master"))
+ {
+ if (GetToken() == NULL)
+ show_config_coordMasterMulti(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ show_config_coordMasterMulti(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if (!isVarYes(VAR_coordSlave))
+ elog(ERROR, "ERROR: Coordinator slave is not configured.\n");
+ else if (GetToken() == NULL)
+ show_config_coordMasterMulti(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ show_config_coordMasterMulti(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else
+ elog(ERROR, "ERROR: Invalid option %s for 'show config coordinator' command.\n", token);
+ }
+ else if (TestToken("datanode"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ show_config_datanodeMasterSlaveMulti(aval(VAR_datanodeNames));
+ else if (TestToken("master"))
+ {
+ if (GetToken() == NULL)
+ show_config_datanodeMasterMulti(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ show_config_datanodeMasterMulti(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if (!isVarYes(VAR_datanodeSlave))
+ elog(ERROR, "ERROR: Datanode slave is not configured.\n");
+ else if (GetToken() == NULL)
+ show_config_datanodeMasterMulti(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ show_config_datanodeMasterMulti(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else
+ elog(ERROR, "ERROR: Invalid option %s for 'show config datanode' command.\n", token);
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ show_config_something_multi(nodeList);
+ clean_array(nodeList);
+ }
+ return;
+}
+
/*
 * Print a one-line NOTICE-level summary of a node: name, port, working
 * directory, and the node's shared and node-specific extra configuration
 * file names.
 */
void print_simple_node_info(char *nodeName, char *port, char *dir,
							char *extraConfig, char *specificExtraConfig)
{
	elog(NOTICE,
		 "    Nodename: '%s', port: %s, dir: '%s'"
		 "    ExtraConfig: '%s', Specific Extra Config: '%s'\n",
		 nodeName, port, dir, extraConfig, specificExtraConfig);

}
+
+
+static void show_config_host(char *hostname)
+{
+ int ii;
+
+ lockLogFile();
+ elog(NOTICE, "====== Server: %s =======\n", hostname);
+ /* GTM Master */
+ if (strcmp(hostname, sval(VAR_gtmMasterServer)) == 0)
+ show_config_gtmMaster(TRUE, NULL);
+ /* GTM Slave */
+ if (isVarYes(VAR_gtmSlave) && (strcmp(sval(VAR_gtmSlaveServer), hostname) == 0))
+ show_config_gtmSlave(TRUE, NULL);
+ /* GTM Proxy */
+ if (isVarYes(VAR_gtmProxy))
+ for (ii = 0; aval(VAR_gtmProxyServers)[ii]; ii++)
+ if (strcmp(aval(VAR_gtmProxyServers)[ii], hostname) == 0)
+ show_config_gtmProxy(TRUE, ii, NULL);
+ /* Coordinator Master */
+ for (ii = 0; aval(VAR_coordMasterServers)[ii]; ii++)
+ if (strcmp(aval(VAR_coordMasterServers)[ii], hostname) == 0)
+ show_config_coordMaster(TRUE, ii, NULL);
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ for (ii = 0; aval(VAR_coordSlaveServers)[ii]; ii++)
+ if (strcmp(aval(VAR_coordSlaveServers)[ii], hostname) == 0)
+ show_config_coordSlave(TRUE, ii, NULL);
+ /* Datanode Master */
+ for (ii = 0; aval(VAR_datanodeMasterServers)[ii]; ii++)
+ if (strcmp(aval(VAR_datanodeMasterServers)[ii], hostname) == 0)
+ show_config_datanodeMaster(TRUE, ii, NULL);
+ /* Datanode Slave */
+ if (isVarYes(VAR_datanodeSlave))
+ for (ii = 0; aval(VAR_datanodeSlaveServers)[ii]; ii++)
+ if (strcmp(aval(VAR_datanodeSlaveServers)[ii], hostname) == 0)
+ show_config_datanodeSlave(TRUE, ii, NULL);
+ unlockLogFile();
+}
+
/* Show the per-host configuration for every host in a NULL-terminated list. */
void show_config_hostList(char **hostList)
{
	char **host;

	for (host = hostList; *host; host++)
		show_config_host(*host);
}
+/*
+ * Clean command
+ *
+ * clean {all |
+ * gtm [ all | master | slave ] |
+ * gtm_proxy [ all | nodename ... ]
+ * coordinator [[all | master | slave ] [nodename ... ]] |
+ * datanode [ [all | master | slave] [nodename ... ]}
+ */
+static void do_clean_command(char *line)
+{
+ char *token;
+ cmdList_t *cmdList = NULL;
+
+ GetToken();
+ if (token == NULL)
+ {
+ elog(ERROR, "ERROR: Please specify options for clean command.\n");
+ return;
+ }
+ if (TestToken("all"))
+ {
+ elog(INFO, "Cleaning all the directories and sockets.\n");
+ clean_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ clean_gtm_slave();
+ if (isVarYes(VAR_gtmProxy))
+ clean_gtm_proxy_all();
+ clean_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ clean_coordinator_slave_all();
+ clean_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ clean_datanode_slave_all();
+ }
+ else if (TestToken("gtm"))
+ {
+ GetToken();
+ if ((token == NULL) || TestToken("all"))
+ {
+ elog(INFO, "Cleaning GTM slave/master directories and sockets.\n");
+ clean_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ clean_gtm_slave();
+ }
+ else if (TestToken("master"))
+ {
+ clean_gtm_master();
+ }
+ else if (TestToken("slave"))
+ {
+ if (isVarYes(VAR_gtmSlave))
+ clean_gtm_slave();
+ else
+ elog(ERROR, "ERROR: gtm slave is not configured.\n");
+ }
+ else
+ elog(ERROR, "ERROR: invalid clean command option %s.\n", token);
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ elog(INFO, "Cleaning specified gtm_proxy.\n");
+ GetToken();
+ if (!isVarYes(VAR_gtmProxy))
+ elog(ERROR, "ERROR: gtm proxy is not configured.\n");
+ else if ((token == NULL) || TestToken("all"))
+ clean_gtm_proxy_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ clean_gtm_proxy(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("coordinator"))
+ {
+ GetToken();
+ if (token == NULL)
+ {
+ elog(INFO, "Clearing coordinator master and slave.\n");
+ clean_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ clean_coordinator_slave_all();
+ }
+ else if (TestToken("all"))
+ {
+ elog(INFO, "Clearing coordinator master and slave.\n");
+ GetToken();
+ if (token == NULL)
+ {
+ clean_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ clean_coordinator_slave_all();
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ clean_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ clean_coordinator_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("master"))
+ {
+ elog(INFO, "Cleaning specified coordinator master.\n");
+ GetToken();
+ if (token == NULL)
+ clean_coordinator_master_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ clean_coordinator_master(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ elog(INFO, "Cleaning specified coordinator slave.\n");
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slave is not configured.\n");
+ return;
+ }
+ GetToken();
+ if (token == NULL)
+ clean_coordinator_slave_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ clean_coordinator_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList = NULL;
+ elog(INFO, "Cleaning specified coordinator.\n");
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ clean_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ clean_coordinator_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if(TestToken("datanode"))
+ {
+ GetToken();
+ if (token == NULL)
+ {
+ elog(INFO, "Cleaning all the datanodes.\n");
+ clean_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ clean_datanode_slave_all();
+ }
+ else if (TestToken("all"))
+ {
+ GetToken();
+ if (token == NULL)
+ {
+ elog(INFO, "Cleaning all the datanodes.\n");
+ clean_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ clean_datanode_slave_all();
+ }
+ else
+ {
+ char **nodeList = NULL;
+ elog(INFO, "Cleaning specified datanodes\n");
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ clean_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ clean_datanode_slave(nodeList);
+ }
+ }
+ else if (TestToken("master"))
+ {
+ GetToken();
+ if (token == NULL)
+ {
+ elog(INFO, "Cleaning all the datanode masters.\n");
+ clean_datanode_master_all();
+ }
+ else
+ {
+ char **nodeList = NULL;
+ elog(INFO, "Cleaning specified datanode masters.\n");
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ clean_datanode_master(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ elog(INFO, "Cleaning specified datanode slaves.\n");
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: Datanode slave is not configured.\n");
+ return;
+ }
+ GetToken();
+ if (token == NULL)
+ clean_datanode_slave_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ clean_datanode_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ clean_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ clean_datanode_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ elog(INFO, "Cleaning specifieid nodes.\n");
+ do
+ {
+ switch(getNodeType(token))
+ {
+ case NodeType_UNDEF:
+ elog(ERROR, "ERROR: %s is not found, skipping\n", token);
+ continue;
+ case NodeType_GTM:
+ elog(INFO, "Cleaning GTM.\n");
+ if (cmdList == NULL)
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_cleanGtmMaster());
+ if (isVarYes(VAR_gtmSlave))
+ addCmd(cmdList, prepare_cleanGtmSlave());
+ continue;
+ case NodeType_GTM_PROXY:
+ elog(INFO, "Cleaning GTM proxy %s.\n", token);
+ if (cmdList == NULL)
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_cleanGtmProxy(token));
+ continue;
+ case NodeType_COORDINATOR:
+ elog(INFO, "Cleaning coordinator %s\n", token);
+ if (cmdList == NULL)
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_cleanCoordinatorMaster(token));
+ if (isVarYes(VAR_coordSlave))
+ addCmd(cmdList, prepare_cleanCoordinatorSlave(token));
+ continue;
+ case NodeType_DATANODE:
+ elog(INFO, "Cleaning datanode %s\n", token);
+ if (cmdList == NULL)
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_cleanDatanodeMaster(token));
+ if (isVarYes(VAR_coordSlave))
+ addCmd(cmdList, prepare_cleanDatanodeSlave(token));
+ continue;
+ case NodeType_SERVER:
+ elog(ERROR, "ERROR: clearing host is not supported yet. Skipping\n");
+ continue;
+ default:
+ elog(ERROR, "ERROR: internal error.\n");
+ continue;
+ }
+ } while(GetToken());
+ if (cmdList)
+ {
+ int rc;
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ elog(INFO, "Done.\n");
+ }
+ return;
+ }
+}
+
+static void do_configure_command(char *line)
+{
+ char *token;
+ char **nodeList = NULL;
+
+ if (!GetToken() || TestToken("all"))
+ {
+ configure_nodes(aval(VAR_coordNames));
+ }
+ else
+ {
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ configure_nodes(nodeList);
+ CleanArray(nodeList);
+ }
+}
+
+static int selectCoordinator(void)
+{
+ int sz = arraySizeName(VAR_coordNames);
+ int i;
+
+ for (;;)
+ {
+ i = rand() % sz;
+ if (is_none(aval(VAR_coordMasterServers)[i]))
+ continue;
+ else
+ return i;
+ }
+ return -1;
+}
+
+
/*
 * Show the tables stored on the given datanode, together with their
 * distribution type and distribution column, by running a catalog query
 * (pg_class/pgxc_class/pgxc_node) through a randomly chosen coordinator.
 *
 * databasename and username may be NULL; psql is then invoked without
 * -d and/or -U.  Returns 0 on success, 1 on error.
 */
static int show_Resource(char *datanodeName, char *databasename, char *username)
{
	int cdIdx = selectCoordinator();	/* coordinator to run the query through */
	int dnIdx = datanodeIdx(datanodeName);
	FILE *f;
	char queryFname[MAXPATH+1];

	elog(NOTICE, "NOTICE: showing tables in the datanode '%s', database %s, user %s\n",
		 datanodeName,
		 databasename ? databasename : "NULL",
		 username ? username : "NULL");
	if (dnIdx < 0)
	{
		elog(ERROR, "ERROR: %s is not a datanode.\n", datanodeName);
		return 1;
	}
	/* Write the query into a local temp file fed to psql -f. */
	createLocalFileName(GENERAL, queryFname, MAXPATH);
	if ((f = fopen(queryFname, "w")) == NULL)
	{
		elog(ERROR, "ERROR: Could not create temporary file %s, %s\n", queryFname, strerror(errno));
		return 1;
	}
	/*
	 * First branch: tables distributed by a column (pcattnum matches an
	 * attribute); second branch: tables with no distribution column
	 * (pcattnum = 0, e.g. replicated/round-robin).
	 */
	fprintf(f,
			"SELECT pg_class.relname relation,\n"
			"       CASE\n"
			"         WHEN pclocatortype = 'H' THEN 'Hash'\n"
			"         WHEN pclocatortype = 'M' THEN 'Modulo'\n"
			"         WHEN pclocatortype = 'N' THEN 'Round Robin'\n"
			"         WHEN pclocatortype = 'R' THEN 'Replicate'\n"
			"         ELSE 'Unknown'\n"
			"       END AS distribution,\n"
			"       pg_attribute.attname attname,\n"
			"       pgxc_node.node_name nodename\n"
			"  FROM pg_class, pgxc_class, pg_attribute, pgxc_node\n"
			" WHERE pg_class.oid = pgxc_class.pcrelid\n"
			"       and pg_class.oid = pg_attribute.attrelid\n"
			"       and pgxc_class.pcattnum = pg_attribute.attnum\n"
			"       and pgxc_node.node_name = '%s'\n"
			"       and pgxc_node.oid = ANY (pgxc_class.nodeoids)\n"
			"UNION\n"
			"SELECT pg_class.relname relation,\n"
			"       CASE\n"
			"         WHEN pclocatortype = 'H' THEN 'Hash'\n"
			"         WHEN pclocatortype = 'M' THEN 'Modulo'\n"
			"         WHEN pclocatortype = 'N' THEN 'Round Robin'\n"
			"         WHEN pclocatortype = 'R' THEN 'Replicate'\n"
			"         ELSE 'Unknown'\n"
			"       END AS distribution,\n"
			"       '- none -' attname,\n"
			"       pgxc_node.node_name nodename\n"
			"  FROM pg_class, pgxc_class, pg_attribute, pgxc_node\n"
			" WHERE pg_class.oid = pgxc_class.pcrelid\n"
			"       and pg_class.oid = pg_attribute.attrelid\n"
			"       and pgxc_class.pcattnum = 0\n"
			"       and pgxc_node.node_name = '%s'\n"
			"       and pgxc_node.oid = ANY (pgxc_class.nodeoids)\n"
			";\n",
			datanodeName, datanodeName);
	fclose(f);
	/* Run psql against the selected coordinator, with optional -d / -U. */
	if (databasename == NULL)
		doImmediateRaw("psql -p %d -h %s --quiet -f %s",
					   atoi(aval(VAR_coordPorts)[cdIdx]), aval(VAR_coordMasterServers)[cdIdx],
					   queryFname);
	else if (username == NULL)
		doImmediateRaw("psql -p %d -h %s --quiet -f %s -d %s",
					   atoi(aval(VAR_coordPorts)[cdIdx]), aval(VAR_coordMasterServers)[cdIdx],
					   queryFname, databasename);
	else
		doImmediateRaw("psql -p %d -h %s --quiet -f %s -d %s -U %s",
					   atoi(aval(VAR_coordPorts)[cdIdx]), aval(VAR_coordMasterServers)[cdIdx],
					   queryFname, databasename, username);
	doImmediateRaw("rm -f %s", queryFname);
	return 0;
}
+
+/*
+ * =======================================================================================
+ *
+ * Loop of main command processor
+ *
+ * ======================================================================================
+ */
+void do_command(FILE *inf, FILE *outf)
+{
+ int istty = ((inf == stdin) && isatty(fileno(stdin)));
+ int interactive = ((inf == stdin) && (outf == stdout));
+ char *wkline = NULL;
+ char buf[MAXLINE+1];
+ int rc;
+
+ for (;;)
+ {
+ if (wkline)
+ free(wkline);
+ if (istty)
+ {
+ wkline = readline(sval(VAR_xc_prompt));
+ if (wkline == NULL)
+ {
+ wkline = Strdup("q\n");
+ putchar('\n');
+ }
+ else
+ add_history(wkline);
+ strncpy(buf, wkline, MAXLINE);
+ }
+ else
+ {
+ if (interactive)
+ fputs(sval(VAR_xc_prompt), stdout);
+ if (fgets(buf, MAXLINE+1, inf) == NULL)
+ break;
+ }
+ trimNl(buf);
+ writeLogOnly("PGXC %s\n", buf);
+ rc = do_singleLine(buf, wkline);
+ freeAndReset(wkline);
+ if (rc) /* "q" command was found */
+ return;
+ }
+}
+
+
+
+/*
+ * ---------------------------------------------------------------------------
+ *
+ * Single line command processor
+ *
+ * -----------------------------------------------------------------------------
+ */
/*
 * Parse and execute a single pgxc_ctl command line.
 *
 * buf is the trimmed command line; wkline is the raw line as typed (it is
 * passed verbatim to the local shell for unrecognized commands and "ssh").
 * Returns 1 when a quit command was given, 0 otherwise.
 *
 * GetToken()/TestToken() are macros operating on the locals "line" and
 * "token"; testToken() is the case-sensitive variant.
 */
int do_singleLine(char *buf, char *wkline)
{
	char *token;
	char *line = buf;
	GetToken();
	/*
	 * Parse command
	 */
	if (!token) return 0;
	if (TestToken("q") || TestToken("quit") || TestToken("exit"))
		/* Exit command */
		return 1;
	else if (TestToken("echo"))
	{
		do_echo_command(line);
		return 0;
	}
	else if (TestToken("deploy"))
	{
		do_deploy(line);
		return 0;
	}
	else if (TestToken("prepare"))
	{
		/*
		 * NOTE(review): when no argument follows, do_prepareConfFile(NULL)
		 * is called but control then still reaches TestToken() with a NULL
		 * token -- verify TestToken tolerates that, or add a return.
		 */
		if (GetToken() == NULL)
			do_prepareConfFile(NULL);
		if (!TestToken("config"))
			do_prepareConfFile(token);
		else if (GetToken() == NULL)
			do_prepareConfFile(NULL);
		else
			do_prepareConfFile(token);
		return 0;
	}
	else if (TestToken("kill"))
	{
		do_kill_command(line);
		return 0;
	}
	else if (TestToken("init"))
	{
		do_init_command(line);
		return 0;
	}
	else if (TestToken("start"))
	{
		do_start_command(line);
		return 0;
	}
	else if (TestToken("stop"))
	{
		do_stop_command(line);
		return 0;
	}
	else if (TestToken("monitor"))
	{
		do_monitor_command(line);
		return 0;
	}
	else if (TestToken("failover"))
	{
		do_failover_command(line);
		return 0;
	}
	else if (TestToken("reconnect"))
	{
		do_reconnect_command(line);
		return 0;
	}
	else if (TestToken("add"))
	{
		do_add_command(line);
		return 0;
	}
	else if (TestToken("remove"))
	{
		do_remove_command(line);
		return 0;
	}
	/*
	 * Show command ... show [variable | var] varname ...
	 *                  show [variable | var] all
	 *                  show config[uration] ....
	 */
	else if (TestToken("show"))
	{
		if (GetToken() == NULL)
			elog(ERROR, "ERROR: Please specify what to show\n");
		else
		{
			if (TestToken("variable") || TestToken("var"))
			{
				/* Variable */
				if (GetToken() == NULL)
					elog(ERROR, "ERROR: Please specify variable name to print\n");
				else if (TestToken("all"))
					print_vars();
				else while (line)
				{
					/* Print each named variable until the line is exhausted. */
					print_var(token);
					GetToken();
				}
			}
			else if (TestToken("configuration") || TestToken("config") || TestToken("configure"))
				/* Configuration */
				show_configuration(line);
			else if (TestToken("resource"))
			{
				/* show resource datanode <name> [database [user]] */
				if ((GetToken() == NULL) || !TestToken("datanode"))
					elog(ERROR, "ERROR: please specify datanode for show resource command.\n");
				else
				{
					char *datanodeName = NULL;
					char *dbname = NULL;
					char *username = NULL;
					if (GetToken() == NULL)
						elog(ERROR, "ERROR: please specify datanode name\n");
					else
					{
						datanodeName = Strdup(token);
						if (GetToken())
						{
							dbname = Strdup(token);
							if (GetToken())
								username = Strdup(token);
						}
						show_Resource(datanodeName, dbname, username);
						Free(datanodeName);
						Free(dbname);
						Free(username);
					}
				}
			}
			else
				elog(ERROR, "ERROR: Cannot show %s now, sorry.\n", token);
		}
		return 0;
	}
	/*
	 * Log command ... log variable varname ...
	 *                 log variable all
	 *                 log msg arbitrary_message_to_the_end_of_the_line
	 */
	else if (TestToken("log"))
	{
		if (GetToken() == NULL)
			elog(ERROR, "ERROR: Please specify what to log\n");
		else
		{
			if (TestToken("variable") || TestToken("var"))
			{
				if (GetToken() == NULL)
					elog(ERROR, "ERROR: Please specify variable name to log\n");
				else if (TestToken("all"))
					print_vars();
				else while (line)
				{
					print_var(token);
					GetToken();
				}
				fflush(logFile);
			}
			else if (TestToken("msg") || TestToken("message"))
				writeLogOnly("USERLOG: \"%s\"\n", line);
			else
				elog(ERROR, "ERROR: Cannot log %s in this version.\n", token);
		}
		return 0;
	}
	/* NOTE(review): unreachable -- "deploy" is already handled above. */
	else if (TestToken("deploy"))
	{
		do_deploy(line);
		return 0;
	}
	else if (TestToken("configure"))
	{
		do_configure_command(line);
		return 0;
	}
	/* Psql [- coordname] args...: run psql against a coordinator. */
	else if (testToken("Psql"))
	{
		int idx;
		char *cmdLine;

		cmdLine = Strdup(line);
		if (GetToken() && TestToken("-"))
		{
			if (!GetToken())
				elog(ERROR, "ERROR: Please specify coordinator name after '-'.\n");
			else if ((idx = coordIdx(token)) < 0)
				elog(ERROR, "ERROR: Specified node %s is not a coordinator.\n", token);
			else
				doImmediateRaw("psql -p %d -h %s %s",
							   atoi(aval(VAR_coordPorts)[idx]),
							   aval(VAR_coordMasterServers)[idx],
							   line);
		}
		else
		{
			/* No explicit coordinator: pick one at random. */
			idx = selectCoordinator();
			elog(INFO, "Selected %s.\n", aval(VAR_coordNames)[idx]);
			doImmediateRaw("psql -p %d -h %s %s",
						   atoi(aval(VAR_coordPorts)[idx]),
						   aval(VAR_coordMasterServers)[idx],
						   cmdLine);
		}
		Free(cmdLine);
		return 0;
	}
	/* Createdb [- coordname] args...: run createdb via a coordinator. */
	else if (testToken("Createdb"))
	{
		int idx;
		char *cmdLine;

		cmdLine = Strdup(line);
		if (GetToken() && TestToken("-"))
		{
			if (!GetToken())
				elog(ERROR, "ERROR: Please specify coordinator name after '-'.\n");
			else if ((idx = coordIdx(token)) < 0)
				elog(ERROR, "ERROR: Specified node %s is not a coordinator.\n", token);
			else
				doImmediateRaw("createdb -p %d -h %s %s",
							   atoi(aval(VAR_coordPorts)[idx]),
							   aval(VAR_coordMasterServers)[idx],
							   line);
		}
		else
		{
			idx = selectCoordinator();
			elog(INFO, "Selected %s.\n", aval(VAR_coordNames)[idx]);
			doImmediateRaw("createdb -p %d -h %s %s",
						   atoi(aval(VAR_coordPorts)[idx]),
						   aval(VAR_coordMasterServers)[idx],
						   cmdLine);
		}
		Free(cmdLine);
		return 0;
	}
	/* Createuser [- coordname] args...: run createuser via a coordinator. */
	else if (testToken("Createuser"))
	{
		int idx;
		char *cmdLine;

		cmdLine = Strdup(line);
		if (GetToken() && TestToken("-"))
		{
			if (!GetToken())
				elog(ERROR, "ERROR: Please specify coordinator name after '-'.\n");
			else if ((idx = coordIdx(token)) < 0)
				elog(ERROR, "ERROR: Specified node %s is not a coordinator.\n", token);
			else
				doImmediateRaw("createuser -p %d -h %s %s",
							   atoi(aval(VAR_coordPorts)[idx]),
							   aval(VAR_coordMasterServers)[idx],
							   line);
		}
		else
		{
			idx = selectCoordinator();
			elog(INFO, "Selected %s.\n", aval(VAR_coordNames)[idx]);
			doImmediateRaw("createuser -p %d -h %s %s",
						   atoi(aval(VAR_coordPorts)[idx]),
						   aval(VAR_coordMasterServers)[idx],
						   cmdLine);
		}
		Free(cmdLine);
		return 0;
	}
	else if (TestToken("unregister"))
	{
		/*
		 * unregister [-n myname] -Z nodetype nodename
		 */
		unregisterFromGtm(line);
		return 0;
	}
	else if (TestToken("test"))
	{
		do_test(line);
		return 0;
	}
	else if (TestToken("set"))
	{
		do_set(line);
		return 0;
	}
	/*
	 * Clean command
	 *
	 * clean [all |
	 *        gtm [ all | master | slave ] |
	 *        gtm_proxy [ all | nodename ... ] |
	 *        coordinator [[all | master | slave ] [nodename ... ]] |
	 *        datanode [[all | master | slave ] [nodename ... ]]]
	 */
	else if (TestToken("clean"))
	{
		do_clean_command(line);
	}
	else if (TestToken("cd"))
	{
		/*
		 * CD command
		 */
		if (GetToken() == NULL)
			Chdir(pgxc_ctl_home, FALSE);
		else
			Chdir(token, FALSE);
		return 0;
	}
	else if (TestToken("ssh"))
	{
		/* Hand the raw line (including "ssh") to the local shell. */
		doImmediateRaw("%s", wkline);
	}
	else
	{
		/* Anything unrecognized is executed by the local shell as-is. */
		doImmediateRaw("%s", wkline);
		return 0;
	}
	return 0;
}
+
+
+
+
+
diff --git a/contrib/pgxc_ctl/do_command.h b/contrib/pgxc_ctl/do_command.h
new file mode 100644
index 0000000000..57c5bb3513
--- /dev/null
+++ b/contrib/pgxc_ctl/do_command.h
@@ -0,0 +1,16 @@
+/*-------------------------------------------------------------------------
+ *
+ * do_command.h
+ *
+ * Main command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef DO_COMMAND_H
+#define DO_COMMAND_H
+
+extern void do_command(FILE *inf, FILE *outf);
+extern int do_singleLine(char *buf, char *wkline);
+#endif /* DO_COMMAND_H */
diff --git a/contrib/pgxc_ctl/do_shell.c b/contrib/pgxc_ctl/do_shell.c
new file mode 100644
index 0000000000..65c6fa68f8
--- /dev/null
+++ b/contrib/pgxc_ctl/do_shell.c
@@ -0,0 +1,729 @@
+/*-------------------------------------------------------------------------
+ *
+ * do_shell.c
+ *
+ * Shell control module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+
/*
 * This module provides a basic infrastructure to run various shell scripts.
 *
 * Basically, for a single operation, when more than one server is involved,
 * the commands can be run in parallel. Within each parallel execution, we can
 * have more than one command to be run in series.
 *
 * cmdList_t contains one or more command trains that can be run in parallel.
 * cmd_t elements are contained in the cmdList_t structure; each chain of
 * cmd_t represents a train of shell commands run in series.
 *
 * For each command, stdout will be handled automatically in this module.
 * Stdin can be provided by callers.
 */
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+
+typedef unsigned int xc_status;
+static int file_sn = 0;
+static int nextSize(int size);
+static char *getCleanHostname(char *buf, int len);
+#if 0
+static void waitTypeReturn(void);
+static void echoPid(pid_t pid);
+#endif
+static char *allocActualCmd(cmd_t *cmd);
+static void prepareStdout(cmdList_t *cmdList);
+
+/*
+ * SIGINT handler
+ */
+jmp_buf *whereToJumpDoShell = NULL;
+jmp_buf dcJmpBufDoShell;
+pqsigfunc old_HandlerDoShell = NULL;
+void do_shell_SigHandler(int signum);
+
+/*
+ * Signal handler (SIGINT only)
+ */
+void do_shell_SigHandler(int signum)
+{
+ if (whereToJumpDoShell)
+ longjmp(*whereToJumpDoShell, 1);
+ else
+ signal(SIGINT,do_shell_SigHandler);
+}
+
+/*
+ * Stdout/stderr/stdin will be created at $LocalTmpDir.
+ *
+ */
+char *createLocalFileName(FileType type, char *buf, int len)
+{
+ /*
+ * Filename is $LocalTmpDir/type_pid_serno.
+ */
+ switch (type)
+ {
+ case STDIN:
+ snprintf(buf, len-1, "%s/STDIN_%d_%d", sval(VAR_localTmpDir), getpid(), file_sn++);
+ break;
+ case STDOUT:
+ snprintf(buf, len-1, "%s/STDOUT_%d_%d", sval(VAR_localTmpDir), getpid(), file_sn++);
+ break;
+ case STDERR:
+ snprintf(buf, len-1, "%s/STDERR_%d_%d", sval(VAR_localTmpDir), getpid(), file_sn++);
+ break;
+ case GENERAL:
+ snprintf(buf, len-1, "%s/GENERAL_%d_%d", sval(VAR_localTmpDir), getpid(), file_sn++);
+ default:
+ return NULL;
+ }
+ return buf;
+}
+
+/*
+ * Please note that remote stdout is not in pgxc_ctl so far. It will directly be written
+ * to local stdout.
+ */
+char *createRemoteFileName(FileType type, char *buf, int len)
+{
+ char hostname[MAXPATH+1];
+ /*
+ * Filename is $TmpDir/hostname_type_serno.
+ */
+ getCleanHostname(hostname, MAXPATH);
+ switch (type)
+ {
+ case STDIN:
+ snprintf(buf, len-1, "%s/%s_STDIN_%d_%d", sval(VAR_tmpDir), hostname, getpid(), file_sn++);
+ break;
+ case STDOUT:
+ snprintf(buf, len-1, "%s/%s_STDOUT_%d_%d", sval(VAR_tmpDir), hostname, getpid(), file_sn++);
+ break;
+ case STDERR:
+ snprintf(buf, len-1, "%s/%s_STDERR_%d_%d", sval(VAR_tmpDir), hostname, getpid(), file_sn++);
+ break;
+ case GENERAL:
+ snprintf(buf, len-1, "%s/%s_GENERAL_%d_%d", sval(VAR_tmpDir), hostname, getpid(), file_sn++);
+ break;
+ default:
+ return NULL;
+ }
+ return buf;
+}
+
+/*
+ * ==============================================================================================
+ *
+ * Tools to run a command foreground.
+ *
+ * ==============================================================================================
+ */
/*
 * Run any command in the foreground locally. No more redirection.
 * Return value same as system();
 * Stdout will be set to outF. The content will also be written to log if specified.
 * If stdIn is NULL or stdIn[0] == 0, then stdin will not be used.
 * If host == NULL or host[0] == 0, then the command will be run locally.
 */
+
+/* Does not handle stdin/stdout. If needed, they should be included in the cmd. */
+int doImmediateRaw(const char *cmd_fmt, ...)
+{
+ char actualCmd[MAXLINE+1];
+ va_list arg;
+ va_start(arg, cmd_fmt);
+ vsnprintf(actualCmd, MAXLINE, cmd_fmt, arg);
+ va_end(arg);
+ return(system(actualCmd));
+}
+
+FILE *pgxc_popen_wRaw(const char *cmd_fmt, ...)
+{
+ va_list arg;
+ char actualCmd[MAXLINE+1];
+
+ va_start(arg, cmd_fmt);
+ vsnprintf(actualCmd, MAXLINE, cmd_fmt, arg);
+ va_end(arg);
+ return(popen(actualCmd, "w"));
+}
+
+FILE *pgxc_popen_w(char *host, const char *cmd_fmt, ...)
+{
+ FILE *f;
+ va_list arg;
+ char actualCmd[MAXLINE+1];
+ char sshCmd[MAXLINE+1];
+
+ va_start(arg, cmd_fmt);
+ vsnprintf(actualCmd, MAXLINE, cmd_fmt, arg);
+ va_end(arg);
+ snprintf(sshCmd, MAXLINE, "ssh %s@%s \" %s \"", sval(VAR_pgxcUser), host, actualCmd);
+ if ((f = popen(sshCmd, "w")) == NULL)
+ elog(ERROR, "ERROR: could not open the command \"%s\" to write, %s\n", sshCmd, strerror(errno));
+ return f;
+}
+
/*
 * Run a command in the foreground, locally or — when host is non-empty —
 * on a remote server via ssh as $pgxcUser, feeding it stdIn as standard
 * input ("/dev/null" when stdIn is NULL or empty).
 *
 * stdout+stderr are captured (remotely first, then copied back with scp),
 * echoed into the log via elogFile(), and the capture files are removed.
 * The stdIn file, if given, is consumed (unlinked) as well.
 * Returns the raw system() status of the main command.
 */
int doImmediate(char *host, char *stdIn, const char *cmd_fmt, ...)
{
	char cmd_wk[MAXLINE+1];
	char actualCmd[MAXLINE+1];
	char remoteStdout[MAXPATH+1];
	char localStdout[MAXPATH+1];
	va_list arg;
	int rc;

	va_start(arg, cmd_fmt);
	vsnprintf(cmd_wk, MAXLINE, cmd_fmt, arg);
	va_end(arg);
	if (host == NULL || host[0] == '\0')
	{
		/* Local case */
		snprintf(actualCmd, MAXLINE, "( %s ) < %s > %s 2>&1",
				 cmd_wk,
				 ((stdIn == NULL) || (stdIn[0] == 0)) ? "/dev/null" : stdIn,
				 createLocalFileName(STDOUT, localStdout, MAXPATH));
		elog(DEBUG1, "Actual command: %s\n", actualCmd);
		rc = system(actualCmd);
	}
	else
	{
		int rc1;
		/* Remote case: run under ssh, then scp the captured stdout back. */
		snprintf(actualCmd, MAXLINE, "ssh %s@%s \"( %s ) > %s 2>&1\" < %s > /dev/null 2>&1",
				 sval(VAR_pgxcUser), host, cmd_wk,
				 createRemoteFileName(STDOUT, remoteStdout, MAXPATH),
				 ((stdIn == NULL) || (stdIn[0] == 0)) ? "/dev/null" : stdIn);
		/* NOTE(review): logged at INFO while the local case uses DEBUG1 -- confirm intended */
		elog(INFO, "Actual Command: %s\n", actualCmd);
		rc = system(actualCmd);
		snprintf(actualCmd, MAXLINE, "scp %s@%s:%s %s > /dev/null 2>&1",
				 sval(VAR_pgxcUser), host, remoteStdout,
				 createLocalFileName(STDOUT, localStdout, MAXPATH));
		elog(INFO, "Bring remote stdout: %s\n", actualCmd);
		rc1 = system(actualCmd);
		if (WEXITSTATUS(rc1) != 0)
			elog(WARNING, "WARNING: Stdout transfer not successful, file: %s:%s->%s\n",
				 host, remoteStdout, localStdout);
		/* Best-effort removal of the remote capture file. */
		doImmediateRaw("ssh %s@%s \"rm -f %s < /dev/null > /dev/null\" < /dev/null > /dev/null",
					   sval(VAR_pgxcUser), host, remoteStdout);
	}
	/* Echo the captured output into the log, then clean up. */
	elogFile(INFO, localStdout);
	unlink(localStdout);
	if (stdIn && stdIn[0])
		unlink(stdIn);
	return((rc));
}
+
+/*
+ * =======================================================================================
+ *
+ * Command list handlers
+ *
+ * =======================================================================================
+ */
+cmdList_t *initCmdList(void)
+{
+ cmdList_t *rv = (cmdList_t *)Malloc0(sizeof(cmdList_t));
+
+ rv->allocated = 1;
+ return(rv);
+}
+
+cmd_t *initCmd(char *host)
+{
+ cmd_t *rv = (cmd_t *)Malloc0(sizeof(cmd_t));
+ if (host)
+ rv->host = Strdup(host);
+ return rv;
+}
+
+static void clearStdin(cmd_t *cmd)
+{
+ unlink(cmd->localStdin);
+ freeAndReset(cmd->localStdin);
+}
+
+static void touchStdout(cmd_t *cmd)
+{
+ if (cmd->remoteStdout)
+ if (cmd->remoteStdout)
+ doImmediateRaw("(ssh %s@%s touch %s) < /dev/null > /dev/null 2>&1",
+ sval(VAR_pgxcUser), cmd->host,
+ cmd->remoteStdout);
+ if (cmd->localStdout)
+ doImmediateRaw("(touch %s) < /dev/null > /dev/null", cmd->localStdout);
+}
+
#if 0
/*
 * Currently disabled (unused): pre-allocate stdout capture file names for
 * a command before execution.
 * NOTE(review): the else branch frees an already-set remoteStdout instead
 * of keeping or replacing it, which looks inverted -- verify before
 * re-enabling this code.
 */
static void setStdout(cmd_t *cmd)
{
	if (cmd->host != NULL)
	{
		if (cmd->remoteStdout == NULL)
			/* Remote cmd */
			cmd->remoteStdout = createRemoteFileName(STDOUT, Malloc(MAXPATH+1), MAXPATH);
		else
			freeAndReset(cmd->remoteStdout);
	}
	if (cmd->localStdout == NULL)
		cmd->localStdout = createLocalFileName(STDOUT, Malloc(MAXPATH+1), MAXPATH);
}
#endif
+
+int doCmd(cmd_t *cmd)
+{
+ int rc = 0;
+
+ cmd_t *curr;
+
+ for(curr = cmd; curr; curr = curr->next)
+ {
+ rc = doCmdEl(curr);
+ }
+ return rc;
+}
+
+static char *allocActualCmd(cmd_t *cmd)
+{
+ return (cmd->actualCmd) ? cmd->actualCmd : (cmd->actualCmd = Malloc(MAXLINE+1));
+}
+
/*
 * Execute one command element and return its exit status.
 * localStdout has to be set by the caller (see prepareStdout()).
 *
 * Three cases: internal (in-process callback), remote (wrapped in ssh
 * with stdout captured remotely then copied back), and local (stdout
 * redirected to localStdout directly).
 */
int doCmdEl(cmd_t *cmd)
{
	/* Internal command: run the registered callback in this process. */
	if (cmd->isInternal)
	{
		if (*cmd->callback)
			(*cmd->callback)(cmd->callback_parm);
		else
			elog(ERROR, "ERROR: no function entry was found in cmd_t.\n");
		freeAndReset(cmd->callback_parm);
		return 0;
	}
	if (cmd->host)
	{
		/* Build actual command */
		snprintf(allocActualCmd(cmd), MAXLINE,
				 "ssh %s@%s \"( %s ) > %s 2>&1\" < %s > /dev/null 2>&1",
				 sval(VAR_pgxcUser),
				 cmd->host,
				 cmd->command,
				 cmd->remoteStdout ? cmd->remoteStdout : "/dev/null",
				 cmd->localStdin ? cmd->localStdin : "/dev/null");
		/* Do it */
		elog(DEBUG1, "Remote command: \"%s\", actual: \"%s\"\n", cmd->command, cmd->actualCmd);
		cmd->excode = system(cmd->actualCmd);
		/*
		 * Handle stdout: bring the remote capture file back with scp,
		 * then remove it on the remote side.
		 * NOTE(review): clearStdin() passes cmd->localStdin to unlink()
		 * without a NULL check -- confirm it is never NULL here.
		 */
		clearStdin(cmd);
		touchStdout(cmd);
		doImmediateRaw("(scp %s@%s:%s %s; ssh %s@%s rm -rf %s) < /dev/null > /dev/null",
					   sval(VAR_pgxcUser), cmd->host, cmd->remoteStdout, cmd->localStdout,
					   sval(VAR_pgxcUser), cmd->host, cmd->remoteStdout);
		freeAndReset(cmd->remoteStdout);
		/* Handle stdin */
		return (cmd->excode);
	}
	else
	{
		/* Local command: no remote stdout is needed. */
		freeAndReset(cmd->remoteStdout);
		/* Build actual command */
		snprintf(allocActualCmd(cmd), MAXLINE,
				 "( %s ) > %s 2>&1 < %s",
				 cmd->command,
				 cmd->localStdout ? cmd->localStdout : "/dev/null",
				 cmd->localStdin ? cmd->localStdin : "/dev/null");
		/* Do it */
		elog(DEBUG1, "Local command: \"%s\", actual: \"%s\"\n", cmd->command, cmd->actualCmd);
		cmd->excode = system(cmd->actualCmd);
		/* Handle stdout */
		clearStdin(cmd);
		touchStdout(cmd);
		/* Handle stdin */
		return (cmd->excode);
	}
}
+
+/*
+ * Here, we should handle exit code.
+ *
+ * If each command ran and exit normally, maximum (worst) value of the status code
+ * will be returned.
+ *
+ * If SIGINT is detected, then the status will be set with EC_IFSTOPPED flag, as well as
+ * EC_STOPSIG to SIGINT. In this case, EC_IFSTOPPED will be set and EC_SIGNAL will be
+ * set to SIGKILL as well. Exit status will be set to 2.
+ */
/*
 * Execute every command train in the list, in parallel (one fork per
 * train) unless VAR_debug is set, in which case trains run serially.
 *
 * SIGINT during either the launch phase or the collection phase kills
 * the outstanding children, cleans up capture files and returns 2.
 * Otherwise returns the exit status of the last command reaped.
 */
int doCmdList(cmdList_t *cmds)
{
	int ii, jj;
	xc_status rc = 0;

	dump_cmdList(cmds);
	if (cmds->cmds == NULL)
		return(0);
	/* Arm the SIGINT -> longjmp recovery point for the launch phase. */
	old_HandlerDoShell = signal(SIGINT, do_shell_SigHandler);
	whereToJumpDoShell = &dcJmpBufDoShell;
	/*
	 * Invoke remote command with SSH
	 */
	prepareStdout(cmds);
	if (setjmp(dcJmpBufDoShell) == 0)
	{
		for (ii = 0; cmds->cmds[ii]; ii++)
		{
			if (!isVarYes(VAR_debug))
			{
				/* Parallel mode: each command train runs in a child. */
				if ((cmds->cmds[ii]->pid = fork()) != 0)
				{
					if (cmds->cmds[ii]->pid == -1)
					{
						/* fork failed: record and keep launching the rest. */
						elog(ERROR, "Process for \"%s\" failed to start. %s\n",
							 cmds->cmds[ii]->actualCmd,
							 strerror(errno));
						cmds->cmds[ii]->pid = 0;
					}
					continue;
				}
				else
					exit(doCmd(cmds->cmds[ii]));	/* child */
			}
			else
			{
				/* Debug mode: run serially in this process. */
				cmds->cmds[ii]->excode = doCmd(cmds->cmds[ii]);
				rc = WEXITSTATUS(cmds->cmds[ii]->excode);
			}
		}
	}
	else
	{
		/* Signal exit here */
		for (ii = 0; cmds->cmds[ii]; ii++)
		{
			if (!isVarYes(VAR_debug))
			{
				if (cmds->cmds[ii]->pid)
				{
					/*
					 * We don't care if the process is alive or not.
					 * Try to kill anyway. Then handle remote/local
					 * stdin/stdout in the next step.
					 *
					 * If it's bothering to wait for printing, the user can
					 * issue a SIGINT again.
					 */
					kill(cmds->cmds[ii]->pid, SIGKILL);
					cmds->cmds[ii]->pid = 0;
				}
			}
			else
			{
				/* Something to do at non-parallel execution */
			}
		}
		elog(NOTICE, "%s:%d Finish by interrupt\n", __FUNCTION__, __LINE__);
		return 2;
	}
	/*
	 * Handle remote/local stdin/stdout
	 */
	/* Re-arm SIGINT for the collection phase. */
	signal(SIGINT, do_shell_SigHandler);
	if (setjmp(dcJmpBufDoShell) == 0)
	{
		for (ii = 0; cmds->cmds[ii]; ii++)
		{
			int status;
			cmd_t *cur;

			if (!isVarYes(VAR_debug))
			{
				if (cmds->cmds[ii]->pid)
				{
					int rc_new;

					rc_new = waitpid(cmds->cmds[ii]->pid, &status, 0);
					rc = WEXITSTATUS(rc_new);
				}
			}
			cmds->cmds[ii]->pid = 0;
			/* Echo each element's captured stdout into the log, then drop it. */
			for (cur = cmds->cmds[ii]; cur; cur = cur->next)
			{
				elogFile(MANDATORY, cur->localStdout);
				doImmediateRaw("(rm -f %s) < /dev/null > /dev/null", cur->localStdout);
				freeAndReset(cur->actualCmd);
				freeAndReset(cur->localStdout);
				freeAndReset(cur->msg);
			}
		}
	}
	else
	{
		/* Captured SIGINT */
		signal(SIGINT, old_HandlerDoShell);

		for (jj = 0; cmds->cmds[jj]; jj++)
		{
			/* Need to handle the case with non-parallel execution */
			if (cmds->cmds[jj]->pid)
			{
				kill(cmds->cmds[jj]->pid, SIGKILL);
				cmds->cmds[jj]->pid = 0;
			}
			if (cmds->cmds[jj]->localStdout)
				doImmediate(NULL, NULL, "rm -f %s", cmds->cmds[jj]->localStdout);
			if (cmds->cmds[jj]->remoteStdout)	/* Note that remote stdout will be removed anyway */
				doImmediate(cmds->cmds[jj]->host, NULL, "rm -f %s",
							cmds->cmds[jj]->remoteStdout);
			freeAndReset(cmds->cmds[jj]->actualCmd);
			freeAndReset(cmds->cmds[jj]->localStdout);
			freeAndReset(cmds->cmds[jj]->msg);
			freeAndReset(cmds->cmds[jj]->remoteStdout);
		}
		elog(NOTICE, "%s:%d Finish by interrupt\n", __FUNCTION__, __LINE__);
		return(2);
	}
	/* Restore the previous SIGINT handler and disarm the jump point. */
	signal(SIGINT, old_HandlerDoShell);
	whereToJumpDoShell = NULL;
	return(rc);
}
+
+void appendCmdEl(cmd_t *src, cmd_t *new)
+{
+ cmd_t *curr;
+
+ for(curr = src; src->next; src = src->next);
+ src->next = new;
+}
+
+/*
+ * Release the resources held by a single command element: remove its
+ * local stdin/stdout temporary files, remove the remote stdout file
+ * (if any) over ssh, and free all owned strings.  The cmd_t structure
+ * itself is NOT freed here.
+ */
+void do_cleanCmdEl(cmd_t *cmd)
+{
+    if (cmd)
+    {
+        if (cmd->localStdout)
+            unlink(cmd->localStdout);
+        Free(cmd->localStdout);
+        Free(cmd->msg);
+        if (cmd->localStdin)
+            unlink(cmd->localStdin);
+        Free(cmd->localStdin);
+        if (cmd->remoteStdout)
+            /* Remote temporary file can only be removed via ssh */
+            doImmediateRaw("ssh %s@%s \"rm -f %s > /dev/null 2>&1\"", sval(VAR_pgxcUser), cmd->host, cmd->remoteStdout);
+        Free(cmd->remoteStdout);
+        Free(cmd->actualCmd);
+        Free(cmd->command);
+        Free(cmd->host);
+    }
+}
+
+/*
+ * Clean a whole command chain: release each element's resources and
+ * free the chained (next) elements.  The head element's structure is
+ * left to the caller; only its contents are cleaned.
+ */
+void do_cleanCmd(cmd_t *cmd)
+{
+    if (cmd == NULL)
+        return;
+    if (cmd->next)
+    {
+        do_cleanCmd(cmd->next);
+        freeAndReset(cmd->next);
+    }
+    /*
+     * Clean this element too.  The original cleaned only the tail
+     * element, leaking the resources of every non-tail element.
+     */
+    do_cleanCmdEl(cmd);
+}
+
+/*
+ * Clean every command in the list, then release the list itself.
+ * Safe to call with NULL.
+ */
+void do_cleanCmdList(cmdList_t *cmdList)
+{
+    int ii;
+
+    if (cmdList == NULL)
+        return;
+    if (cmdList->cmds)
+    {
+        for (ii = 0; cmdList->cmds[ii]; ii++)
+        {
+            cleanCmd(cmdList->cmds[ii]);
+            Free(cmdList->cmds[ii]);
+        }
+        /* The original leaked the pointer array itself */
+        Free(cmdList->cmds);
+    }
+    Free(cmdList);
+}
+
+/*
+ * Append cmd to the list, keeping the cmds array NULL-terminated.
+ * The array must hold the new element plus the terminator, i.e.
+ * used + 2 slots after insertion.
+ */
+void addCmd(cmdList_t *cmds, cmd_t *cmd)
+{
+    cmd->pid = 0;
+    cmd->actualCmd = cmd->remoteStdout = cmd->msg = cmd->localStdout = NULL;
+    if (cmds->used + 2 > cmds->allocated)
+    {
+        int newsize = cmds->allocated;
+
+        /*
+         * Grow until element + NULL terminator fit.  A single nextSize()
+         * call was insufficient when allocated == 0 (nextSize(0) == 1),
+         * which overflowed the array by one slot.
+         */
+        while (cmds->used + 2 > newsize)
+            newsize = nextSize(newsize);
+        cmds->cmds = (cmd_t **)Realloc(cmds->cmds, sizeof(cmd_t *) * newsize);
+        cmds->allocated = newsize;
+    }
+    cmds->cmds[cmds->used++] = cmd;
+    cmds->cmds[cmds->used] = NULL;
+}
+
+/*
+ * Clean only the most recently added command of the list.
+ */
+void cleanLastCmd(cmdList_t *cmdList)
+{
+    int ii;
+
+    /* Guard against a NULL list, a not-yet-allocated array, or an empty list */
+    if ((cmdList == NULL) || (cmdList->cmds == NULL) || (cmdList->cmds[0] == NULL))
+        return;
+    for (ii = 0; cmdList->cmds[ii+1]; ii++);
+    cleanCmd(cmdList->cmds[ii]);
+}
+
+/*
+ * ====================================================================================
+ *
+ * Miscellaneous
+ *
+ * ====================================================================================
+ */
+/* Growth policy for dynamic arrays: 0 -> 1, then doubling up to 128, then +32 steps. */
+static int nextSize(int size)
+{
+    if (size == 0)
+        return 1;
+    return (size < 128) ? (size * 2) : (size + 32);
+}
+
+/*
+ * Get my hostname to prevent remote file name conflist
+ * Take only the first part of the hostname and ignore
+ * domain part
+ */
+/*
+ * Get my hostname to prevent remote file name conflict.
+ * Take only the first part of the hostname and ignore the domain part.
+ */
+static char *getCleanHostname(char *buf, int len)
+{
+    char hostname[MAXPATH+1];
+    int ii;
+
+    /* gethostname() may not NUL-terminate on truncation; force it */
+    gethostname(hostname, MAXPATH);
+    hostname[MAXPATH] = 0;
+    for (ii = 0; hostname[ii] && hostname[ii] != '.'; ii++);
+    if (hostname[ii])
+        hostname[ii] = 0;
+    /* snprintf guarantees NUL-termination, unlike the original strncpy */
+    snprintf(buf, len, "%s", hostname);
+    return buf;
+}
+
+/*
+ * Wait for typing something only when debug option is specified.
+ * Used to synchronize child processes to start to help gdb.
+ *
+ * May be not useful if input file is not stdin.
+ */
+#if 0
+static void waitTypeReturn(void)
+{
+ char buf[MAXLINE+1];
+
+ fputs("Type Return: ", outF);
+ fgets(buf, MAXLINE, inF);
+}
+
+static void echoPid(pid_t pid)
+{
+ fprintf(outF, "INFO: pid = %d\n", pid);
+}
+#endif
+
+/*
+ * Assign stdout capture files to every command element in the list:
+ * a local stdout file is always created; a remote stdout file is
+ * created only for commands that run on a remote host.
+ */
+static void prepareStdout(cmdList_t *cmdList)
+{
+    int ii;
+
+    if (cmdList == NULL)
+        return;
+    if (cmdList->cmds == NULL)
+        return;
+    for (ii = 0; cmdList->cmds[ii]; ii++)
+    {
+        cmd_t *curr;
+        for (curr = cmdList->cmds[ii]; curr; curr = curr->next)
+        {
+            if (curr->localStdout == NULL)
+                createLocalFileName(STDOUT, (curr->localStdout = Malloc(sizeof(char) * (MAXPATH+1))), MAXPATH);
+            if (curr->host)
+            {
+                if (curr->remoteStdout == NULL)
+                    createRemoteFileName(STDOUT, (curr->remoteStdout = Malloc(sizeof(char) * (MAXPATH+1))), MAXPATH);
+            }
+            else
+                /* Local command: make sure no stale remote stdout name remains */
+                freeAndReset(curr->remoteStdout);
+        }
+    }
+}
+
+/*
+ * Build the command that backs up the pgxc_ctl configuration file to
+ * the configured backup host/directory.
+ */
+cmd_t *makeConfigBackupCmd(void)
+{
+    cmd_t *rv = Malloc0(sizeof(cmd_t));
+
+    /* scp target was mistyped "%sp:%s", producing host "<host>p" */
+    snprintf((rv->command = Malloc(MAXLINE+1)), MAXLINE,
+             "ssh %s@%s mkdir -p %s;scp %s %s@%s:%s",
+             sval(VAR_pgxcUser), sval(VAR_configBackupHost), sval(VAR_configBackupDir),
+             pgxc_ctl_config_path, sval(VAR_pgxcUser), sval(VAR_configBackupHost),
+             sval(VAR_configBackupFile));
+    return(rv);
+}
+
+/*
+ * Immediately back up the configuration file to the backup host.
+ * Returns the command's exit status.
+ */
+int doConfigBackup(void)
+{
+    int rc;
+
+    /* scp target was mistyped "%sp:%s", producing host "<host>p" */
+    rc = doImmediateRaw("ssh %s@%s mkdir -p %s;scp %s %s@%s:%s",
+                        sval(VAR_pgxcUser), sval(VAR_configBackupHost), sval(VAR_configBackupDir),
+                        pgxc_ctl_config_path, sval(VAR_pgxcUser), sval(VAR_configBackupHost),
+                        sval(VAR_configBackupFile));
+    return(rc);
+}
+
+/*
+ * Dump the whole command list to the log (DEBUG1) for diagnosis.
+ * The log file is locked so the output is not interleaved with other
+ * processes' log lines.
+ */
+void dump_cmdList(cmdList_t *cmdList)
+{
+    int ii, jj;
+    cmd_t *cur;
+
+    lockLogFile();    /* We don't like this output interrupted by other process log */
+    elog(DEBUG1,
+         "*** cmdList Dump *******************************\n"
+         "allocated = %d, used = %d\n", cmdList->allocated, cmdList->used);
+    if (cmdList->cmds == NULL)
+    {
+        elog(DEBUG1, "=== No command defined. ===\n");
+        /* Was missing: the early return must release the log lock too */
+        unlockLogFile();
+        return;
+    }
+    for (ii = 0; cmdList->cmds[ii]; ii++)
+    {
+        elog(DEBUG1,
+             "=== CMD: %d ===\n", ii);
+        for (cur = cmdList->cmds[ii], jj=0; cur; cur = cur->next, jj++)
+        {
+            elog(DEBUG1,
+                 " --- CMD-EL: %d:"
+                 "host=\"%s\", command=\"%s\", localStdin=\"%s\", localStdout=\"%s\"\n",
+                 jj, cur->host ? cur->host : "NULL",
+                 cur->command ? cur->command : "NULL",
+                 cur->localStdin ? cur->localStdin : "NULL",
+                 cur->localStdout ? cur->localStdout : "NULL");
+            if (cur->localStdin)
+            {
+                elogFile(DEBUG1, cur->localStdin);
+                elog(DEBUG1, " ----------\n");
+            }
+        }
+    }
+    unlockLogFile();
+}
diff --git a/contrib/pgxc_ctl/do_shell.h b/contrib/pgxc_ctl/do_shell.h
new file mode 100644
index 0000000000..a84e8a74be
--- /dev/null
+++ b/contrib/pgxc_ctl/do_shell.h
@@ -0,0 +1,106 @@
+/*-------------------------------------------------------------------------
+ *
+ * do_shell.h
+ *
+ * Shell control module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef DO_SHELL_H
+#define DO_SHELL_H
+
+extern void dcSigHandler(int signum);
+typedef enum FileType { STDIN, STDOUT, STDERR, GENERAL } FileType;
+typedef void (*pqsigfunc) (int);
+extern char *createLocalFileName(FileType type, char *buf, int len);
+extern char *createRemoteFileName(FileType type, char *buf, int len);
+extern int doImmediate(char *host, char *stdIn, const char *cmd_fmt, ...) __attribute__((format(printf, 3, 4)));
+extern int doImmediateRaw(const char *cmd_fmt, ...) __attribute__((format(printf, 1,2)));
+extern FILE *pgxc_popen_wRaw(const char *cmd_fmt, ...) __attribute__((format(printf, 1,2)));
+extern FILE *pgxc_popen_w(char *host, const char *cmd_fmt, ...) __attribute__((format(printf, 2,3)));
+
+/*
+ * Flags
+ */
+#define PrintLog 0x01
+#define PrintErr 0x02
+#define LogOnly PrintLog
+#define ErrOnly PrintErr
+#define LogErr (PrintLog | PrintErr)
+#define LeaveRemoteStdin 0x04
+#define LeaveLocalStdin 0x08
+#define LeaveStdout 0x10
+#define InternalFunc(cmd, func, parm) \
+ do \
+ {(cmd)->isInternal = TRUE; (cmd)->callback = (func); (cmd)->callback_parm = (parm);} \
+ while(0)
+#define ShellCall(cmd) \
+ do \
+ {(cmd)->isInternal = FALSE; (cmd)->callback = NULL; (cmd)->callback_parm = NULL;} \
+ while(0)
+
+
+
+/*
+ * One shell command element.  Elements chained through "next" are
+ * executed in the same shell invocation; only the head of a chain
+ * carries the child process pid.
+ */
+typedef struct cmd_t
+{
+    struct cmd_t *next; /* Next to do --> done in the same shell */
+    int isInternal; /* If true, do not invoke shell. Call internal function */
+    void (*callback)(char *line); /* Callback function */
+    char *callback_parm;/* Argument to the callback function. Will be freed here. */
+    char *host; /* target host -> If null, then local command */
+    char *command; /* Will be double-quoted. Double-quote has to be escaped by the caller */
+    char *localStdin; /* Local stdin name --> Supplied by the caller. */
+    char *actualCmd; /* internal use --> local ssh full command. */
+    char *localStdout; /* internal use, local stdout name --> Generated. Stderr will be copied here too */
+    /* Messages from the child process may be printed to this file too. */
+    pid_t pid; /* internal use: valid only for cmd at the head of the list */
+    int flag; /* flags */
+    int excode; /* exit code -> not used in parallel execution. */
+    char *msg; /* internal use: messages to write. Has to be consumed only by child process. */
+    char *remoteStdout; /* internal use: remote stdout name. Generated for remote case */
+} cmd_t;
+
+/* NULL-terminated, dynamically grown array of command chains. */
+typedef struct cmdList_t
+{
+    int allocated;
+    int used;
+    cmd_t **cmds;
+} cmdList_t;
+
+extern cmdList_t *initCmdList(void);
+extern cmd_t *initCmd(char *host);
+#define newCommand(a) ((a)->command=Malloc(sizeof(char) * (MAXLINE+1)))
+/* initCmd() requires a host argument; NULL means a local command.
+ * The original expansion called initCmd() with no argument and could not compile. */
+#define newCmd(a) ((a)=initCmd(NULL))
+#define newFilename(a) ((a)=Malloc(sizeof(char) *(MAXPATH+1)))
+
+/*
+ * Return value from doCmd() and doCmdList(): This includes
+ * exit code from the shell (and their command), as well as
+ * other status of the code.
+ *
+ * Exit status should include WIFSIGNALED() and their signal information,
+ * as well as other seen in wait(2). Such information should be composed
+ * using individual command status. Because functions to compose them is
+ * not available, we provide corresponding local implementation for them.
+ */
+
+extern int doCmdEl(cmd_t *cmd);
+extern int doCmd(cmd_t *cmd);
+extern int doCmdList(cmdList_t *cmds);
+extern void do_cleanCmdList(cmdList_t *cmds);
+#define cleanCmdList(x) do{do_cleanCmdList(x); (x) = NULL;}while(0)
+extern void do_cleanCmd(cmd_t *cmd);
+#define cleanCmd(x) do{do_cleanCmd(x); (x) = NULL;}while(0)
+extern void do_cleanCmdEl(cmd_t *cmd);
+/* Fixed typo: expansion called do_cleanCmeEl(), which does not exist */
+#define cleanCmdEl(x) do{do_cleanCmdEl(x); (x) = NULL;}while(0)
+extern void addCmd(cmdList_t *cmdList, cmd_t *cmd);
+extern void appendCmdEl(cmd_t *src, cmd_t *new);
+extern void cleanLastCmd(cmdList_t *cmdList);
+extern cmd_t *makeConfigBackupCmd(void);
+extern int doConfigBackup(void);
+extern void dump_cmdList(cmdList_t *cmdList);
+
+#endif /* DO_SHELL_H */
+
diff --git a/contrib/pgxc_ctl/gtm_cmd.c b/contrib/pgxc_ctl/gtm_cmd.c
new file mode 100644
index 0000000000..b537870ad5
--- /dev/null
+++ b/contrib/pgxc_ctl/gtm_cmd.c
@@ -0,0 +1,1372 @@
+/*-------------------------------------------------------------------------
+ *
+ * gtm_cmd.c
+ *
+ * GTM command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This module provides various gtm-related pgxc_operation.
+ */
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "do_command.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+#include "gtm_cmd.h"
+#include "monitor.h"
+
+static char date[MAXTOKEN+1];
+
+
+/* ======================================================================================
+ *
+ * GTM Staff
+ *
+ * =======================================================================================
+ */
+/*
+ * Init gtm master -----------------------------------------------------------------
+ */
+/*
+ * Build the command chain that initializes the GTM master:
+ *  1. kill any running gtm, recreate the work directory, run initgtm
+ *  2. append the generated gtm.conf fragment via local stdin
+ *  3. start gtm once with the initial GXID (-x 2000) and stop it
+ * Returns NULL if the local stdin file cannot be prepared.
+ */
+cmd_t *prepare_initGtmMaster(void)
+{
+    cmd_t *cmdInitGtmMaster, *cmdGtmConf, *cmdGxid;
+    char date[MAXTOKEN+1];    /* NOTE(review): shadows the file-static "date" */
+    FILE *f;
+    char **fileList = NULL;
+
+    /* Kill current gtm, build work directory and run initgtm */
+    cmdInitGtmMaster = initCmd(sval(VAR_gtmMasterServer));
+    snprintf(newCommand(cmdInitGtmMaster), MAXLINE,
+             "killall -u %s -9 gtm; rm -rf %s; mkdir -p %s;initgtm -Z gtm -D %s",
+             sval(VAR_pgxcUser),
+             sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir));
+
+    /* Then prepare gtm.conf file */
+
+    /* Prepare local Stdin */
+    appendCmdEl(cmdInitGtmMaster, (cmdGtmConf = initCmd(sval(VAR_gtmMasterServer))));
+    if ((f = prepareLocalStdin(newFilename(cmdGtmConf->localStdin), MAXPATH, NULL)) == NULL)
+    {
+        cleanCmd(cmdInitGtmMaster);
+        return(NULL);
+    }
+    fprintf(f,
+            "#===============================================\n"
+            "# Added at initialization, %s\n"
+            "listen_addresses = '*'\n",
+            timeStampString(date, MAXTOKEN));
+    /* Optional extra configuration fragments are appended in order */
+    if (!is_none(sval(VAR_gtmExtraConfig)))
+        AddMember(fileList, sval(VAR_gtmExtraConfig));
+    if (!is_none(sval(VAR_gtmMasterSpecificExtraConfig)))
+        AddMember(fileList, sval(VAR_gtmMasterSpecificExtraConfig));
+    appendFiles(f, fileList);
+    CleanArray(fileList);
+    fprintf(f,
+            "port = %s\n"
+            "nodename = '%s'\n"
+            "startup = ACT\n"
+            "# End of addition\n",
+            sval(VAR_gtmMasterPort), sval(VAR_gtmName));
+    fclose(f);
+    /* other options */
+    snprintf(newCommand(cmdGtmConf), MAXLINE,
+             "cat >> %s/gtm.conf",
+             sval(VAR_gtmMasterDir));
+
+    /* Setup GTM with appropriate GXID value */
+
+    appendCmdEl(cmdGtmConf, (cmdGxid = initCmd(sval(VAR_gtmMasterServer))));
+    snprintf(newCommand(cmdGxid), MAXLINE,
+             "(gtm -x 2000 -D %s &); sleep 1; gtm_ctl stop -Z gtm -D %s",
+             sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir));
+
+    return cmdInitGtmMaster;
+}
+/* Initialize the GTM master: build the command chain and execute it. */
+int init_gtm_master(void)
+{
+    cmdList_t *commands;
+    int result;
+
+    elog(INFO, "Initialize GTM master\n");
+    commands = initCmdList();
+    /* Kill current gtm, build work directory and run initgtm */
+    addCmd(commands, prepare_initGtmMaster());
+    result = doCmdList(commands);
+    cleanCmdList(commands);
+    elog(INFO, "Done.\n");
+    return result;
+}
+
+/*
+ * Add gtm slave: to be used after all the configuration is done.
+ *
+ * This function only maintains internal configuration, updte configuration file,
+ * and make backup if configured. You should run init_gtm_slave and stat_gtm_slave
+ * separately.
+ */
+/*
+ * Register a new GTM slave in the running configuration, persist the
+ * change to the pgxc_ctl configuration file (and its backup), then
+ * initialize and start the slave.  Returns 0 on success, non-zero on
+ * validation, init, or startup failure.
+ */
+int add_gtmSlave(char *name, char *host, int port, char *dir)
+{
+    char port_s[MAXTOKEN+1];
+    char date[MAXTOKEN+1];
+    FILE *f;
+    int rc;
+
+    if (isVarYes(VAR_gtmSlave))
+    {
+        elog(ERROR, "ERROR: GTM slave is already configured.\n");
+        return 1;
+    }
+    if (is_none(host))
+    {
+        elog(ERROR, "ERROR: Cannot add gtm slave with the name \"none\".\n");
+        return 1;
+    }
+    if (is_none(dir))
+    {
+        elog(ERROR, "ERROR: Cannot add gtm slave with the directory \"none\".\n");
+        return 1;
+    }
+    if (checkSpecificResourceConflict(name, host, port, dir, TRUE))
+    {
+        elog(ERROR, "ERROR: New specified name:%s, host:%s, port:%d and dir:\"%s\" conflicts with existing node.\n",
+             name, host, port, dir);
+        return 1;
+    }
+    /* Update the in-memory variables first */
+    assign_sval(VAR_gtmSlave, Strdup("y"));
+    assign_sval(VAR_gtmSlaveServer, Strdup(host));
+    snprintf(port_s, MAXTOKEN, "%d", port);
+    assign_sval(VAR_gtmSlavePort, Strdup(port_s));
+    assign_sval(VAR_gtmSlaveDir, Strdup(dir));
+    makeServerList();
+    /* Then append the new values to the configuration file */
+    if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+    {
+        /* Should it be panic? */
+        elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+        return 1;
+    }
+    fprintf(f,
+            "#===================================================\n"
+            "# pgxc configuration file updated due to GTM slave addition\n"
+            "# %s\n",
+            timeStampString(date, MAXTOKEN+1));
+    fprintSval(f, VAR_gtmSlave);
+    fprintSval(f, VAR_gtmSlaveServer);
+    fprintSval(f, VAR_gtmSlavePort);
+    fprintSval(f, VAR_gtmSlaveDir);
+    fprintf(f, "%s","#----End of reconfiguration -------------------------\n");
+    fclose(f);
+    backup_configuration();
+    if ((rc = init_gtm_slave()) != 0)
+        return rc;
+    return(start_gtm_slave());
+}
+
+/*
+ * Remove the GTM slave from the configuration: optionally clean its
+ * on-disk resources, reset the gtmSlave* variables, and persist the
+ * change to the configuration file (plus backup).  Refuses to remove
+ * a slave that is still running.
+ */
+int remove_gtmSlave(bool clean_opt)
+{
+    FILE *f;
+
+    /* Check if gtm_slave is configured */
+    if (!isVarYes(VAR_gtmSlave) || !sval(VAR_gtmSlaveServer) || is_none(sval(VAR_gtmSlaveServer)))
+    {
+        elog(ERROR, "ERROR: GTM slave is not configured.\n");
+        return 1;
+    }
+    /* Check if gtm_slave is not running (do_gtm_ping() == 0 means alive) */
+    if (!do_gtm_ping(sval(VAR_gtmSlaveServer), atoi(sval(VAR_gtmSlavePort))))
+    {
+        elog(ERROR, "ERROR: GTM slave is now running. Cannot remove it.\n");
+        return 1;
+    }
+    elog(NOTICE, "Removing gtm slave.\n");
+    /* Clean */
+    if (clean_opt)
+        clean_gtm_slave();
+    /* Reconfigure */
+    reset_var(VAR_gtmSlave);
+    assign_sval(VAR_gtmSlave, Strdup("n"));
+    reset_var(VAR_gtmSlaveServer);
+    assign_sval(VAR_gtmSlaveServer, Strdup("none"));
+    reset_var(VAR_gtmSlavePort);
+    assign_sval(VAR_gtmSlavePort, Strdup("-1"));
+    reset_var(VAR_gtmSlaveDir);
+    assign_sval(VAR_gtmSlaveDir, Strdup("none"));
+    /* Write the configuration file and backup it */
+    if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+    {
+        /* Should it be panic? */
+        elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+        return 1;
+    }
+    fprintf(f,
+            "#===================================================\n"
+            "# pgxc configuration file updated due to GTM slave removal\n"
+            "# %s\n",
+            timeStampString(date, MAXTOKEN+1));
+    fprintSval(f, VAR_gtmSlave);
+    fprintSval(f, VAR_gtmSlaveServer);
+    fprintSval(f, VAR_gtmSlavePort);
+    fprintSval(f, VAR_gtmSlaveDir);
+    fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+    fclose(f);
+    backup_configuration();
+    elog(NOTICE, "Done.\n");
+    return 0;
+}
+
+
+/*
+ * Init gtm slave -------------------------------------------------------------
+ */
+
+/*
+ * Assumes Gtm Slave is configured.
+ * Caller should check this.
+ */
+/*
+ * Build the command chain that initializes the GTM slave: kill any
+ * running gtm, recreate the work directory, run initgtm, then append
+ * the generated gtm.conf fragment (STANDBY mode pointing at the
+ * current master).  Returns NULL if no slave is configured or the
+ * local stdin file cannot be prepared.
+ */
+cmd_t *prepare_initGtmSlave(void)
+{
+    char date[MAXTOKEN+1];
+    cmd_t *cmdInitGtm, *cmdGtmConf;
+    FILE *f;
+    char **fileList = NULL;
+
+    if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+    {
+        elog(ERROR, "ERROR: GTM slave is not configured.\n");
+        return(NULL);
+    }
+    /* Kill current gtm, build work directory and run initgtm */
+    cmdInitGtm = initCmd(sval(VAR_gtmSlaveServer));
+    snprintf(newCommand(cmdInitGtm), MAXLINE,
+             "killall -u %s -9 gtm; rm -rf %s; mkdir -p %s; initgtm -Z gtm -D %s",
+             sval(VAR_pgxcUser),
+             sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir));
+
+    /* Prepare gtm.conf file */
+
+    /* Prepare local Stdin */
+    appendCmdEl(cmdInitGtm, (cmdGtmConf = initCmd(sval(VAR_gtmSlaveServer))));
+    snprintf(newCommand(cmdGtmConf), MAXLINE,
+             "cat >> %s/gtm.conf",
+             sval(VAR_gtmSlaveDir));
+    if ((f = prepareLocalStdin(newFilename(cmdGtmConf->localStdin), MAXPATH, NULL)) == NULL)
+    {
+        cleanCmd(cmdInitGtm);
+        return(NULL);
+    }
+    fprintf(f,
+            "#===============================================\n"
+            "# Added at initialization, %s\n"
+            "listen_addresses = '*'\n",
+            /* was MAXPATH+1, which can overrun date[MAXTOKEN+1] */
+            timeStampString(date, MAXTOKEN));
+    if (!is_none(sval(VAR_gtmExtraConfig)))
+        AddMember(fileList, sval(VAR_gtmExtraConfig));
+    /* NOTE(review): appends the MASTER-specific extra config for the slave;
+     * looks copy-pasted from prepare_initGtmMaster() -- confirm whether a
+     * slave-specific variable should be used instead */
+    if (!is_none(sval(VAR_gtmMasterSpecificExtraConfig)))
+        AddMember(fileList, sval(VAR_gtmMasterSpecificExtraConfig));
+    appendFiles(f, fileList);
+    CleanArray(fileList);
+    fprintf(f,
+            "port = %s\n"
+            "nodename = '%s'\n"
+            "startup = STANDBY\n"
+            "active_host = '%s'\n"
+            "active_port = %d\n"
+            "# End of addition\n",
+            sval(VAR_gtmSlavePort), sval(VAR_gtmName),
+            sval(VAR_gtmMasterServer), atoi(sval(VAR_gtmMasterPort)));
+    fclose(f);
+    return (cmdInitGtm);
+}
+
+/* Initialize the GTM slave; returns 1 when no slave is configured. */
+int init_gtm_slave(void)
+{
+    cmdList_t *commands;
+    cmd_t *initElement;
+    int result;
+
+    elog(INFO, "Initialize GTM slave\n");
+    commands = initCmdList();
+    if ((initElement = prepare_initGtmSlave()) == NULL)
+        return 1;
+    addCmd(commands, initElement);
+    /* Do all the commands and clean */
+    result = doCmdList(commands);
+    cleanCmdList(commands);
+    elog(INFO, "Done.\n");
+    return result;
+}
+
+/*
+ * Start gtm master -----------------------------------------------------
+ */
+/*
+ * Build the command that (re)starts the GTM master: stop any running
+ * instance, drop the stale register.node file, then start gtm.
+ */
+cmd_t *prepare_startGtmMaster(void)
+{
+    cmd_t *startCmd = initCmd(sval(VAR_gtmMasterServer));
+
+    snprintf(newCommand(startCmd), MAXLINE,
+             "gtm_ctl stop -Z gtm -D %s;"
+             "rm -f %s/register.node;"
+             "gtm_ctl start -Z gtm -D %s",
+             sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir));
+    return startCmd;
+}
+
+/* Start the GTM master and report the combined exit status. */
+int start_gtm_master(void)
+{
+    cmdList_t *commands;
+    int result;
+
+    elog(INFO, "Start GTM master\n");
+    commands = initCmdList();
+    addCmd(commands, prepare_startGtmMaster());
+    result = doCmdList(commands);
+    cleanCmdList(commands);
+    return result;
+}
+
+/*
+ * Start gtm slave ----------------------------------------------------
+ */
+/*
+ * Build the command that (re)starts the GTM slave: stop any running
+ * instance, drop the stale register.node file, then start gtm.
+ * Returns NULL when no slave is configured.
+ */
+cmd_t *prepare_startGtmSlave(void)
+{
+    cmd_t *cmdGtmCtl;
+
+    if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+    {
+        elog(ERROR, "ERROR: GTM slave is not configured.\n");
+        return(NULL);
+    }
+    cmdGtmCtl = initCmd(sval(VAR_gtmSlaveServer));
+    snprintf(newCommand(cmdGtmCtl), MAXLINE,
+             "gtm_ctl stop -Z gtm -D %s;"
+             "rm -rf %s/register.node;" /* NOTE(review): master variant uses "rm -f" here -- confirm */
+             "gtm_ctl start -Z gtm -D %s",
+             sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir));
+    return (cmdGtmCtl);
+}
+
+/* Start the GTM slave; returns 1 when no slave is configured. */
+int start_gtm_slave(void)
+{
+    cmdList_t *cmdList;
+    cmd_t *cmd;
+    int rc;
+
+    /* Was missing the trailing newline every sibling message has */
+    elog(INFO, "Start GTM slave\n");
+    cmdList = initCmdList();
+    if ((cmd = prepare_startGtmSlave()))
+    {
+        addCmd(cmdList, cmd);
+        rc = doCmdList(cmdList);
+        cleanCmdList(cmdList);
+        elog(INFO, "Done.\n");
+        return(rc);
+    }
+    return 1;
+}
+
+/*
+ * Stop gtm master ---------------------------------------------------------
+ */
+/* Build the command that stops the GTM master via gtm_ctl. */
+cmd_t *prepare_stopGtmMaster(void)
+{
+    cmd_t *stopCmd = initCmd(sval(VAR_gtmMasterServer));
+
+    snprintf(newCommand(stopCmd), MAXLINE,
+             "gtm_ctl stop -Z gtm -D %s",
+             sval(VAR_gtmMasterDir));
+    return stopCmd;
+}
+
+/* Stop the GTM master and report the combined exit status. */
+int stop_gtm_master(void)
+{
+    cmdList_t *commands;
+    int result;
+
+    elog(INFO, "Stop GTM master\n");
+    commands = initCmdList();
+    addCmd(commands, prepare_stopGtmMaster());
+    result = doCmdList(commands);
+    cleanCmdList(commands);
+    return result;
+}
+
+/*
+ * Stop gtm slave ---------------------------------------------------------------
+ */
+/*
+ * Build the command that stops the GTM slave via gtm_ctl.
+ * Returns NULL when no slave is configured.
+ */
+cmd_t *prepare_stopGtmSlave(void)
+{
+    cmd_t *cmdGtmCtl;
+
+    if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+    {
+        elog(ERROR, "ERROR: GTM slave is not configured.\n");
+        return(NULL);
+    }
+    cmdGtmCtl = initCmd(sval(VAR_gtmSlaveServer));
+    snprintf(newCommand(cmdGtmCtl), MAXLINE,
+             "gtm_ctl stop -Z gtm -D %s",
+             sval(VAR_gtmSlaveDir));
+    return(cmdGtmCtl);
+}
+
+/* Stop the GTM slave; returns 1 when no slave is configured. */
+int stop_gtm_slave(void)
+{
+    cmdList_t *commands;
+    cmd_t *stopCmd;
+    int result;
+
+    elog(INFO, "Stop GTM slave\n");
+    commands = initCmdList();
+    if ((stopCmd = prepare_stopGtmSlave()) == NULL)
+        return 1;
+    addCmd(commands, stopCmd);
+    result = doCmdList(commands);
+    cleanCmdList(commands);
+    return result;
+}
+
+/*
+ * Kill gtm master -----------------------------------------------------
+ *
+ * You should not kill gtm master in this way. This may discard the latest
+ * gtm status. This is just in case. You must try to stop gtm master
+ * gracefully.
+ */
+/*
+ * Build the command that forcibly kills the GTM master and removes its
+ * socket files and pid file.  Last resort: prefer stop_gtm_master().
+ */
+cmd_t *prepare_killGtmMaster(void)
+{
+    cmd_t *cmdKill;
+    pid_t gtmPid;
+
+    cmdKill = initCmd(sval(VAR_gtmMasterServer));
+    gtmPid = get_gtm_pid(sval(VAR_gtmMasterServer), sval(VAR_gtmMasterDir));
+    /* If a pid could be obtained, kill that pid; otherwise fall back to killall */
+    if (gtmPid > 0)
+        snprintf(newCommand(cmdKill), MAXLINE,
+                 "kill -9 %d; rm -rf /tmp/.s.'*'%d'*' %s/gtm.pid",
+                 gtmPid, atoi(sval(VAR_gtmMasterPort)), sval(VAR_gtmMasterDir));
+    else
+        snprintf(newCommand(cmdKill), MAXLINE,
+                 "killall -u %s -9 gtm; rm -rf /tmp/.s.'*'%d'*' %s/gtm.pid",
+                 sval(VAR_pgxcUser), atoi(sval(VAR_gtmMasterPort)), sval(VAR_gtmMasterDir));
+    return(cmdKill);
+}
+
+
+/* Forcibly kill the GTM master (last resort; prefer stop_gtm_master). */
+int kill_gtm_master(void)
+{
+    cmdList_t *commands;
+    cmd_t *killCmd;
+    int result;
+
+    elog(INFO, "Kill GTM master\n");
+    commands = initCmdList();
+    if ((killCmd = prepare_killGtmMaster()) == NULL)
+        return 1;
+    addCmd(commands, killCmd);
+    result = doCmdList(commands);
+    cleanCmdList(commands);
+    return result;
+}
+
+/*
+ * Kill gtm slave --------------------------------------------------------
+ *
+ * GTM slave has no significant informaion to carry over. But it is a good
+ * habit to stop gtm slave gracefully with stop command.
+ */
+/*
+ * Build the command that forcibly kills the GTM slave and removes its
+ * socket files and pid file.  Returns NULL when no slave is configured.
+ */
+cmd_t *prepare_killGtmSlave(void)
+{
+    cmd_t *cmdKill;
+    pid_t gtmPid;
+
+    if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+    {
+        elog(ERROR, "ERROR: GTM slave is not configured.\n");
+        return(NULL);
+    }
+    cmdKill = initCmd(sval(VAR_gtmSlaveServer));
+    gtmPid = get_gtm_pid(sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+    /* If a pid could be obtained, kill that pid; otherwise fall back to killall */
+    if (gtmPid > 0)
+        snprintf(newCommand(cmdKill), MAXLINE,
+                 "kill -9 %d; rm -rf /tmp/.s.'*'%d'*' %s/gtm.pid",
+                 gtmPid, atoi(sval(VAR_gtmSlavePort)), sval(VAR_gtmSlaveDir));
+    else
+        snprintf(newCommand(cmdKill), MAXLINE,
+                 "killall -u %s -9 gtm; rm -rf /tmp/.s.'*'%d'*' %s/gtm.pid",
+                 sval(VAR_pgxcUser), atoi(sval(VAR_gtmSlavePort)), sval(VAR_gtmSlaveDir));
+    /*
+     * The original re-assigned cmdKill = initCmd(...) here, leaking the
+     * prepared command and returning an empty one, so the kill command
+     * was never executed.
+     */
+    return(cmdKill);
+}
+
+
+/* Forcibly kill the GTM slave; returns 1 when no slave is configured. */
+int kill_gtm_slave(void)
+{
+    cmdList_t *commands;
+    cmd_t *killCmd;
+    int result;
+
+    elog(INFO, "Kill GTM slave\n");
+    commands = initCmdList();
+    if ((killCmd = prepare_killGtmSlave()) == NULL)
+        return 1;
+    addCmd(commands, killCmd);
+    result = doCmdList(commands);
+    cleanCmdList(commands);
+    return result;
+}
+
+/*
+ * Failover the gtm ------------------------------------------------------
+ */
+/*
+ * Promote the GTM slave to master after a master failure:
+ *  1. promote the slave via gtm_ctl
+ *  2. rewrite the promoted gtm's gtm.conf (startup = ACT)
+ *  3. append the new topology to the pgxc_ctl configuration file
+ *  4. update the in-memory variables
+ * Returns 0 on success, 1 on any failure.
+ */
+int failover_gtm(void)
+{
+    char date[MAXTOKEN+1];
+    char *stdIn;
+    int rc;
+    FILE *f;
+
+    elog(INFO, "Failover gtm\n");
+    if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+    {
+        elog(ERROR, "ERROR: GTM slave is not configured. Cannot failover.\n");
+        return(1);
+    }
+
+    /* do_gtm_ping() == 0 means the slave responds */
+    if (do_gtm_ping(sval(VAR_gtmSlaveServer), atoi(sval(VAR_gtmSlavePort))) != 0)
+    {
+        elog(ERROR, "ERROR: GTM slave is not running\n");
+        return(1);
+    }
+
+    /* Promote the slave */
+    elog(NOTICE, "Running \"gtm_ctl promote -Z gtm -D %s\"\n", sval(VAR_gtmSlaveDir));
+    rc = doImmediate(sval(VAR_gtmSlaveServer), NULL,
+                     "gtm_ctl promote -Z gtm -D %s", sval(VAR_gtmSlaveDir));
+    if (WEXITSTATUS(rc) != 0)
+    {
+        elog(ERROR, "ERROR: could not promote gtm (host:%s, dir:%s)\n", sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+        return 1;
+    }
+
+    /* Configure promoted gtm */
+    if ((f = prepareLocalStdin(newFilename(stdIn), MAXPATH, NULL)) == NULL)
+        return(1);
+    fprintf(f,
+            "#===================================================\n"
+            "# Updated due to GTM failover\n"
+            "# %s\n"
+            "startup = ACT\n"
+            "#----End of reconfiguration -------------------------\n",
+            timeStampString(date, MAXTOKEN+1));
+    fclose(f);
+    elog(NOTICE, "Updating gtm.conf at %s:%s\n", sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+    rc = doImmediate(sval(VAR_gtmSlaveServer), stdIn, "cat >> %s/gtm.conf", sval(VAR_gtmSlaveDir));
+    if (WEXITSTATUS(rc) != 0)
+    {
+        /* NOTE(review): stdIn is leaked on this and the later error paths -- confirm */
+        elog(ERROR, "ERROR: could not update gtm.conf (host: %s, dir:%s)\n", sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+        return 1;
+    }
+
+    /* Update and backup configuration file (stdIn buffer is reused here) */
+    if ((f = prepareLocalStdin(stdIn, MAXPATH, NULL)) == NULL)
+        return(1);
+    fprintf(f,
+            "#===================================================\n"
+            "# pgxc configuration file updated due to GTM failover\n"
+            "# %s\n"
+            "gtmMasterServer=%s\n"
+            "gtmMasterPort=%s\n"
+            "gtmMasterDir=%s\n"
+            "gtmSlave=n\n"
+            "gtmSlaveServer=none\n"
+            "gtmSlavePort=0\n"
+            "gtmSlaveDir=none\n"
+            "#----End of reconfiguration -------------------------\n",
+            timeStampString(date, MAXTOKEN+1),
+            sval(VAR_gtmSlaveServer),
+            sval(VAR_gtmSlavePort),
+            sval(VAR_gtmSlaveDir));
+    fclose(f);
+    rc = doImmediate(NULL, stdIn, "cat >> %s", pgxc_ctl_config_path);
+    if (WEXITSTATUS(rc) != 0)
+    {
+        elog(ERROR, "ERROR: could not update gtm.conf (host: %s, dir:%s)\n", sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+        return 1;
+    }
+    freeAndReset(stdIn);
+    backup_configuration();
+
+    /* Reconfigure myself: slave becomes master, slave slots are cleared */
+    /* NOTE(review): other call sites pass Strdup()'d values to assign_sval;
+     * passing string literals here looks inconsistent -- confirm assign_sval
+     * copies its argument, otherwise the literals may later be free()'d */
+    assign_val(VAR_gtmMasterServer, VAR_gtmSlaveServer); reset_var(VAR_gtmSlaveServer);
+    assign_val(VAR_gtmMasterPort, VAR_gtmSlavePort); reset_var(VAR_gtmSlavePort);
+    assign_val(VAR_gtmMasterDir, VAR_gtmSlaveDir); reset_var(VAR_gtmSlaveDir);
+    assign_sval(VAR_gtmSlaveServer, "none");
+    assign_sval(VAR_gtmSlavePort, "0");
+    assign_sval(VAR_gtmSlaveDir, "none");
+    assign_sval(VAR_gtmSlave, "n");
+
+    return 0;
+}
+
+/*
+ * Clean gtm master resources -- directory and socket --------------------------
+ */
+/*
+ * Build the command that removes the gtm master work directory,
+ * recreates it empty, and removes the gtm unix socket files.
+ */
+cmd_t *prepare_cleanGtmMaster(void)
+{
+    cmd_t *cmd;
+
+    /* Remove work dir and clean the socket */
+    cmd = initCmd(sval(VAR_gtmMasterServer));
+    snprintf(newCommand(cmd), MAXLINE,
+             "rm -rf %s; mkdir -p %s; chmod 0700 %s;rm -f /tmp/.s.*%d*",
+             sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir),
+             /* was atoi(VAR_gtmMasterPort): atoi of the variable NAME, always 0 */
+             atoi(sval(VAR_gtmMasterPort)));
+    return cmd;
+}
+
+/* Remove the gtm master work directory and socket files. */
+int clean_gtm_master(void)
+{
+    cmdList_t *commands;
+    int result;
+
+    elog(INFO, "Clearing gtm master directory and socket.\n");
+    commands = initCmdList();
+    addCmd(commands, prepare_cleanGtmMaster());
+    result = doCmdList(commands);
+    cleanCmdList(commands);
+    return result;
+}
+
+/*
+ * Clean gtm master resources -- direcotry and socket --------------------------
+ */
+/*
+ * Null will be retruend if gtm slave is not configured.
+ * Be careful. If you configure gtm slave and gtm master on a same server,
+ * bott slave amd master process will be killed.
+ */
+/*
+ * Build the command to clean gtm slave resources (work directory and
+ * socket files).  Returns NULL if the gtm slave is not configured.
+ * Be careful: if slave and master are on the same server and port,
+ * the master's socket files would be removed as well.
+ */
+cmd_t *prepare_cleanGtmSlave(void)
+{
+    cmd_t *cmd;
+
+    /* was is_none(VAR_gtmSlaveServer): tested the variable NAME, not its value */
+    if (!isVarYes(VAR_gtmSlave) || is_none(sval(VAR_gtmSlaveServer)))
+        return(NULL);
+    cmd = initCmd(sval(VAR_gtmSlaveServer));
+    snprintf(newCommand(cmd), MAXLINE,
+             "rm -rf %s; mkdir -p %s; chmod 0700 %s;rm -f /tmp/.s.*%d*",
+             /* chmod target was the MASTER dir; must be the slave dir */
+             sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir),
+             /* was atoi(VAR_gtmSlavePort): atoi of the variable NAME, always 0 */
+             atoi(sval(VAR_gtmSlavePort)));
+    return cmd;
+}
+
+/* Remove the gtm slave work directory and socket files. */
+int clean_gtm_slave(void)
+{
+    cmdList_t *cmdList;
+    cmd_t *cmd;
+    int rc;
+
+    elog(NOTICE, "Clearing gtm slave resources.\n");
+    /* was is_none(VAR_gtmSlaveServer): tested the variable NAME, not its value */
+    if (!isVarYes(VAR_gtmSlave) || is_none(sval(VAR_gtmSlaveServer)))
+    {
+        elog(ERROR, "ERROR: gtm slave is not configured.\n");
+        return 1;
+    }
+    cmdList = initCmdList();
+    /* Guard against a NULL command so addCmd() never dereferences NULL */
+    if ((cmd = prepare_cleanGtmSlave()) == NULL)
+        return 1;
+    addCmd(cmdList, cmd);
+    rc = doCmdList(cmdList);
+    cleanCmdList(cmdList);
+    elog(NOTICE, "Done.\n");
+    return(rc);
+}
+
+/*
+ * ==================================================================================
+ *
+ * Gtm Proxy Staff
+ *
+ * ==================================================================================
+ */
+
+/*
+ * Add gtm proxy: to be used after all the configuration is done.
+ *
+ * This function only maintains internal configuration, updte configuration file,
+ * and make backup if configured. You should run init and start it separately.
+ */
+/*
+ * Register a new GTM proxy in the running configuration, persist the
+ * change to the pgxc_ctl configuration file, then initialize and
+ * start the proxy.  Returns start_gtm_proxy()'s status, or 1 on
+ * validation failure.
+ */
+int add_gtmProxy(char *name, char *host, int port, char *dir)
+{
+    char port_s[MAXTOKEN+1];
+    char date[MAXTOKEN+1];
+    FILE *f;
+    char **nodelist = NULL;
+    int rc;
+
+    if (is_none(host))
+    {
+        elog(ERROR, "ERROR: Cannot add gtm proxy with the name \"none\".\n");
+        return 1;
+    }
+    if (is_none(dir))
+    {
+        elog(ERROR, "ERROR: Cannot add gtm proxy with the directory \"none\".\n");
+        return 1;
+    }
+    if (checkSpecificResourceConflict(name, host, port, dir, TRUE))
+    {
+        elog(ERROR, "ERROR: New specified name:%s, host:%s, port:%d and dir:\"%s\" conflicts with existing node.\n",
+             name, host, port, dir);
+        return 1;
+    }
+    /* First proxy being added: enable the feature and start a fresh name list */
+    if (!isVarYes(VAR_gtmProxy))
+    {
+        assign_sval(VAR_gtmProxy, Strdup("y"));
+        reset_var(VAR_gtmProxyNames);
+    }
+    add_val(find_var(VAR_gtmProxyNames), Strdup(name));
+    add_val(find_var(VAR_gtmProxyServers), Strdup(host));
+    snprintf(port_s, MAXTOKEN, "%d", port);
+    add_val(find_var(VAR_gtmProxyPorts), Strdup(port_s));
+    add_val(find_var(VAR_gtmProxyDirs), Strdup(dir));
+    add_val(find_var(VAR_gtmPxySpecificExtraConfig), Strdup("none"));
+    makeServerList();
+    /* Append the updated values to the configuration file */
+    if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+    {
+        /* Should it be panic? */
+        elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+        return 1;
+    }
+    fprintf(f,
+            "#===================================================\n"
+            "# pgxc configuration file updated due to GTM proxy (%s) addition\n"
+            "# %s\n",
+            name,
+            timeStampString(date, MAXTOKEN+1));
+    fprintSval(f, VAR_gtmProxy);
+    fprintAval(f, VAR_gtmProxyNames);
+    fprintAval(f, VAR_gtmProxyServers);
+    fprintAval(f, VAR_gtmProxyPorts);
+    fprintAval(f, VAR_gtmProxyDirs);
+    fprintAval(f, VAR_gtmPxySpecificExtraConfig);
+    fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+    fclose(f);
+    /* Initialize and start only the new proxy */
+    AddMember(nodelist, name);
+    init_gtm_proxy(nodelist);
+    rc = start_gtm_proxy(nodelist);
+    CleanArray(nodelist);
+    return rc;
+}
+
+/*
+ * Remove a GTM proxy from the configuration: optionally clean its
+ * on-disk resources, mark its slots "none", and persist the change
+ * to the configuration file (plus backup).  Refuses removal when the
+ * proxy is referenced by any coordinator or datanode.
+ */
+int remove_gtmProxy(char *name, bool clean_opt)
+{
+    FILE *f;
+    int idx;
+
+    /* Check if gtm proxy exists */
+    if ((idx = gtmProxyIdx(name)) < 0)
+    {
+        elog(ERROR, "ERROR: %s is not a gtm proxy.\n", name);
+        return 1;
+    }
+    /* Check if it is in use */
+    if (ifExists(VAR_coordMasterServers, aval(VAR_gtmProxyServers)[idx]) ||
+        ifExists(VAR_coordSlaveServers, aval(VAR_gtmProxyServers)[idx]) ||
+        ifExists(VAR_datanodeMasterServers, aval(VAR_gtmProxyServers)[idx]) ||
+        ifExists(VAR_datanodeSlaveServers, aval(VAR_gtmProxyServers)[idx]))
+    {
+        elog(ERROR, "ERROR: GTM Proxy %s is in use\n", name);
+        return 1;
+    }
+    elog(NOTICE, "NOTICE: removing gtm_proxy %s\n", name);
+    /* Clean */
+    if (clean_opt)
+    {
+        char **nodelist = NULL;
+
+        elog(NOTICE, "NOTICE: cleaning target resources.\n");
+        AddMember(nodelist, name);
+        clean_gtm_proxy(nodelist);
+        CleanArray(nodelist);
+    }
+    /* Reconfigure */
+    var_assign(&aval(VAR_gtmProxyNames)[idx], Strdup("none"));
+    var_assign(&aval(VAR_gtmProxyServers)[idx], Strdup("none"));
+    var_assign(&aval(VAR_gtmProxyPorts)[idx], Strdup("-1"));
+    var_assign(&aval(VAR_gtmProxyDirs)[idx], Strdup("none"));
+    handle_no_slaves();
+    makeServerList();
+    /* Update configuration file and backup it */
+    if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+    {
+        /* Should it be panic? */
+        elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+        return 1;
+    }
+    /* NOTE(review): gtmPxySpecificExtraConfig is not rewritten here although
+     * add_gtmProxy() appends to it -- confirm whether that is intended */
+    fprintf(f,
+            "#===================================================\n"
+            /* was "addition": this record documents a removal */
+            "# pgxc configuration file updated due to GTM proxy removal\n"
+            "# %s\n"
+            "%s=%s\n" /* gtmProxy */
+            "%s=( %s )\n" /* gtmProxyNames */
+            "%s=( %s )\n" /* gtmProxyServers */
+            "%s=( %s )\n" /* gtmProxyPorts */
+            "%s=( %s )\n" /* gtmProxyDirs */
+            "#----End of reconfiguration -------------------------\n",
+            timeStampString(date, MAXTOKEN+1),
+            VAR_gtmProxy, sval(VAR_gtmProxy),
+            VAR_gtmProxyNames, listValue(VAR_gtmProxyNames),
+            VAR_gtmProxyServers, listValue(VAR_gtmProxyServers),
+            VAR_gtmProxyPorts, listValue(VAR_gtmProxyPorts),
+            VAR_gtmProxyDirs, listValue(VAR_gtmProxyDirs));
+    fclose(f);
+    backup_configuration();
+    elog(NOTICE, "Done.\n");
+    return 0;
+}
+
+/*
+ * Returns NULL (after logging an error) when nodeName does not name a
+ * configured GTM proxy; the caller need not pre-validate the name.
+ */
+
+/*
+ * Build the command chain that initializes one GTM proxy:
+ *   1) on the proxy host: kill any stray gtm_proxy, recreate its work
+ *      directory and run initgtm, then
+ *   2) append the generated gtm_proxy.conf fragment (fed through local
+ *      stdin) pointing the proxy at the current GTM master.
+ * Returns the head of the chain, or NULL if nodeName is not a configured
+ * GTM proxy or the local stdin file cannot be prepared.
+ */
+cmd_t *prepare_initGtmProxy(char *nodeName)
+{
+    cmd_t *cmdInitGtm, *cmdGtmProxyConf;
+    int idx;
+    FILE *f;
+    char timestamp[MAXTOKEN+1];
+
+    if ((idx = gtmProxyIdx(nodeName)) < 0)
+    {
+        elog(ERROR, "ERROR: Specified name %s is not GTM Proxy configuration.\n", nodeName);
+        return NULL;
+    }
+
+    /* Build directory and run initgtm */
+    cmdInitGtm = initCmd(aval(VAR_gtmProxyServers)[idx]);
+    snprintf(newCommand(cmdInitGtm), MAXLINE,
+             "killall -u %s -9 gtm_proxy;"
+             "rm -rf %s;"
+             "mkdir -p %s;"
+             "initgtm -Z gtm_proxy -D %s",
+             sval(VAR_pgxcUser),
+             aval(VAR_gtmProxyDirs)[idx],
+             aval(VAR_gtmProxyDirs)[idx],
+             aval(VAR_gtmProxyDirs)[idx]);
+
+    /* Configure gtm_proxy.conf: the fragment below is written to a local
+     * temp file and piped into "cat >>" on the remote side. */
+    appendCmdEl(cmdInitGtm, (cmdGtmProxyConf = initCmd(aval(VAR_gtmProxyServers)[idx])));
+    if ((f = prepareLocalStdin(newFilename(cmdGtmProxyConf->localStdin), MAXPATH, NULL)) == NULL)
+    {
+        /* Free the whole chain built so far; nothing has run yet. */
+        cleanCmd(cmdInitGtm);
+        return NULL;
+    }
+    fprintf(f,
+            "#===========================\n"
+            "# Added at initialization, %s\n"
+            "nodename = '%s'\n"
+            "listen_addresses = '*'\n"
+            "port = %s\n"
+            "gtm_host = '%s'\n"
+            "gtm_port = %s\n"
+            "worker_threads = 1\n"
+            "gtm_connect_retry_interval = 1\n"
+            "# End of addition\n",
+            timeStampString(timestamp, MAXTOKEN),
+            aval(VAR_gtmProxyNames)[idx],
+            aval(VAR_gtmProxyPorts)[idx],
+            sval(VAR_gtmMasterServer),
+            sval(VAR_gtmMasterPort));
+    fclose(f);
+    snprintf(newCommand(cmdGtmProxyConf), MAXLINE,
+             "cat >> %s/gtm_proxy.conf", aval(VAR_gtmProxyDirs)[idx]);
+    return(cmdInitGtm);
+}
+
+/*
+ * Initialize gtm proxy -------------------------------------------------------
+ */
+/*
+ * Initialize the listed GTM proxies by batching one init command chain
+ * per proxy and executing them together. Unknown names only draw a
+ * warning. Returns the batch execution result, or 1 when GTM proxies
+ * are not configured at all.
+ */
+int init_gtm_proxy(char **nodeList)
+{
+    char **targets;
+    cmdList_t *batch;
+    cmd_t *chain;
+    int i;
+    int result;
+
+    if (!isVarYes(VAR_gtmProxy))
+    {
+        elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+        return 1;
+    }
+    targets = makeActualNodeList(nodeList);
+    batch = initCmdList();
+    for (i = 0; targets[i]; i++)
+    {
+        elog(NOTICE, "Initializing gtm proxy %s.\n", targets[i]);
+        chain = prepare_initGtmProxy(targets[i]);
+        if (chain == NULL)
+            elog(WARNING, "WARNING: %s is not a gtm proxy.\n", targets[i]);
+        else
+            addCmd(batch, chain);
+    }
+    result = doCmdList(batch);
+    cleanCmdList(batch);
+    CleanArray(targets);
+    elog(NOTICE, "Done.\n");
+    return result;
+}
+
+
+/* Initialize every configured GTM proxy. */
+int init_gtm_proxy_all(void)
+{
+    elog(NOTICE, "Initialize all the gtm proxies.\n");
+    if (!isVarYes(VAR_gtmProxy))
+    {
+        elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+        return 1;
+    }
+    return init_gtm_proxy(aval(VAR_gtmProxyNames));
+}
+
+/*
+ * Start gtm proxy -----------------------------------------------------------
+ */
+/*
+ * Build the remote command that (re)starts one GTM proxy: any stray
+ * gtm_proxy process is killed first, then gtm_ctl starts a fresh one.
+ * Returns NULL if nodeName is not a configured GTM proxy.
+ */
+cmd_t *prepare_startGtmProxy(char *nodeName)
+{
+    int idx = gtmProxyIdx(nodeName);
+    cmd_t *startCmd;
+
+    if (idx < 0)
+    {
+        elog(ERROR, "ERROR: Specified name %s is not GTM Proxy\n", nodeName);
+        return NULL;
+    }
+    startCmd = initCmd(aval(VAR_gtmProxyServers)[idx]);
+    snprintf(newCommand(startCmd), MAXLINE,
+             "killall -u %s -9 gtm_proxy;"
+             "gtm_ctl start -Z gtm_proxy -D %s",
+             sval(VAR_pgxcUser),
+             aval(VAR_gtmProxyDirs)[idx]);
+    return startCmd;
+}
+
+/*
+ * Start the listed GTM proxies. One start command is queued per valid
+ * name; unknown names only draw a warning. Returns the batch result,
+ * or 1 when GTM proxies are not configured.
+ */
+int start_gtm_proxy(char **nodeList)
+{
+    char **targets;
+    cmdList_t *batch;
+    int i;
+    int result;
+
+    if (!isVarYes(VAR_gtmProxy))
+    {
+        elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+        return 1;
+    }
+    targets = makeActualNodeList(nodeList);
+    batch = initCmdList();
+    for (i = 0; targets[i]; i++)
+    {
+        cmd_t *startCmd;
+
+        elog(NOTICE, "Starting gtm proxy %s.\n", targets[i]);
+        startCmd = prepare_startGtmProxy(targets[i]);
+        if (startCmd == NULL)
+            elog(WARNING, "WARNING: %s is not a gtm proxy.\n", targets[i]);
+        else
+            addCmd(batch, startCmd);
+    }
+    result = doCmdList(batch);
+    cleanCmdList(batch);
+    CleanArray(targets);
+    elog(NOTICE, "Done.\n");
+    return result;
+}
+
+/* Start every configured GTM proxy. */
+int start_gtm_proxy_all(void)
+{
+    elog(NOTICE, "Starting all the gtm proxies.\n");
+    return start_gtm_proxy(aval(VAR_gtmProxyNames));
+}
+
+/*
+ * Stop gtm proxy -------------------------------------------------------------
+ */
+/*
+ * Build the remote "gtm_ctl stop" command for one GTM proxy.
+ * Returns NULL if nodeName is not a configured GTM proxy.
+ */
+cmd_t *prepare_stopGtmProxy(char *nodeName)
+{
+    int idx = gtmProxyIdx(nodeName);
+    cmd_t *stopCmd;
+
+    if (idx < 0)
+    {
+        elog(ERROR, "ERROR: Specified name %s is not GTM Proxy\n", nodeName);
+        return NULL;
+    }
+    stopCmd = initCmd(aval(VAR_gtmProxyServers)[idx]);
+    snprintf(newCommand(stopCmd), MAXLINE,
+             "gtm_ctl stop -Z gtm_proxy -D %s",
+             aval(VAR_gtmProxyDirs)[idx]);
+    return stopCmd;
+}
+
+
+/*
+ * Stop the listed GTM proxies gracefully via gtm_ctl. Unknown names
+ * only draw a warning. Returns the batch result, or 1 when GTM
+ * proxies are not configured.
+ */
+int stop_gtm_proxy(char **nodeList)
+{
+    char **targets;
+    cmdList_t *batch;
+    int i;
+    int result;
+
+    if (!isVarYes(VAR_gtmProxy))
+    {
+        elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+        return 1;
+    }
+    targets = makeActualNodeList(nodeList);
+    batch = initCmdList();
+    for (i = 0; targets[i]; i++)
+    {
+        cmd_t *stopCmd;
+
+        elog(NOTICE, "Stopping gtm proxy %s.\n", targets[i]);
+        stopCmd = prepare_stopGtmProxy(targets[i]);
+        if (stopCmd == NULL)
+            elog(WARNING, "WARNING: %s is not a gtm proxy.\n", targets[i]);
+        else
+            addCmd(batch, stopCmd);
+    }
+    result = doCmdList(batch);
+    cleanCmdList(batch);
+    CleanArray(targets);
+    elog(NOTICE, "Done.\n");
+    return result;
+}
+
+/* Stop every configured GTM proxy. */
+int stop_gtm_proxy_all(void)
+{
+    elog(NOTICE, "Stopping all the gtm proxies.\n");
+    return stop_gtm_proxy(aval(VAR_gtmProxyNames));
+}
+
+/*
+ * Kill gtm proxy -------------------------------------------------------------------
+ *
+ * Although gtm proxy does not have significant resources to carry over to the next
+ * run, it is a good habit to stop gtm proxy with stop command gracefully.
+ */
+/*
+ * Build the remote command that forcibly kills one GTM proxy and removes
+ * its socket files and pid file. Prefer killing the exact pid read from
+ * gtm_proxy.pid; fall back to killall by process name when no pid is
+ * available. Returns NULL if nodeName is not a configured GTM proxy.
+ */
+cmd_t *prepare_killGtmProxy(char *nodeName)
+{
+    cmd_t *cmd;
+    int idx;
+    pid_t gtmPxyPid;
+
+    if ((idx = gtmProxyIdx(nodeName)) < 0)
+    {
+        elog(ERROR, "ERROR: Specified name %s is not GTM Proxy\n", nodeName);
+        return NULL;
+    }
+    cmd = initCmd(aval(VAR_gtmProxyServers)[idx]);
+    gtmPxyPid = get_gtmProxy_pid(aval(VAR_gtmProxyServers)[idx], aval(VAR_gtmProxyDirs)[idx]);
+    if (gtmPxyPid > 0)
+        snprintf(newCommand(cmd), MAXLINE,
+                 "kill -9 %d; rm -rf /tmp/.s.'*'%d'*' %s/gtm_proxy.pid",
+                 gtmPxyPid, atoi(aval(VAR_gtmProxyPorts)[idx]), aval(VAR_gtmProxyDirs)[idx]);
+    else
+        /* BUGFIX: the fallback used to "killall ... gtm", which would kill
+         * the GTM server itself; the target process here is gtm_proxy. */
+        snprintf(newCommand(cmd), MAXLINE,
+                 "killall -u %s -9 gtm_proxy; rm -rf /tmp/.s.'*'%d'*' %s/gtm_proxy.pid",
+                 sval(VAR_pgxcUser), atoi(aval(VAR_gtmProxyPorts)[idx]), aval(VAR_gtmProxyDirs)[idx]);
+    return(cmd);
+}
+
+/*
+ * Forcibly kill the listed GTM proxy processes. Unknown names only
+ * draw a warning. Returns the batch result, or 1 when GTM proxies
+ * are not configured.
+ */
+int kill_gtm_proxy(char **nodeList)
+{
+    char **targets;
+    cmdList_t *batch;
+    int i;
+    int result;
+
+    if (!isVarYes(VAR_gtmProxy))
+    {
+        elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+        return 1;
+    }
+    targets = makeActualNodeList(nodeList);
+    batch = initCmdList();
+    for (i = 0; targets[i]; i++)
+    {
+        cmd_t *killCmd;
+
+        elog(NOTICE, "Killing process of gtm proxy %s.\n", targets[i]);
+        killCmd = prepare_killGtmProxy(targets[i]);
+        if (killCmd == NULL)
+            elog(WARNING, "WARNING: %s is not a gtm proxy.\n", targets[i]);
+        else
+            addCmd(batch, killCmd);
+    }
+    result = doCmdList(batch);
+    cleanCmdList(batch);
+    CleanArray(targets);
+    return result;
+}
+
+/* Kill every configured GTM proxy process. */
+int kill_gtm_proxy_all(void)
+{
+    elog(NOTICE, "Killing all the gtm proxy processes.\n");
+    return kill_gtm_proxy(aval(VAR_gtmProxyNames));
+}
+
+/*
+ * Reconnect to the current GTM master --------------------------------------------------
+ *
+ * When failed over, the current Master must have been updated.
+ * Remember to update gtm_proxy configuration file so that it
+ * connects to the new master at the next start.
+ * Please note that we assume GTM has already been failed over.
+ * First argument is gtm_proxy nodename
+ */
+/*
+ * Build the command chain that re-points one GTM proxy at the current
+ * GTM master after failover:
+ *   1) "gtm_ctl reconnect" with -s/-t taken from the current master
+ *      (the \\\" escaping survives the shell layers between here and
+ *      the remote gtm_ctl invocation), then
+ *   2) append a gtm_host/gtm_port override to gtm_proxy.conf so the
+ *      proxy also finds the new master at its next start.
+ * Returns the head of the chain, or NULL on an unknown proxy name or
+ * local stdin preparation failure.
+ */
+cmd_t *prepare_reconnectGtmProxy(char *nodeName)
+{
+    cmd_t *cmdGtmCtl, *cmdGtmProxyConf;
+    int idx;
+    FILE *f;
+    char date[MAXTOKEN+1];
+
+    if ((idx = gtmProxyIdx(nodeName)) < 0)
+    {
+        elog(ERROR, "ERROR: Specified name %s is not GTM Proxy\n", nodeName);
+        return(NULL);
+    }
+
+    /* gtm_ctl reconnect */
+    cmdGtmCtl = initCmd(aval(VAR_gtmProxyServers)[idx]);
+    snprintf(newCommand(cmdGtmCtl), MAXLINE,
+             "gtm_ctl reconnect -Z gtm_proxy -D %s -o \\\"-s %s -t %s\\\"",
+             aval(VAR_gtmProxyDirs)[idx], sval(VAR_gtmMasterServer), sval(VAR_gtmMasterPort));
+
+    /* gtm_proxy.conf: appended via local stdin piped into remote "cat >>" */
+    appendCmdEl(cmdGtmCtl, (cmdGtmProxyConf = initCmd(aval(VAR_gtmProxyServers)[idx])));
+    if ((f = prepareLocalStdin(newFilename(cmdGtmProxyConf->localStdin), MAXPATH, NULL)) == NULL)
+    {
+        /* Free the whole chain; nothing has run yet. */
+        cleanCmd(cmdGtmCtl);
+        return(NULL);
+    }
+    fprintf(f,
+            "#===================================================\n"
+            "# Updated due to GTM Proxy reconnect\n"
+            "# %s\n"
+            "gtm_host = '%s'\n"
+            "gtm_port = %s\n"
+            "#----End of reconfiguration -------------------------\n",
+            timeStampString(date, MAXTOKEN),
+            sval(VAR_gtmMasterServer),
+            sval(VAR_gtmMasterPort));
+    fclose(f);
+    snprintf(newCommand(cmdGtmProxyConf), MAXLINE,
+             "cat >> %s/gtm_proxy.conf", aval(VAR_gtmProxyDirs)[idx]);
+    return(cmdGtmCtl);
+}
+
+
+/*
+ * Reconnect the listed GTM proxies to the current GTM master.
+ * Unknown names only draw a warning. Returns the batch result, or 1
+ * when GTM proxies are not configured.
+ */
+int reconnect_gtm_proxy(char **nodeList)
+{
+    char **targets;
+    cmdList_t *batch;
+    int i;
+    int result;
+
+    if (!isVarYes(VAR_gtmProxy))
+    {
+        elog(ERROR, "GTM Proxy is not configured.\n");
+        return 1;
+    }
+    targets = makeActualNodeList(nodeList);
+    batch = initCmdList();
+    for (i = 0; targets[i]; i++)
+    {
+        cmd_t *reconnectCmd;
+
+        elog(NOTICE, "Reconnecting gtm proxy %s.\n", targets[i]);
+        reconnectCmd = prepare_reconnectGtmProxy(targets[i]);
+        if (reconnectCmd == NULL)
+            elog(WARNING, "WARNING: %s is not a gtm proxy.\n", targets[i]);
+        else
+            addCmd(batch, reconnectCmd);
+    }
+    result = doCmdList(batch);
+    cleanCmdList(batch);
+    CleanArray(targets);
+    elog(NOTICE, "Done.\n");
+    return result;
+}
+
+/* Reconnect every configured GTM proxy to the current GTM master. */
+int reconnect_gtm_proxy_all(void)
+{
+    elog(NOTICE, "Reconnecting all the gtm proxies to the new one.\n");
+    return reconnect_gtm_proxy(aval(VAR_gtmProxyNames));
+}
+
+/*
+ * Cleanup -- if nodeName is not a valid GTM proxy name, NULL is returned
+ * instead of a command.
+ */
+/*
+ * Build the remote command that wipes and recreates one GTM proxy's
+ * work directory and removes its socket files. Returns NULL when
+ * nodeName is not a configured GTM proxy (no error is logged here).
+ */
+cmd_t *prepare_cleanGtmProxy(char *nodeName)
+{
+    int idx = gtmProxyIdx(nodeName);
+    cmd_t *cleanCmdEl;
+
+    if (idx < 0)
+        return NULL;
+    cleanCmdEl = initCmd(aval(VAR_gtmProxyServers)[idx]);
+    snprintf(newCommand(cleanCmdEl), MAXLINE,
+             "rm -rf %s; mkdir -p %s; chmod 0700 %s;rm -f /tmp/.s.*%d*",
+             aval(VAR_gtmProxyDirs)[idx], aval(VAR_gtmProxyDirs)[idx], aval(VAR_gtmProxyDirs)[idx],
+             atoi(aval(VAR_gtmProxyPorts)[idx]));
+    return cleanCmdEl;
+}
+
+/*
+ * Wipe resources for the listed GTM proxies. Note: unlike the other
+ * batch operations this one does not require VAR_gtmProxy to be "y",
+ * so leftovers can be cleaned even after deconfiguration.
+ */
+int clean_gtm_proxy(char **nodeList)
+{
+    char **targets = makeActualNodeList(nodeList);
+    cmdList_t *batch = initCmdList();
+    int i;
+    int result;
+
+    for (i = 0; targets[i]; i++)
+    {
+        cmd_t *wipeCmd;
+
+        elog(NOTICE, "Clearing resources for gtm_proxy %s.\n", targets[i]);
+        wipeCmd = prepare_cleanGtmProxy(targets[i]);
+        if (wipeCmd == NULL)
+            elog(WARNING, "%s is not a gtm proxy.\n", targets[i]);
+        else
+            addCmd(batch, wipeCmd);
+    }
+    result = doCmdList(batch);
+    cleanCmdList(batch);
+    CleanArray(targets);
+    elog(NOTICE, "Done.\n");
+    return result;
+}
+
+/* Wipe resources of every configured GTM proxy. */
+int clean_gtm_proxy_all(void)
+{
+    elog(NOTICE, "Clearing all the gtm_proxy resources.\n");
+    return clean_gtm_proxy(aval(VAR_gtmProxyNames));
+}
+
+/*
+ * configuration --------------------------------------------------------------------
+ */
+/*
+ * Print GTM master configuration. flag selects the "GTM Master: " label,
+ * hostname (may be NULL) adds a "host: ..." note. Always returns 0.
+ */
+int show_config_gtmMaster(int flag, char *hostname)
+{
+    char lineBuf[MAXLINE+1];
+    char editBuf[MAXPATH+1];
+
+    lineBuf[0] = 0;
+    /*
+     * BUGFIX: strncat's size argument is the maximum number of characters
+     * to append, not the destination size; passing MAXLINE unconditionally
+     * could overflow lineBuf. Pass the remaining space instead.
+     */
+    if (flag)
+        strncat(lineBuf, "GTM Master: ", MAXLINE - strlen(lineBuf));
+    if (hostname)
+    {
+        snprintf(editBuf, MAXPATH, "host: %s", hostname);
+        strncat(lineBuf, editBuf, MAXLINE - strlen(lineBuf));
+    }
+    if (flag || hostname)
+        strncat(lineBuf, "\n", MAXLINE - strlen(lineBuf));
+    lockLogFile();
+    if (lineBuf[0])
+        elog(NOTICE, "%s", lineBuf);
+    print_simple_node_info(sval(VAR_gtmName), sval(VAR_gtmMasterPort), sval(VAR_gtmMasterDir),
+                           sval(VAR_gtmExtraConfig), sval(VAR_gtmMasterSpecificExtraConfig));
+    unlockLogFile();
+    return 0;
+}
+
+/*
+ * Print GTM slave configuration; errors out (returning 0) when no slave
+ * is configured. flag selects the "GTM Slave: " label, hostname (may be
+ * NULL) adds a "host: ..." note.
+ */
+int show_config_gtmSlave(int flag, char *hostname)
+{
+    char lineBuf[MAXLINE+1];
+    char editBuf[MAXPATH+1];
+
+    if (!isVarYes(VAR_gtmSlave) || is_none(VAR_gtmSlaveServer))
+    {
+        elog(ERROR, "ERROR: gtm slave is not configured.\n");
+        return 0;
+    }
+    lineBuf[0] = 0;
+    /* BUGFIX: bound strncat by the remaining space, not by MAXLINE. */
+    if (flag)
+        strncat(lineBuf, "GTM Slave: ", MAXLINE - strlen(lineBuf));
+    if (hostname)
+    {
+        snprintf(editBuf, MAXPATH, "host: %s", hostname);
+        strncat(lineBuf, editBuf, MAXLINE - strlen(lineBuf));
+    }
+    if (flag || hostname)
+        strncat(lineBuf, "\n", MAXLINE - strlen(lineBuf));
+    lockLogFile();
+    /* Guard added for consistency with the master/proxy variants:
+     * do not emit an empty NOTICE line. */
+    if (lineBuf[0])
+        elog(NOTICE, "%s", lineBuf);
+    print_simple_node_info(sval(VAR_gtmName), sval(VAR_gtmSlavePort), sval(VAR_gtmSlaveDir),
+                           sval(VAR_gtmExtraConfig), sval(VAR_gtmSlaveSpecificExtraConfig));
+    unlockLogFile();
+    return 0;
+}
+
+/*
+ * Print the configuration of every GTM proxy in nameList, holding the
+ * log-file lock across the whole listing.
+ * NOTE(review): show_config_gtmProxy() also takes this lock -- confirm
+ * lockLogFile() supports nested acquisition.
+ */
+int show_config_gtmProxies(char **nameList)
+{
+    int i;
+
+    lockLogFile();
+    for (i = 0; nameList[i]; i++)
+        show_config_gtmProxy(TRUE, i, aval(VAR_gtmProxyServers)[i]);
+    unlockLogFile();
+    return 0;
+}
+
+/*
+ * Print one GTM proxy's configuration. flag selects the "GTM Proxy: "
+ * label, idx indexes the per-proxy arrays, hostname (may be NULL) adds
+ * a "host: ..." note. Always returns 0.
+ */
+int show_config_gtmProxy(int flag, int idx, char *hostname)
+{
+    char lineBuf[MAXLINE+1];
+    char editBuf[MAXPATH+1];
+
+    lineBuf[0] = 0;
+    /* BUGFIX: bound strncat by the remaining space, not by MAXLINE. */
+    if (flag)
+        strncat(lineBuf, "GTM Proxy: ", MAXLINE - strlen(lineBuf));
+    if (hostname)
+    {
+        snprintf(editBuf, MAXPATH, "host: %s", hostname);
+        strncat(lineBuf, editBuf, MAXLINE - strlen(lineBuf));
+    }
+    if (flag || hostname)
+        strncat(lineBuf, "\n", MAXLINE - strlen(lineBuf));
+    lockLogFile();
+    if (lineBuf[0])
+        elog(NOTICE, "%s", lineBuf);
+    print_simple_node_info(aval(VAR_gtmProxyNames)[idx], aval(VAR_gtmProxyPorts)[idx],
+                           aval(VAR_gtmProxyDirs)[idx], sval(VAR_gtmPxyExtraConfig),
+                           aval(VAR_gtmPxySpecificExtraConfig)[idx]);
+    unlockLogFile();
+    return 0;
+}
diff --git a/contrib/pgxc_ctl/gtm_cmd.h b/contrib/pgxc_ctl/gtm_cmd.h
new file mode 100644
index 0000000000..79d0f06290
--- /dev/null
+++ b/contrib/pgxc_ctl/gtm_cmd.h
@@ -0,0 +1,73 @@
+/*-------------------------------------------------------------------------
+ *
+ * gtm_cmd.h
+ *
+ * GTM command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef GTM_CMD_H
+#define GTM_CMD_H
+
+#include "gtm/gtm_c.h"
+#include "utils.h"
+
+extern int init_gtm_master(void);
+extern int init_gtm_slave(void);
+extern int init_gtm_proxy(char **nodeList);
+extern int init_gtm_proxy_all(void);
+extern cmd_t *prepare_initGtmMaster(void);
+extern cmd_t *prepare_initGtmSlave(void);
+extern cmd_t *prepare_initGtmProxy(char *nodeName);
+
+extern int add_gtmSlave(char *name, char *host, int port, char *dir);
+extern int add_gtmProxy(char *name, char *host, int port, char *dir);
+extern int remove_gtmSlave(bool clean_opt);
+extern int remove_gtmProxy(char *name, bool clean_opt);
+
+extern int kill_gtm_master(void);
+extern int kill_gtm_slave(void);
+extern int kill_gtm_proxy(char **nodeList);
+extern int kill_gtm_proxy_all(void);
+extern cmd_t *prepare_killGtmMaster(void);
+extern cmd_t *prepare_killGtmSlave(void);
+extern cmd_t *prepare_killGtmProxy(char *nodeName);
+
+extern int show_config_gtmMaster(int flag, char *hostname);
+extern int show_config_gtmSlave(int flag, char *hostname);
+extern int show_config_gtmProxy(int flag, int idx, char *hostname);
+extern int show_config_gtmProxies(char **nameList);
+
+extern int start_gtm_master(void);
+extern int start_gtm_slave(void);
+extern int start_gtm_proxy(char **nodeList);
+extern int start_gtm_proxy_all(void);
+extern cmd_t *prepare_startGtmMaster(void);
+extern cmd_t *prepare_startGtmSlave(void);
+extern cmd_t *prepare_startGtmProxy(char *nodeName);
+
+extern int stop_gtm_master(void);
+extern int stop_gtm_slave(void);
+extern int stop_gtm_proxy(char **nodeList);
+extern int stop_gtm_proxy_all(void);
+extern cmd_t *prepare_stopGtmMaster(void);
+extern cmd_t *prepare_stopGtmSlave(void);
+extern cmd_t *prepare_stopGtmProxy(char *nodeName);
+
+extern int failover_gtm(void);
+extern int reconnect_gtm_proxy(char **nodeList);
+extern int reconnect_gtm_proxy_all(void);
+extern cmd_t *prepare_reconnectGtmProxy(char *nodeName);
+
+extern int clean_gtm_master(void);
+extern int clean_gtm_slave(void);
+extern cmd_t *prepare_cleanGtmMaster(void);
+extern cmd_t *prepare_cleanGtmSlave(void);
+
+extern int clean_gtm_proxy(char **nodeList);
+extern int clean_gtm_proxy_all(void);
+extern cmd_t *prepare_cleanGtmProxy(char *nodeName);
+
+#endif /* GTM_CMD_H */
diff --git a/contrib/pgxc_ctl/gtm_util.c b/contrib/pgxc_ctl/gtm_util.c
new file mode 100644
index 0000000000..1abcecf980
--- /dev/null
+++ b/contrib/pgxc_ctl/gtm_util.c
@@ -0,0 +1,167 @@
+/*-------------------------------------------------------------------------
+ *
+ * gtm_util.c
+ *
+ * GTM utility module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This module was imported from Koichi's personal development.
+ *
+ * Provides unregistration of the nodes from gtm. This operation might be
+ * needed after some node crashes and its registration information remains
+ * in GTM.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+/*
+#include "gtm/gtm_c.h"
+*/
+#include "gtm/gtm_client.h"
+#include "gtm/libpq-fe.h"
+#include "utils.h"
+#include "variables.h"
+/* This is an ugly hack to avoid conflict between gtm_c.h and pgxc_ctl.h */
+#undef true
+#undef false
+#include "pgxc_ctl_log.h"
+#include "varnames.h"
+#include "config.h"
+#include "gtm_util.h"
+
+typedef enum command_t
+{
+ CMD_INVALID = 0,
+ CMD_UNREGISTER
+} command_t;
+
+static char *nodename = NULL;
+static char *myname = NULL;
+static GTM_PGXCNodeType nodetype = 0; /* Invalid */
+#define GetToken() (line = get_word(line, &token))
+#define testToken(word) ((token != NULL) && (strcmp(token, word) == 0))
+#define TestToken(word) ((token != NULL) && (strcasecmp(token, word) == 0))
+
+static int inputError(char *msg)
+{
+ elog(ERROR, "%s\n", msg);
+ return -1;
+}
+
+/*
+ * Parse an "unregister" command line of the form
+ *   [-n myname] [-Z nodetype] nodename
+ * and unregister nodename of the given type from the GTM master.
+ * Returns the unregister result, or -1 on a parse error.
+ */
+int unregisterFromGtm(char *line)
+{
+    char *token;
+    int rc;
+
+    for(;GetToken();)
+    {
+        if (testToken("-n"))
+        {
+            if (!GetToken())
+                return(inputError("No -n option value was found."));
+            Free(myname);
+            myname = Strdup(token);
+            continue;
+        }
+        else if (testToken("-Z"))
+        {
+            if (!GetToken())
+                return(inputError("No -Z option value was found."));
+            if (testToken("gtm"))
+            {
+                nodetype = GTM_NODE_GTM;
+                /* NOTE(review): "gtm" continues option parsing while every
+                 * other -Z value breaks out of the loop -- confirm this
+                 * asymmetry is intentional. */
+                continue;
+            }
+            else if (testToken("gtm_proxy"))
+            {
+                nodetype = GTM_NODE_GTM_PROXY;
+                break;
+            }
+            else if (testToken("gtm_proxy_postmaster"))
+            {
+                nodetype = GTM_NODE_GTM_PROXY_POSTMASTER;
+                break;
+            }
+            else if (testToken("coordinator"))
+            {
+                nodetype = GTM_NODE_COORDINATOR;
+                break;
+            }
+            else if (testToken("datanode"))
+            {
+                nodetype = GTM_NODE_DATANODE;
+                break;
+            }
+            else
+            {
+                elog(ERROR, "ERROR: Invalid -Z option value, %s\n", token);
+                return(-1);
+            }
+            /* Unreachable: every branch above continues, breaks or returns. */
+            continue;
+        }
+        else
+            break;
+    }
+    if (nodetype == 0)
+    {
+        elog(ERROR, "ERROR: no node type was specified.\n");
+        return(-1);
+    }
+
+    if (myname == NULL)
+        myname = Strdup(DefaultName);
+
+    /* NOTE(review): this path reports via fprintf/exit rather than
+     * elog/return like the rest of the module -- confirm intended. */
+    if (!token)
+    {
+        fprintf(stderr,"%s: No command specified.\n", progname);
+        exit(2);
+    }
+    if (!GetToken())
+    {
+        elog(ERROR, "ERROR: unregister: no node name was found to unregister.\n");
+        return(-1);
+    }
+    nodename = Strdup(token);
+    rc = process_unregister_command(nodetype, nodename);
+    Free(nodename);
+    return(rc);
+}
+
+/*
+ * Open a connection to the GTM master. The connection always presents
+ * itself as a coordinator (remote_type) with postmaster=0.
+ */
+static GTM_Conn *connectGTM()
+{
+    char connect_str[MAXLINE+1];
+    const char *who = (myname == NULL) ? DefaultName : myname;
+
+    snprintf(connect_str, MAXLINE, "host=%s port=%d node_name=%s remote_type=%d postmaster=0",
+             sval(VAR_gtmMasterServer), atoi(sval(VAR_gtmMasterPort)), who, GTM_NODE_COORDINATOR);
+    return PQconnectGTM(connect_str);
+}
+
+/*
+ * Unregister one node from the GTM master. Returns 0 on success, -1 if
+ * the GTM connection fails, otherwise the GTM error code.
+ */
+int process_unregister_command(GTM_PGXCNodeType type, char *nodename)
+{
+    GTM_Conn *conn = connectGTM();
+    int res;
+
+    if (conn == NULL)
+    {
+        elog(ERROR, "ERROR: failed to connect to GTM\n");
+        return -1;
+    }
+    res = node_unregister(conn, type, nodename);
+    if (res != GTM_RESULT_OK)
+    {
+        elog(ERROR, "ERROR: Failed to unregister %s from GTM.\n", nodename);
+        GTMPQfinish(conn);
+        return res;
+    }
+    elog(NOTICE, "unregister %s from GTM.\n", nodename);
+    GTMPQfinish(conn);
+    return 0;
+}
diff --git a/contrib/pgxc_ctl/gtm_util.h b/contrib/pgxc_ctl/gtm_util.h
new file mode 100644
index 0000000000..c4e209977e
--- /dev/null
+++ b/contrib/pgxc_ctl/gtm_util.h
@@ -0,0 +1,23 @@
+/*-------------------------------------------------------------------------
+ *
+ * gtm_util.h
+ *
+ * GTM utility module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef GTM_UTIL_H
+#define GTM_UTIL_H
+
+#include "gtm/gtm_client.h"
+#include "gtm/libpq-fe.h"
+
+extern int unregisterFromGtm(char *line);
+extern int process_unregister_command(GTM_PGXCNodeType type, char *nodename);
+#define unregister_gtm_proxy(name) do{process_unregister_command(GTM_NODE_GTM_PROXY, name);}while(0)
+#define unregister_coordinator(name) do{process_unregister_command(GTM_NODE_COORDINATOR, name);}while(0)
+#define unregister_datanode(name) do{process_unregister_command(GTM_NODE_DATANODE, name);}while(0)
+
+#endif /* GTM_UTIL_H */
diff --git a/contrib/pgxc_ctl/make_signature b/contrib/pgxc_ctl/make_signature
new file mode 100755
index 0000000000..aef05b83cc
--- /dev/null
+++ b/contrib/pgxc_ctl/make_signature
@@ -0,0 +1,136 @@
+#!/bin/bash
+#--------------------------------------------------------------------
+#
+# make_signature
+#
+# Bash script building module of pgxc_ctl.
+#
+# Copyright (c) 2012 Postgres-XC Development Group
+#
+#---------------------------------------------------------------------
+#
+# This module is used to create signature.h and pgxc_ctl_bash.c files.
+#
+# pgxc_ctl_bash.c files contains two information,
+# 1. Bash script to read pgxc_ctl configuration information and write
+#    it back to pgxc_ctl.  This way, users can use their familiar bash
+#    script to configure postgres-xc cluster.
+#    This includes typical (test) configuration so that pgxc_ctl
+#    can run even with incomplete configuration.
+# 2. Template postgres-xc cluster configuration used by pgxc_ctl.
+#    You can get this template by typing "prepare config" command.
+#
+# signature.h contains signature information which is useful in
+# checking the bash script and pgxc_ctl binary build.
+#
+# At present, the bash script is installed each time pgxc_ctl is invoked
+# and uninstalled, this has no significant role.   In the future,
+# when we need to maintain this bash script, it will work to enforce
+# the integrity between the two.
+#------------------------------------------------------------------------
+
+# Timestamp-derived signature shared by signature.h and the generated C file.
+sig=`date +%y%m%d_%H%M_%N`
+cat > signature.h <<EOF
+/*-------------------------------------------------------------------------
+ *
+ * signature.h
+ *
+ * Signature of module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef SIGNATURE_H
+#define SIGNATURE_H
+/* Signature file to identify the make */
+EOF
+# '#'define keeps the heredoc-free echo from being taken as a bash comment.
+echo '#'define signature \"$sig\" >> signature.h
+cat >> signature.h <<EOF
+#endif /* SIGNATURE_H */
+EOF
+
+
+
+# Turn the bash script into C string-literal lines: escape embedded double
+# quotes, then wrap each line in "...", (consumed by the array below).
+cp pgxc_ctl_bash_2 pgxc_ctl_bash.c.wk
+ex pgxc_ctl_bash.c.wk <<EOF
+%s/"/\\\"/g
+w
+%s/^\(.*\)$/"\1",/
+wq
+EOF
+
+cat > pgxc_ctl_bash.c <<EOF
+/*
+ *-----------------------------------------------------------------------
+ *
+ * pgxc_ctl_bash.c
+ *
+ * Bash script body for Postrgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *------------------------------------------------------------------------
+ *
+ * This file was created by make_signature utility when pgxc_ctl was built.
+ *
+ * pgxc_ctl uses this bash script to configure postgres-xc and read
+ * configuration.
+ *
+ * This provides users very flexible way to configure their own
+ * postgres-xc cluster.  For example, by using extra variables and script,
+ * you can save most of your specific hours typing same (or similar)
+ * variable values again and again.
+ */
+
+#include <stddef.h>
+
+/*
+ * Bash script to read pgxc_ctl configuration parameters and write
+ * back to itself.
+ *
+ * This part is written to pgxc_ctl work directory and reads
+ * configuration file, which is also written in bash script.
+ */
+
+char *pgxc_ctl_bash_script[] = {
+EOF
+
+cat pgxc_ctl_bash.c.wk >> pgxc_ctl_bash.c
+
+# NULL-terminate the array so the C side can iterate without a count.
+cat >> pgxc_ctl_bash.c <<EOF
+NULL
+};
+
+EOF
+
+rm pgxc_ctl_bash.c.wk
+
+# Same literal-encoding pass for the configuration template.
+cp pgxc_ctl_conf_part pgxc_ctl_conf_part.wk
+
+ex pgxc_ctl_conf_part.wk <<EOF
+%s/"/\\\"/g
+w
+%s/^\(.*\)$/"\1",/
+wq
+EOF
+
+cat >> pgxc_ctl_bash.c <<EOF
+/*
+ * Prototype of pgxc_ctl configuration file.
+ *
+ * It should be self descripting.  Can be extracted to your pgxc_ctl
+ * work directory with 'prepare config' command.
+ */
+
+char *pgxc_ctl_conf_prototype[] = {
+EOF
+
+cat pgxc_ctl_conf_part.wk >> pgxc_ctl_bash.c
+
+cat >> pgxc_ctl_bash.c <<EOF
+NULL
+};
+EOF
+
+rm pgxc_ctl_conf_part.wk
diff --git a/contrib/pgxc_ctl/mcxt.c b/contrib/pgxc_ctl/mcxt.c
new file mode 100644
index 0000000000..fcb31f8208
--- /dev/null
+++ b/contrib/pgxc_ctl/mcxt.c
@@ -0,0 +1,77 @@
+/*----------------------------------------------------------------------------------
+ *
+ * mcxt.c
+ * Postgres-XC memory context management code for applications.
+ *
+ * This module is for Postgres-XC application/utility programs. Sometimes,
+ * applications/utilities may need Postgres-XC internal functions which
+ * depends upon mcxt.c of gtm or Postgres.
+ *
+ * This module "virtualize" such module-dependent memory management.
+ *
+ * This code is for general use, which depends only upon conventional
+ * memory management functions.
+ *
+ * Copyright (c) 2013, Postgres-XC Development Group
+ *
+ *---------------------------------------------------------------------------------
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include "gen_alloc.h"
+
+static void *current_cxt;
+
+static void *memCxtAlloc(void *, size_t);
+static void *memCxtRealloc(void *, size_t);
+static void *memCxtAlloc0(void *, size_t);
+static void memCxtFree(void *);
+static void *memCxtAllocTop(size_t);
+static void *memCxtCurrentContext(void);
+
+
+/* Context-aware allocation hook; the context is ignored and plain malloc is used. */
+static void *memCxtAlloc(void* current, size_t needed)
+{
+    return malloc(needed);
+}
+
+/* Reallocation hook; thin wrapper over libc realloc. */
+static void *memCxtRealloc(void *addr, size_t needed)
+{
+    return realloc(addr, needed);
+}
+
+/*
+ * Zero-initialized allocation hook; the context argument is ignored.
+ * calloc() replaces the original hand-rolled malloc+NULL-check+memset:
+ * identical behavior (NULL on failure, zero-filled block otherwise),
+ * fewer moving parts.
+ */
+static void *memCxtAlloc0(void *current, size_t needed)
+{
+    return calloc(1, needed);
+}
+
+/* Free hook; thin wrapper over libc free (free(NULL) is a no-op). */
+static void memCxtFree(void *addr)
+{
+    free(addr);
+}
+
+/* Return the (dummy) current context token; callers only pass it back to the hooks above. */
+static void *memCxtCurrentContext()
+{
+    return (void *)&current_cxt;
+}
+
+/* Top-context allocation hook; no context hierarchy here, so plain malloc. */
+static void *memCxtAllocTop(size_t needed)
+{
+    return malloc(needed);
+}
+
+
+/* Dispatch table consumed via gen_alloc.h: maps the generic allocator
+ * hooks onto the plain-libc implementations above. */
+Gen_Alloc genAlloc_class = {(void *)memCxtAlloc,
+                            (void *)memCxtAlloc0,
+                            (void *)memCxtRealloc,
+                            (void *)memCxtFree,
+                            (void *)memCxtCurrentContext,
+                            (void *)memCxtAllocTop};
diff --git a/contrib/pgxc_ctl/monitor.c b/contrib/pgxc_ctl/monitor.c
new file mode 100644
index 0000000000..2323bd00a1
--- /dev/null
+++ b/contrib/pgxc_ctl/monitor.c
@@ -0,0 +1,469 @@
+/*-------------------------------------------------------------------------
+ *
+ * monitor.c
+ *
+ * Monitoring module of Postgres-XC configuration and operation tool.
+ *
+ * Portions Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This module is imported from /contrib/pgxc_monitor, to provide monitoring
+ * feature of each Postgres-XC component.
+ */
+#include "gtm/gtm_client.h"
+#include "gtm/libpq-fe.h"
+
+#include <stdlib.h>
+#include <getopt.h>
+#include "utils.h"
+#include "variables.h"
+/* This is an ugly hack to avoid conflict between gtm_c.h and pgxc_ctl.h */
+#undef true
+#undef false
+#include "pgxc_ctl_log.h"
+#include "varnames.h"
+#include "config.h"
+#include "monitor.h"
+
/* Define all the node types */
/*
 * NOTE(review): nodetype_t does not appear to be used in this file (the
 * dispatch in monitor_something() uses the project-wide NodeType enum);
 * possibly a leftover from the imported pgxc_monitor code -- confirm
 * before removing.
 */
typedef enum
{
	NONE = 0,
	GTM,	/* GTM or GTM-proxy */
	NODE	/* Coordinator or Datanode */
} nodetype_t;

/*
 * Tokenizer helpers shared by the command parser below: GetToken()
 * advances "line" and leaves the next word in "token" (NULL at end of
 * line); testToken()/TestToken() compare the current token
 * case-sensitively / case-insensitively, guarding against a NULL token.
 */
#define GetToken() (line = get_word(line, &token))
#define testToken(word) ((token != NULL) && (strcmp(token, word) == 0))
#define TestToken(word) ((token != NULL) && (strcasecmp(token, word) == 0))
+
+static void printResult(int res, char *what, char *name)
+{
+ if (res == 0)
+ {
+ if (name)
+ elog(NOTICE, "Running: %s %s\n", what, name);
+ else
+ elog(NOTICE, "Running: %s\n", what);
+ }
+ else
+ {
+ if (name)
+ elog(NOTICE, "Not running: %s %s\n", what, name);
+ else
+ elog(NOTICE, "Not running: %s\n", what);
+ }
+}
+
+static void monitor_gtm_master(void)
+{
+ return(printResult(do_gtm_ping(sval(VAR_gtmMasterServer), atoi(sval(VAR_gtmMasterPort))), "gtm master", NULL));
+}
+
+static void monitor_gtm_slave(void)
+{
+ if (doesExist(VAR_gtmSlaveServer, 0) && doesExist(VAR_gtmSlavePort, 0))
+ return(printResult(do_gtm_ping(sval(VAR_gtmSlaveServer), atoi(sval(VAR_gtmSlavePort))), "gtm slave", NULL));
+}
+
/*
 * Ping each of the named GTM proxies and report whether it is running.
 *
 * nodeList is a NULL-terminated array of node names; wildcard expansion
 * is performed by makeActualNodeList().  Names that are not configured
 * as GTM proxies are reported as errors and skipped.
 *
 * NOTE(review): actualNodeList is never freed here (the sibling monitor_*
 * functions share this pattern) -- presumably tolerated in this
 * short-lived tool; confirm the ownership rules of makeActualNodeList().
 */
static void monitor_gtm_proxy(char **nodeList)
{
	char **actualNodeList;
	int ii;
	int idx;

	actualNodeList = makeActualNodeList(nodeList);
	for (ii = 0; actualNodeList[ii]; ii++)
	{
		/* Reject names that are not configured as GTM proxies. */
		if ((idx = gtmProxyIdx(actualNodeList[ii])) < 0)
		{
			elog(ERROR, "ERROR: %s is not a gtm proxy.\n", actualNodeList[ii]);
			continue;
		}
		printResult(do_gtm_ping(aval(VAR_gtmProxyServers)[idx], atoi(aval(VAR_gtmProxyPorts)[idx])),
					"gtm proxy", actualNodeList[ii]);
	}
}
+
+
+static void monitor_coordinator_master(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = coordIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator\n", actualNodeList[ii]);
+ continue;
+ }
+ printResult(pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]),
+ "coordinator master", actualNodeList[ii]);
+ }
+}
+
/*
 * Ping the slave of each named coordinator.  Requires coordinator slaves
 * to be enabled globally (VAR_coordSlave == y) and configured per node.
 * Master and slave share a port, so VAR_coordPorts is used for the slave.
 */
static void monitor_coordinator_slave(char **nodeList)
{
	char **actualNodeList;
	int ii;
	int idx;

	if (!isVarYes(VAR_coordSlave))
	{
		elog(ERROR, "ERROR: coordinator slave is not configured.\n");
		return;
	}
	actualNodeList = makeActualNodeList(nodeList);
	for (ii = 0; actualNodeList[ii]; ii++)
	{
		if ((idx = coordIdx(actualNodeList[ii])) < 0)
		{
			elog(ERROR, "ERROR: %s is not a coordinator\n", actualNodeList[ii]);
			continue;
		}
		/* Need to check again if the slave is configured */
		if (!doesExist(VAR_coordSlaveServers, idx) || is_none(aval(VAR_coordSlaveServers)[idx]))
			elog(ERROR, "ERROR: coordinator slave %s is not configured\n", actualNodeList[ii]);
		else
			printResult(pingNode(aval(VAR_coordSlaveServers)[idx], aval(VAR_coordPorts)[idx]),
						"coordinator slave", actualNodeList[ii]);
	}
}
+
+static void monitor_coordinator(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = coordIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator\n", actualNodeList[ii]);
+ continue;
+ }
+ printResult(pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]),
+ "coordinator master", actualNodeList[ii]);
+ if (doesExist(VAR_coordSlaveServers, idx) && !is_none(aval(VAR_coordSlaveServers)[idx]))
+ printResult(pingNode(aval(VAR_coordSlaveServers)[idx], aval(VAR_coordPorts)[idx]),
+ "coordinatr slave", actualNodeList[ii]);
+ }
+}
+static void monitor_datanode_master(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = datanodeIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode\n", actualNodeList[ii]);
+ continue;
+ }
+ printResult(pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]),
+ "datanode master", actualNodeList[ii]);
+ }
+}
+
/*
 * Ping the slave of each named datanode.  Requires datanode slaves to be
 * enabled globally (VAR_datanodeSlave == y) and configured per node.
 * Master and slave share a port, so VAR_datanodePorts is used for both.
 */
static void monitor_datanode_slave(char **nodeList)
{
	char **actualNodeList;
	int ii;
	int idx;

	if (!isVarYes(VAR_datanodeSlave))
	{
		elog(ERROR, "ERROR: datanode slave is not configured.\n");
		return;
	}
	actualNodeList = makeActualNodeList(nodeList);
	for (ii = 0; actualNodeList[ii]; ii++)
	{
		if ((idx = datanodeIdx(actualNodeList[ii])) < 0)
		{
			elog(ERROR, "ERROR: %s is not a datanode\n", actualNodeList[ii]);
			continue;
		}
		/* Even with slaves enabled globally, this node may not have one. */
		if (doesExist(VAR_datanodeSlaveServers, idx) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
			printResult(pingNode(aval(VAR_datanodeSlaveServers)[idx], aval(VAR_datanodePorts)[idx]),
					"datanode slave", actualNodeList[ii]);
		else
			elog(ERROR, "ERROR: datanode slave %s is not configured.\n", actualNodeList[ii]);
	}
}
+
/*
 * Ping both the master and, when configured, the slave of each named
 * datanode and report their status.  Master and slave share a port, so
 * VAR_datanodePorts serves for both.
 */
static void monitor_datanode(char **nodeList)
{
	char **actualNodeList;
	int ii;
	int idx;

	actualNodeList = makeActualNodeList(nodeList);
	for (ii = 0; actualNodeList[ii]; ii++)
	{
		if ((idx = datanodeIdx(actualNodeList[ii])) < 0)
		{
			elog(ERROR, "ERROR: %s is not a datanode\n", actualNodeList[ii]);
			continue;
		}
		printResult(pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]),
					"datanode master", actualNodeList[ii]);
		/* The slave is pinged only when one is configured for this node. */
		if (doesExist(VAR_datanodeSlaveServers, idx) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
			printResult(pingNode(aval(VAR_datanodeSlaveServers)[idx], aval(VAR_datanodePorts)[idx]),
						"datanode slave", actualNodeList[ii]);
	}
}
+
/*
 * Monitor a list of nodes whose kinds are not known in advance: each name
 * is classified with getNodeType() and dispatched to the matching
 * monitor_* routine.  Unknown names are reported and skipped.
 */
static void monitor_something(char **nodeList)
{
	char **actualNodeList;
	int ii;
	char *wkNodeList[2];	/* scratch one-element, NULL-terminated list */
	NodeType type;

	wkNodeList[1] = NULL;
	actualNodeList = makeActualNodeList(nodeList);
	for (ii = 0; actualNodeList[ii]; ii++)
	{
		if ((type = getNodeType(actualNodeList[ii])) == NodeType_GTM)
		{
			/* There is only one GTM, so the name itself is not needed. */
			monitor_gtm_master();
			if (isVarYes(VAR_gtmSlave))
				monitor_gtm_slave();
			continue;
		}
		else if (type == NodeType_GTM_PROXY)
		{
			wkNodeList[0] = actualNodeList[ii];
			monitor_gtm_proxy(wkNodeList);
			continue;
		}
		else if (type == NodeType_COORDINATOR)
		{
			wkNodeList[0] = actualNodeList[ii];
			monitor_coordinator(wkNodeList);
			continue;
		}
		else if (type == NodeType_DATANODE)
		{
			wkNodeList[0] = actualNodeList[ii];
			monitor_datanode(wkNodeList);
			continue;
		}
		else
		{
			elog(ERROR, "ERROR: %s is not found in any node.\n", actualNodeList[ii]);
			continue;
		}
	}
}
+
+
+
+void do_monitor_command(char *line)
+{
+ char *token;
+ int rc = 0;
+
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: no monitor command options found.\n");
+ return;
+ }
+ if (TestToken("gtm"))
+ {
+ if (!GetToken() || TestToken("all"))
+ {
+ /* Ping GTM */
+ monitor_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ monitor_gtm_slave();
+ }
+ else if (TestToken("master"))
+ monitor_gtm_master();
+ else if (TestToken("slave"))
+ {
+ if (isVarYes(VAR_gtmSlave))
+ monitor_gtm_slave();
+ else
+ elog(ERROR, "ERROR: gtm slave is not configured.\n"), rc=-1;
+ }
+ else
+ elog(ERROR, "Invalid monitor gtm command option.\n"), rc=-1;
+ return;
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ if (!GetToken() || TestToken("all"))
+ monitor_gtm_proxy(aval(VAR_gtmProxyNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_gtm_proxy(nodeList);
+ CleanArray(nodeList);
+ }
+ return;
+ }
+ else if (TestToken("coordinator"))
+ {
+ if (!GetToken() || TestToken("all"))
+ {
+ monitor_coordinator_master(aval(VAR_coordNames));
+ if (isVarYes(VAR_coordSlave))
+ monitor_coordinator_slave(aval(VAR_coordNames));
+ return;
+ }
+ else if (TestToken("master"))
+ {
+ if (!GetToken() || TestToken("all"))
+ monitor_coordinator_master(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_coordinator_master(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if (!isVarYes(VAR_coordSlave))
+ elog(ERROR, "ERROR: coordinator slave is not configured.\n"), rc = -1;
+ else
+ if (!GetToken() || TestToken("all"))
+ monitor_coordinator_slave(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_coordinator_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList= NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ monitor_coordinator(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("datanode"))
+ {
+ if (!GetToken() || TestToken("all"))
+ {
+ monitor_datanode_master(aval(VAR_datanodeNames));
+ if (isVarYes(VAR_coordSlave))
+ monitor_datanode_slave(aval(VAR_datanodeNames));
+ }
+ else if (TestToken("master"))
+ {
+ if (!GetToken() || TestToken("all"))
+ monitor_datanode_master(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_datanode_master(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if (!isVarYes(VAR_coordSlave))
+ elog(ERROR, "ERROR: datanode slave is not configured.\n"), rc = -1;
+ else
+ if (!GetToken() || TestToken("all"))
+ monitor_datanode_slave(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_datanode_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList= NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ monitor_datanode(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("all"))
+ {
+ monitor_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ monitor_gtm_slave();
+ if (isVarYes(VAR_gtmProxy))
+ monitor_gtm_proxy(aval(VAR_gtmProxyNames));
+ monitor_coordinator(aval(VAR_coordNames));
+ monitor_datanode(aval(VAR_datanodeNames));
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_something(nodeList);
+ CleanArray(nodeList);
+ }
+ return;
+}
+
+/*
+ * Ping a given GTM or GTM-proxy
+ */
+int
+do_gtm_ping(char *host, int port)
+{
+ char connect_str[MAXPATH+1];
+ GTM_Conn *conn;
+
+ if (host == NULL)
+ {
+ elog(ERROR, "ERROR: no hostname is specified.\n");
+ return -1;
+ }
+ if (port <= 0)
+ {
+ elog(ERROR, "ERROR: Invalid port number, %d.\n", port);
+ return -1;
+ }
+ sprintf(connect_str, "host=%s port=%d node_name=%s remote_type=%d postmaster=0",
+ host, port, myName, GTM_NODE_COORDINATOR);
+ if ((conn = PQconnectGTM(connect_str)) == NULL)
+ {
+ elog(DEBUG3, "DEBUG3: Could not connect to %s, %d\n", host, port);
+ return -1;
+ }
+ GTMPQfinish(conn);
+ return 0;
+}
diff --git a/contrib/pgxc_ctl/monitor.h b/contrib/pgxc_ctl/monitor.h
new file mode 100644
index 0000000000..f78a864624
--- /dev/null
+++ b/contrib/pgxc_ctl/monitor.h
@@ -0,0 +1,18 @@
/*-------------------------------------------------------------------------
 *
 * monitor.h
 *
 * Monitoring module of Postgres-XC configuration and operation tool.
 *
 *
 * Copyright (c) 2013 Postgres-XC Development Group
 *
 *-------------------------------------------------------------------------
 */
#ifndef MONITOR_H
#define MONITOR_H

/* Parse and execute a "monitor ..." command line (implemented in monitor.c). */
extern void do_monitor_command(char *line);
/* Ping a GTM or GTM-proxy at host:port; returns 0 if running, -1 otherwise. */
extern int do_gtm_ping(char *host, int port);

#endif /* MONITOR_H */
diff --git a/contrib/pgxc_ctl/pgxc_ctl.bash b/contrib/pgxc_ctl/pgxc_ctl.bash
new file mode 100755
index 0000000000..6cf5b95d85
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl.bash
@@ -0,0 +1,5361 @@
+#!/bin/bash
+#
+# TODO
+# * Backup configuration file (at update, too) to some remote site for pgxc_ctl HA feature too.
+# * Write output of *_ctl, intdb and intgtm result to log files
+# * Write every operation to the log. Log file can be specified with --l file or --log file
+# * Configure log level
+# * Switch log log file
+# * Log option to the configuration file so that this can be failed over.
+# * Log to a remote server?
+# * Multiple log?
+#
+# Configuration file. Configuration file can be specified as -c option of
+# the command like, or PGXCCONFIG environment variable. If both are
+# not specified, the following configuration file will be used.
+#
+# Change in the cluster status due to failover will be added to the configuration file so that
+# new master can be invoked as the master when restarted.
+#
+# All such addition will be tagged with proper comment and date/time info. If you'd like to
+# cancel such changes, you can remove or comment-out such additional lines.
+#
+#
+#==========================================================================================================================
+#
+# Configuration Section
+#
+# This section should be in the file $configFile for
+# user's configuration.
+#
+# Several assumptions:
+# 1) configuration file will be set to data directory.
+# configuration file name is fixed to postgresql.conf
+# 2) pg_hba.conf will be set to data directory. File name is
+# fixed to pg_hba.conf
+#
+#================================================================
+# MEMO
+#
+# max_connections, min_pool_size, max_pool_size --> should be configurable!
+# They're not cluster specific. So we may give a chance to include
+# these specific options to be included from external files.
+# They should not change by failover so they just have to be
+# configured at first time only.
+#===============================================================
+#
+#---- Configuration File
+pgxcInstallDir=$HOME/pgxc
+configFile=$pgxcInstallDir/pgxcConf
+#---- OVERALL -----------------------------------------------------------------------------------------------------------
+#
+pgxcOwner=koichi # owner of the Postgres-XC database cluster. Here, we use this
+ # both as linus user and database user. This must be
+ # the super user of each coordinator and datanode.
+pgxcUser=$pgxcOwner # OS user of Postgres-XC owner
+
+
+tmpDir=/tmp # temporary dir used in XC servers
+localTmpDir=$tmpDir # temporary dir used here locally
+
+logOpt=y # If you want log
+logDir=$pgxcInstallDir/pgxc_ctl_log # Directory to write pgxc_ctl logs
+
+configBackup=y # If you want config file backup
+configBackupHost=pgxc-linker # host to backup config file
+configBackupDir=$pgxcInstallDir
+configBackupFile=$configFile # Backup file name
+
+#---- GTM --------------------------------------------------------------------------------------------------------------
+
+# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.
+# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update
+# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command
+# will not stop the current GTM. It is up to the operator.
+
+#---- Overall -------
+gtmName=gtm
+
+#---- GTM Master -----------------------------------------------
+
+#---- Overall ----
+gtmMasterServer=node13
+gtmMasterPort=20001
+gtmMasterDir=$HOME/pgxc/nodes/gtm
+
+#---- Configuration ---
+gtmExtraConfig=none # Will be added gtm.conf for both Master and Slave (done at initilization only)
+gtmMasterSpecificExtraConfig=none # Will be added to Master's gtm.conf (done at initialization only)
+
+#---- GTM Slave -----------------------------------------------
+
+# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave
+# for backup.
+
+#---- Overall ------
+gtmSlave=y # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and
+ # all the following variables will be reset.
+gtmSlaveServer=node12 # value none means GTM slave is not available. Give none if you don't configure GTM Slave.
+gtmSlavePort=20001 # Not used if you don't configure GTM slave.
+gtmSlaveDir=$HOME/pgxc/nodes/gtm # Not used if you don't configure GTM slave.
+# Please note that when you have GTM failover, then there will be no slave available until you configure the slave
+# again. (pgxc_add_gtm_slave function will handle it)
+
+#---- Configuration ----
+gtmSlaveSpecificExtraConfig=none # Will be added to Slave's gtm.conf (done at initialization only)
+
+#---- GTM Proxy -------------------------------------------------------------------------------------------------------
+# GTM proxy will be selected based upon which server each component runs on.
+# When fails over to the slave, the slave inherits its master's gtm proxy. It should be
+# reconfigured based upon the new location.
+#
+# To do so, slave should be restarted. So pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart
+#
+# You don't have to configure GTM Proxy if you don't configure GTM slave or you are happy if every component connects
+# to GTM Master directly. If you configure GTM slave, you must configure GTM proxy too.
+
+#---- Shortcuts ------
+gtmProxyDir=$HOME/pgxc/nodes/gtm_pxy
+
+#---- Overall -------
+gtmProxy=y # Specify y if you conifugre at least one GTM proxy. You may not configure gtm proxies
+ # only when you dont' configure GTM slaves.
+ # If you specify this value not to y, the following parameters will be set to default empty values.
+ # If we find there're no valid Proxy server names (means, every servers are specified
+ # as none), then gtmProxy value will be set to "n" and all the entries will be set to
+ # empty values.
+gtmProxyNames=(gtm_pxy1 gtm_pxy2 gtm_pxy3 gtm_pxy4) # No used if it is not configured
+gtmProxyServers=(node06 node07 node08 node09) # Specify none if you dont' configure it.
+gtmProxyPorts=(20001 20001 20001 20001) # Not used if it is not configured.
+gtmProxyDirs=($gtmProxyDir $gtmProxyDir $gtmProxyDir $gtmProxyDir) # Not used if it is not configured.
+
+#---- Configuration ----
+gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy
+gtmPxySpecificExtraConfig=(none none none none)
+
+#---- Coordinators ----------------------------------------------------------------------------------------------------
+
+#---- shortcuts ----------
+coordMasterDir=$HOME/pgxc/nodes/coord
+coordSlaveDir=$HOME/pgxc/nodes/coord_slave
+coordArchLogDir=$HOME/pgxc/nodes/coord_archlog
+
+#---- Overall ------------
+coordNames=(coord1 coord2 coord3 coord4) # Master and slave use the same name
+coordPorts=(20004 20005 20004 20005) # Master and slave use the same port
+poolerPorts=(20010 20011 20010 20011) # Master and slave use the same pooler port
+coordPgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This entry allows only $pgxcOwner to connect.
+ # If you'd like to setup another connection, you should
+ # supply these entries through files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and suplly what you want using coordExtraPgHba
+# and/or coordSpecificExtraPgHba variables.
+
+#---- Master -------------
+coordMasterServers=(node06 node07 node08 node09) # none means this master is not available
+coordMasterDirs=($coordMasterDir $coordMasterDir $coordMasterDir $coordMasterDir)
+coordMaxWALsernder=5 # max_wal_senders: needed to configure slave. If zero value is specified,
+ # it is expected to supply this parameter explicitly by external files
+ # specified in the following. If you don't configure slaves, leave this value to zero.
+coordMaxWALSenders=($coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder)
+ # max_wal_senders configuration for each coordinator.
+
+#---- Slave -------------
+coordSlave=y # Specify y if you configure at least one coordiantor slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then coordSlave value will be set to n and all the following values will be set to
+ # empty values.
+coordSlaveServers=(node07 node08 node09 node06) # none means this slave is not available
+coordSlaveDirs=($coordSlaveDir $coordSlaveDir $coordSlaveDir $coordSlaveDir)
+coordArchLogDirs=($coordArchLogDir $coordArchLogDir $coordArchLogDir $coordArchLogDir)
+
+#---- Configuration files---
+# Need these when you'd like setup specific non-default configuration
+# These files will go to corresponding files for the master.
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries
+# Or you may supply these files manually.
+coordExtraConfig=none # Extra configuration file for coordinators. This file will be added to all the coordinators'
+ # postgresql.conf
+coordSpecificExraConfig=(none none none none)
+coordExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the coordinators' pg_hba.conf
+coordSpecificExtraPgHba=(none none none none)
+
+#---- Datanodes -------------------------------------------------------------------------------------------------------
+
+#---- Shortcuts --------------
+datanodeMasterDir=$HOME/pgxc/nodes/dn_master
+datanodeSlaveDir=$HOME/pgxc/nodes/dn_slave
+datanodeArchLogDir=$HOME/pgxc/nodes/datanode_archlog
+
+#---- Overall ---------------
+#primaryDatanode=datanode1 # Primary Node.
+# At present, xc has a problem to issue ALTER NODE against the primary node. Until it is fixed, the test will be done
+# without this feature.
+primaryDatanode=datanode1 # Primary Node.
+datanodeNames=(datanode1 datanode2 datanode3 datanode4)
+datanodePorts=(20008 20009 20008 20009) # Master and slave use the same port!
+datanodePoolerPorts=(20011 20012 20011 20012) # Master and slave use the same port!
+datanodePgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This list sets up pg_hba.conf for $pgxcOwner user.
+ # If you'd like to setup other entries, supply them
+ # through extra configuration files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and suplly what you want using datanodeExtraPgHba
+# and/or datanodeSpecificExtraPgHba variables.
+
+#---- Master ----------------
+datanodeMasterServers=(node06 node07 node08 node09) # none means this master is not available.
+ # This means that there should be the master but is down.
+ # The cluster is not operational until the master is
+ # recovered and ready to run.
+datanodeMasterDirs=($datanodeMasterDir $datanodeMasterDir $datanodeMasterDir $datanodeMasterDir)
+datanodeMaxWalSender=5 # max_wal_senders: needed to configure slave. If zero value is
+ # specified, it is expected this parameter is explicitly supplied
+ # by external configuration files.
+ # If you don't configure slaves, leave this value zero.
+datanodeMaxWalSenders=($datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender)
+ # max_wal_senders configuration for each datanode
+
+#---- Slave -----------------
+datanodeSlave=y # Specify y if you configure at least one coordiantor slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then datanodeSlave value will be set to n and all the following values will be set to
+ # empty values.
+datanodeSlaveServers=(node07 node08 node09 node06) # value none means this slave is not available
+datanodeSlaveDirs=($datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir)
+datanodeArchLogDirs=( $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir )
+
+# ---- Configuration files ---
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.
+# These files will go to corresponding files for the master.
+# Or you may supply these files manually.
+datanodeExtraConfig=none # Extra configuration file for datanodes. This file will be added to all the
+ # datanodes' postgresql.conf
+datanodeSpecificExtraConfig=(none none none none)
+datanodeExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the datanodes' postgresql.conf
+datanodeSpecificExtraPgHba=(none none none none)
+
+#
+# End of Configuration Section
+#
+#==========================================================================================================================
+
+# Common variables ######################################################################
+xc_prompt='PGXC$ '
+interactive=n
+verbose=n
+progname=$0
+
+# Create config file template
+#
+# If you change the structure of configuration file section, you must reflect the change in the part below.
+#
+function create_config_file_template
+{
+ cat > $configFile <<EOF
+#!/bin/bash
+#==========================================================================================================================
+#
+# Configuration Section
+#
+# This section should be in the file $configFile for
+# user's configuration.
+#
+# Several assumptons:
+# 1) configuration file will be set to data directory.
+# configuration file name is fixed to postgresql.conf
+# 2) pg_hba.conf will be set to data directory. File name is
+# fixed to pg_hba.conf
+#
+#================================================================
+# MEMO
+#
+# max_connections, min_pool_size, max_pool_size --> should be configurable!
+# They're not cluster specific. So we may give a chance to include
+# these specific options to be included from external files.
+# They should not change by failover so they just have to be
+# configured at first time only.
+#===============================================================
+#
+#---- Configuration File
+#
+# If you're using non-default config file, you should specify config file each time you invoke pgxc_ctl
+pgxcInstallDir=$HOME/pgxc
+configFile=$pgxcInstallDir/pgxcConf
+#---- OVERALL -----------------------------------------------------------------------------------------------------------
+#
+pgxcOwner=koichi # owner of the Postgres-XC database cluster. Here, we use this
+ # both as linus user and database user. This must be
+ # the super user of each coordinator and datanode.
+pgxcUser=$pgxcOwner # OS user of Postgres-XC owner
+
+
+tmpDir=/tmp # temporary dir used in XC servers
+localTmpDir=$tmpDir # temporary dir used here locally
+
+logOpt=y # If you want log
+logDir=$pgxcInstallDir/pgxc_ctl_log # Directory to write pgxc_ctl logs
+
+configBackup=y # If you want config file backup
+configBackupHost=pgxc-linker # host to backup config file
+configBackupFile=$configFile # Backup file name
+
+#---- GTM --------------------------------------------------------------------------------------------------------------
+
+# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.
+# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update
+# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command
+# will not stop the current GTM. It is up to the operator.
+
+#---- Overall -------
+gtmName=gtm
+
+#---- GTM Master -----------------------------------------------
+
+#---- Overall ----
+gtmMasterServer=node13
+gtmMasterPort=20001
+gtmMasterDir=$HOME/pgxc/nodes/gtm
+
+#---- Configuration ---
+gtmExtraConfig=none # Will be added gtm.conf for both Master and Slave (done at initilization only)
+gtmMasterSpecificExtraConfig=none # Will be added to Master's gtm.conf (done at initialization only)
+
+#---- GTM Slave -----------------------------------------------
+
+# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave
+# for backup.
+
+#---- Overall ------
+gtmSlave=y # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and
+ # all the following variables will be reset.
+gtmSlaveServer=node12 # value none means GTM slave is not available. Give none if you don't configure GTM Slave.
+gtmSlavePort=20001 # Not used if you don't configure GTM slave.
+gtmSlaveDir=$HOME/pgxc/nodes/gtm # Not used if you don't configure GTM slave.
+# Please note that when you have GTM failover, then there will be no slave available until you configure the slave
+# again. (pgxc_add_gtm_slave function will handle it)
+
+#---- Configuration ----
+gtmSlaveSpecificExtraConfig=none # Will be added to Slave's gtm.conf (done at initialization only)
+
+#---- GTM Proxy -------------------------------------------------------------------------------------------------------
+# GTM proxy will be selected based upon which server each component runs on.
+# When fails over to the slave, the slave inherits its master's gtm proxy. It should be
+# reconfigured based upon the new location.
+#
+# To do so, slave should be restarted. So pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart
+#
+# You don't have to configure GTM Proxy if you dont' configure GTM slave or you are happy if every component connects
+# to GTM Master directly. If you configure GTL slave, you must configure GTM proxy too.
+
+#---- Shortcuts ------
+gtmProxyDir=$HOME/pgxc/nodes/gtm_pxy
+
+#---- Overall -------
+gtmProxy=y # Specify y if you conifugre at least one GTM proxy. You may not configure gtm proxies
+ # only when you dont' configure GTM slaves.
+ # If you specify this value not to y, the following parameters will be set to default empty values.
+ # If we find there're no valid Proxy server names (means, every servers are specified
+ # as none), then gtmProxy value will be set to "n" and all the entries will be set to
+ # empty values.
+gtmProxyNames=(gtm_pxy1 gtm_pxy2 gtm_pxy3 gtm_pxy4) # No used if it is not configured
+gtmProxyServers=(node06 node07 node08 node09) # Specify none if you dont' configure it.
+gtmProxyPorts=(20001 20001 20001 20001) # Not used if it is not configured.
+gtmProxyDirs=($gtmProxyDir $gtmProxyDir $gtmProxyDir $gtmProxyDir) # Not used if it is not configured.
+
+#---- Configuration ----
+gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy
+gtmPxySpecificExtraConfig=(none none none none)
+
+#---- Coordinators ----------------------------------------------------------------------------------------------------
+
+#---- shortcuts ----------
+coordMasterDir=$HOME/pgxc/nodes/coord
+coordSlaveDir=$HOME/pgxc/nodes/coord_slave
+coordArchLogDir=$HOME/pgxc/nodes/coord_archlog
+
+#---- Overall ------------
+coordNames=(coord1 coord2 coord3 coord4) # Master and slave use the same name
+coordPorts=(20004 20005 20004 20005) # Master and slave use the same port
+poolerPorts=(20010 20011 20010 20011) # Master and slave use the same pooler port
+coordPgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This entry allows only $pgxcOwner to connect.
+ # If you'd like to setup another connection, you should
+ # supply these entries through files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and suplly what you want using coordExtraPgHba
+# and/or coordSpecificExtraPgHba variables.
+
+#---- Master -------------
+coordMasterServers=(node06 node07 node08 node09) # none means this master is not available
+coordMasterDirs=($coordMasterDir $coordMasterDir $coordMasterDir $coordMasterDir)
+coordMaxWALsernder=5 # max_wal_senders: needed to configure slave. If zero value is specified,
+ # it is expected to supply this parameter explicitly by external files
+ # specified in the following. If you don't configure slaves, leave this value to zero.
+coordMaxWALSenders=($coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder)
+ # max_wal_senders configuration for each coordinator.
+
+#---- Slave -------------
+coordSlave=y			# Specify y if you configure at least one coordinator slave.  Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then coordSlave value will be set to n and all the following values will be set to
+ # empty values.
+coordSlaveServers=(node07 node08 node09 node06) # none means this slave is not available
+coordSlaveDirs=($coordSlaveDir $coordSlaveDir $coordSlaveDir $coordSlaveDir)
+coordArchLogDirs=($coordArchLogDir $coordArchLogDir $coordArchLogDir $coordArchLogDir)
+
+#---- Configuration files---
+# Need these when you'd like setup specific non-default configuration
+# These files will go to corresponding files for the master.
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries
+# Or you may supply these files manually.
+coordExtraConfig=none # Extra configuration file for coordinators. This file will be added to all the coordinators'
+ # postgresql.conf
+coordSpecificExraConfig=(none none none none)
+coordExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the coordinators' pg_hba.conf
+coordSpecificExtraPgHba=(none none none none)
+
+#---- Datanodes -------------------------------------------------------------------------------------------------------
+
+#---- Shortcuts --------------
+datanodeMasterDir=$HOME/pgxc/nodes/dn_master
+datanodeSlaveDir=$HOME/pgxc/nodes/dn_slave
+datanodeArchLogDir=$HOME/pgxc/nodes/datanode_archlog
+
+#---- Overall ---------------
+#primaryDatanode=datanode1 # Primary Node.
+# At present, xc has a problem to issue ALTER NODE against the primary node. Until it is fixed, the test will be done
+# without this feature.
+primaryDatanode=N/A # Primary Node.
+datanodeNames=(datanode1 datanode2 datanode3 datanode4)
+datanodePorts=(20008 20009 20008 20009) # Master and slave use the same port!
+datanodePoolerPorts=(20011 20012 20011 20012) # Master and slave use the same port!
+datanodePgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This list sets up pg_hba.conf for $pgxcOwner user.
+ # If you'd like to setup other entries, supply them
+ # through extra configuration files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and supply what you want using datanodeExtraPgHba
+# and/or datanodeSpecificExtraPgHba variables.
+
+#---- Master ----------------
+datanodeMasterServers=(node06 node07 node08 node09) # none means this master is not available.
+ # This means that there should be the master but is down.
+ # The cluster is not operational until the master is
+ # recovered and ready to run.
+datanodeMasterDirs=($datanodeMasterDir $datanodeMasterDir $datanodeMasterDir $datanodeMasterDir)
+datanodeMaxWalSender=5 # max_wal_senders: needed to configure slave. If zero value is
+ # specified, it is expected this parameter is explicitly supplied
+ # by external configuration files.
+ # If you don't configure slaves, leave this value zero.
+datanodeMaxWalSenders=($datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender)
+ # max_wal_senders configuration for each datanode
+
+#---- Slave -----------------
+datanodeSlave=y		# Specify y if you configure at least one datanode slave.  Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then datanodeSlave value will be set to n and all the following values will be set to
+ # empty values.
+datanodeSlaveServers=(node07 node08 node09 node06) # value none means this slave is not available
+datanodeSlaveDirs=($datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir)
+datanodeArchLogDirs=( $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir )
+
+# ---- Configuration files ---
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.
+# These files will go to corresponding files for the master.
+# Or you may supply these files manually.
+datanodeExtraConfig=none # Extra configuration file for datanodes. This file will be added to all the
+ # datanodes' postgresql.conf
+datanodeSpecificExtraConfig=(none none none none)
+datanodeExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the datanodes' postgresql.conf
+datanodeSpecificExtraPgHba=(none none none none)
+
+#
+# End of Configuration Section
+#
+#==========================================================================================================================
+EOF
+
+ chmod +x $configFile
+}
+#============================================================
+#
+# Common functions
+#
+#============================================================
+
+logfile=none;
+function log_echo
+{
+ if [ $logOpt == "y" ]; then
+ echo $* >> $logfile
+ fi
+}
+
+function log_echo_with_date
+{
+ if [ $logOpt == "y" ]; then
+ echo `date +%y%m%d,%H:%M:%S` $* >> $logfile
+ fi
+}
+
+function set_log_file
+{
+ mkdir -p $logDir
+ if [ -d $logDir ]; then
+ logfile=$logDir/pgxc_ctl_$datetime.log
+ else
+ logOpt=n
+ eecho Log directory $logDir could not found or is not a directory.
+ fi
+}
+
+function change_log_file
+{
+ if [ $logOpt == y ]; then
+ logfile=$logDir/$1
+ else
+ eecho Log disabled. Cannot change log file.
+ fi
+}
+
+function vecho
+{
+ if [ "$verbose" == y ]; then
+ echo $*
+ fi
+ log_echo $*
+}
+
+function iecho
+{
+ if [ "$interactive" == y ]; then
+ echo $*
+ fi
+}
+
+function eecho
+{
+ echo $*
+ log_echo $*
+}
+
+function doit
+{
+ vecho $*
+ $*
+}
+
+function doall
+{
+ local i
+ vecho doall target: "(" ${allServers[@]} ")"
+ for (( i=0; i< ${#allServers[@]}; i++ )); do
+ if [ ${allServers[$i]} != none ] && [ ${allServers[$i]} != N/A ]; then
+ vecho "... ${allServers[$i]}: $* ..."
+ ssh $pgxcUser@${allServers[$i]} $*
+ fi
+ done
+}
+
+function Doall
+{
+ local i
+ vecho Doall target: "(" ${DoallTarget[@]} ")"
+ for (( i=0; i< ${#DoallTarget[@]}; i++ )); do
+ if [ ${DoallTarget[$i]} != none ] && [ ${DoallTarget[$i]} != N/A ]; then
+ vecho "${DoallTarget[$i]}: $* ..."
+ ssh $pgxcUser@${DoallTarget[$i]} $*
+ fi
+ done
+}
+
+function cpall
+{
+ local i
+ vecho cpall target: "(" ${allServers[@]} ")"
+ for (( i=0; i < ${#allServers[@]}; i++ )); do
+ if [ ${allServers[$i]} != none ] && [ ${allServers[$i]} != N/A ]; then
+ vecho scp -r $1 $pgxcUser@${allServers[$i]}:$2
+ scp -r $1 $pgxcUser@${allServers[$i]}:$2
+ fi
+ done
+}
+
+function Cpall
+{
+ local i
+ vecho Cpall target: "(" ${CpallTarget[@]} ")"
+ for (( i=0; i< ${#CpallTarget[@]}; i++ )); do
+ if [ ${CpallTarget[$i]} != none ] && [ ${CpallTarget[$i]} != N/A ]; then
+ vecho scp -r $1 $pgxcUser@${CpallTarget[$i]}:$2
+ scp -r $1 $pgxcUser@${CpallTarget[$i]}:$2
+ fi
+ done
+}
+
+
+function set_verbose
+{
+ if [ $# -le 0 ]; then
+ echo verbose=$verbose
+ return
+ fi
+ case $1 in
+ y )
+ verbose=y;;
+ n )
+ verbose=n;;
+ on )
+ verbose=y;;
+ off )
+ verbose=n;;
+ * )
+ echo Specify y/n/on/off
+ return 1;;
+ esac
+ iecho Verbose set to $verbose
+ return 0
+}
+
+function set_interactive
+{
+ if [ $# -le 0 ]; then
+ echo interactive=$interactive
+ return
+ fi
+ case $1 in
+ y )
+ interactive=y;;
+ n )
+ interactive=n;;
+ on )
+ interactive=y;;
+ off )
+ interactive=n;;
+ * )
+ echo Specify y/n/on/off
+ return 1;;
+ esac
+ iecho Interactive set to $interactive
+ return 0
+}
+function set_prompt
+{
+ if [ $# -le 0 ]; then
+ echo xc_prompt=$xc_prompt
+ return
+ fi
+ xc_prompt=$1
+ iecho Prompt set to $xc_prompt
+}
+
+function readyesno
+{
+ if [ $# -ne 1 ];then
+ echo n
+ return 1
+ fi
+ local yesno
+ read yesno
+ case $yesno in
+ y )
+ echo y;;
+ yes )
+ echo y;;
+ n )
+ echo n;;
+ no )
+ echo n;;
+ * )
+ echo $1;;
+ esac
+}
+
+#
+# Backups configuration file, especially done when configuraiton file is updated due to
+# failover.
+#
+function pgxc_backup_config_file
+{
+ if [ "$configBackup" == "y" ]; then
+ log_echo Backing up configuration file $configFile to $configBackupHost:$configBackupFile
+ doit ssh $pgxcUser@$configBackupHost mkdir -p $configBackupDir
+ doit scp $configFile $pgxcUser@$configBackupHost:$configBackupFile
+ fi
+}
+datetime=`date +%y%m%d_%H%M`
+immediate="-m fast" # option for pg_ctl stop.
+
+allServers=() # List of all the servers which appear in this configuation.
+
+##############################################################################################################
+#
+# FUNCTIONS
+#
+##############################################################################################################
+
+function create_config_file
+{
+ # The configuration file is just a copy of the above configuraiton section. If you modify the above,
+ # you should reflect it to the below too.
+ cat > $configFile <<EOF
+#!/bin/bash
+# The context will be supplied finally...
+EOF
+}
+
+#
+# A couple of following functions helps to kill all the processes of specified
+# coordinator or datanode. Target coordinator or datanode are identified by
+# the server and the working directory.
+#
+# They depend upon ps output format. It depends upon specific operating system
+# and may need rewrite.
+#
+function extract_pid
+{
+ # Caution: ps format deeply depends upon operating system.
+ # Checked for CentOS (RHEL), Ubuntu 10.4, ubuntu 12.4.
+ local uid
+ local pid
+ local extra
+ read uid pid extra
+ if [ $? == 0 ]; then
+ echo $pid
+ else
+ echo none
+ fi
+}
+
+function get_postmaster_pid
+{
+ # arguments are server and directory
+ # Caution: ps format deeply depends upon operating system.
+ # Checked for CentOS (RHEL), Ubuntu 10.4, ubuntu 12.4.
+ # This assumes that grep extracts at most one line.
+ ssh $pgxcUser@$1 ps -f -C postgres | grep $2 | extract_pid
+}
+
+#
+# Get next xid of the node. Used to determine what xid gtm should start with.
+#
+# $1: host
+# $2: dir
+# Result will be set to the variable nextXid, which is declared as number
+#
+declare -i nextXid
+
+function get_nextXid
+{
+ nextXid=`ssh $pgxcUser@$1 pg_controldata $2 | grep NextXid | sed -e "s%.*/%%"`
+}
+
+function kill_all_child_parent
+{
+ # argument is the target node name and the PID of the parent.
+ if [ $# -ne 2 ]; then
+ iecho Specify nodename and parent PID
+ return 1
+ fi
+ if [ "$2" == "" ] || [ $2 == none ] || [ $2 == N/A ]; then
+ return 1
+ fi
+ ssh $pgxcUser@$1 kill -9 $2 `ssh $pgxcUser@$1 pgrep -P $2`
+}
+
+#----------------------------------------------
+# DEBUG Aid
+#----------------------------------------------
+
+# Debug --> Should be cleaned
+
+DEBUG=n
+
+function set_debug
+{
+ if [ $# -le 0 ]; then
+ iecho Specify y/n/on/off
+ return 1
+ fi
+ case $1 in
+ y )
+ DEBUG=y
+ ;;
+ n )
+ DEBUG=n;;
+ on )
+ DEBUG=y;;
+ off )
+ DEBUG=n;;
+ * )
+ iecho Specify y/n/on/off
+ return 1;;
+ esac
+ iecho Debug mode set to $DEBUG
+ return 0
+}
+
+function funcname
+{
+ if [ "$DEBUG" == y ]; then
+ echo '******** ' "$1() called" ' **********'
+ fi
+}
+
+function decho
+{
+ if [ "$DEBUG" == y ]; then
+ echo $*
+ fi
+}
+
+function ddo
+{
+ if [ "$DEBUG" == y ]; then
+ $*
+ fi
+}
+
+# Extract the server list into ${allServers[@]}
+# Check if there's no duplicate elements in ${allServers[@]}. If not, then add the argument
+# to ${allServers[@]}
+function addServer
+{
+ local append
+ local i
+
+ append=y
+ if [ "$1" == 'none' ] || [ "$i" == N/A ]; then
+ return
+ fi
+ for((i=0; i<${#allServers[@]}; i++)); do
+ if [ ${allServers[$i]} == "$1" ]; then
+ append=n
+ break
+ fi
+ done
+ if [ $append == y ]; then
+ allServers[$i]=$1
+ fi
+}
+
+# Build unique server list
+#
+function makeServerList
+{
+ local i
+
+ # GTM Master
+ if [ $gtmMasterServer != none ]; then
+ addServer $gtmMasterServer
+ fi
+ # GTM Slave
+ if [ $gtmSlaveServer != none ]; then
+ addServer $gtmSlaveServer
+ fi
+ # GTM Proxy
+ for ((i=0; i<${#gtmProxyServers[@]};i++)); do
+ if [ ${gtmProxyServers[$i]} != 'none' -a ${gtmProxyServers[$i]} != "" ]; then
+ addServer ${gtmProxyServers[$i]}
+ fi
+ done
+ # Coordinator Master
+ for ((i=0; i<${#coordMasterServers[@]}; i++)); do
+ if [ ${coordMasterServers[$i]} != none ]; then
+ addServer ${coordMasterServers[$i]}
+ fi
+ done
+ # Coordinator Slave
+ for ((i=0; i<${#coordSlaveServers[@]}; i++)); do
+ if [ ${coordSlaveServers[$i]} != none ]; then
+ addServer ${coordSlaveServers[$i]}
+ fi
+ done
+ # Datanode Master
+ for ((i=0; i<${#datanodeMasterServers[@]}; i++)); do
+ if [ ${datanodeMasterServers[$i]} != none ]; then
+ addServer ${datanodeMasterServers[$i]}
+ fi
+ done
+ # Datanode Slave
+ for ((i=0; i<${#datanodeSlaveServers[@]}; i++)); do
+ if [ ${datanodeSlaveServers[$i]} != none ] ; then
+ addServer ${datanodeSlaveServers[$i]}
+ fi
+ done
+ decho '(' ${allServers[@]} ')'
+}
+
+#### Handle Slave Configurations ###################################
+
+# Set GTM Proxy info unconfigured.
+function gtm_proxy_set_to_no
+{
+ local i
+
+ gtmProxy=n
+ gtmProxyNames=()
+ gtmProxyServers=()
+ gtmProxyPorts=()
+ gtmProxyDirs=()
+ gtmPxySpecificExtraConfig=()
+ gtmPxyExtraConfig=""
+ for ((i=0; i< ${#allServers[@]}; i++)); do
+ gtmProxyNames[$i]="none"
+ gtmProxyServers[$i]="none"
+ gtmProxyPorts[$i]=0
+ gtmProxyDirs[$i]="none"
+ gtmProxySpecificExtraConfig[$i]=""
+ done
+}
+
+# Set Coordinator Slave info unconfigured
+function coord_slave_set_to_no
+{
+ local i
+
+ coordSlave=n
+ coordSlaveServers=()
+ coordSlaveDirs=()
+ coordArchLogDirs=()
+ for ((i=0; i<${#coordMasterServers[@]}; i++)); do
+ coordSlaveServers[$i]=none
+ coordSlaveDirs[$i]=none
+ coordArchLogDirs[$i]=none
+ done
+}
+
+# Set Datanode slave info unconfigured
+function datanode_slave_set_to_no
+{
+ local i
+
+ datanodeSlave=n
+ datanodeSlaveServers=()
+ datanodeSlaveDirs=()
+ datanodeSlaveArchLogDirs=()
+ for ((i=0; i<${#datanodeMasterServers[@]}; i++)); do
+ datanodeSlaveServers[$i]=none
+ datanodeSlaveDirs[$i]=none
+ datanodeSlaveArchLogDirs[$i]=none
+ done
+}
+
+# Handle the case where slaves are not configured. --> Construct empty configuration for them
+# We assume that all the server list has been constructed.
+
+function handle_no_slaves
+{
+ local i
+ local isEmpty
+
+ # GTM slave
+ if [ $gtmSlave != y ] || [ "$gtmSlaveServer" == none ] || [ "$gtmSlaveServer" == N/A ]; then
+ gtmSlave=n
+ gtmSlaveServer=none
+ gtmSlavePort=0
+ gtmSlaveDir=none
+ fi
+
+ # GTM Proxy
+ if [ $gtmProxy != y ]; then
+ gtm_proxy_set_to_no
+ else
+ isEmpty=y
+ for ((i=0; i<${#gtmProxyServers[@]}; i++)); do
+ if [ ${gtmProxyServers[$i]} != none ] && [ ${gtmProxyServers[$i]} != N/A ]; then
+ isEmpty=n
+ break
+ fi
+ done
+ if [ "$isEmpty" == y ]; then
+ gtm_proxy_set_to_no
+ gtmProxy=n
+ fi
+ fi
+
+ # Coordinator
+ if [ $coordSlave != y ]; then
+ coord_slave_set_to_no
+ else
+ isEmpty=y
+ for ((i=0; i<${#coordSlaveServers[@]}; i++)); do
+ if [ ${coordSlaveServers[$i]} != none ] && [ ${coordSlaveServers[$i]} != N/A ]; then
+ isEmpty=n
+ break
+ fi
+ done
+ if [ "$isEmpty" == y ]; then
+ coord_slave_set_to_no
+ coordSlave=n
+ fi
+ fi
+
+ # Datanode
+ if [ $datanodeSlave != y ]; then
+ datanode_slave_set_to_no
+ else
+ isEmpty=y
+ for ((i=0; i<${#datanodeSlaveServers[@]}; i++)); do
+ if [ ${datanodeSlaveServers[$i]} != none ] && [ ${coordSlaveServers[$I]} != N/A ]; then
+ isEmpty=n
+ break
+ fi
+ done
+ if [ "$isEmpty" == y ]; then
+ datanode_slave_set_to_no
+ datanodeSlave=n
+ fi
+ fi
+}
+
+
+
+# Check if there're no duplicates in port and working directory assigment
+function verifyResource
+{
+ local i
+ local j
+
+ # Number of array entries
+ # GTM proxies
+ if [ "$gtmProxy" == y ]; then
+ i=${#gtmProxyNames[@]}
+ if [ $i -ne ${#gtmProxyServers[@]} -o $i -ne ${#gtmProxyPorts[@]} -o $i -ne ${#gtmProxyDirs[@]} -o $i -ne ${#gtmPxySpecificExtraConfig[@]} ]; then
+ echo ERROR: Invalid entry numbers in gtm proxy configuration.
+ return 1
+ fi
+ fi
+ # Coordinators
+ i=${#coordNames[@]}
+ if [ $i -ne ${#coordPorts[@]} -o $i -ne ${#poolerPorts[@]} -o $i -ne ${#coordSpecificExraConfig[@]} -o $i -ne ${#coordSpecificExtraPgHba[@]} ]; then
+ echo ERROR: Invalid entry numbers in coordinator configuration.
+ return 1
+ fi
+ if [ $i -ne ${#coordMasterServers[@]} -o $i -ne ${#coordMasterDirs[@]} ]; then
+ echo ERROR: Invalid entry numbers in coordinator configuration.
+ return 1
+ fi
+ if [ "$coordSlave" == y ]; then
+ if [ $i -ne ${#coordSlaveServers[@]} -o $i -ne ${#coordSlaveDirs[@]} -o $i -ne ${#coordArchLogDirs[@]} -o $i -ne ${#coordMaxWALSenders[@]} ]; then
+ echo ERROR: Invalid entry numbers in coordinator configuration.
+ return 1
+ fi
+ fi
+ # Datanodes
+ i=${#datanodeNames[@]}
+ if [ $i -ne ${#datanodePorts[@]} -o $i -ne ${#datanodeSpecificExtraConfig[@]} -o $i -ne ${#datanodeSpecificExtraPgHba[@]} ]; then
+ echo ERROR: Invalid entry numbers in datanode configuration.
+ return 1
+ fi
+ #if XCP
+ if [ $i -ne ${#datanodePoolerPorts[@]} -o $i -ne ${#datanodeSpecificExtraConfig[@]} -o $i -ne ${#datanodeSpecificExtraPgHba[@]} ]; then
+ echo ERROR: Invalid entry numbers in datanode pooler port configuration.
+ return 1
+ fi
+ if [ $i -ne ${#datanodeMasterServers[@]} -o $i -ne ${#datanodeMasterDirs[@]} ]; then
+ echo ERROR: Invalid entry numbers in datanode configuration.
+ return 1
+ fi
+ if [ "$datanodeSlave" == y ]; then
+ if [ $i -ne ${#datanodeSlaveServers[@]} -o $i -ne ${#datanodeSlaveDirs[@]} -o $i -ne ${#datanodeArchLogDirs[@]} -o $i -ne ${#datanodeMaxWalSenders[@]} ]; then
+ echo ERROR: Invalid entry numbers in datanode configuration.
+ return 1
+ fi
+ fi
+
+ # Check if node names don't duplicate
+ # GTM
+ for ((i=0; i<${#gtmProxyNames[@]};i++)); do
+ if [ $gtmName == ${gtmProxyNames[$i]} ]; then
+ echo ERROR: GTM name duplicates one of the GTM Proxies, $gtmName
+ return 1
+ fi
+ done
+ for ((i=0; i<${#coordNames[@]}; i++)); do
+ if [ $gtmName == ${coordNames[$i]} ]; then
+ echo ERROR: GTM name duplicates one of the coordinators, $gtmName
+ return 1
+ fi
+ done
+ for ((i=0; i<${#datanodeNames[@]}; i++)); do
+ if [ $gtmName == ${datanodeNames[$i]} ]; then
+ echo ERROR: GTM name duplicates one of the datanodes, $gtmName
+ return 1
+ fi
+ done
+ # GTM Proxy
+ for ((i=0; i<${#gtmProxyNames[@]}; i++)); do
+ for ((j=$i+1;j<${#gtmProxyNames[@]}; j++)); do
+ if [ ${gtmProxyNames[$i]} == ${gtmProxyNames[$j]} ]; then
+ echo ERROR: GTM proxy name duplicates one of the other GTM proxies, ${gtmProxyNames[$i]}
+ return 1
+ fi
+ done
+ for ((j=0;j<${#coordNames[@]};j++));do
+ if [ ${coordNames[$j]} == ${gtmProxyNames[$i]} ]; then
+ echo ERROR: GTM proxy name duplicates one of the coordinator names, ${gtmProxyNames[$i]}
+ return 1
+ fi
+ done
+ for ((j=0;j<${#datanodeNames[@]};j++));do
+ if [ ${datanodeNames[$j]} == ${gtmProxyNames[$i]} ]; then
+ echo ERROR: GTM proxy name duplicates one of the datanode names, ${gtmProxyNames[$i]}
+ return 1
+ fi
+ done
+ done
+ # Cordinator
+ for ((i=0; i<${#coordNames[@]}; i++)); do
+ for ((j=$i+1; j<${#coordNames[@]}; j++)); do
+ if [ ${coordNames[$i]} == ${coordNames[$j]} ]; then
+ echo ERROR: Coordinator name duplicates on of the other coordinators, ${coordNames[$i]}
+ return 1
+ fi
+ done
+ for ((j=0; j<${#datanodeNames[@]}; j++)); do
+ if [ ${coordNames[$i]} == ${datanodeNames[$j]} ]
+ then
+ echo ERROR: Coordinator name duplicates one of the datanodes, ${coordNames[$i]}
+ return 1
+ fi
+ done
+ done
+ # Datanode
+ for ((i=0; i<${#datanodeNames[@]}; i++)); do
+ for ((j=$i+1; j<${#datanodeNames[@]}; j++)); do
+ if [ ${datanodeNames[$i]} == ${datanodeNames[$j]} ]; then
+ echo ERROR: Datanode name duplicates one of the other datanodes, ${datanodeNames[$i]}
+ return 1
+ fi
+ done
+ done
+ # Check if primary datanode is in datanode name list, or none
+ if [ "$primaryDatanode" == "none" ] || [ "$primaryDatanode" == "N/A" ]; then
+ return 0
+ fi
+ # Primary Datanode: is it specified?
+ local xx
+ xx=n
+ for ((i=0;i<${#datanodeNames[@]};i++));do
+ if [ "$primaryDatanode" == ${datanodeNames[$i]} ]; then
+ xx=y;
+ break;
+ fi
+ done
+ if [ $xx == n ]; then
+ echo ERROR: Primary datanode is not in the datanode list, $primaryDatanode
+ return 1
+ fi
+}
+
+function verifyNode
+{
+ local i
+ local j
+ local Ports
+ local Dirs
+
+ Ports=()
+ Dirs=()
+
+ decho $1
+ # Check if there's no duplicate in port/directory assignment
+ if [ $1 == $gtmMasterServer ]; then
+ Ports=( ${Ports[@]} $gtmMasterPort )
+ Dirs=( ${Dirs[@]} $gtmMasterDir )
+ fi
+ if [ $1 == $gtmSlaveServer ]; then
+ Ports=( ${Ports[@]} $gtmSlavePort )
+ Dirs=( ${Dirs[@]} $gtmSlaveDir )
+ fi
+ for ((i=0; i<${#gtmProxyServers[@]}; i++)); do
+ if [ $1 == ${gtmProxyServers[$i]} ]; then
+ Ports=( ${Ports[@]} ${gtmProxyPorts[$i]} )
+ Dirs=( ${Dirs[@]} ${gtmProxyDirs[$i]} )
+ fi
+ done
+ for ((i=0; i<${#coordMasterServers[@]}; i++)); do
+ if [ $1 == ${coordMasterServers[$i]} ]; then
+ Ports=( ${Ports[@]} ${coordPorts[$i]} )
+ Ports=( ${Ports[@]} ${poolerPorts[$i]} )
+ Dirs=( ${Dirs[@]} ${coordMasterDirs[$i]} )
+ fi
+ if [ $1 == ${coordSlaveServers[$i]} ]; then
+ Ports=( ${Ports[@]} ${coordPorts[$i]} )
+ Ports=( ${Ports[@]} ${poolerPorts[$i]} )
+ Dirs=( ${Dirs[@]} ${coordSlaveDirs[$i]} )
+ fi
+ done
+ for ((i=0; i<${#datanodeMasterServers[$i]}; i++)); do
+ if [ $1 == ${datanodeMasterServers[$i]} ]; then
+ Ports=( ${Ports[@]} ${datanodePorts[$i]} )
+ PoolerPorts=( ${PoolerPorts[@]} ${datanodePoolerPorts[$i]} ) #XCP
+ Dirs=( ${Dirs[@]} ${datanodeMasterDirs[$i]} )
+ fi
+ if [ $1 == ${datanodeSlaveServers[$i]} ]; then
+ Ports=( ${Ports[@]} ${datanodePorts[$i]} )
+ PoolerPorts=( ${PoolerPorts[@]} ${datanodePoolerPorts[$i]} ) #XCP
+ Dirs=( ${Ports[@]} ${datanodeSlaveDirs[$i]} ) #mds
+ fi
+ done
+ for ((i=0; i<${#Ports[@]}; i++)); do
+ for ((j=$i+1; j<${#Ports[@]}; j++)); do
+ if [ ${Ports[$i]} -eq ${Ports[$j]} -a ${Ports[$i]} != none ]; then
+ echo ERROR: duplicate port assignment for the server $1
+ return 1
+ fi
+ done
+ done
+ for ((i=0; i<${#Dirs[@]}; i++)); do
+ for ((j=$i+1; j<${#Dirs[@]}; j++)); do
+ if [ ${Dirs[$i]} == ${Dirs[$j]} -a ${Dirs[$i]} != none ]; then
+ echo ERROR: duplicate work directory assignment for the server $1
+ return 1
+ fi
+ done
+ done
+ # We should check if GTM proxy is configured when GTM slave is configured.
+ # We could do this here but it's better to do it when we configure
+ # postgresql.conf of coordinator/datanode master.
+}
+
+function print_config
+{
+ local ii
+ local jj
+
+ echo "========= Postgres-XC configuration ========================"
+ echo "=== Overall ==="
+ echo Postgres-XC owner: $pgxcOwner
+ echo Postgres-XC user: $pgxcUser
+ echo Postgres-XC install directory: $pgxcInstallDir
+ echo tmpDir: $tmpDir
+ echo localTmpDir: $localTmpDir
+ echo "=== Each Server ==="
+ for ((ii=0;ii<${#allServers[@]};ii++)); do
+ echo "=== ${allServers[$ii]} ==="
+ # GTM Master
+ if [ ${allServers[$ii]} == $gtmMasterServer ]; then
+ echo "GTM Master: "
+ echo " " Nodename: "'"$gtmName"'", port: $gtmMasterPort, dir: "'"$gtmMasterDir"'"
+ echo " " ExtraConfig: "'"$gtmExtraCofig"'", Specific Extra Config: "'"$gtmMasterSpecificExtraConfig"'"
+ fi
+ # GTM Slave
+ if [ $gtmSlave == y ]; then
+ if [ ${allServers[$ii]} == $gtmSlaveServer ]; then
+ echo "GTM Slave: "
+ echo " " Nodename: "'"$gtmName"'", port: $gtmSlavePort, dir: "'"$gtmSlaveDir"'"
+ echo " " ExtraConfig: "'"$gtmExtraConfig"'", Specific Extra Config: "'"$gtmSlaveSpecificExtraConfig"'"
+ fi
+ fi
+ # GTM Proxy
+ if [ $gtmProxy == y ]; then
+ for ((jj=0;jj<${#gtmProxyServers[@]};jj++)); do
+ if [ ${allServers[$ii]} == ${gtmProxyServers[$jj]} ]; then
+ echo "GTM Proxy:"
+ echo " " Nodename: "'"${gtmProxyNames[$jj]}"'", port: ${gtmProxyPorts[$jj]}, dir: "'"${gtmProxyDirs[$jj]}"'"
+ echo " " ExtraConfig: "'"$gtmPxyExtraConfig"'", Specific Extra Config: "'"${gtmPxySpecificExtraConfig[$jj]}"'"
+ fi
+ done
+ fi
+ # Coordinator Master
+ for ((jj=0;jj<${#coordMasterServers[@]};jj++)); do
+ if [ ${allServers[$ii]} == ${coordMasterServers[$jj]} ]; then
+ echo "Coordinator Master:"
+ echo " " Nodename: "'"${coordNames[$jj]}"'", port: ${coordPorts[$jj]}, pooler port: "'"${poolerPorts[$jj]}"'"
+ echo " " MaxWalSenders: ${coordMaxWalsenders[$jj]}, Dir: "'"${coordMasterDirs[$jj]}"'"
+ echo " " ExtraConfig: "'"$coordExtraConfig"'", Specific Extra Config: "'"${coordSpecificExtraConfig[$jj]}"'"
+ echo " " pg_hba entries: '(' ${coordPgHbaEntries[@]} ')'
+ echo " " Extra pg_hba: "'"$coordExraPgHba"'", Specific Extra pg_hba: "'"${coordSpecificExtraPgHba[$jj]}"'"
+ fi
+ done
+ # Coordinator Slave
+ if [ $coordSlave == y ]; then
+ for ((jj=0;jj<${#coordSlaveServers[@]};jj++)); do
+ if [ ${allServers[$ii]} == ${coordSlaveServers[$jj]} ]; then
+ echo "Coordinator Slave:"
+ echo " " Nodename: "'"${coordNames[$jj]}"'", port: ${coordPorts[$jj]}, pooler port: ${poolerPorts[$jj]}
+ echo " " Dir: "'"${coordSlaveDirs[$jj]}"'", Archive Log Dir: "'"${coordArchLogDirs[$jj]}"'"
+ fi
+ done
+ fi
+ # Datanode Master
+ for ((jj=0;jj<${#datanodeMasterServers[@]};jj++)); do
+ if [ ${allServers[$ii]} == ${datanodeMasterServers[$jj]} ]; then
+ echo "Datanode Master"
+ echo " " Nodename: "'"${datanodeNames[$jj]}"'", port: ${datanodePorts[$jj]}
+ #mdsecho " " Pooler Ports: ${datanodePoolerPorts[$jj]}
+ echo " " MaxWalSenders: ${datanodeMaxWalSenders[$jj]}, Dir: "'"${datanodeMasterDirs[$jj]}
+ echo " " ExtraConfig: "'"datanodeExtraConfig"'", Specific Extra Config: \
+ "'"${datanodeSpecificExtraConfig[$jj]}"'"
+ echo " " pg_hba entries: '(' ${datanodePgHbaEntries[@]} ')'
+ echo " " Extra pg_hba: "'"$datanodeExtraPgHba"'", Specific Extra pg_hba: \
+ "'"${datanodeSpecificExtraPgHba[$jj]}"'"
+ fi
+ done
+ # Datanode Slave
+ if [ $datanodeSlave == y ]; then
+ for ((jj=0;jj<${#datanodeMasterServers[@]};jj++)); do
+ if [ ${allServers[$ii]} == ${datanodeSlaveServers[$jj]} ]; then
+ echo "Datanode Slave"
+ echo " " Nodename: "'"${datanodeNames[$jj]}"'", port: ${datanodePorts[$jj]}
+ #mdsecho " " Pooler Ports: ${datanodePoolerPorts[$jj]}
+ echo " " MaxWalSenders: ${datanodeMaxWalSenders[$jj]}, Dir: "'"${datanodeSlaveDirs[$jj]}
+ echo " " ExtraConfig: "'"datanodeExtraConfig"'", Specific Extra Config: \
+ "'"${datanodeSpecificExtraConfig[$jj]}"'"
+ echo " " pg_hba entries: '(' ${datanodePgHbaEntries[@]} ')'
+ echo " " Extra pg_hba: "'"$datanodeExtraPgHba"'", Specific Extra pg_hba: \
+ "'"${datanodeSpecificExtraPgHba[$jj]}"'"
+ fi
+ done
+ fi
+ done
+ echo "=== End of configuration ==="
+}
+
+# $1: nodename, $2: [master/slave/all]
+function pgxc_ctl_monitor
+{
+ local fn=monitor
+ local i
+ if [ $# -le 0 ]; then
+ eecho "$progname:$fn" specify node name
+ return 2
+ fi
+ if [ "$1" == "none" ] || [ "$1" == "N/A" ]; then
+ eecho "$progname:$fn" invalid node name
+ return 2
+ fi
+ if [ "$1" == "$gtmName" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_monitor -Z gtm -p $gtmMasterPort -h $gtmMasterServer
+ if [ $? -eq 0 ]; then
+ echo GTM master "("$gtmName")" running
+ else
+ echo GTM master "("$gtmName")" not running
+ fi
+ return
+ elif [ "$2" == "slave" ]; then
+ if [ "$gtmSlaveServer" == "none" ] || [ "$gtmSlaveServer" == "N/A" ]; then
+ echo GTM slave not configured.
+ return
+ fi
+ pgxc_monitor -Z gtm -p $gtmSlavePort -h $gtmSlaveServer
+ if [ $? -eq 0 ]; then
+ echo GTM slave "("$gtmName")" running
+ else
+ echo GTM slave "("$gtmName")" not running
+ fi
+ return
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_monitor -Z gtm -p $gtmMasterPort -h $gtmMasterServer
+ if [ $? -eq 0 ]; then
+ echo GTM master "("$gtmName")" running
+ else
+ echo GTM master "("$gtmName")" not running
+ fi
+ if [ "$gtmSlaveServer" == "none" ] || [ "$gtmSlaveServer" == "N/A" ]; then
+ echo GTM slave not configured.
+ return
+ fi
+ pgxc_monitor -Z gtm -p $gtmSlavePort -h $gtmSlaveServer
+ if [ $? -eq 0 ]; then
+ echo GTM slave "("$gtmName")" running
+ else
+ echo GTM slave "("$gtmName")" not running
+ fi
+ fi
+ return
+ fi
+ # GTM-Proxy
+ for ((i=0; i<${#gtmProxyNames[@]}; i++)); do
+ if [ "$1" == "${gtmProxyNames[$i]}" ]; then
+ if [ "${gtmProxyServers[$i]}" == "none" ] || [ "${gtmProxyServers[$i]}" == "N/A" ]; then
+ echo GTM proxy "("${gtmProxyNames[$i]}")": not configured
+ return;
+ fi
+ pgxc_monitor -Z gtm -p ${gtmProxyPorts[$i]} -h ${gtmProxyServers[$i]}
+ if [ $? -eq 0 ]; then
+ echo GTM proxy "("${gtmProxyNames[$i]}")": running
+ else
+ echo GTM proxy "("${gtmProxyNames[$i]}")": not running
+ fi
+ return
+ fi
+ done
+ # Coordinator
+ for ((i=0; i<${#coordNames[@]}; i++)); do
+ if [ "$1" == "${coordNames[$i]}" ]; then
+ if [ "$2" == "master" ]; then
+ if [ "${coordMasterServers[$i]}" == "none" ] || [ "${coordMasterServers[$i]}" == "N/A" ]; then
+ echo Coordinator master "("${coordNames[$i]}")": not configured
+ return
+ fi
+ pgxc_monitor -Z node -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ echo Coordinator master "("${coordNames[$i]}")": running
+ else
+ echo Coordinator master "("${coordNames[$i]}")": not running
+ fi
+ return
+ fi
+ if [ "$2" == "slave" ]; then
+ if [ "${coordSlaveServers[$i]}" == "none" ] || [ "${coordSlaveServers[$i]}" == "N/A" ]; then
+ echo Coordinator slave "("${coordNames[$i]}")": not configured
+ return
+ fi
+ pgxc_monitor -Z node -p ${coordPorts[$i]} -h ${coordSlaveServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ echo Coordinator slave "("${coordNames[$i]}")": running
+ else
+ echo Coordinator slave "("${coordNames[$i]}")": not running
+ fi
+ return
+ fi
+ if [ "$2" == "all" ] || [ "$2" == "" ]; then
+ if [ "${coordMasterServers[$i]}" == "none" ] || [ "${coordMasterServers[$i]}" == "N/A" ]; then
+ echo Coordinator master "("${coordNames[$i]}")": not configured
+ else
+ pgxc_monitor -Z node -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ echo Coordinator master "("${coordNames[$i]}")": running
+ else
+ echo Coordinator master "("${coordNames[$i]}")": not running
+ fi
+ fi
+ if [ "${coordSlaveServers[$i]}" == "none" ] || [ "${coordSlaveServers[$i]}" == "N/A" ]; then
+ echo Coordinator slave "("${coordNames[$i]}")": not configured
+ else
+ pgxc_monitor -Z node -p ${coordPorts[$i]} -h ${coordSlaveServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ echo Coordinator slave "("${coordNames[$i]}")": running
+ else
+ echo Coordinator slave "("${coordNames[$i]}")": not running
+ fi
+ fi
+ return
+ fi
+ fi
+ done
+ # Datanode
+ for ((i=0; i<${#datanodeNames[@]}; i++)); do
+ if [ "$1" == "${datanodeNames[$i]}" ]; then
+ if [ "$2" == "master" ]; then
+ if [ "${datanodeMasterServers[$i]}" == "none" ] || [ "${datanodeMasterServers[$i]}" == "N/A" ]; then
+ echo Datanode master "("${datanodeNames[$i]}")": not configured
+ return
+ fi
+ pgxc_monitor -Z node -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ echo Datanode master "("${datanodeNames[$i]}")": running
+ else
+ echo Datanode master "("${datanodeNames[$i]}")": not running
+ fi
+ return
+ fi
+ if [ "$2" == "slave" ]; then
+ if [ "${datanodeSlaveServers[$i]}" == "none" ] || [ "${datanodeSlaveServers[$i]}" == "N/A" ]; then
+ echo Datanode slave "("${datanodeNames[$i]}")": not configured
+ return
+ fi
+ pgxc_monitor -Z node -p ${datanodePorts[$i]} -h ${datanodeSlaveServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ echo Datanode slave "("${datanodeNames[$i]}")": running
+ else
+ echo Datanode slave "("${datanodeNames[$i]}")": not running
+ fi
+ return
+ fi
+ if [ "$2" == "all" ] || [ "$2" == "" ]; then
+ if [ "${datanodeMasterServers[$i]}" == "none" ] || [ "${datanodeMasterServers[$i]}" == "N/A" ]; then
+ echo Datanode master "("${datanodeNames[$i]}")": not configured
+ else
+ pgxc_monitor -Z node -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ echo Datanode master "("${datanodeNames[$i]}")": running
+ else
+ echo Datanode master "("${datanodeNames[$i]}")": not running
+ fi
+ fi
+ if [ "${datanodeSlaveServers[$i]}" == "none" ] || [ "${datanodeSlaveServers[$i]}" == "N/A" ]; then
+ echo Datanode slave "("${datanodeNames[$i]}")": not configured
+ else
+ pgxc_monitor -Z node -p ${datanodePorts[$i]} -h ${datanodeSlaveServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ echo Datanode slave "("${coordNames[$i]}")": running
+ else
+ echo Datanode slave "("${coordNames[$i]}")": not running
+ fi
+ fi
+ return
+ fi
+ fi
+ done
+
+}
#==============================================================
# Show the configuration of a single component.
# (The running/not-running status check is done by
# monitor_components, which needs pgxc_monitor installed.)
#==============================================================
+function pgxc_ctl_show_component
+{
+ local i
+ # GTM
+ if [ "$1" == "$gtmName" ]; then
+ echo "$1:" GTM
+ echo " " GTM Master, "port=$gtmMasterPort", "host=$gtmMasterServer", "dir=$gtmMasterDir"
+ if [ "$gtmSlaveServer" == "none" ] || [ "$gtmSlaveServer" == "N/A" ]; then
+ echo " " GTM Slave, not configured
+ else
+ echo " " GTM Slave, "port=$gtmSlavePort", "host=$gtmSlaveServer", "dir=$gtmSlaveDir"
+ fi
+ return
+ fi
+ # GTM Proxy
+ for ((i=0; i<${#gtmProxyNames[@]}; i++)); do
+ if [ "${gtmProxyNames[$i]}" == "$1" ]; then
+ echo "$1:" GTM Proxy, "port=${gtmProxyPorts[$i]}", "host=${gtmProxyServers[$i]}", "dir=${gtmProxyDirs[$i]}"
+ return
+ fi
+ done
+ # Coordinator
+ for ((i=0; i<${#coordNames[@]}; i++)); do
+ if [ "${coordNames[$i]}" == "$1" ]; then
+ echo "$1:" Coordinator
+ echo " " Coordinator Master, port=${coordPorts[$i]}, host=${coordMasterServers[$i]}, dir=${coordMasterDirs[$i]}
+ if [ "${coordSlaveServers[$i]}" == "none" ] || [ "${coordSlaveServers[$i]}" == "N/A" ]; then
+ echo " " Coordinator Slave, not configured
+ else
+ echo " " Coordinator Slave, port=${coordPorts[$i]}, host=${coordSlaveServers[$i]}, dir=${coordSlaveDirs[$i]}
+ fi
+ return
+ fi
+ done
+ # Datanode
+ for ((i=0; i<${#datanodeNames[@]}; i++)); do
+ if [ "${datanodeNames[$i]}" == "$1" ]; then
+ echo "$1:" Datanode
+ echo " " Datanode Master, port=${datanodePorts[$i]}, pooler_port=${datanodePoolerPorts[$]}, host=${datanodeMasterServers[$i]}, dir=${datanodeMasterDirs[$i]}
+ if [ "${datanodeSlaveServers[$i]}" == "none" ] || [ "${datanodeSlaveServers[$i]}" == "N/A" ]; then
+ echo " " Datanode Slave, not configured
+ else
+ echo " " Datanode Slave, port=${datanodePorts[$i]}, pooler_port=${datanodePoolerPorts[$]}, host=${datanodeSlaveServers[$i]}, dir=${datanodeSlaveDirs[$i]}
+ fi
+ return
+ fi
+ done
+ echo Component $1 not found.
+ return 1
+}
+
# Display the configuration of every configured GTM proxy.
# Returns 2 when GTM proxies are not configured at all.
function pgxc_ctl_show_gtm_proxy_all
{
    local idx
    echo ----------
    if [ "$gtmProxy" != "y" ]; then
        eecho GTM Proxy is not configured
        return 2
    fi
    echo GTM Proxies:
    for ((idx = 0; idx < ${#gtmProxyNames[@]}; idx++)); do
        pgxc_ctl_show_component ${gtmProxyNames[$idx]}
    done
}
+
# Display the configuration of every configured coordinator.
function pgxc_ctl_show_coordinator_all
{
    local idx
    echo ----------
    echo Coordinators:
    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
        pgxc_ctl_show_component ${coordNames[$idx]}
    done
}
+
# Display the configuration of every configured datanode.
function pgxc_ctl_show_datanode_all
{
    local idx
    echo ----------
    echo Datanodes:
    for ((idx = 0; idx < ${#datanodeNames[@]}; idx++)); do
        pgxc_ctl_show_component ${datanodeNames[$idx]}
    done
}
+
#==============================================================
# monitor_components -- report, for every configured component
# (GTM master/slave, GTM proxies, coordinator masters/slaves,
# datanode masters/slaves), whether it is running, by probing
# each with pgxc_monitor.  pgxc_monitor must be installed.
#==============================================================
function monitor_components
{
    local i

    # GTM master
    pgxc_monitor -Z gtm -p $gtmMasterPort -h $gtmMasterServer
    if [ $? -eq 0 ]; then
        echo -n GTM master "("$gtmName")": running. " "
    else
        echo -n GTM master "("$gtmName")": not running. " "
    fi
    echo host: $gtmMasterServer, port: $gtmMasterPort, dir: $gtmMasterDir

    # GTM slave
    if [ $gtmSlave == y ]; then
        if [ $gtmSlaveServer == none ] || [ $gtmSlaveServer == N/A ]; then
            echo GTM slave "("$gtmName")": not configured.
        else
            pgxc_monitor -Z gtm -p $gtmSlavePort -h $gtmSlaveServer
            if [ $? -eq 0 ]; then
                echo -n GTM slave "("$gtmName")": running. " "
            else
                echo -n GTM slave "("$gtmName")": not running. " "
            fi
            echo host: $gtmSlaveServer, port: $gtmSlavePort, dir: $gtmSlaveDir
        fi
    fi

    # GTM proxies
    if [ $gtmProxy == y ]; then
        for ((i=0; i<${#gtmProxyNames[@]}; i++)); do
            if [ ${gtmProxyServers[$i]} != none ] && [ ${gtmProxyServers[$i]} != N/A ]; then
                pgxc_monitor -Z gtm -p ${gtmProxyPorts[$i]} -h ${gtmProxyServers[$i]}
                if [ $? -eq 0 ]; then
                    echo -n GTM proxy "("${gtmProxyNames[$i]}")": running. " "
                else
                    echo -n GTM proxy "("${gtmProxyNames[$i]}")": not running. " "
                fi
                echo host: ${gtmProxyServers[$i]}, port: ${gtmProxyPorts[$i]}, dir: ${gtmProxyDirs[$i]}
            fi
        done
    fi

    # Coordinator masters
    for ((i=0; i<${#coordNames[@]};i++));do
        if [ ${coordMasterServers[$i]} != none ] && [ ${coordMasterServers[$i]} != N/A ]; then
            pgxc_monitor -Z node -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} -U $pgxcOwner -d postgres
            if [ $? == 0 ]; then
                echo -n Coordinator master "("${coordNames[$i]}")": running. " "
            else
                echo -n Coordinator master "("${coordNames[$i]}")": not running. " "
            fi
            echo host: ${coordMasterServers[$i]}, port: ${coordPorts[$i]}, dir: ${coordMasterDirs[$i]}
        fi
    done

    # Coordinator slaves
    if [ $coordSlave == y ]; then
        for ((i=0; i<${#coordNames[@]};i++)); do
            if [ ${coordSlaveServers[$i]} == none ] || [ ${coordSlaveServers[$i]} == N/A ]; then
                # BUGFIX: the second test read $coordNames[$i] without braces
                # (i.e. "${coordNames}[$i]"), so the N/A comparison could
                # never match the intended element.
                if [ ${coordNames[$i]} != none ] && [ ${coordNames[$i]} != N/A ]; then
                    echo Coordinator slave "("${coordNames[$i]}")": not configured
                fi
            else
                pgxc_monitor -Z node -p ${coordPorts[$i]} -h ${coordSlaveServers[$i]} -U $pgxcOwner -d postgres
                if [ $? == 0 ]; then
                    echo -n Coordinator slave "("${coordNames[$i]}")": running. " "
                else
                    echo -n Coordinator slave "("${coordNames[$i]}")": not running. " "
                fi
                echo host: ${coordSlaveServers[$i]}, port: ${coordPorts[$i]}, dir: ${coordSlaveDirs[$i]}
            fi
        done
    fi

    # Datanode masters
    for ((i=0; i<${#datanodeNames[@]}; i++));do
        if [ ${datanodeMasterServers[$i]} != none ] && [ ${datanodeMasterServers[$i]} != N/A ]; then
            pgxc_monitor -Z node -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} -U $pgxcOwner -d postgres
            if [ $? == 0 ]; then
                echo -n Datanode master "("${datanodeNames[$i]}")": running. " "
            else
                echo -n Datanode master "("${datanodeNames[$i]}")": not running. " "
            fi
            echo host: ${datanodeMasterServers[$i]}, port: ${datanodePorts[$i]}, dir: ${datanodeMasterDirs[$i]}
        fi
    done

    # Datanode slaves
    if [ $datanodeSlave == y ]; then
        for ((i=0; i<${#datanodeNames[@]}; i++)); do
            if [ ${datanodeSlaveServers[$i]} == none ] || [ ${datanodeSlaveServers[$i]} == N/A ]; then
                if [ ${datanodeNames[$i]} != none ] && [ ${datanodeNames[$i]} != N/A ]; then
                    echo Datanode slave "("${datanodeNames[$i]}")": not configured
                fi
            else
                pgxc_monitor -Z node -p ${datanodePorts[$i]} -h ${datanodeSlaveServers[$i]} -U $pgxcOwner -d postgres
                if [ $? == 0 ]; then
                    echo -n Datanode slave "("${datanodeNames[$i]}")": running. " "
                else
                    echo -n Datanode slave "("${datanodeNames[$i]}")": not running. " "
                fi
                # BUGFIX: pooler port was ${datanodePoolerPorts[$]} (missing
                # the index); also normalized "pooler_port=" to the
                # "pooler_port: " form used by the other fields on this line.
                echo host: ${datanodeSlaveServers[$i]}, port: ${datanodePorts[$i]}, pooler_port: ${datanodePoolerPorts[$i]}, dir: ${datanodeSlaveDirs[$i]}
            fi
        done
    fi
}
+
+
+#===============================================================
+# Tool function to check -m option to stop coordinator and
+# datanode
+#===============================================================
#===============================================================
# Validate the -m (shutdown mode) option used when stopping
# coordinators and datanodes.  Sets the global $immediate to the
# matching pg_ctl -m flag.  Exits with status 1 on anything other
# than immediate, fast or normal.
#===============================================================
function check_immediate
{
    case "$1" in
        immediate )
            immediate="-m immediate"
            ;;
        fast )
            immediate="-m fast"
            ;;
        normal )
            immediate=""
            ;;
        * )
            echo "ERROR: Please specify immediate, fast or normal"
            exit 1
            ;;
    esac
}
+
+#==================================================
+#
+# Setup .bashrc file for PATH and LD_LIBRARY_PATH
+#
+#==================================================
+
# Append PATH/LD_LIBRARY_PATH/MANPATH settings for this Postgres-XC
# installation to ~/.bashrc on every configured server.  The original
# .bashrc is saved as .bashrc.org first.
function setup_bashrc
{
    # BUGFIX: i was not declared local, so the loop clobbered any
    # caller's $i.  Also fixed the ".bachrc" typo in the generated text.
    local i

    vecho ================================================================
    vecho Setting .bashrc files
    for ((i=0; i< ${#allServers[@]}; i++)); do
        vecho ---- ${allServers[$i]} -------------------------
        doit ssh $pgxcUser@${allServers[$i]} cp .bashrc .bashrc.org
        ssh $pgxcUser@${allServers[$i]} "cat >> .bashrc" <<EOF
# .bashrc addition for Postgres-XC PATH and LD_LIBRARY_PATH
# $datetime
export PATH_ORG=\$PATH
export PATH=$pgxcInstallDir/bin:\$PATH
export LD_LIBRARY_PATH_ORG=\$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$pgxcInstallDir/lib:\$LD_LIBRARY_PATH
export MANPATH_ORG=\$MANPATH
export MANPATH=$pgxcInstallDir/share/man:\$MANPATH
# End of addition
EOF
    done
}
+
# Append PATH/LD_LIBRARY_PATH/MANPATH settings for this Postgres-XC
# installation to ~/.bashrc on the single server given as $1, saving
# the original as .bashrc.org first.
function setup_bashrc_individual
{
    vecho ================================================================
    vecho Setting .bashrc files for $pgxcOwner at $1
    doit ssh $pgxcUser@$1 cp .bashrc .bashrc.org
    # BUGFIX: fixed the ".bachrc" typo in the generated comment text.
    ssh $pgxcUser@$1 "cat >> .bashrc" <<EOF
# .bashrc addition for Postgres-XC PATH and LD_LIBRARY_PATH
# $datetime
export PATH_ORG=\$PATH
export PATH=$pgxcInstallDir/bin:\$PATH
export LD_LIBRARY_PATH_ORG=\$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$pgxcInstallDir/lib:\$LD_LIBRARY_PATH
export MANPATH_ORG=\$MANPATH
export MANPATH=$pgxcInstallDir/share/man:\$MANPATH
# End of addition
EOF
}
+
+#==================================================
+#
+# Deploy binaries and other common things to each server
+#
+# All the build materials will be deployed to each
+# servers.
+#
+#=================================================
+
# Copy the locally built materials (bin/include/lib/share under
# $pgxcInstallDir) to every target server via a temporary tarball.
function pgxc_deploy_all
{
    vecho ================================================================
    vecho pgxc_deploy_all: copy built materials to all the target servers.

    local tarball=$tmpDir/wk.tgz
    doall rm -rf $pgxcInstallDir/bin $pgxcInstallDir/include $pgxcInstallDir/lib $pgxcInstallDir/share
    doall mkdir -p $pgxcInstallDir
    vecho tar czCf $pgxcInstallDir $tarball bin include lib share
    tar czCf $pgxcInstallDir $tarball bin include lib share
    cpall $tarball $pgxcInstallDir/wk.tgz
    doall tar xzCf $pgxcInstallDir $pgxcInstallDir/wk.tgz
    doall rm $pgxcInstallDir/wk.tgz
    doit rm $tarball
}
+
+# First argument is the target node.
+
# Copy the locally built materials to the single server named by $1,
# via a temporary tarball.
function pgxc_deploy_individual
{
    vecho ================================================================
    vecho pgxc_deploy_individual: copy built materials to the server $1

    local tarball=$tmpDir/wk.tgz
    doit ssh $pgxcUser@$1 rm -rf $pgxcInstallDir/bin $pgxcInstallDir/include $pgxcInstallDir/lib $pgxcInstallDir/share
    doit ssh $pgxcUser@$1 mkdir -p $pgxcInstallDir
    doit tar czCf $pgxcInstallDir $tarball bin include lib share
    doit scp $tarball $pgxcUser@$1:$pgxcInstallDir/wk.tgz
    doit ssh $pgxcUser@$1 tar xzCf $pgxcInstallDir $pgxcInstallDir/wk.tgz
    doit ssh $pgxcUser@$1 rm $pgxcInstallDir/wk.tgz
    doit rm $tarball
}
+
+#==================================================
+#
+# Cleanup work directories
+#
+#==================================================
+
+# First argument is the server name. second argument is the directory name
+# Server name could be none, where the target does not exist.
# pgxc_clean_dir server dir -- remove and recreate a work directory
# (mode 0700) on the given server.  A server of "none" (or "N/A")
# means the target does not exist and is silently skipped.
# Returns 2 when the argument count is wrong.
function pgxc_clean_dir
{
    if [ $# -ne 2 ]; then
        return 2
    fi
    # BUGFIX: $1 was unquoted, so an empty server name made the test
    # error out and fall through to ssh with an empty host.  Also skip
    # "N/A", which the callers use interchangeably with "none".
    if [ "$1" == "none" ] || [ "$1" == "N/A" ]; then
        return 0
    fi
    doit ssh $pgxcUser@$1 rm -rf $2
    doit ssh $pgxcUser@$1 mkdir -p $2
    doit ssh $pgxcUser@$1 chmod 0700 $2
}
+
# pgxc_clean_socket server port -- remove leftover unix-domain socket
# files (/tmp/.s.*<port>*) on the given server.
function pgxc_clean_socket
{
    local host=$1
    local port=$2
    doit ssh $pgxcUser@$host rm -f /tmp/.s.'*'$port'*'
}
+
+# First argument is the nodename. The second is "master", "slave" or "all".
# pgxc_clean_node nodename [master|slave|all] -- wipe the work
# directory and leftover sockets of the named component.  The first
# argument selects the component (GTM, a GTM proxy, a coordinator or
# a datanode); for everything but GTM proxies the second argument
# chooses master, slave or all.
function pgxc_clean_node
{
    local idx

    log_echo pgxc_clean_node'('$*')'
    # ---- GTM: consume the name, dispatch on master/slave/all ----
    if [ $1 == $gtmName ]; then
        shift;
        case $1 in
        master )
            if [ "$gtmMasterServer" != "none" ] && [ "$gtmMasterServer" != "N/A" ]; then
                doit pgxc_clean_dir $gtmMasterServer $gtmMasterDir
                doit pgxc_clean_socket $gtmMasterServer $gtmMasterPort
            else
                eecho GTM $gtmName is not configured.
            fi
            return;;
        slave )
            if [ "$gtmSlaveServer" != "none" ] && [ "$gtmSlaveServer" != "N/A" ]; then
                doit pgxc_clean_dir $gtmSlaveServer $gtmSlaveDir
                doit pgxc_clean_socket $gtmSlaveServer $gtmSlavePort
            else
                eecho GTM slave $gtmName is not configured.
            fi
            return;;
        all )
            # "all" is master followed by slave.
            if [ "$gtmMasterServer" != "none" ] && [ "$gtmMasterServer" != "N/A" ]; then
                doit pgxc_clean_dir $gtmMasterServer $gtmMasterDir
                doit pgxc_clean_socket $gtmMasterServer $gtmMasterPort
            else
                eecho GTM $gtmName is not configured.
            fi
            if [ "$gtmSlaveServer" != "none" ] && [ "$gtmSlaveServer" != "N/A" ]; then
                doit pgxc_clean_dir $gtmSlaveServer $gtmSlaveDir
                doit pgxc_clean_socket $gtmSlaveServer $gtmSlavePort
            else
                eecho GTM slave $gtmName is not configured.
            fi
            return;;
        * )
            echo ERROR: invalid argument for pgxc_clean_node, $1
            return 1;;
        esac
    fi
    # ---- GTM proxies (no master/slave distinction) ----
    for ((idx = 0; idx < ${#gtmProxyNames[@]}; idx++)); do
        if [ $1 == ${gtmProxyNames[$idx]} ]; then
            if [ "${gtmProxyServers[$idx]}" != "none" ] && [ "${gtmProxyServers[$idx]}" != "N/A" ]; then
                doit pgxc_clean_dir ${gtmProxyServers[$idx]} ${gtmProxyDirs[$idx]}
                doit pgxc_clean_socket ${gtmProxyServers[$idx]} ${gtmProxyPorts[$idx]}
            else
                eecho GTM Proxy $1 is not configured.
            fi
            return;
        fi
    done
    # ---- Coordinators ----
    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
        if [ $1 == ${coordNames[$idx]} ]; then
            case $2 in
            master )
                if [ "${coordMasterServers[$idx]}" != "none" ] && [ "${coordMasterServers[$idx]}" != "N/A" ]; then
                    doit pgxc_clean_dir ${coordMasterServers[$idx]} ${coordMasterDirs[$idx]}
                    doit pgxc_clean_socket ${coordMasterServers[$idx]} ${coordPorts[$idx]}
                else
                    eecho Coordinator master $1 is not configured.
                fi
                return;;
            slave )
                if [ "${coordSlaveServers[$idx]}" != "none" ] && [ "${coordSlaveServers[$idx]}" != "N/A" ]; then
                    doit pgxc_clean_dir ${coordSlaveServers[$idx]} ${coordSlaveDirs[$idx]}
                    doit pgxc_clean_socket ${coordSlaveServers[$idx]} ${coordPorts[$idx]}
                else
                    eecho Coordinator slave $1 is not configured.
                fi
                return;;
            all )
                if [ "${coordMasterServers[$idx]}" != "none" ] && [ "${coordMasterServers[$idx]}" != "N/A" ]; then
                    doit pgxc_clean_dir ${coordMasterServers[$idx]} ${coordMasterDirs[$idx]}
                    doit pgxc_clean_socket ${coordMasterServers[$idx]} ${coordPorts[$idx]}
                else
                    eecho Coordinator master $1 is not configured.
                fi
                if [ "${coordSlaveServers[$idx]}" != "none" ] && [ "${coordSlaveServers[$idx]}" != "N/A" ]; then
                    doit pgxc_clean_dir ${coordSlaveServers[$idx]} ${coordSlaveDirs[$idx]}
                    doit pgxc_clean_socket ${coordSlaveServers[$idx]} ${coordPorts[$idx]}
                else
                    eecho Coordinator slave $1 is not configured.
                fi
                return;;
            * )
                echo ERROR: invalid argument for pgxc_clean_node, $1
                return 1;;
            esac
        fi
    done
    # ---- Datanodes (also clean the pooler socket) ----
    for ((idx = 0; idx < ${#datanodeNames[@]}; idx++)); do
        if [ $1 == ${datanodeNames[$idx]} ]; then
            case $2 in
            master )
                if [ "${datanodeMasterServers[$idx]}" != "none" ] && [ "${datanodeMasterServers[$idx]}" != "N/A" ]; then
                    doit pgxc_clean_dir ${datanodeMasterServers[$idx]} ${datanodeMasterDirs[$idx]}
                    doit pgxc_clean_socket ${datanodeMasterServers[$idx]} ${datanodePorts[$idx]}
                    doit pgxc_clean_socket ${datanodeMasterServers[$idx]} ${datanodePoolerPorts[$idx]}
                else
                    eecho Datanode master $1 is not configured.
                fi
                return;;
            slave )
                if [ "${datanodeSlaveServers[$idx]}" != "none" ] && [ "${datanodeSlaveServers[$idx]}" != "N/A" ]; then
                    doit pgxc_clean_dir ${datanodeSlaveServers[$idx]} ${datanodeSlaveDirs[$idx]}
                    doit pgxc_clean_socket ${datanodeSlaveServers[$idx]} ${datanodePorts[$idx]}
                    doit pgxc_clean_socket ${datanodeSlaveServers[$idx]} ${datanodePoolerPorts[$idx]}
                else
                    eecho Datanode slave $1 is not configured.
                fi
                return;;
            all )
                if [ "${datanodeMasterServers[$idx]}" != "none" ] && [ "${datanodeMasterServers[$idx]}" != "N/A" ]; then
                    doit pgxc_clean_dir ${datanodeMasterServers[$idx]} ${datanodeMasterDirs[$idx]}
                    doit pgxc_clean_socket ${datanodeMasterServers[$idx]} ${datanodePorts[$idx]}
                    doit pgxc_clean_socket ${datanodeMasterServers[$idx]} ${datanodePoolerPorts[$idx]}
                else
                    eecho Datanode master $1 is not configured.
                fi
                if [ "${datanodeSlaveServers[$idx]}" != "none" ] && [ "${datanodeSlaveServers[$idx]}" != "N/A" ]; then
                    doit pgxc_clean_dir ${datanodeSlaveServers[$idx]} ${datanodeSlaveDirs[$idx]}
                    doit pgxc_clean_socket ${datanodeSlaveServers[$idx]} ${datanodePorts[$idx]}
                    doit pgxc_clean_socket ${datanodeSlaveServers[$idx]} ${datanodePoolerPorts[$idx]}
                else
                    eecho Datanode slave $1 is not configured.
                fi
                return;;
            * )
                echo ERROR: invalid argument for pgxc_clean_node, $1
                return 1;;
            esac
        fi
    done
    echo ERROR: no target nodename found, $1
}
+
+# First argument is "master", "slave" or "all"
# pgxc_clean_gtm master|slave|all -- wipe the GTM work directories.
function pgxc_clean_gtm
{
    # BUGFIX: was "log_ehco pgxc_clen_gtm(...)" -- the misspelled
    # command failed on every call, and the logged name was wrong.
    log_echo pgxc_clean_gtm'('$*')'
    if [ $# -ne 1 ];then
        echo Specify master, slave or all
        return 1
    fi
    case $1 in
        master );;
        slave );;
        all );;
        * )
            vecho Specify master, slave or all
            return 1;;
    esac
    if [ $1 == master ] || [ $1 == all ]; then
        # BUGFIX: was misspelled "pxc_clean_dir".
        pgxc_clean_dir $gtmMasterServer $gtmMasterDir
    fi
    # BUGFIX: was "[ $gtm_slave != y ]" -- wrong variable name (the
    # configuration uses $gtmSlave) and inverted logic, so the slave
    # directory was cleaned only when the slave was NOT configured.
    if [ "$gtmSlave" == y ]; then
        if [ $1 == slave ] || [ $1 == all ]; then
            pgxc_clean_dir $gtmSlaveServer $gtmSlaveDir
        fi
    fi
}
+
+# First argument is gtm_proxy name
# pgxc_clean_gtm_proxy proxy_name -- wipe the named gtm_proxy's work
# directory.  Returns 1 when proxies are not configured, 2 on a bad
# argument or unknown proxy name.
function pgxc_clean_gtm_proxy
{
    log_echo pgxc_clean_gtm_proxy'('$*')'
    if [ $gtmProxy != y ]; then
        echo gtm_proxy is not configured
        return 1
    fi
    if [ $# -ne 1 ]; then
        echo Specify gtm_proxy name
        return 2
    fi
    local idx
    for ((idx = 0; idx < ${#gtmProxyNames[@]}; idx++)); do
        if [ $1 == ${gtmProxyNames[$idx]} ]; then
            pgxc_clean_dir ${gtmProxyServers[$idx]} ${gtmProxyDirs[$idx]}
            return
        fi
    done
    echo specified gtm_proxy is not configured, $1
    return 2
}
+
+# No argument
# Wipe the work directories of all configured gtm_proxies.
function pgxc_clean_gtm_proxy_all
{
    log_echo pgxc_clean_gtm_proxy_all'('$*')'
    if [ $gtmProxy != y ]; then
        echo gtm_proxy is not configured
        return 1
    fi
    local idx
    for ((idx = 0; idx < ${#gtmProxyNames[@]}; idx++)); do
        pgxc_clean_gtm_proxy ${gtmProxyNames[$idx]}
    done
}
+
+# First argument is coordinator name
# pgxc_clean_coordinator_master coord_name -- wipe the master work
# directory of the named coordinator.
function pgxc_clean_coordinator_master
{
    log_echo pgxc_clean_coordinator_master'('$*')'
    if [ $# -ne 1 ]; then
        echo specify coordinator name
        return 2
    fi
    local idx
    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
        if [ $1 == ${coordNames[$idx]} ]; then
            pgxc_clean_dir ${coordMasterServers[$idx]} ${coordMasterDirs[$idx]}
            return
        fi
    done
    echo specified coordinator is not configured, $1
    return 2
}
+
# Wipe the master work directories of all coordinators.
function pgxc_clean_coordinator_master_all
{
    local idx
    log_echo pgxc_clean_coordinator_master_all'('$*')'
    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
        pgxc_clean_coordinator_master ${coordNames[$idx]}
    done
}
+
+# First argument is a coordinator name
# pgxc_clean_coordinator_slave coord_name -- wipe the slave work
# directory of the named coordinator.
function pgxc_clean_coordinator_slave
{
    log_echo pgxc_clean_coordinator_slave'('$*')'
    if [ $coordSlave != y ]; then
        echo Coordinator slave is not configured.
        return 1
    fi
    if [ $# -ne 1 ]; then
        echo Specify coordinator name.
        return 2
    fi
    local idx
    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
        if [ $1 == ${coordNames[$idx]} ]; then
            if [ ${coordSlaveServers[$idx]} == none ] || [ ${coordSlaveServers[$idx]} == N/A ]; then
                echo Specified coordinator slave is not configured, $1
                return 2
            fi
            pgxc_clean_dir ${coordSlaveServers[$idx]} ${coordSlaveDirs[$idx]}
            return
        fi
    done
    echo Specified coordinator is not configured, $1
    return 2
}
+
# Wipe the slave work directories of all coordinators.
function pgxc_clean_coordinator_slave_all
{
    log_echo pgxc_clean_coordinator_slave_all'('$*')'
    if [ $coordSlave != y ]; then
        echo Coordinator slave is not configured.
        return 1
    fi
    local idx
    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
        pgxc_clean_coordinator_slave ${coordNames[$idx]}
    done
}
+
# Wipe both master and slave work directories of all coordinators.
function pgxc_clean_coordinator_all
{
    log_echo pgxc_clean_coordinator_all'('$*')'
    pgxc_clean_coordinator_master_all
    pgxc_clean_coordinator_slave_all
}
+
# pgxc_clean_datanode_master datanode_name -- wipe the master work
# directory of the named datanode.
function pgxc_clean_datanode_master
{
    log_echo pgxc_clean_datanode_master'('$*')'
    if [ $# -ne 1 ]; then
        echo Specify datanode name
        return 2
    fi
    local i
    # BUGFIX: the loop bound was ${datanodeNames[@]} (the element
    # values, not a number); ${#datanodeNames[@]} (the element count)
    # was meant, so the loop never matched any datanode.
    for ((i=0; i<${#datanodeNames[@]}; i++));do
        if [ $1 == ${datanodeNames[$i]} ]; then
            pgxc_clean_dir ${datanodeMasterServers[$i]} ${datanodeMasterDirs[$i]}
            return
        fi
    done
    echo Specified datanode is not configured, $1
    return 2
}
+
# Wipe the master work directories of all datanodes.
# BUGFIX: this function was only defined under the misspelled name
# pgxc_clena_datanode_master_all, so pgxc_clean_datanode_all's call to
# the correctly spelled name failed.  The correct name is now the real
# function; the old misspelling is kept as an alias for compatibility.
function pgxc_clean_datanode_master_all
{
    local i
    log_echo pgxc_clean_datanode_master_all'('$*')'
    for ((i=0;i<${#datanodeNames[@]};i++));do
        pgxc_clean_datanode_master ${datanodeNames[$i]}
    done
}

# Backward-compatible alias for the historical misspelling.
function pgxc_clena_datanode_master_all
{
    pgxc_clean_datanode_master_all "$@"
}
+
# pgxc_clean_datanode_slave datanode_name -- wipe the slave work
# directory of the named datanode.
function pgxc_clean_datanode_slave
{
    log_echo pgxc_clean_datanode_slave'('$*')'
    if [ $datanodeSlave != y ]; then
        echo Datanode slave is not configured.
        return 1
    fi
    if [ $# -ne 1 ]; then
        echo Specify datanode name.
        return 2
    fi
    local idx
    for ((idx = 0; idx < ${#datanodeNames[@]}; idx++)); do
        if [ $1 == ${datanodeNames[$idx]} ]; then
            if [ ${datanodeSlaveServers[$idx]} == none ] || [ ${datanodeSlaveServers[$idx]} == N/A ]; then
                echo Specified datanode slave is not configured, $1
                return 2
            fi
            pgxc_clean_dir ${datanodeSlaveServers[$idx]} ${datanodeSlaveDirs[$idx]}
            return
        fi
    done
    echo Specified datanode is not configured, $1
    return 2
}
+
# Wipe the slave work directories of all datanodes.
function pgxc_clean_datanode_slave_all
{
    log_echo pgxc_clean_datanode_slave_all'('$*')'
    if [ $datanodeSlave != y ]; then
        echo Datanode slave is not configured.
        return 1
    fi
    local idx
    for ((idx = 0; idx < ${#datanodeNames[@]}; idx++)); do
        pgxc_clean_datanode_slave ${datanodeNames[$idx]}
    done
}
+
# Wipe both master and slave work directories of all datanodes.
function pgxc_clean_datanode_all
{
    log_echo pgxc_clean_datanode_all'('$*')'
    # BUGFIX: this called pgxc_clean_datanode_master_all, which is not
    # defined anywhere (the definition is misspelled
    # pgxc_clena_datanode_master_all), so the master cleanup failed with
    # "command not found".  Call the name that actually exists.
    pgxc_clena_datanode_master_all
    pgxc_clean_datanode_slave_all
}
+
# Clean every configured component: GTM (master and slave), all GTM
# proxies, then all coordinators and datanodes (master and slave).
function pgxc_clean_node_all
{
    local idx

    log_echo pgxc_clean_node_all'('$*')'
    pgxc_clean_node $gtmName all
    if [ "$gtmProxy" == "y" ]; then
        for ((idx = 0; idx < ${#gtmProxyNames[@]}; idx++)); do
            pgxc_clean_node ${gtmProxyNames[$idx]}
        done
    else
        eecho GTM Proxies are not configured.
    fi
    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
        pgxc_clean_node ${coordNames[$idx]} all
    done
    for ((idx = 0; idx < ${#datanodeNames[@]}; idx++)); do
        pgxc_clean_node ${datanodeNames[$idx]} all
    done
}
+
# print_stdout host file prompt -- display a (possibly remote) output
# file.  When verbose and non-interactive the file is printed directly;
# in interactive mode the user is asked first (the remaining arguments
# form the prompt).  The file is also appended to $logfile when logging
# is enabled, and is always removed afterwards.
function print_stdout
{
    local host
    local file
    if [ $# -ne 3 ]; then
        return 1
    fi
    host=$1
    shift
    file=$1
    shift
    if [ $verbose == y ]; then
        if [ $interactive != y ]; then
            if [ $host != localhost ]; then
                ssh $pgxcUser@$host cat $file
            else
                # BUGFIX: this branch appended the local file to $logfile
                # instead of printing it to stdout; logging is handled by
                # the $logOpt block below (compare do_stdout).
                cat $file
            fi
        else
            echo -n $*
            if [ `readyesno n` == y ]; then
                if [ $host != localhost ]; then
                    ssh $pgxcUser@$host cat $file
                else
                    cat $file
                fi
            fi
        fi
    fi
    if [ $logOpt == y ]; then
        if [ $host != localhost ]; then
            (ssh $pgxcUser@$host cat $file) >> $logfile
        else
            cat $file >> $logfile
        fi
    fi
    if [ $host != localhost ]; then
        ssh $pgxcUser@$host rm -f $file
    else
        rm -f $file
    fi
}
+
# do_stdout host file -- like print_stdout but without asking: when
# verbose, show the (possibly remote) file and mirror it to the log if
# logging is on, then remove the file.
function do_stdout
{
    local tgthost
    local outfile
    if [ $# -ne 2 ]; then
        return 1
    fi
    tgthost=$1
    outfile=$2
    if [ $verbose == y ]; then
        if [ $tgthost != localhost ]; then
            ssh $pgxcUser@$tgthost cat $outfile
        else
            cat $outfile
        fi
        if [ $logOpt == y ]; then
            if [ $tgthost != localhost ]; then
                (ssh $pgxcUser@$tgthost cat $outfile) >> $logfile
            else
                cat $outfile >> $logfile
            fi
        fi
    fi
    # Always clean up the temporary output file.
    if [ $tgthost != localhost ]; then
        ssh $pgxcUser@$tgthost rm -f $outfile
    else
        rm -f $outfile
    fi
}
+
# Append the given file ($1) to $logfile when logging is enabled.
function fileWriteToLog
{
    if [ "$logOpt" == y ]; then
        cat $1 >> $logfile
    fi
}
+
# Show the initgtm output captured on host $1 in file $2.
function print_initgtm_out
{
    local host=$1 outfile=$2
    print_stdout $host $outfile "Print initgtm output\?"
}
+
# Show the initdb output captured on host $1 in file $2.
function print_initdb_out
{
    # BUGFIX: the prompt said "initgtm" -- copy-paste from
    # print_initgtm_out; this wrapper prints initdb output.
    print_stdout $1 $2 "Print initdb output\?"
}
+
# Show the pg_ctl output captured on host $1 in file $2.
function print_pg_ctl_out
{
    local host=$1 outfile=$2
    print_stdout $host $outfile "Print pg_ctl output\?"
}
+
# Show the gtm_ctl output captured on host $1 in file $2.
function print_gtm_ctl_out
{
    local host=$1 outfile=$2
    print_stdout $host $outfile "Print gtm_ctl output\?"
}
+
#===========================================================
#
# GTM and GTM slave stuff
#
#===========================================================
+
+# Reconfigure GTM Master. Result will be reflected to the configuration file too.
+# Parameters are server, port and directory. Please note that the node name
+# is fixed. You should edit configuration file to change GTM node name.
+# You may specify two more arguments, gtm additional configuration file common to
+# master/slave and gtm additional configuration file specific to gtm master.
+# If you don't specify "no" additional ones, you can specify "none".
# Reconfigure the GTM master and append the new values to the
# configuration file (the node name stays fixed; edit the config file
# to change it).  Arguments: server port dir [extraConfig
# [masterSpecificExtraConfig]] -- pass "none" for an extra-config slot
# you do not use.
function pgxc_configure_gtm_master
{
    log_echo pgxc_configure_gtm_master"("$*")"
    vecho ================================================================
    vecho Configure GTM Master

    if [ $# -lt 3 ] || [ $# -gt 5 ]; then
        echo ERROR: invalid arguments, $*
        return 1
    fi
    gtmMasterServer=$1
    gtmMasterPort=$2
    gtmMasterDir=$3
    vecho New GTM Master config: Server: "'"$gtmMasterServer"'", Port: $gtmMasterPort, Dir: "'"$gtmMasterDir"'"
    # Record the new values in the configuration file.
    cat >> $configFile <<EOF
#====================================
# Updated due to GTM Master reconfiguration
# $datetime
gtmMasterServer=$gtmMasterServer
gtmMasterPort=$gtmMasterPort
gtmMasterDir=$gtmMasterDir
EOF
    if [ $# -ge 4 ]; then
        gtmExtraConfig=$4
        vecho -n " "ExtraConfig: "'"$gtmExtraConfig"'"
        cat >> $configFile <<EOF
gtmExtraConfig=$gtmExtraConfig
EOF
        if [ $# -eq 5 ]; then
            gtmMasterSpecificExtraConfig=$5
            vecho "," ExtraSpecificConfig: "'"$gtmMasterSpecificExtraConfig"'"
            cat >> $configFile <<EOF
gtmMasterSpecificExtraConfig=$gtmMasterSpecificExtraConfig
EOF
        fi
    fi
    vecho ""
    cat >> $configFile <<EOF
# --- End of reconfiguration --------
EOF
    # Keep a backup of the updated configuration file.
    pgxc_backup_config_file
}
+
# Initialize the GTM master: kill any running gtm, recreate the data
# directory, run initgtm and build gtm.conf from the standard settings
# plus the optional extra configuration files.
function pgxc_init_gtm_master
{
    log_echo pgxc_init_gtm_master"("$*")"
    vecho ================================================================
    vecho GTM Master initialize

    doit ssh $pgxcUser@$gtmMasterServer "killall -u $pgxcOwner -9 gtm > /dev/null 2>&1"
    doit ssh $pgxcUser@$gtmMasterServer rm -rf $gtmMasterDir
    doit ssh $pgxcUser@$gtmMasterServer mkdir -p $gtmMasterDir
    doit ssh $pgxcUser@$gtmMasterServer "initgtm -Z gtm -D $gtmMasterDir > $tmpDir/initgtm.out 2>&1"
    print_initgtm_out $gtmMasterServer $tmpDir/initgtm.out
    vecho Configuring $gtmMasterServer:$gtmMasterDir/gtm.conf
    vecho ssh $pgxcUser@$gtmMasterServer '"'cat '>>' $gtmMasterDir/gtm.conf '<<EOF'
    ssh $pgxcUser@$gtmMasterServer "cat >> $gtmMasterDir/gtm.conf" <<EOF
#===========================================
# Added at initialization. $datetime
listen_addresses = '*'
EOF
    if [ $gtmExtraConfig != none ]; then
        vecho ssh $pgxcUser@$gtmMasterServer '"'cat '>>' $gtmMasterDir/gtm.conf'"' '<' $gtmExtraConfig
        ssh $pgxcUser@$gtmMasterServer "cat >> $gtmMasterDir/gtm.conf" < $gtmExtraConfig
    fi
    if [ $gtmMasterSpecificExtraConfig != none ]; then
        # BUGFIX: this branch tested $gtmMasterSpecificExtraConfig but
        # then redirected from the undefined $gtmSpecificExtraConfig, so
        # the master-specific extra configuration was never applied.
        vecho ssh $pgxcUser@$gtmMasterServer '"'cat '>>' $gtmMasterDir/gtm.conf'"' '<' $gtmMasterSpecificExtraConfig
        ssh $pgxcUser@$gtmMasterServer "cat >> $gtmMasterDir/gtm.conf" < $gtmMasterSpecificExtraConfig
    fi
    vecho ssh $pgxcUser@$gtmMasterServer '"'cat '>>' $gtmMasterDir/gtm.conf'"'
    ssh $pgxcUser@$gtmMasterServer "cat >> $gtmMasterDir/gtm.conf" <<EOF
port = $gtmMasterPort
nodename = '$gtmName'
startup = ACT
# End of addition
EOF
    # Start GTM once with a low initial GXID (-x 2000; the default is
    # 10000) and stop it again.  This is a dirty hack: the default
    # initial GXID causes datanode slave errors because too many XIDs
    # are involved.  Properly this should be fixed by running initdb in
    # cooperation with gtm, or by gtm learning the starting GXID from
    # all the nodes.
    vecho Initializing starting GXID value...
    vecho '('ssh $pgxcUser@$gtmMasterServer gtm -x 2000 -D $gtmMasterDir '&)'
    (ssh $pgxcUser@$gtmMasterServer gtm -x 2000 -D $gtmMasterDir &)
    sleep 1
    vecho ssh $pgxcUser@$gtmMasterServer '"'gtm_ctl stop -Z gtm -D $gtmMasterDir '>' /dev/null '2>&1"'
    ssh $pgxcUser@$gtmMasterServer "gtm_ctl stop -Z gtm -D $gtmMasterDir > /dev/null 2>&1"
}
+
+# Configure gtm_slave. The arguments are host name, port and directory.
+# If you remove (or don't configure) the slave, you specify host name as
+# none. You don't have to worry about the rest of the parameters.
+# You can specify additional parameters, extra file to go to gtm.conf
+# file, only to the slave. The common file should be configured
+# using pgxc_configure_gtm_master function.
+#function pgxc_configure_gtm_slave
+#{
+#}
+
+
# Initialize the GTM slave: kill any running gtm on the slave server,
# recreate the data directory, run initgtm and build gtm.conf as a
# standby pointing at the GTM master.
function pgxc_init_gtm_slave
{
    log_echo pgxc_init_gtm_slave'('$*')'
    vecho ================================================================
    vecho $progname:$0 GTM Slave initialize

    # BUGFIX: the "none" test read the misspelled $gtmSlaverServer, so a
    # slave server configured as "none" was not rejected.
    if [ "$gtmSlave" != "y" ] || [ "$gtmSlaveServer" == "N/A" ] || [ "$gtmSlaveServer" == "none" ] ; then
        echo $progname:$0 ERROR: GTM Slave is not configured.
        return 1
    fi
    vecho ssh $pgxcUser@$gtmSlaveServer '"'killall -u $pgxcOwner -9 gtm '>' /dev/null '2>&1"'
    ssh $pgxcUser@$gtmSlaveServer "killall -u $pgxcOwner -9 gtm > /dev/null 2>&1"
    doit ssh $pgxcUser@$gtmSlaveServer rm -rf $gtmSlaveDir
    doit ssh $pgxcUser@$gtmSlaveServer mkdir -p $gtmSlaveDir
    vecho ssh $pgxcUser@$gtmSlaveServer '"'initgtm -Z gtm -D $gtmSlaveDir '>' $tmpDir/initgtm.out '2>&1"'
    ssh $pgxcUser@$gtmSlaveServer "initgtm -Z gtm -D $gtmSlaveDir > $tmpDir/initgtm.out 2>&1"
    print_initgtm_out $gtmSlaveServer $tmpDir/initgtm.out
    vecho $pgxcUser@$gtmSlaveServer '"'cat '>>' $gtmSlaveDir/gtm.conf'"'
    ssh $pgxcUser@$gtmSlaveServer "cat >> $gtmSlaveDir/gtm.conf" <<EOF
listen_addresses = '*'
EOF
    if [ $gtmExtraConfig != none ]; then
        vecho ssh $pgxcUser@$gtmSlaveServer '"'cat '>>' $gtmSlaveDir/gtm.conf'"' '<' $gtmExtraConfig
        ssh $pgxcUser@$gtmSlaveServer "cat >> $gtmSlaveDir/gtm.conf" < $gtmExtraConfig
    fi
    if [ $gtmSlaveSpecificExtraConfig != none ]; then
        vecho ssh $pgxcUser@$gtmSlaveServer '"'cat '>>' $gtmSlaveDir/gtm.conf'"' '<' $gtmSlaveSpecificExtraConfig
        ssh $pgxcUser@$gtmSlaveServer "cat >> $gtmSlaveDir/gtm.conf" < $gtmSlaveSpecificExtraConfig
    fi
    vecho ssh $pgxcUser@$gtmSlaveServer '"'cat '>>' $gtmSlaveDir/gtm.conf'"'
    ssh $pgxcUser@$gtmSlaveServer "cat >> $gtmSlaveDir/gtm.conf" <<EOF
port = $gtmSlavePort
nodename = '$gtmName'
startup = STANDBY
active_host = '$gtmMasterServer'
active_port = $gtmMasterPort
EOF
}
+
# Start the GTM master.  Any stale gtm process owned by $pgxcOwner is
# killed first and the stale node registration file is removed.
function pgxc_start_gtm_master
{
    log_echo pgxc_start_gtm_master'('$*')'
    vecho ================================================================
    vecho Starting GTM Master

    vecho ssh $pgxcUser@$gtmMasterServer '"'killall -u $pgxcOwner -9 gtm '>' /dev/null '2>&1"'
    ssh $pgxcUser@$gtmMasterServer "killall -u $pgxcOwner -9 gtm > /dev/null 2>&1"
    # Remove the stale registration file so nodes re-register cleanly.
    doit ssh $pgxcUser@$gtmMasterServer "rm -f $gtmMasterDir/register.node"
    vecho ssh $pgxcUser@$gtmMasterServer '"'gtm_ctl start -Z gtm -D $gtmMasterDir '>' $tmpDir/gtm.out'"'
    ssh $pgxcUser@$gtmMasterServer "gtm_ctl start -Z gtm -D $gtmMasterDir > $tmpDir/gtm.out"
    do_stdout $gtmMasterServer $tmpDir/gtm.out
}
+
# Start the GTM slave.  Requires a configured slave and a running
# GTM master (a standby cannot start without its master).
function pgxc_start_gtm_slave
{
    log_echo pgxc_start_gtm_slave'('$*')'
    vecho ================================================================
    vecho $progname:$0 Starting GTM Slave

    if [ $gtmSlaveServer == none ]; then
        eecho ERROR: GTM slave is not configured.
        return 1
    fi
    # The master must be alive before the standby can connect to it.
    vecho ssh $pgxcUser@$gtmMasterServer '"'gtm_ctl status -Z gtm -D $gtmMasterDir '>' /dev/null '2>&1"'
    if ! ssh $pgxcUser@$gtmMasterServer "gtm_ctl status -Z gtm -D $gtmMasterDir > /dev/null 2>&1"; then
        echo ERROR: GTM Master is not running. Cannot start the slave.
        return 1
    fi
    vecho ssh $pgxcUser@$gtmSlaveServer '"'killall -u $pgxcOwner -9 gtm '>' /dev/null '2>&1"'
    ssh $pgxcUser@$gtmSlaveServer "killall -u $pgxcOwner -9 gtm >/dev/null 2>&1"
    doit ssh $pgxcUser@$gtmSlaveServer "rm -f $gtmSlaveDir/register.node"
    vecho ssh $pgxcUser@$gtmSlaveServer '"'gtm_ctl start -Z gtm -D $gtmSlaveDir '>' $tmpDir/gtm.out'"'
    ssh $pgxcUser@$gtmSlaveServer "gtm_ctl start -Z gtm -D $gtmSlaveDir > $tmpDir/gtm.out"
    do_stdout $gtmSlaveServer $tmpDir/gtm.out
}
+
+
# Stop the GTM master cleanly via gtm_ctl.
function pgxc_stop_gtm_master
{
    log_echo pgxc_stop_gtm_master'('$*')'
    vecho ================================================================
    vecho Stopping GTM Master
    doit ssh $pgxcUser@$gtmMasterServer gtm_ctl stop -Z gtm -D $gtmMasterDir
}
+
+# Stop the GTM slave cleanly with gtm_ctl; error out when none is configured.
+function pgxc_stop_gtm_slave
+{
+ log_echo pgxc_stop_gtm_slave'('$*')'
+ vecho ================================================================
+ vecho Stopping GTM Slave
+ # A slave set to "none" or "N/A" means it was never configured.
+ if [ $gtmSlaveServer == N/A ] || [ $gtmSlaveServer == none ]; then
+  eecho ERROR: GTM slave is not configured.
+  return 1
+ fi
+ local slave_login
+ slave_login=$pgxcUser@$gtmSlaveServer
+ doit ssh $slave_login gtm_ctl stop -Z gtm -D $gtmSlaveDir
+}
+
+# Forcibly kill the GTM master (SIGKILL via killall) and clean up its
+# listen socket. Unlike pgxc_stop_gtm_master, no clean shutdown is attempted.
+function pgxc_kill_gtm_master
+{
+ log_echo pgxc_kill_gtm_master'('$*')'
+ vecho ================================================================
+ vecho Stopping GTM Master
+ vecho ssh $pgxcUser@$gtmMasterServer '"'killall -u $pgxcUser -9 gtm '>'/dev/null '2>&1"'
+ ssh $pgxcUser@$gtmMasterServer "killall -u $pgxcUser -9 gtm >/dev/null 2>&1"
+ # SIGKILL leaves the unix socket file behind; remove it explicitly.
+ pgxc_clean_socket $gtmMasterServer $gtmMasterPort
+}
+
+# Forcibly kill the GTM slave (SIGKILL via killall) and clean up its
+# listen socket. No clean shutdown is attempted.
+function pgxc_kill_gtm_slave
+{
+ log_echo pgxc_kill_gtm_slave'('$*')'
+ vecho ================================================================
+ vecho Stopping GTM Slave
+ if [ $gtmSlaveServer == none ] || [ $gtmSlaveServer == N/A ]; then
+  eecho ERROR: GTM slave is not configured.
+  return 1
+ fi
+ vecho ssh $pgxcUser@$gtmSlaveServer '"'killall -u $pgxcUser -9 gtm '>'/dev/null '2>&1"'
+ ssh $pgxcUser@$gtmSlaveServer "killall -u $pgxcUser -9 gtm >/dev/null 2>&1"
+ # SIGKILL leaves the unix socket file behind; remove it explicitly.
+ pgxc_clean_socket $gtmSlaveServer $gtmSlavePort
+}
+
+# Fail over GTM from the master to the configured slave.
+# Promotes the slave with "gtm_ctl promote", rewrites the slave's gtm.conf
+# to start up as the active GTM, and updates both the pgxc_ctl configuration
+# file and this shell's variables so the promoted node becomes the master
+# and the slave slot is cleared. gtm_proxy reconnection is a separate action.
+function pgxc_failover_gtm
+{
+ log_echo pgxc_failover_gtm
+ # Reconnect should be done in separate action.
+ vecho ================================================================
+ vecho GTM Failover
+
+ if [ $gtmSlaveServer == none ]; then
+  eecho ERROR: pgxc_failover_gtm: GTM slave is not available.
+  return 1
+ fi
+ # The slave must be alive before we promote it.
+ doit pgxc_monitor -Z gtm -p $gtmSlavePort -h $gtmSlaveServer
+# ssh "$pgxcUser@$gtmSlaveServer gtm_ctl status -Z gtm -D $gtmSlaveDir > /dev/null 2>&1"
+ if [ $? -ne 0 ]; then
+  eecho ERROR: GTM slave is not running.
+  return 1
+ fi
+ # STONITH GTM Master
+ # Please note that master and slave should run on different server.
+# vecho $pgxcUser@$gtmMasterServer '"'killall -u $pgxcOwner -9 gtm '>'/dev/null '2>&1"'
+# ssh $pgxcUser@$gtmMasterServer "killall -u $pgxcOwner -9 gtm >/dev/null 2>&1"
+ doit ssh $pgxcUser@$gtmSlaveServer gtm_ctl promote -Z gtm -D $gtmSlaveDir
+ # Update GTM configuration file as the master
+ vecho Reconfigure GTM as Master
+ vecho ssh $pgxcUser@$gtmSlaveServer '"'cat '>>' $gtmSlaveDir/gtm.conf'"'
+ ssh $pgxcUser@$gtmSlaveServer "cat >> $gtmSlaveDir/gtm.conf" <<EOF
+#===================================================
+# Updated due to GTM failover
+# $datetime
+startup = ACT
+#----End of reconfiguration -------------------------
+EOF
+ # Update configuration
+ vecho Reconfiguring whole Postgres-XC cluster
+ vecho cat '>>' $configFile
+ cat >> $configFile <<EOF
+#===================================================
+# pgxc configuration file updated due to GTM failover
+# $datetime
+gtmMasterServer=$gtmSlaveServer
+gtmMasterPort=$gtmSlavePort
+gtmMasterDir=$gtmSlaveDir
+gtmSlaveServer=none
+gtmSlavePort=0
+gtmSlaveDir=none
+#----End of reconfiguration -------------------------
+EOF
+ # Backup config file
+ pgxc_backup_config_file
+ # Reconfigure myself: mirror the appended config in the current shell so
+ # subsequent commands in this session talk to the promoted master.
+ gtmMasterServer=$gtmSlaveServer
+ gtmMasterPort=$gtmSlavePort
+ gtmMasterDir=$gtmSlaveDir
+ gtmSlaveServer=none
+ gtmSlavePort=0
+ gtmSlaveDir=none
+}
+
+#===============================================================================
+#
+# GTM Proxy stuff
+#
+#===============================================================================
+
+# Initialize a GTM proxy. First argument is the proxy node name.
+# Kills any stale gtm_proxy on the target server, recreates its data
+# directory, runs initgtm, then builds gtm_proxy.conf from the optional
+# extra-config file plus the standard settings pointing at the GTM master.
+function pgxc_init_gtm_proxy
+{
+ # First argument is the nodename
+ log_echo pgxc_init_gtm_proxy'('$*')'
+ vecho ================================================================
+ vecho Initialize GTM Proxy $1
+
+ local i
+
+ if [ $# -ne 1 ]; then
+  eecho ERROR: Specify gtm_proxy name
+  return 1
+ fi
+ for ((i=0; i< ${#gtmProxyNames[@]}; i++)); do
+  if [ $1 == ${gtmProxyNames[$i]} ] && [ ${gtmProxyServers[$i]} != none ] && [ ${gtmProxyServers[$i]} != N/A ]; then
+   vecho ssh $pgxcUser@${gtmProxyServers[$i]} '"'killall -u $pgxcOwner -9 gtm_proxy '>'/dev/null '2>&1"'
+   ssh $pgxcUser@${gtmProxyServers[$i]} "killall -u $pgxcOwner -9 gtm_proxy >/dev/null 2>&1"
+   doit ssh $pgxcUser@${gtmProxyServers[$i]} rm -rf ${gtmProxyDirs[$i]}
+   doit ssh $pgxcUser@${gtmProxyServers[$i]} mkdir -p ${gtmProxyDirs[$i]}
+   vecho ssh $pgxcUser@${gtmProxyServers[$i]} '"'initgtm -Z gtm_proxy -D ${gtmProxyDirs[$i]} '>' $tmpDir/initgtm.out '2>&1"'
+   ssh $pgxcUser@${gtmProxyServers[$i]} "initgtm -Z gtm_proxy -D ${gtmProxyDirs[$i]} > $tmpDir/initgtm.out 2>&1"
+   # Fixed: initgtm output is written to $tmpDir above; the previous code
+   # passed $localTmpDir here (cf. print_initdb_out usage which uses $tmpDir).
+   print_initgtm_out ${gtmProxyServers[$i]} $tmpDir/initgtm.out
+   vecho Configuring ${gtmProxyServers[$i]}:${gtmProxyDirs[$i]}/gtm_proxy.conf
+   # Fixed: the second test referenced the misspelled variable
+   # $gtmPxyExtrConfig (always empty), which made the test fail and the
+   # extra-config file silently skipped even when configured.
+   if [ $gtmPxyExtraConfig != none ] && [ $gtmPxyExtraConfig != N/A ]; then
+    vecho ssh $pgxcUser@${gtmProxyServers[$i]} '"'cat '>>' ${gtmProxyDirs[$i]}/gtm_proxy.conf'"' '<' $gtmPxyExtraConfig
+    ssh $pgxcUser@${gtmProxyServers[$i]} "cat >> ${gtmProxyDirs[$i]}/gtm_proxy.conf" < $gtmPxyExtraConfig
+   fi
+   vecho ssh $pgxcUser@${gtmProxyServers[$i]} '"'cat '>>' ${gtmProxyDirs[$i]}/gtm_proxy.conf'"'
+   ssh $pgxcUser@${gtmProxyServers[$i]} "cat >> ${gtmProxyDirs[$i]}/gtm_proxy.conf" <<EOF
+nodename = '${gtmProxyNames[$i]}'
+listen_addresses = '*'
+port = ${gtmProxyPorts[$i]}
+gtm_host = $gtmMasterServer
+gtm_port = $gtmMasterPort
+worker_threads = 1
+gtm_connect_retry_interval = 1
+EOF
+   return
+  fi
+ done
+ eecho ERROR: specified GTM proxy is not configured, $1
+ return 1
+}
+
+# Initialize every configured GTM proxy in turn.
+function pgxc_init_gtm_proxy_all
+{
+ local idx
+
+ log_echo pgxc_init_gtm_proxy_all'('$*')'
+ if [ $gtmProxy != y ]; then
+  eecho ERROR: gtm_proxy is not configured
+  return 1
+ fi
+ for ((idx = 0; idx < ${#gtmProxyNames[@]}; idx++)); do
+  # Skip slots without a configured proxy server.
+  if [ ${gtmProxyServers[$idx]} == none ] || [ ${gtmProxyServers[$idx]} == N/A ]; then
+   continue
+  fi
+  pgxc_init_gtm_proxy ${gtmProxyNames[$idx]}
+ done
+}
+
+# Start one GTM proxy. First argument is the proxy node name.
+# Kills any stale gtm_proxy process first, then starts it with gtm_ctl
+# and relays the remote stdout back to the operator.
+function pgxc_start_gtm_proxy
+{
+ # First argument is the nodename
+ log_echo pgxc_start_gtm_proxy'('$*')'
+ vecho ================================================================
+ vecho Start GTM Proxy $1
+
+ if [ $# -ne 1 ]; then
+  eecho Error: specify GTM proxy name
+  return 1
+ fi
+ local i
+ for ((i=0; i< ${#gtmProxyNames[@]}; i++)); do
+  if [ $1 == ${gtmProxyNames[$i]} ]; then
+   vecho ssh $pgxcUser@${gtmProxyServers[$i]} '"'killall -u $pgxcOwner -9 gtm_proxy '>'/dev/null '2>&1"'
+   ssh $pgxcUser@${gtmProxyServers[$i]} "killall -u $pgxcOwner -9 gtm_proxy >/dev/null 2>&1"
+   vecho ssh $pgxcUser@${gtmProxyServers[$i]} '"'gtm_ctl start -Z gtm_proxy -D ${gtmProxyDirs[$i]} '>' $tmpDir/gtm_proxy.out '2>&1"'
+   ssh $pgxcUser@${gtmProxyServers[$i]} "gtm_ctl start -Z gtm_proxy -D ${gtmProxyDirs[$i]} > $tmpDir/gtm_proxy.out 2>&1"
+   do_stdout ${gtmProxyServers[$i]} $tmpDir/gtm_proxy.out
+   return
+  fi
+ done
+ eecho ERROR: specified GTM proxy does not exist, $1
+ return 1
+}
+
+# Start every configured GTM proxy, skipping ones that are already running.
+function pgxc_start_gtm_proxy_all
+{
+ # Fixed log tag: was "pgxc_startgtm_proxy_all" (missing underscore).
+ log_echo pgxc_start_gtm_proxy_all'('$*')'
+ local i
+ vecho ================================================================
+ vecho Starting all the GTM proxies
+ if [ $gtmProxy != y ]; then
+  eecho ERROR: GTM proxy is not configured.
+  return 1
+ fi
+ for((i=0;i<${#gtmProxyNames[@]};i++)); do
+  if [ ${gtmProxyServers[$i]} != none ] && [ ${gtmProxyServers[$i]} != N/A ]; then
+   pgxc_monitor -Z gtm -p ${gtmProxyPorts[$i]} -h ${gtmProxyServers[$i]}
+   if [ $? -eq 0 ]; then
+    # Fixed: the message used the misspelled array ${gtpmProxyNames[$i]},
+    # which expanded to nothing and printed an empty proxy name.
+    eecho gtm_proxy"("${gtmProxyNames[$i]}")" is already running.
+   else
+    doit pgxc_start_gtm_proxy ${gtmProxyNames[$i]}
+   fi
+  fi
+ done
+}
+
+# Stop one GTM proxy cleanly with gtm_ctl. First argument is the node name.
+function pgxc_stop_gtm_proxy
+{
+ # First argument is the nodename
+ log_echo pgxc_stop_gtm_proxy'('$*')'
+ vecho ================================================================
+ vecho Stop GTM Proxy $1
+
+ if [ $# -ne 1 ]; then
+  # Fixed: was "iecho", which is used nowhere else; all sibling
+  # functions report usage errors through eecho.
+  eecho Specify GTM Proxy name
+  return 1
+ fi
+ local i
+ for ((i=0; i< ${#gtmProxyNames[@]}; i++)); do
+  if [ $1 == ${gtmProxyNames[$i]} ]; then
+   doit ssh $pgxcUser@${gtmProxyServers[$i]} gtm_ctl stop -Z gtm_proxy -D ${gtmProxyDirs[$i]}
+   return
+  fi
+ done
+ eecho ERROR: specified GTM proxy does not exist, $1
+ return 1
+}
+
+# Stop every configured GTM proxy in turn.
+function pgxc_stop_gtm_proxy_all
+{
+ log_echo pgxc_stop_gtm_proxy_all'('$*')'
+ vecho ================================================================
+ vecho Stop all the GTM Proxies
+
+ local idx
+ if [ $gtmProxy != y ]; then
+  eecho Error: GTM Proxy is not configured
+  return 1
+ fi
+ for ((idx = 0; idx < ${#gtmProxyNames[@]}; idx++)); do
+  # Only act on slots with a real server configured.
+  if [ ${gtmProxyServers[$idx]} != none ] && [ ${gtmProxyServers[$idx]} != N/A ]; then
+   doit pgxc_stop_gtm_proxy ${gtmProxyNames[$idx]}
+  fi
+ done
+}
+
+
+
+# Forcibly kill one GTM proxy (SIGKILL via killall) and clean its socket.
+# First argument is the proxy node name. No clean shutdown is attempted.
+function pgxc_kill_gtm_proxy
+{
+ # First argument is the nodename
+ log_echo pgxc_kill_gtm_proxy'('$*')'
+ vecho ================================================================
+ vecho Kill GTM Proxy $1
+
+ if [ $# -ne 1 ]; then
+  eecho ERROR: specify GTM proxy name
+  return 1
+ fi
+ local i
+ for ((i=0; i< ${#gtmProxyNames[@]}; i++)); do
+  if [ $1 == ${gtmProxyNames[$i]} ]; then
+   vecho ssh $pgxcUser@${gtmProxyServers[$i]} '"'killall -u $pgxcOwner -9 gtm_proxy '>'/dev/null '2>&1"'
+   ssh $pgxcUser@${gtmProxyServers[$i]} "killall -u $pgxcOwner -9 gtm_proxy >/dev/null 2>&1"
+   # SIGKILL leaves the unix socket file behind; remove it explicitly.
+   doit pgxc_clean_socket ${gtmProxyServers[$i]} ${gtmProxyPorts[$i]}
+   return
+  fi
+ done
+ eecho ERROR: specified GTM proxy does not exist, $1
+ return 1
+}
+
+#---------------- Up to here: log output extension: Oct 19, 2012 --------------------------
+# Forcibly kill every configured GTM proxy.
+function pgxc_kill_gtm_proxy_all
+{
+ # Fixed log tag: was "pgxc_lill_gtm_proxy_all".
+ log_echo pgxc_kill_gtm_proxy_all "("$*")"
+ vecho ================================================================
+ vecho Killing all the GTM Proxies
+
+ local i
+ if [ $gtmProxy != y ]; then
+  eecho GTM Proxy is not configured
+  return 1
+ fi
+ for((i=0;i<${#gtmProxyNames[@]};i++)); do
+  # Fixed: second test used the misspelled array ${gtmProxySevrers[$i]}
+  # (always empty), so N/A entries were not reliably skipped.
+  if [ ${gtmProxyServers[$i]} == none ] || [ ${gtmProxyServers[$i]} == N/A ]; then
+   continue
+  fi
+  pgxc_kill_gtm_proxy ${gtmProxyNames[$i]}
+ done
+}
+
+# Reconnect one GTM proxy to the current GTM master (used after failover).
+# Issues "gtm_ctl reconnect" with the new master's host/port, then appends
+# the new gtm_host/gtm_port to gtm_proxy.conf so the proxy also connects to
+# the new master on its next restart.
+function pgxc_reconnect_gtm_proxy
+{
+ # Reconnect to the current GTM master.  When failed over, the current Master must have been updated.
+ # Remember to update gtm_proxy configuration file so that it connects to the new master at the next
+ # start.
+ # Please note that we assume GTM has already been failed over.
+ # First argument is gtm_proxy nodename
+ log_echo pgxc_reconnect_gtm_proxy "("$*")"
+ vecho ================================================================
+ vecho Reconnect GTM Proxy $1
+
+ if [ $# -ne 1 ]; then
+  eecho Specify GTM proxy name
+  return 1
+ fi
+ local i
+ for ((i=0; i< ${#gtmProxyNames[@]}; i++)); do
+  if [ $1 == ${gtmProxyNames[$i]} ]; then
+   vecho doit ssh $pgxcUser@${gtmProxyServers[$i]} gtm_ctl reconnect -Z gtm_proxy -D ${gtmProxyDirs[$i]} -o \
+    \"-s $gtmMasterServer -t $gtmMasterPort\"
+
+   doit ssh $pgxcUser@${gtmProxyServers[$i]} gtm_ctl reconnect -Z gtm_proxy -D ${gtmProxyDirs[$i]} -o \
+    \"-s $gtmMasterServer -t $gtmMasterPort\"
+   # Persist the new master coordinates for the next proxy restart.
+   vecho Reconfiguring GTM Proxy reflect reconnect.
+   log_echo ssh $pgxcUser@${gtmProxyServers[$i]} '"'cat '>>' ${gtmProxyDirs[$i]}/gtm_proxy.conf'"'
+   ssh $pgxcUser@${gtmProxyServers[$i]} "cat >> ${gtmProxyDirs[$i]}/gtm_proxy.conf" <<EOF
+#===================================================
+# Updated due to GTM Proxy reconnect
+# $datetime
+gtm_host = $gtmMasterServer
+gtm_port = $gtmMasterPort
+#----End of reconfiguration -------------------------
+EOF
+   return
+  fi
+ done
+ eecho ERROR: specified GTM proxy does not exist, $1
+ return 1
+
+}
+
+# Reconnect every configured GTM proxy to the current GTM master.
+function pgxc_reconnect_gtm_proxy_all
+{
+ log_echo pgxc_reconnect_gtm_proxy_all "("$*")"
+ vecho ================================================================
+ vecho Reconnect all the GTM proxies
+
+ local i
+ if [ $gtmProxy != y ]; then
+  # Fixed message typo: was "GTM Poxy".
+  eecho GTM Proxy is not configured
+  return 1
+ fi
+ for((i=0;i<${#gtmProxyNames[@]};i++)); do
+  if [ ${gtmProxyServers[$i]} == none ] || [ ${gtmProxyServers[$i]} == N/A ]; then
+   continue
+  fi
+  pgxc_reconnect_gtm_proxy ${gtmProxyNames[$i]}
+ done
+}
+#===============================================================================
+#
+# Coordinator stuff
+#
+#===============================================================================
+
+function pgxc_init_coordinator_master
+{
+ # First argument is the nodename
+ log_echo pgxc_init_coordinator_master "("$*")"
+ vecho ================================================================
+ vecho Initialize coordinator master $1
+
+ if [ $# -ne 1 ]; then
+ eecho Specify coordinator name
+ return 1
+ fi
+ local i
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+ if [ $1 == ${coordNames[$i]} ] && [ ${coordMasterServers[$i]} != none ] && [ ${coordMasterServers[$i]} != N/A ]; then
+ psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} -c 'select 1' postgres $pgxcOwner > /dev/null 2> /dev/null
+ if [ $? -eq 0 ]; then
+ eecho ERROR: target coordinator master is running now. Stop to configure $1 slave.
+ return 1
+ fi
+ doit ssh $pgxcUser@${coordMasterServers[$i]} rm -rf ${coordMasterDirs[$i]}
+ doit ssh $pgxcUser@${coordMasterServers[$i]} mkdir -p ${coordMasterDirs[$i]}
+ doit ssh $pgxcUser@${coordMasterServers[$i]} "initdb --nodename ${coordNames[$i]} -D ${coordMasterDirs[$i]} > $tmpDir/initdb.out 2>&1"
+ print_initdb_out ${coordMasterServers[$i]} $tmpDir/initdb.out
+ vecho Configuring ${coordMasterServers[$i]}:${coordMasterDirs[$i]}/postgresql.conf
+ # Get effective GTM port and host. If gtm_proxy is not found, then connect to GTM
+ local j
+ local targetGTMhost
+ local targetGTMport
+ targetGTMhost=$gtmMasterServer
+ targetGTMport=$gtmMasterPort
+ for ((j=0; j< ${#gtmProxyServers[@]}; j++)); do
+ if [ ${coordMasterServers[$i]} == ${gtmProxyServers[$j]} ]; then
+ targetGTMhost=${gtmProxyServers[$j]}
+ targetGTMport=${gtmProxyPorts[$j]}
+ break
+ fi
+ done
+ if [ $coordExtraConfig != none ] && [ $coordExtraConfig != N/A ]; then
+ vecho Configuring $pgxcUser@${coordMasterServer[$i]}:${coaordMasterDirs[$i]}/postgresql.conf using $coordExtraConfig
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/postgresql.conf" < $coordExtraConfig
+ fi
+ if [ ${coordSpecificExraConfig[$i]} != none ] && [ ${coordSpecificExraConfig[$i]} != none ]; then
+ vecho Configuring $pgxcUser@${coordMasterServers[$i]}:${coordMasterDirs[$i]}/postgresql.conf using ${coordSpecificExtraConfig[$i]}
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/postgresql.conf" < ${coordSpecificExtraConfig[$i]}
+ fi
+ vecho Configuring $pgxcUser@${coordMasterServers[$i]}:${coordMasterDirs[$i]}/postgresq
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/postgresql.conf" <<EOF
+#===========================================
+# Added at initialization. $datetime
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+listen_addresses = '*'
+port = ${coordPorts[$i]}
+max_connections = 100
+pooler_port = ${poolerPorts[$i]}
+gtm_host = '$targetGTMhost'
+gtm_port = $targetGTMport
+EOF
+ # Additional initialization for log_shipping.
+ if [ $coordSlave == y ] && [ ${coordSlaveServers[$i]} != none ] && [ ${coordSlaveServers[$i]} != N/A ]; then
+ # At least cleanup remote archive directory.
+ pgxc_clean_dir ${coordSlaveServers[$i]} ${coordArchLogDirs[$i]}
+ # Then setup postgresql.conf
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/postgresql.conf" <<EOF
+wal_level = hot_standby
+archive_mode = on
+archive_command = 'rsync %p $pgxcUser@${coordSlaveServers[$i]}:${coordArchLogDirs[$i]}/%f'
+max_wal_senders = ${coordMaxWALSenders[$i]}
+# End of Addition
+EOF
+ else
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/postgresql.conf" <<EOF
+# End of Addition
+EOF
+ fi
+ vecho Configuring ${coordMasterServers[$i]}:${coordMasterDirs[$i]}/pg_hba.conf
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/pg_hba.conf'"'
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/pg_hba.conf" <<EOF
+#=================================================
+# Addition at initialization, $datetime
+EOF
+ if [ $coordExtraPgHba != none ] && [ $coordExtraPgHba != N/A ]; then
+ vecho Configuring ${coordMasterServers[$i]}:${coordMasterDirs[$i]}/pg_hba.conf using $coordExtraPgHba
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/pg_hab.conf'"' '<' $coordExtraPgHba
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/pg_hab.conf" < $coordExtraPgHba
+ fi
+ if [ ${coordSpecificExtraPgHba[$i]} != none ] && [ ${coordSpecificExtraPgHba[$i]} != N/A ]; then
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/pg_hab.conf'"' '<' ${coordSpecificExtraPgHba[$i]}
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/pg_hab.conf" < ${coordSpecificExtraPgHba[$i]}
+ fi
+ local j
+ for ((j=0; j< ${#coordPgHbaEntries[@]}; j++)); do
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/pg_hba.conf'"'
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/pg_hba.conf" <<EOF
+host all $pgxcOwner ${coordPgHbaEntries[$j]} trust
+EOF
+ if [ ${coordSlaveServers[$i]} != none ] && [ ${coordSlaveServers[$i]} != N/A ]; then
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/pg_hba.conf'"'
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/pg_hba.conf" <<EOF
+host replication $pgxcOwner ${coordPgHbaEntries[$j]} trust
+EOF
+ fi
+ done
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/pg_hba.conf'"'
+ ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/pg_hba.conf" <<EOF
+# End of addition
+EOF
+ return
+ fi
+ done
+ eecho ERROR: specified coordinator is not configured, $1
+ return 1
+}
+
+# Initialize every configured coordinator master in turn.
+function pgxc_init_coordinator_master_all
+{
+ log_echo pgxc_init_coordinator_master_all'('$*')'
+ vecho ================================================================
+ vecho Initialize all the coordinator masters
+
+ local idx
+ for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
+  # Skip slots without a configured master server.
+  if [ ${coordMasterServers[$idx]} == none ] || [ ${coordMasterServers[$idx]} == N/A ]; then
+   continue
+  fi
+  pgxc_init_coordinator_master ${coordNames[$idx]}
+ done
+}
+
+# Start one coordinator master. First argument is the coordinator name.
+# Fails if the target already answers psql; otherwise starts it with
+# pg_ctl and relays the remote stdout back to the operator.
+function pgxc_start_coordinator_master
+{
+ log_echo pgxc_start_coordinator_master'('$*')'
+ # First argument is the coordinator name
+ vecho ================================================================
+ vecho Start coordinator master $1
+
+ if [ $# -ne 1 ]; then
+  eecho Specify coordinator name
+  return 1
+ fi
+ local i
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+  if [ $1 == ${coordNames[$i]} ]; then
+   log_echo psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} postgres $pgxcOwner -c \'select 1\' '>' /dev/null '2>&1'
+   psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+   if [ $? -eq 0 ]; then
+    eecho ERROR: target coordinator master is running now.
+    return 1
+   fi
+   log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'pg_ctl start -Z coordinator -D ${coordMasterDirs[$i]} -o -i '>' $tmpDir/coord.out '2>&1"'
+   ssh $pgxcUser@${coordMasterServers[$i]} "pg_ctl start -Z coordinator -D ${coordMasterDirs[$i]} -o -i > $tmpDir/coord.out 2>&1"
+   do_stdout ${coordMasterServers[$i]} $tmpDir/coord.out
+   return
+  fi
+ done
+ # Fixed: error path used plain "echo"; siblings report via eecho.
+ eecho ERROR: specified coordinator is not configured, $1
+ return 1
+}
+
+# Start every configured coordinator master, skipping already-running ones.
+function pgxc_start_coordinator_master_all
+{
+ log_echo pgxc_start_coordinator_master_all'('$*')'
+ vecho ================================================================
+ vecho Start all the coordinator masters
+
+ local idx
+ for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
+  if [ ${coordMasterServers[$idx]} == none ] || [ ${coordMasterServers[$idx]} == N/A ]; then
+   continue
+  fi
+  # Probe first; only start the ones that are down.
+  doit pgxc_monitor -Z node -p ${coordPorts[$idx]} -h ${coordMasterServers[$idx]} -U $pgxcOwner -d postgres
+  if [ $? -ne 0 ]; then
+   pgxc_start_coordinator_master ${coordNames[$idx]}
+  else
+   eecho coordinator master "("${coordNames[$idx]}")" is already running
+  fi
+ done
+}
+
+# Stop one coordinator master with pg_ctl. First argument is the
+# coordinator name; $immediate (set elsewhere) selects the shutdown mode.
+function pgxc_stop_coordinator_master
+{
+ log_echo pgxc_stop_coordinator_master'('$*')'
+ # First argument is the coordinator name
+ vecho ================================================================
+ vecho Stop coordinator master $1
+
+ if [ $# -ne 1 ]; then
+  eecho Specify coordinator name
+  return 1
+ fi
+ local i
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+  if [ $1 == ${coordNames[$i]} ]; then
+   # NOTE(review): $immediate is a global set by the caller — presumably
+   # "-m immediate" or empty; confirm against the dispatcher.
+   doit ssh $pgxcUser@${coordMasterServers[$i]} pg_ctl stop -Z coordinator -D ${coordMasterDirs[$i]} $immediate
+   return
+  fi
+ done
+ eecho ERROR: specified coordinator does not exist, $1
+ return 1
+}
+
+# Stop every configured coordinator master in turn.
+function pgxc_stop_coordinator_master_all
+{
+ log_echo pgxc_stop_coordinator_master_all'('$*')'
+ vecho ================================================================
+ vecho Stop all the coordinator masters
+
+ local idx
+ for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
+  if [ ${coordMasterServers[$idx]} == none ] || [ ${coordMasterServers[$idx]} == N/A ]; then
+   continue
+  fi
+  pgxc_stop_coordinator_master ${coordNames[$idx]}
+ done
+}
+
+
+# Forcibly kill one coordinator master. First argument is the coordinator
+# name. Looks up the postmaster pid from the remote data directory, kills
+# the postmaster together with its children, and cleans the listen socket.
+function pgxc_kill_coordinator_master
+{
+ log_echo pgxc_kill_coordinator_master'('$*')'
+ # First arugument is the coordinator name
+
+ # It's safer to kill the target coordinator with killall command. In this case, we need to
+ # capture postmaster's pid for the target
+ vecho ================================================================
+ vecho Kill coordinator master $1
+
+ if [ $# -ne 1 ]; then
+  eecho Specify nodename
+  return 1
+ fi
+ local i
+ local postmaster_pid
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+  if [ $1 == ${coordNames[$i]} ]; then
+   if [ ${coordMasterServers[$i]} != none ] && [ ${coordMasterServers[$i]} != N/A ]; then
+    # get_postmaster_pid returns "none" when no postmaster is found.
+    postmaster_pid=`get_postmaster_pid ${coordMasterServers[$i]} ${coordMasterDirs[$i]}`
+    if [ $postmaster_pid != none ]; then
+     doit kill_all_child_parent ${coordMasterServers[$i]} $postmaster_pid
+    fi
+    doit pgxc_clean_socket ${coordMasterServers[$i]} ${coordPorts[$i]}
+   else
+    eecho specified coordinator master does not exist, $1
+   fi
+   return
+  fi
+ done
+ eecho ERROR: specified coordinator does not exist, $1
+ return 1
+}
+
+# Forcibly kill every configured coordinator master.
+function pgxc_kill_coordinator_master_all
+{
+ log_echo pgxc_kill_coordinator_master_all'('$*')'
+ vecho ================================================================
+ # Fixed banner: it said "Start all the coordinator masters" although
+ # this function kills them.
+ vecho Kill all the coordinator masters
+
+ local i
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+  if [ ${coordMasterServers[$i]} != none ] && [ ${coordMasterServers[$i]} != N/A ]; then
+   pgxc_kill_coordinator_master ${coordNames[$i]}
+  fi
+ done
+}
+
+
+# Caution: This function has not been tested yet! Should test when datanode is ready.
+# If a coordinator is not configured with the slave, we should remove it from the cluster
+# when it fails.
+# Remove a coordinator master from the cluster (NOT TESTED YET).
+# Issues DROP NODE for the target on every other coordinator, stops the
+# target immediately, clears its slot in the in-memory arrays and appends
+# the updated arrays to the configuration file.
+function pgxc_remove_coordinator_master # NOT TESTED YET
+{
+ log_echo pgxc_remove_coordinator_master'('$*')'
+ local i
+ for ((i=0; i<${#coordNames[@]}; i++)); do
+  if [[ ${coordNames[$i]} == $1 ]]; then
+   local j
+   for ((j=0; j< ${#coordNames[@]}; j++)); do
+    # Fixed: the test was "[ $i -ne -$j ]" (stray minus), which compared
+    # against -j and mis-selected the branch for every j > 0.
+    if [ $i -ne $j ]; then
+     if [ ${coordMasterServers[$j]} != none ] && [ ${coordMasterServers[$j]} != N/A ]; then
+      log_echo psql -p ${coordPorts[$j]} -h ${coordMasterServers[$j]} postgres $pgxcOwner -c '"'DROP NODE ${coordNames[$i]}'"'
+      psql -p ${coordPorts[$j]} -h ${coordMasterServers[$j]} postgres $pgxcOwner -c "DROP NODE ${coordNames[$i]}"
+     fi
+    else
+     # Fixed misspelled array coordMaseterDirs.
+     doit ssh $pgxcUser@${coordMasterServers[$j]} pg_ctl stop -Z coordinator -D ${coordMasterDirs[$j]} -m immediate
+    fi
+   done
+   # Fixed: "${coordMasterServers[$i]}=none" is not an assignment in bash;
+   # array elements are assigned without the ${...} expansion syntax.
+   coordMasterServers[$i]=none
+   coordMasterDirs[$i]=none
+   log_echo Update configuration file $configFile with new coordinator Master
+   cat >> $configFile <<EOF
+#=========================================================
+# Update due to coordinator master removal, $1, $datetime
+coordMasterServers=(${coordMasterServers[@]})
+coordMasterDirs=(${coordMasterDirs[@]})
+# End of update
+EOF
+   # Backup configuration file
+   pgxc_backup_config_file
+  fi
+ done
+}
+
+# To construct coordinator slave, pg_basebackup utility is used, which needs master coordinator running.
+# If the master coordinator is not running, then we temporary run it. After copying the base backup,
+# the the master will be stopped. Please be sure that coordinator master is initialized properly.
+# If it is running, then it will be restarted to reflect the change to postgresql.conf.
+function pgxc_init_coordinator_slave
+{
+ log_echo pgxc_init_coordinator_slave'('$*')'
+ # First argument is the coordinator name
+ vecho ================================================================
+ vecho Initialize coordinator slave $1
+
+ if [ $# -ne 1 ]; then
+ eecho Specify coordinator node name
+ return 1
+ fi
+ if [ "$coordSlave" != "y" ]; then
+ eecho No coordinator Slave is configured.
+ return 1
+ fi
+ local i
+ local start_master=n
+ restart=n
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+ if [ $1 == ${coordNames[$i]} ]; then
+ if [ ${coordSlaveServers[$i]} == N/A ] || [ ${coordSlaveServers[$i]} == none ]; then
+ eecho ERROR: slave for the coordinator $1 is not configured.
+ return 1
+ fi
+ # Coordinator master should be running
+ log_echo psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} postgres $pgxcOwner -c \'select 1\' '>' /dev/null '2>&1'
+ psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ start_master=y
+ fi
+ # Clean slave's directory
+ doit ssh $pgxcUser@${coordSlaveServers[$i]} rm -rf ${coordSlaveDirs[$i]}
+ doit ssh $pgxcUser@${coordSlaveServers[$i]} mkdir -p ${coordSlaveDirs[$i]}
+ doit ssh $pgxcUser@${coordSlaveServers[$i]} chmod 0700 ${coordSlaveDirs[$i]}
+ # if the master is not running, we just start it and then stop it.
+ if [ $start_master == y ]; then
+ log_echo Starting the coordinator master to obtain base backup
+ log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'pg_ctl start -Z coordinator -D ${coordMasterDirs[$i]} -o -i '>' $tmpDir/cmd.out'"'
+ ssh $pgxcUser@${coordMasterServers[$i]} "pg_ctl start -Z coordinator -D ${coordMasterDirs[$i]} -o -i > $tmpDir/cmd.out"
+ do_stdout ${coordMasterServers[$i]} $tmpDir/cmd.out
+ sleep 2
+ fi
+ # Obtain base backup of the master
+ doit ssh $pgxcUser@${coordSlaveServers[$i]} pg_basebackup -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} -D ${coordSlaveDirs[$i]} -x
+
+ # Configure recovery.conf of the slave
+ vecho -- Configure slave\'s recovery.conf, ${coordSlaveServers[$i]}:${coordSlaveDirs[$i]}/recovery.conf
+ log_echo ssh $pgxcUser@$coordSlaveServers[$i]} '"'cat '>>' ${coordSlaveDirs[$i]}/recovery.conf'"'
+ ssh $pgxcUser@${coordSlaveServers[$i]} "cat >> ${coordSlaveDirs[$i]}/recovery.conf" <<EOF
+#==========================================
+# Added to initialize the slave, $datetime
+standby_mode = on
+primary_conninfo = 'host = ${coordMasterServers[$i]} port = ${coordPorts[$i]} user = $pgxcOwner application_name = ${coordNames[$i]}'
+restore_command = 'cp ${coordArchLogDirs[$i]}/%f %p'
+archive_cleanup_command = 'pg_archivecleanup ${coordArchLogDirs[$i]} %r'
+EOF
+ # Configure slave's postgresql.conf
+ vecho -- Configure slave\'s postgresql.conf, ${coordSlaveServers[$i]}:${coordSlaveDirs[$i]}/postgresql.conf
+ log_echo ssh $pgxcUser@${coordSlaveServers[$i]} '"'cat '>>' ${coordSlaveDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${coordSlaveServers[$i]} "cat >> ${coordSlaveDirs[$i]}/postgresql.conf" <<EOF
+#==========================================
+# Added to initialize the slave, $datetime
+hot_standby = on
+port = ${coordPorts[$i]}
+EOF
+ # Stop the Master if it was not runnig
+ if [ $start_master = y ]; then
+ doit ssh $pgxcUser@${coordMasterServers[$i]} pg_ctl stop -Z coordinator -D ${coordMasterDirs[$i]} -m fast
+ fi
+ return
+ fi
+ done
+ eecho ERROR: specified coordinator is not configured, $1
+ return 1
+}
+
+# Initialize every configured coordinator slave.
+function pgxc_init_coordinator_slave_all
+{
+ log_echo pgxc_init_coordinator_slave_all'('$*')'
+ # First argument is the coordinator name
+ vecho ================================================================
+ vecho Initialize all the coordinator slaves
+
+ local i
+ if [ $coordSlave != y ]; then
+  eecho Coordinator slaves are not configured.
+  return 1
+ fi
+ for ((i=0;i<${#coordNames[@]};i++)); do
+  if [ ${coordNames[$i]} != none ] && [ ${coordNames[$i]} != N/A ]; then
+   # Probe the slave host on the coordinator port before initializing.
+   # NOTE(review): a running slave makes this return 1 and abort the
+   # remaining slaves as well — confirm this early-exit is intended.
+   log_echo psql -p ${coordPorts[$i]} -h ${coordSlaveServers[$i]} postgres $pgxcOwner -c \'select 1\' '>' /dev/null '2>&1'
+   psql -p ${coordPorts[$i]} -h ${coordSlaveServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+   if [ $? -eq 0 ]; then
+    eecho Coordinator slave "("${coordNames[$i]}")" is already running.
+    return 1
+   fi
+   pgxc_init_coordinator_slave ${coordNames[$i]}
+  fi
+ done
+ return
+}
+
+# Start one coordinator slave. First argument is the coordinator name.
+# Requires the corresponding master to be running; after the slave starts,
+# the master is switched to synchronous replication and reloaded.
+function pgxc_start_coordinator_slave
+{
+ log_echo pgxc_start_coordinator_slave'('$*')'
+ # First argument is the coordinator name
+ vecho ================================================================
+ vecho Start coordinator slave $1
+
+ if [ $coordSlave != y ]; then
+  eecho Coordinator slaves are not configured.
+  return 1
+ fi
+ local i
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+  if [ $1 == ${coordNames[$i]} ]; then
+   if [ ${coordSlaveServers[$i]} == none ] || [ ${coordSlaveServers[$i]} == N/A ]; then
+    eecho ERROR: slave for coordinator $1 is not configured.
+    return 1
+   fi
+   # Coordinator master should be running
+   log_echo psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} postgres $pgxcOwner -c \'select 1\' '>' /dev/null '2>&1'
+   psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+   if [ $? -ne 0 ]; then
+    eecho ERROR: corresponding coordinator master is not running now, $1
+    return 1
+   fi
+   # Start the slave
+   log_echo ssh $pgxcUser@${coordSlaveServers[$i]} '"'pg_ctl start -Z coordinator -D ${coordSlaveDirs[$i]} -o -i '>' $tmpDir/coord.out'"'
+   ssh $pgxcUser@${coordSlaveServers[$i]} "pg_ctl start -Z coordinator -D ${coordSlaveDirs[$i]} -o -i > $tmpDir/coord.out"
+   do_stdout ${coordSlaveServers[$i]} $tmpDir/coord.out
+   # Change the master to synchronous mode (fixed message typo "synchrnous").
+   vecho Change the master to synchronous mode, $1
+   log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/postgresql.conf'"'
+   ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/postgresql.conf" <<EOF
+#==========================================================
+# Added to start the slave in sync. mode, $datetime
+synchronous_commit = on
+synchronous_standby_names = '${coordNames[$i]}'
+# End of the addition
+EOF
+   doit ssh $pgxcUser@${coordMasterServers[$i]} pg_ctl reload -Z coordinator -D ${coordMasterDirs[$i]}
+   return
+  fi
+ done
+ eecho ERROR: specified coordinator does not exist, $1
+ return 1
+}
+
+# Start every configured coordinator slave.
+function pgxc_start_coordinator_slave_all
+{
+ # Added for consistency: every sibling *_all function traces its
+ # invocation through log_echo; this one was missing it.
+ log_echo pgxc_start_coordinator_slave_all'('$*')'
+ # First argument is the coordinator name
+ vecho ================================================================
+ vecho Start all the coordinator slaves
+
+ if [ $coordSlave != y ]; then
+  eecho Coordinator slaves are not configured.
+  return 1
+ fi
+ local i
+ for ((i=0; i<${#coordNames[@]}; i++)); do
+  if [ ${coordNames[$i]} != none ] && [ ${coordNames[$i]} != N/A ]; then
+   pgxc_start_coordinator_slave ${coordNames[$i]}
+  fi
+ done
+}
+
+# Stop the slave of the named coordinator.  If the corresponding master is
+# running, first switch it back to asynchronous replication so that it does
+# not block waiting for the stopped standby.
+# $1: coordinator name
+function pgxc_stop_coordinator_slave
+{
+    log_echo pgxc_stop_coordinator_slave'('$*')'
+    # First argument is the coordinator name
+    vecho ================================================================
+    vecho Stop coordinator slave $1
+
+    if [ $coordSlave != y ]; then
+        eecho Coordinator slaves are not configured.
+        return 1
+    fi
+    if [ $# -ne 1 ]; then
+        eecho Specify coordinator node name
+        return 1
+    fi
+
+    local i
+    for ((i=0; i< ${#coordNames[@]}; i++)); do
+        if [ $1 == ${coordNames[$i]} ]; then
+            if [ ${coordSlaveServers[$i]} == none ] || [ ${coordSlaveServers[$i]} == N/A ]; then
+                eecho ERROR: slave for the coordinator $1 is not configured.
+                return 1
+            fi
+            # If the master is running, master's switch replication to asynchronous mode.
+            log_echo psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} postgres $pgxcOwner -c \'select 1\' '>' /dev/null '2>&1'
+            psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+            if [ $? -eq 0 ]; then
+                # Switch Master to asynchronous mode.
+                # BUGFIX: was ${coordMasterServer[$i]} (array name typo) which expanded empty.
+                vecho Switching master of $1 at ${coordMasterServers[$i]} to asynchronous replication mode.
+                log_echo ssh $pgxcUser@${coordMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/postgresql.conf'"'
+                ssh $pgxcUser@${coordMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/postgresql.conf" <<EOF
+#=======================================
+# Updated to trun off the slave $datetime
+synchronous_standby_names = ''
+# End of the update
+EOF
+                doit ssh $pgxcUser@${coordMasterServers[$i]} pg_ctl reload -Z coordinator -D ${coordMasterDirs[$i]}
+            fi
+            doit ssh $pgxcUser@${coordSlaveServers[$i]} pg_ctl stop -Z coordinator -D ${coordSlaveDirs[$i]} $immediate
+            return;
+        fi
+    done
+    eecho ERROR: Specified coordinator was not configured, $1
+    return 1
+}
+
+function pgxc_stop_coordinator_slave_all
+{
+    log_echo pgxc_stop_coordinator_slave_all'('$*')'
+    # Stop the slave of every coordinator that has one configured.
+    vecho ================================================================
+    vecho Stop all the coordinator slaves
+
+    if [ $coordSlave != y ]; then
+        eecho Coordinator slaves are not configured.
+        return 1
+    fi
+    local idx
+    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
+        case ${coordNames[$idx]} in
+            none|N/A) ;;
+            *) pgxc_stop_coordinator_slave ${coordNames[$idx]} ;;
+        esac
+    done
+}
+
+function pgxc_kill_coordinator_slave
+{
+ log_echo pgxc_kill_coordinator_slave'('$*')'
+ # First arugument is the coordinator name
+
+ # It's safer to kill the target coordinator with killall command. In this case, we need to
+ # capture postmaster's pid for the target
+ vecho ================================================================
+ vecho Kill coordinator master $1
+
+ if [ $coordSlave != y ]; then
+ eecho Coordinator slaves are not configured.
+ return 1
+ fi
+ if [ $# -ne 1 ]; then
+ eecho Specify nodename
+ return 1
+ fi
+ local i
+ local postmaster_pid
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+ if [ $1 == ${coordNames[$i]} ]; then
+ if [ ${coordSlaveServers[$i]} != none ] && [ ${coordSlaveServers[$i]} != N/A ]; then
+ postmaster_pid=`get_postmaster_pid ${coordSlaveServers[$i]} ${coordSlaveDirs[$i]}`
+ if [ $postmaster_pid != none ]; then
+ doit kill_all_child_parent ${coordSlaveServers[$i]} $postmaster_pid
+ fi
+ doit pgxc_clean_socket ${coordSlaveServers[$i]} ${coordPorts[$i]}
+ else
+ eecho specified coordinator slave does not exist, $1
+ fi
+ return
+ fi
+ done
+ eecho ERROR: specified coordinator does not exist, $1
+ return 1
+}
+
+# This failover function assumes that no IP address is carried over from corresponding master server.
+# If IP address can be carried over, then you don't need a section which issues ALTER NODE statement.
+# Instead, you should disable the slave.
+function pgxc_failover_coordinator
+{
+ log_echo pgxc_failover_coordinator'('$*')'
+ local fn=pgxc_failover_coordinator
+
+ # First argument is the coordinator name
+ decho called: $fn $*
+ vecho ================================================================
+ vecho Failover coordinator $1
+
+ if [ $# -ne 1 ]; then
+ eecho $progname:$fn Error: Specify coordinator name to failover
+ return 1
+ fi
+ if [ $coordSlave != y ]; then
+ eecho $progname:$fn No coordinator slaves are configured. Cannot failover
+ return 1
+ fi
+
+ local i
+ for ((i=0; i< ${#coordNames[@]}; i++)); do
+ if [ $1 == ${coordNames[$i]} ]; then
+ if [ ${coordSlaveServers[$i]} == none ] || [ ${coordSlaveServers[$i]} == N/A ]; then
+ eecho $progname:$fn: ERROR, Slave for the coordinator $1 is not configured. Cannot failover.
+ return 1
+ fi
+ decho "Target coordinator slave to failover:" ${coordNames[$i]} "at" ${coordSlaveServers[$i]}
+ # Find the new local gtm_proxy
+ local j
+ local targetGTMhost
+ local targetGTMport
+ targetGTMhost=none
+ targetGTMport=0
+ for ((j=0; j<${#gtmProxyServers[@]}; j++)); do
+ if [ ${coordSlaveServers[$i]} == ${gtmProxyServers[$j]} ]; then
+ targetGTMhost=${gtmProxyServers[$j]}
+ targetGTMport=${gtmProxyPorts[$j]}
+ break
+ fi
+ done
+ # gtm_proxy has to be configured properly
+ # This can be a bit more flexible so that each component can connect to GTM directly if
+ # gtm_proxy is not configured locally.
+ decho "New GTM Proxy:" "$targetGTMHost":"$targetGTMPort"
+ if [ "$targetGTMhost" == none ]; then
+ eecho $progname:$fn: ERROR, gtm_proxy is not configured at the server ${coordSlaveServers[$i]}. Cannot failover.
+ return 1
+ fi
+ # Now promote the slave
+ vecho $0: Promoting coordinator slave at ${coordSlaveServers[$i]}:${coordSlaveDirs[$i]}
+ doit gtm_util unregister -Z coordinator -p $gtmMasterPort -h gtmMasterServer ${coordNames[$i]}
+ # doit ssh $pgxcUser@$gtmMasterServer rm -f $gtmMasterDir/register.node
+ doit ssh $pgxcUser@${coordSlaveServers[$i]} pg_ctl promote -Z coordinator -D ${coordSlaveDirs[$i]}
+ ssh $pgxcUser@${coordSlaveServers[$i]} rm -rf $tmpDir/cmd.out
+ vecho done
+ # Restart the new master with new gtm_proxy
+ # The following command is a dirty hack to unregister the old master. This is only one way available now, but
+ # the next version of the core should include an utility to clean it up partially.
+ # Reconfigure new master's gtm_proxy
+ vecho Reconfiguring new gtm_proxy for ${coordSlaveServers[$i]}:${coordSlaveDirs[$i]}/postgresql.conf
+ log_echo ssh $pgxcUser@${coordSlaveServers[$i]} '"'cat '>>' ${coordSlaveDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${coordSlaveServers[$i]} "cat >> ${coordSlaveDirs[$i]}/postgresql.conf" <<EOF
+#=================================================
+# Added to promote, $datetime
+gtm_host = '$targetGTMhost'
+gtm_port = $targetGTMport
+# End of addition
+EOF
+ vecho done
+ # doit ssh $pgxcUser@${coordSlaveServers[$i]} pg_ctl stop -w -Z coordinator -D ${coordSlaveDirs[$i]} -o -i -m immediate
+ vecho Restarting ${coordNames[$i]} at ${coordSlaveServers[$i]}
+ log_echo ssh $pgxcUser@${coordSlaveServers[$i]} pg_ctl restart -Z coordinator -D ${coordSlaveDirs[$i]} -w -o -i '>' $localTmpDir/cmd.out '2>&1'
+ ssh $pgxcUser@${coordSlaveServers[$i]} pg_ctl restart -Z coordinator -D ${coordSlaveDirs[$i]} -w -o -i > $localTmpDir/cmd.out 2>&1 &
+ sleep 1
+ doit cat $localTmpDir/cmd.out
+ doit rm -f $localTmpDir/cmd.out
+
+ # Update the configuration variable
+ coordMasterServers[$i]="${coordSlaveServers[$i]}"
+ coordMasterDirs[$i]="${coordSlaveDirs[$i]}"
+ coordSlaveServers[$i]="N/A"
+ coordSlaveDirs[$i]="N/A"
+ # Then update the configuration file with this new configuration
+ log_echo cat '>>' $configFile
+ cat >> $configFile <<EOF
+#=====================================================
+# Updated due to the coordinator failover, $1, $datetime
+coordMasterServers=( ${coordMasterServers[@]} )
+coordMasterDirs=( ${coordMasterDirs[@]} )
+coordSlaveServers=( ${coordSlaveServers[@]} )
+coordSlaveDirs=( ${coordSlaveDirs[@]} )
+# End of the update
+EOF
+ # Backup configration file
+ pgxc_backup_config_file
+ # Update other coordinators with this new one ---> first, clean connection for all the users for all the databases
+ # Get all the available users --> Use this coordinator to get usernam
+ # It may be better to clean connections. However, we found that clean connection is not stable enough when some node
+ # is gone. We will wait until it is more stable.
+ # It is not clean but I'd like to leave these code for future improvement.
+ vecho Update other coordinators with new coordinator configuration.
+ vecho Clean all the pooler connections and update the node configuration
+ for ((j=0; j< ${#coordMasterServers[@]}; j++)); do
+ if [ "${coordMasterServers[$j]}" != none ] && [ "${coordMasterServers[$j]}" != N/A ]; then
+ doit pgxc_monitor -Z node -p ${coordPorts[$j]} -h ${coordMasterServers[$j]} -U $pgxcOwner -d postgres
+ if [ $? -ne 0 ]; then
+ eecho Coordinator ${coordNames[$j]} is not running. Skip reconfiguration for this.
+ continue;
+ fi
+ # The following code section seemed to be necessary. However, when some node fails, the current Postgres-XC does not work well and
+ # it seems to be okay practically to skip it.
+ #
+ #for user in $users; do
+ #if [ $j != $i ]; then
+ #vecho cleaning connection on ${coordMasterServers[$j]} for user $user
+ #psql -p ${coordPorts[$j]} -h ${coordMasterServers[$j]} postgres $pgxcOwner -c "CLEAN CONNECTION TO ALL TO USER $user" > /dev/null 2>&1
+ #fi
+ #done
+ #
+ # Issue ALTER NODE. If target coordinator is not running, we have no way to do this now. May need to run this afterwords.
+ # Store the script to elsewhere? --> Now we don't do this. May be for further work because we expect everything except
+ # for the current coordinator is running healthy.
+ cat > $localTmpDir/cmd.sql <<EOF
+ALTER NODE ${coordNames[$i]} WITH (HOST='${coordMasterServers[$i]}', PORT=${coordPorts[$i]});
+select pgxc_pool_reload();
+\q
+EOF
+ cat $localTmpDir/cmd.sql
+ doit psql -p ${coordPorts[$j]} -h ${coordMasterServers[$j]} postgres $pgxcOwner -f $localTmpDir/cmd.sql
+ rm -f $localTmpDir/cmd.sql
+ fi
+ done
+ return;
+ fi
+ done
+ eecho ERROR: specified coordinator $1 not configured.
+ return 2
+}
+
+# $1: database name
+function pgxc_clean_connection_all
+{
+    log_echo pgxc_clean_connection_all'('$*')'
+    # Issue CLEAN CONNECTION for database $1 on every configured coordinator master.
+    local fn=pgxc_clean_connection
+    local idx
+    if [ $# -le 0 ]; then
+        eecho $progname:$fn no database name specified
+        return 2
+    fi
+    for ((idx = 0; idx < ${#coordNames[@]}; idx++)); do
+        case "${coordMasterServers[$idx]}" in
+            none|N/A) continue ;;
+        esac
+        log_echo psql -p ${coordPorts[$idx]} -h ${coordMasterServers[$idx]} postgres $pgxcOwner -c '"'CLEAN CONNECTION TO ALL FOR DATABASE $1'"'
+        psql -p ${coordPorts[$idx]} -h ${coordMasterServers[$idx]} postgres $pgxcOwner -c "CLEAN CONNECTION TO ALL FOR DATABASE $1"
+    done
+}
+
+#===============================================================================
+#
+# Datanode staff
+#
+#===============================================================================
+
+function pgxc_init_datanode_master
+{
+ log_echo pgxc_init_datanode_master'('$*')'
+ # First argument is the nodename
+ vecho ================================================================
+ vecho Initialize datanode master $1
+
+ if [ $# -ne 1 ]; then
+ eecho Specify datanode name
+ return 1
+ fi
+ local i
+ for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+ if [ $1 == ${datanodeNames[$i]} ] && [ ${datanodeMasterServers[$i]} != none ] && [ ${datanodeMasterServers[$i]} != N/A ]; then
+ psql -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} -c 'select 1' postgres $pgxcOwner > /dev/null 2> /dev/null
+ if [ $? -eq 0 ]; then
+ eecho ERROR: target coordinator master is running now. Stop it to configure.
+ return 1
+ fi
+ doit ssh $pgxcUser@${datanodeMasterServers[$i]} rm -rf ${datanodeMasterDirs[$i]}
+ doit ssh $pgxcUser@${datanodeMasterServers[$i]} mkdir -p ${datanodeMasterDirs[$i]}
+ doit ssh $pgxcUser@${datanodeMasterServers[$i]} "initdb --nodename ${datanodeNames[$i]} -D ${datanodeMasterDirs[$i]} > $tmpDir/initdb.out 2>&1"
+ print_initdb_out ${datanodeMasterServers[$i]} $tmpDir/initdb.out
+ vecho Configuring ${datanodeMasterServers[$i]}:${datanodeMasterDirs[$i]}/postgresql.conf
+ # Get effective GTM port and host. If gtm_proxy is not found, then connect to GTM
+ local j
+ local targetGTMhost
+ local targetGTMport
+ targetGTMhost=$gtmMasterServer
+ targetGTMport=$gtmMasterPort
+ for ((j=0; j< ${#gtmProxyServers[@]}; j++)); do
+ if [ ${datanodeMasterServers[$i]} == ${gtmProxyServers[$j]} ]; then
+ targetGTMhost=${gtmProxyServers[$j]}
+ targetGTMport=${gtmProxyPorts[$j]}
+ break
+ fi
+ done
+ if [ $datanodeExtraConfig != none ] && [ $datanodeExtraConfig != N/A ]; then
+ vecho Configuring $pgxcUser@${datanodeMasterServer[$i]}:${datanodeMasterDirs[$i]}/postgresql.conf using $datanodeExtraConfig
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/postgresql.conf'"' '<' $datanodeExtraConfig
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/postgresql.conf" < $datanodeExtraConfig
+ fi
+ if [ ${datanodeSpecificExtraConfig[$i]} != none ] && [ ${datanodeSpecificExtraConfig[$i]} != none ]; then
+ vecho Configuring $pgxcUser@${datanodeMasterServers[$i]}:${datanodeMasterDirs[$i]}/postgresql.conf using ${datanodeSpecificExtraConfig[$i]}
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/postgresql.conf'"' '<' ${datanodeSpecificExtraConfig[$i]}
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/postgresql.conf" < ${datanodeSpecificExtraConfig[$i]}
+ fi
+ vecho Configuring $pgxcUser@${datanodeMasterServers[$i]}:${datanodeMasterDirs[$i]}/postgresql.conf
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/postgresql.conf" <<EOF
+#===========================================
+# Added at initialization. $datetime
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+listen_addresses = '*'
+port = ${datanodePorts[$i]}
+pooler_port = ${datanodePoolerPorts[$i]}
+max_connections = 100
+gtm_host = '$targetGTMhost'
+gtm_port = $targetGTMport
+EOF
+ # Additional initialization for log_shipping.
+ if [ $datanodeSlave == y ] && [ ${datanodeSlaveServers[$i]} != none ] && [ ${datanodeSlaveServers[$i]} != N/A ]; then
+ # At least cleanup remote archive directory.
+ doit pgxc_clean_dir ${datanodeSlaveServers[$i]} ${datanodeArchLogDirs[$i]}
+ # Then setup postgresql.conf
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/postgresql.conf" <<EOF
+wal_level = hot_standby
+archive_mode = on
+archive_command = 'rsync %p $pgxcUser@${datanodeSlaveServers[$i]}:${datanodeArchLogDirs[$i]}/%f'
+max_wal_senders = ${datanodeMaxWalSenders[$i]}
+# End of Addition
+EOF
+ else
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/postgresql.conf" <<EOF
+# End of Addition
+EOF
+ fi
+ vecho Configuring ${cdatanodeMasterServers[$i]}:${datanodeMasterDirs[$i]}/pg_hba.conf
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${coordMasterDirs[$i]}/pg_hba.conf'"'
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${coordMasterDirs[$i]}/pg_hba.conf" <<EOF
+#=================================================
+# Addition at initialization, $datetime
+EOF
+ if [ $datanodeExtraPgHba != none ] && [ $datanodeExtraPgHba != N/A ]; then
+ vecho Configuring ${datanodeMasterServers[$i]}:${datanodeMasterDirs[$i]}/pg_hba.conf using $datanodeExtraPgHba
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/pg_hab.conf'"' '<' $datanodeExtraPgHba
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/pg_hab.conf" < $datanodeExtraPgHba
+ fi
+ if [ ${datanodeSpecificExtraPgHba[$i]} != none ] && [ ${datanodeSpecificExtraPgHba[$i]} != N/A ]; then
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/pg_hab.conf'"' '<' ${datanodeSpecificExtraPgHba[$i]}
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/pg_hab.conf" < ${datanodeSpecificExtraPgHba[$i]}
+ fi
+ local j
+ for ((j=0; j< ${#datanodePgHbaEntries[@]}; j++)); do
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/pg_hba.conf'"'
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/pg_hba.conf" <<EOF
+host all $pgxcOwner ${datanodePgHbaEntries[$j]} trust
+EOF
+ if [ ${datanodeSlaveServers[$i]} != none ] && [ ${datanodeSlaveServers[$i]} != N/A ]; then
+ vecho ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/pg_hba.conf"
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/pg_hba.conf" <<EOF
+host replication $pgxcOwner ${datanodePgHbaEntries[$j]} trust
+EOF
+ fi
+ done
+ log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/pg_hba.conf'"'
+ ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/pg_hba.conf" <<EOF
+# End of addition
+EOF
+ return
+ fi
+ done
+ eecho ERROR: specified coordinator is not configured, $1
+ return 1
+}
+
+function pgxc_init_datanode_master_all
+{
+    log_echo pgxc_init_datanode_master_all'('$*')'
+    # Run pgxc_init_datanode_master for every datanode whose master is configured.
+    vecho ================================================================
+    vecho Initialize all the datanode masters
+
+    local idx
+    for ((idx = 0; idx < ${#datanodeNames[@]}; idx++)); do
+        case ${datanodeMasterServers[$idx]} in
+            none|N/A) ;;
+            *) pgxc_init_datanode_master ${datanodeNames[$idx]} ;;
+        esac
+    done
+}
+
+function pgxc_start_datanode_master
+{
+ # Start the named datanode master with pg_ctl, refusing if it already answers.
+ log_echo pgxc_start_datanode_master'('$*')'
+ # First argument is the nodename
+ vecho ================================================================
+ vecho Start datanode master $1
+
+ if [ $# -ne 1 ];then
+ eecho ERROR: specify datanode name
+ return 1
+ fi
+ local i
+ for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+ if [ $1 == ${datanodeNames[$i]} ]; then
+ # Probe with a trivial query; success means the master is already up.
+ log_echo psql -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} postgres $pgxcOwner -c \'select 1\' '>' /dev/null '2>&1'
+ psql -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ eecho ERROR: target datanode master is running now.
+ return 1
+ fi
+ # Start remotely; stdout is captured on the remote host and echoed back.
+ doit ssh $pgxcUser@${datanodeMasterServers[$i]} "pg_ctl start -Z datanode -D ${datanodeMasterDirs[$i]} -o -i > $tmpDir/datanode.out"
+ do_stdout ${datanodeMasterServers[$i]} $tmpDir/datanode.out
+ return
+ fi
+ done
+ eecho ERROR: specified datanode is not configured, $1
+ return 1
+}
+
+function pgxc_start_datanode_master_all
+{
+ log_echo pgxc_start_datanode_master_all'('$*')'
+ vecho ================================================================
+ vecho Start all the datanode masters
+
+ local i
+ for ((i=0;i<${#datanodeNames[@]};i++));do
+ pgxc_start_datanode_master ${datanodeNames[$i]}
+ done
+}
+
+function pgxc_stop_datanode_master
+{
+ log_echo pgxc_stop_datanode_master'('$*')'
+ # First argument is the nodename
+ vecho ================================================================
+ vecho Stop datanode master $1
+
+ if [ $# -ne 1 ]; then
+ eecho ERROR: specify datanode name
+ return 1
+ fi
+ local i
+ for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+ if [ $1 == ${datanodeNames[$i]} ]; then
+ doit ssh $pgxcUser@${datanodeMasterServers[$i]} pg_ctl stop -Z datanode -D ${datanodeMasterDirs[$i]} $immediate
+ return
+ fi
+ done
+ eecho ERROR: specified coordinator does not exist, $1
+ return 1
+}
+
+function pgxc_stop_datanode_master_all
+{
+ log_echo pgxc_stop_datanode_master_all'('$*')'
+ vecho ================================================================
+ vecho Stop all the datanode master
+
+ local i
+ for ((i=0;i<${#datanodeNames[@]};i++));do
+ pgxc_stop_datanode_master ${datanodeNames[$i]}
+ done
+}
+
+function pgxc_kill_datanode_master
+{
+ log_echo pgxc_kill_datanode_master'('$*')'
+ # First arugument is the nodename
+ vecho ================================================================
+ vecho Kill coordinator master $1
+
+ if [ $# -ne 1 ]; then
+ eecho ERROR: specify datanode name
+ return 1
+ fi
+ local i
+ for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+ if [ $1 == ${datanodedNames[$i]} ]; then
+ if [ ${datanodeMasterServers[$i]} != none ] && [ ${datanodeMasterServers[$i]} != N/A ]; then
+ postmaster_pid=`get_postmaster_pid ${datanodeMasterServers[$i]} ${datandoeMasterDirs[$i]}`
+ if [ $postmaster_pid != none ]; then
+ doit kill_all_child_parent ${datanodeMasterServers[$i]} $postmaster_pid
+ fi
+ doit pgxc_clean_socket ${datanodeMasterServers[$i]} ${datanodePorts[$i]}
+ doit pgxc_clean_socket ${datanodeMasterServers[$i]} ${datanodePoolerPorts[$i]}
+ else
+ eecho ERROR: could not find specified coordinator master, $1
+ fi
+ return
+ fi
+ done
+ eecho ERROR: specified coordinator does not exist, $1
+ return 1
+}
+
+function pgxc_kill_datanode_master_all
+{
+ log_echo pgxc_kill_datanode_master_all'('$*')'
+ vecho ================================================================
+ vecho Kill all the datanode master
+
+ local i
+ for ((i=0;i<${#datanodeNames[@]};i++));do
+ pgxc_kill_datanode_master ${datanodeNames[$i]}
+ done
+}
+
+# To construct a datanode slave, the pg_basebackup utility is used, which needs the
+# master datanode running. If the master is not running, then we temporarily run it.
+# After copying the base backup, the master will be stopped. Please be sure that the
+# datanode master is initialized properly.
+# If it is running, then it will be restarted to reflect the change to postgresql.conf.
+function pgxc_init_datanode_slave
+{
+    log_echo pgxc_init_datanode_slave'('$*')'
+    # First argument is the datanode name
+    vecho ================================================================
+    vecho Initialize datanode slave $1
+
+    if [ $# -ne 1 ]; then
+        # BUGFIX: message said "coordinator".
+        eecho ERROR: specify datanode node name
+        return 1;
+    fi
+    if [ "$datanodeSlave" != "y" ]; then
+        eecho No datanode slave is configured.
+        return 1
+    fi
+
+    local i
+    local start_master=n
+    for ((i=0;i<${#datanodeNames[@]};i++)); do
+        if [ $1 == ${datanodeNames[$i]} ]; then
+            if [ ${datanodeSlaveServers[$i]} == N/A ]; then
+                eecho ERROR: slave for the datanode $1 is not configured.
+                return 1
+            fi
+            # Datanode master should be running
+            log_echo psql -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} postgres $pgxcOwner -c \'select 1\' '>' /dev/null 2>&1
+            psql -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+            if [ $? -ne 0 ]; then
+                start_master=y
+            fi
+            # Clean slave's directory
+            doit ssh $pgxcUser@${datanodeSlaveServers[$i]} rm -rf ${datanodeSlaveDirs[$i]}
+            doit ssh $pgxcUser@${datanodeSlaveServers[$i]} mkdir -p ${datanodeSlaveDirs[$i]}
+
+            # if the master is not running, we just start it and then stop it.
+            if [ $start_master == y ]; then
+                vecho Starting the datanode master to obtain base backup
+                doit ssh $pgxcUser@${datanodeMasterServers[$i]} "pg_ctl start -Z datanode -D ${datanodeMasterDirs[$i]} -o -i > $tmpDir/cmd.out"
+                do_stdout ${datanodeMasterServers[$i]} $tmpDir/cmd.out
+                sleep 2
+            fi
+            # Obtain base backup of the master
+            doit pgxc_clean_dir ${datanodeSlaveServers[$i]} ${datanodeSlaveDirs[$i]}
+            doit ssh $pgxcUser@${datanodeSlaveServers[$i]} pg_basebackup -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} -D ${datanodeSlaveDirs[$i]} -x
+
+            # Configure recovery.conf of the slave
+            vecho -- Configure slave\'s recovery.conf, ${datanodeSlaveServers[$i]}:${datanodeSlaveDirs[$i]}/recovery.conf
+            log_echo ssh $pgxcUser@${datanodeSlaveServers[$i]} '"'cat '>>' ${datanodeSlaveDirs[$i]}/recovery.conf'"'
+            ssh $pgxcUser@${datanodeSlaveServers[$i]} "cat >> ${datanodeSlaveDirs[$i]}/recovery.conf" <<EOF
+#==========================================
+# Added to initialize the slave, $datetime
+standby_mode = on
+primary_conninfo = 'host = ${datanodeMasterServers[$i]} port = ${datanodePorts[$i]} user = $pgxcOwner application_name = ${datanodeNames[$i]}'
+restore_command = 'cp ${datanodeArchLogDirs[$i]}/%f %p'
+archive_cleanup_command = 'pg_archivecleanup ${datanodeArchLogDirs[$i]} %r'
+EOF
+
+            # Configure slave's postgresql.conf
+            # BUGFIX: message used ${doatanodeSlaveServers[$i]} (typo) which expanded empty.
+            vecho -- Configure slave\'s postgresql.conf, ${datanodeSlaveServers[$i]}:${datanodeSlaveDirs[$i]}/postgresql.conf
+            log_echo ssh $pgxcUser@${datanodeSlaveServers[$i]} '"'cat '>>' ${datanodeSlaveDirs[$i]}/postgresql.conf'"'
+            ssh $pgxcUser@${datanodeSlaveServers[$i]} "cat >> ${datanodeSlaveDirs[$i]}/postgresql.conf" <<EOF
+#==========================================
+# Added to startup the slave, $datetime
+hot_standby = on
+port = ${datanodePorts[$i]}
+pooler_port = ${datanodePoolerPorts[$i]}
+EOF
+            if [ $start_master == y ]; then
+                vecho Stopping the datanode master.
+                doit ssh $pgxcUser@${datanodeMasterServers[$i]} "pg_ctl stop -Z datanode -D ${datanodeMasterDirs[$i]} > /dev/null 2>&1"
+            fi
+            return
+        fi
+    done
+    # BUGFIX: message said "coordinator".
+    eecho ERROR: specified datanode is not configured, $1
+    return 1
+}
+
+function pgxc_init_datanode_slave_all
+{
+    log_echo pgxc_init_datanode_slave_all'('$*')'
+    # Build the slave of every datanode that has one configured.
+    vecho ================================================================
+    vecho Initialize all the datanode slaves
+
+    local idx
+    for ((idx = 0; idx < ${#datanodeNames[@]}; idx++)); do
+        case ${datanodeSlaveServers[$idx]} in
+            none|N/A) ;;
+            *) pgxc_init_datanode_slave ${datanodeNames[$idx]} ;;
+        esac
+    done
+}
+
+# Start the slave of the named datanode, then switch the master to synchronous
+# replication against it.  Requires the master to be running and the slave not.
+# $1: datanode name
+function pgxc_start_datanode_slave
+{
+    log_echo pgxc_start_datanode_slave'('$*')'
+    # First argument is the datanode name
+    vecho ================================================================
+    vecho Start datanode slave $1
+
+    if [ $datanodeSlave != y ]; then
+        eecho ERROR: no datanode slaves are configured
+        return 1
+    fi
+    local i
+    for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+        if [ $1 == ${datanodeNames[$i]} ]; then
+            if [ ${datanodeSlaveServers[$i]} == none ] || [ ${datanodeSlaveServers[$i]} == N/A ]; then
+                eecho ERROR: slave for datanode $1 is not configured.
+                return 1
+            fi
+            # Datanode master should be running
+            log_echo psql -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} postgres $pgxcOwner -c \'select 1\' '>' /dev/null '2>&1'
+            psql -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+            if [ $? -ne 0 ] ; then
+                eecho ERROR: corresponding datanode master is not running now, $1
+                return 1
+            fi
+            # Start the slave
+            psql -p ${datanodePorts[$i]} -h ${datanodeSlaveServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2>&1
+            if [ $? -eq 0 ] ; then
+                eecho ERROR: datanode slave "("${datanodeNames[$i]}")" is already running
+                return 1
+            fi
+            doit ssh $pgxcUser@${datanodeSlaveServers[$i]} "pg_ctl start -Z datanode -D ${datanodeSlaveDirs[$i]} -o -i > $tmpDir/coord.out"
+            do_stdout ${datanodeSlaveServers[$i]} $tmpDir/coord.out
+            # Change the master to synchronous mode
+            # BUGFIX: message said "synchrnous".
+            vecho Change the master to synchronous mode, $1
+            log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/postgresql.conf'"'
+            ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/postgresql.conf" <<EOF
+#==========================================================
+# Added to start the slave in sync. mode, $datetime
+synchronous_commit = on
+synchronous_standby_names = '${datanodeNames[$i]}'
+# End of the addition
+EOF
+            doit ssh $pgxcUser@${datanodeMasterServers[$i]} pg_ctl reload -Z datanode -D ${datanodeMasterDirs[$i]}
+            return
+        fi
+    done
+    eecho ERROR: specified datanode does not exist, $1
+    return 1
+}
+
+# Start the slave of every datanode that has one configured.
+function pgxc_start_datanode_slave_all
+{
+    # BUGFIX: was "log_eco" (typo), which produced "command not found" on every call.
+    log_echo pgxc_start_datanode_slave_all'('$*')'
+    vecho ================================================================
+    vecho Start all the datanode slaves
+
+    local i
+    for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+        if [ ${datanodeSlaveServers[$i]} != none ] && [ ${datanodeSlaveServers[$i]} != N/A ]; then
+            pgxc_start_datanode_slave ${datanodeNames[$i]}
+        fi
+    done
+}
+
+# Stop the slave of the named datanode.  If the master is running, switch it
+# back to asynchronous replication first so it does not block on sync commit.
+# $1: datanode name
+function pgxc_stop_datanode_slave
+{
+    log_echo pgxc_stop_datanode_slave'('$*')'
+    # First argument is the datanode name
+    vecho ================================================================
+    vecho Stop datanode slave $1
+
+
+    if [ $datanodeSlave != y ]; then
+        eecho ERROR: no datanode slaves are configured
+        return 1
+    fi
+    local i
+    for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+        if [ $1 == ${datanodeNames[$i]} ]; then
+            if [ ${datanodeSlaveServers[$i]} == none ] || [ ${datanodeSlaveServers[$i]} == N/A ]; then
+                eecho ERROR: slave for the datanode $1 is not configured.
+                # BUGFIX: previously fell through and tried to stop a non-existent
+                # slave; bail out like the coordinator counterpart does.
+                return 1
+            fi
+            # If the master is running, master's switch replication to asynchronous mode.
+            psql -p ${datanodePorts[$i]} -h ${datanodeMasterServers[$i]} postgres $pgxcOwner -c 'select 1' > /dev/null 2> /dev/null
+            if [ $? -eq 0 ]; then
+                # Switch Master to asynchronous mode.
+                # BUGFIX: was ${datanodeMasterServer[$i]} (array name typo) which expanded empty.
+                vecho Switching master of $1 at ${datanodeMasterServers[$i]} to asynchronous replication mode.
+                log_echo ssh $pgxcUser@${datanodeMasterServers[$i]} '"'cat '>>' ${datanodeMasterDirs[$i]}/postgresql.conf'"'
+                ssh $pgxcUser@${datanodeMasterServers[$i]} "cat >> ${datanodeMasterDirs[$i]}/postgresql.conf" <<EOF
+#=======================================
+# Updated to trun off the slave $datetime
+synchronous_standby_names = ''
+# End of the update
+EOF
+                doit ssh $pgxcUser@${datanodeMasterServers[$i]} pg_ctl reload -Z datanode -D ${datanodeMasterDirs[$i]}
+            fi
+            doit ssh $pgxcUser@${datanodeSlaveServers[$i]} pg_ctl stop -Z datanode -D ${datanodeSlaveDirs[$i]} $immediate
+            return;
+        fi
+    done
+    # BUGFIX: "configureed" typo.
+    eecho ERROR: Specified datanode is not configured, $1
+    return 1
+}
+function pgxc_stop_datanode_slave_all
+{
+    log_echo pgxc_stop_datanode_slave_all'('$*')'
+    # Stop the slave of every datanode that has one configured.
+    vecho ================================================================
+    vecho Stop all the datanode slaves
+
+    local idx
+    for ((idx = 0; idx < ${#datanodeNames[@]}; idx++)); do
+        case ${datanodeSlaveServers[$idx]} in
+            none|N/A) ;;
+            *) pgxc_stop_datanode_slave ${datanodeNames[$idx]} ;;
+        esac
+    done
+}
+
+# NOTE(review): this is the SECOND definition of pgxc_kill_datanode_master in this
+# file; in bash the later definition wins, so it silently overrides the earlier one
+# (which also cleaned up sockets).  Despite the "slave" wording in the messages
+# below, it operates on the datanode MASTER arrays — presumably a copy-paste
+# remnant; confirm intent before consolidating the two definitions.
+function pgxc_kill_datanode_master
+{
+ log_echo pgxc_kill_datanode_master'('$*')'
+ # First argument is the datanodeinator name
+ vecho ================================================================
+ vecho Kill datanode slave $1
+
+ local i
+ local postmaster_pid
+ for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+ if [ $1 == ${datanodeNames[$i]} ]; then
+ if [ ${datanodeMasterServers[$i]} != none ] && [ ${datanodeMasterServers[$i]} != N/A ]; then
+ # Kill the whole process tree rooted at the master's postmaster.
+ postmaster_pid=`get_postmaster_pid ${datanodeMasterServers[$i]} ${datanodeMasterDirs[$i]}`
+ if [ $postmaster_pid != none ]; then
+ doit kill_all_child_parent ${datanodeMasterServers[$i]} $postmaster_pid
+ fi
+ else
+ eecho ERROR: specified coordinator master does not exist, $1
+ fi
+ return
+ fi
+ done
+ eecho ERROR: specified datanode master is not configured, $1
+ return 1
+}
+
+# NOTE(review): duplicate definition — pgxc_kill_datanode_master_all is also
+# defined earlier in this file; bash keeps this later one.
+function pgxc_kill_datanode_master_all
+{
+ log_echo pgxc_kill_datanode_master_all'('$*')'
+ vecho ================================================================
+ vecho Kill all the datanode masters
+
+ local i
+ for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+ if [ ${datanodeMasterServers[$i]} != none ] && [ ${datanodeMasterServers[$i]} != N/A ]; then
+ pgxc_kill_datanode_master ${datanodeNames[$i]}
+ fi
+ done
+}
+
+# Please note that this function does not take care of anything but just kill the processes.
+# $1: datanode name
+function pgxc_kill_datanode_slave
+{
+    log_echo pgxc_kill_datanode_slave'('$*')'
+    # First argument is the datanode name
+    vecho ================================================================
+    vecho Kill datanode slave $1
+
+    local i
+    local postmaster_pid
+    for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+        if [ $1 == ${datanodeNames[$i]} ]; then
+            if [ ${datanodeSlaveServers[$i]} != none ] && [ ${datanodeSlaveServers[$i]} != N/A ]; then
+                postmaster_pid=`get_postmaster_pid ${datanodeSlaveServers[$i]} ${datanodeSlaveDirs[$i]}`
+                if [ $postmaster_pid != none ]; then
+                    doit kill_all_child_parent ${datanodeSlaveServers[$i]} $postmaster_pid
+                fi
+                doit pgxc_clean_socket ${datanodeSlaveServers[$i]} ${datanodePorts[$i]}
+                doit pgxc_clean_socket ${datanodeSlaveServers[$i]} ${datanodePoolerPorts[$i]}
+            else
+                # BUGFIX: message said "coordinator master".
+                eecho ERROR: specified datanode slave does not exist, $1
+            fi
+            return
+        fi
+    done
+    # BUGFIX: message said "datanode master".
+    eecho ERROR: specified datanode slave is not configured, $1
+    return 1
+}
+
+# Kill all the configured datanode slaves
+function pgxc_kill_datanode_slave_all
+{
+ # Kill every configured datanode slave, delegating the per-node work
+ # to pgxc_kill_datanode_slave.
+ log_echo pgxc_kill_datanode_slave_all'('$*')'
+ vecho ================================================================
+ vecho Kill all the datanode slaves
+
+ local i
+ for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+ if [ ${datanodeSlaveServers[$i]} != none ] && [ ${datanodeSlaveServers[$i]} != N/A ]; then
+ pgxc_kill_datanode_slave ${datanodeNames[$i]}
+ fi
+ done
+}
+
+
+# Result slots filled by pgxc_find_coordinator / pgxc_select_coordinator:
+# the chosen coordinator's node name, listen port and master host.
+# "none"/0 means no coordinator is currently selected.
+selected_coord="none"
+selected_coord_port=0
+selected_coord_host="none"
+
+function pgxc_find_coordinator
+{
+ # Look up coordinator $1 by name and export it through selected_coord,
+ # selected_coord_port and selected_coord_host.
+ # Returns 0 on success, 2 on bad arguments or missing master, 1 when
+ # the name is not configured at all.
+ local fn=pgxc_find_coordinator
+ selected_coord="none"
+ selected_coord_port=0
+ selected_coord_host="none"
+ local i
+ log_echo $progname-$fn $*
+ if [ $# -le 0 ]; then
+ eecho $progname-$fn missing coordinator name
+ return 2
+ fi
+ if [ "$1" == "none" ] || [ "$1" == "N/A" ]; then
+ eecho "$progname:$fn" invalid coordinator name $1
+ return 2
+ fi
+ for ((i=0;i<${#coordNames[@]};i++));do
+ if [ "${coordNames[$i]}" == "$1" ]; then
+ # Found by name; it must also have a live master entry to be usable.
+ if [ "${coordMasterServers[$i]}" == "none" ] || [ "${coordMasterServers[$i]}" == "N/A" ]; then
+ eecho "$progname:$fn" specified coordinator $1 does not have master
+ return 2
+ fi
+ selected_coord=${coordNames[$i]}
+ selected_coord_port=${coordPorts[$i]}
+ selected_coord_host=${coordMasterServers[$i]}
+ return;
+ fi
+ done
+ eecho $progname-$fn specified coordinator $1 not found
+ return 1
+}
+
+function pgxc_select_coordinator
+{
+ # Pick a healthy coordinator master and export it through selected_coord,
+ # selected_coord_port and selected_coord_host.  The scan starts at a random
+ # index and wraps around once, so the load of ad-hoc commands spreads over
+ # the configured coordinators.  Returns 0 on success, 1 when no healthy
+ # coordinator master is found.
+ selected_coord="none"
+ selected_coord_port=0
+ selected_coord_host="none"
+ local i
+ local k
+ local s
+ s=$(($RANDOM%${#coordMasterServers[@]}))
+ # Single wrap-around pass: visits s, s+1, ..., N-1, 0, ..., s-1.
+ for ((k=0;k<${#coordMasterServers[@]};k++)); do
+ i=$(( (s + k) % ${#coordMasterServers[@]} ))
+ if [ ${coordMasterServers[$i]} != "none" ] && [ ${coordMasterServers[$i]} != "N/A" ]; then
+ # Need this check not to select a failed coordinator. This may happen
+ # if a server fails and datanode fails over first.
+ pgxc_monitor -Z node -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} -U $pgxcOwner -d postgres
+ if [ $? == 0 ]; then
+ selected_coord=${coordNames[$i]}
+ selected_coord_port=${coordPorts[$i]}
+ selected_coord_host=${coordMasterServers[$i]}
+ return 0
+ fi
+ fi
+ done
+ return 1
+}
+
+# Connects to the target ($1) from the coordinator ($2) using specified user ($3).
+# Issue "select 1" using EXECUTE DIRECT.
+# Need to make CLEAN CONNECTION work.
+function pgxc_dummy_connect
+{
+ local i
+ if [ $# -ne 3 ]; then
+ echo pgxc_dummy_connect target coordinator user
+ return 1
+ fi
+ # Locate coordinator $2 and run the one-shot script through psql from there.
+ for ((i=0;i<${#coordNames[@]}; i++)); do
+ if [ $2 == ${coordNames[$i]} ]; then
+ cat > $localTmpDir/cmd_dummy.sql <<EOF
+EXECUTE DIRECT ON ($1) 'SELECT 1';
+\q
+EOF
+ psql -p ${coordPorts[$i]} -h ${coordMasterServers[$i]} -f $localTmpDir/cmd_dummy.sql postgres $3
+ rm -f $localTmpDir/cmd_dummy.sql
+ return
+ fi
+ done
+ echo ERROR: coordinator $2 not found.
+ return 1
+}
+
+# This failover function assumes that no IP address is carried over from corresponding master server.
+# If IP address can be carried over, then you don't need a section which issues ALTER NODE statement.
+# Instead, you should disable the slave.
+#
+# Promotes the slave of datanode $1 to master: unregisters the old master from
+# GTM, promotes the slave, points it at a local gtm_proxy, restarts it, issues
+# ALTER NODE on every running coordinator, and rewrites the configuration file.
+function pgxc_failover_datanode
+{
+ log_echo pgxc_failover_datanode'('$*')'
+ local fn=pgxc_failover_datanode
+
+ # First argument is the datanode name
+ decho called: $fn $*
+ vecho ================================================================
+ vecho Failover datanode $1
+
+ if [ $# -ne 1 ]; then
+ eecho $progname:$fn Error: Specify datanode name to failover
+ return 1
+ fi
+ if [ $datanodeSlave != y ]; then
+ echo $progname:$fn No datanode slaves are configured. Cannot failover
+ return 1
+ fi
+
+ local i
+ for ((i=0; i< ${#datanodeNames[@]}; i++)); do
+ if [ $1 == ${datanodeNames[$i]} ]; then
+ if [ ${datanodeSlaveServers[$i]} == none ] || [ ${datanodeSlaveServers[$i]} == N/A ]; then
+ eecho $progname:$fn: ERROR, Slave for the datanode $1 is not configured. Cannot failover.
+ return 1
+ fi
+ decho "Target datanode slave to failover:" ${datanodeNames[$i]} "at" ${datanodeSlaveServers[$i]}
+ # Find the new local gtm_proxy
+ local j
+ local targetGTMhost
+ local targetGTMport
+ targetGTMhost=none
+ targetGTMport=0
+ for ((j=0; j<${#gtmProxyServers[@]}; j++)); do
+ if [ ${datanodeSlaveServers[$i]} == ${gtmProxyServers[$j]} ]; then
+ targetGTMhost=${gtmProxyServers[$j]}
+ targetGTMport=${gtmProxyPorts[$j]}
+ break
+ fi
+ done
+ # gtm_proxy has to be configured properly
+ # This can be a bit more flexible so that each component can connect to GTM directly if
+ # gtm_proxy is not configured locally.
+ decho "New GTM Proxy:" "$targetGTMhost":"$targetGTMport"
+ if [ "$targetGTMhost" == none ]; then
+ eecho $progname:$fn: ERROR, gtm_proxy is not configured at the server ${datanodeSlaveServers[$i]}. Cannot failover.
+ return 1
+ fi
+ # Now promote the slave
+ vecho Promoting datanode slave at ${datanodeSlaveServers[$i]}:${datanodeSlaveDirs[$i]}
+ doit gtm_util unregister -Z datanode -p $gtmMasterPort -h $gtmMasterServer ${datanodeNames[$i]}
+ # doit ssh $pgxcUser@$gtmMasterServer rm -f $gtmMasterDir/register.node
+ doit ssh $pgxcUser@${datanodeSlaveServers[$i]} pg_ctl promote -Z datanode -D ${datanodeSlaveDirs[$i]}
+ ssh $pgxcUser@${datanodeSlaveServers[$i]} rm -rf $tmpDir/cmd.out
+ vecho done
+ # Restart the new master with new gtm_proxy
+ # The following command is a dirty hack to unregister the old master. This is only one way available now, but
+ # the next version of the core should include an utility to clean it up partially.
+ # Reconfigure new master's gtm_proxy
+ vecho Reconfiguring new gtm_proxy for ${datanodeSlaveServers[$i]}:${datanodeSlaveDirs[$i]}/postgresql.conf
+ log_echo ssh $pgxcUser@${datanodeSlaveServers[$i]} '"'cat '>>' ${datanodeSlaveDirs[$i]}/postgresql.conf'"'
+ ssh $pgxcUser@${datanodeSlaveServers[$i]} "cat >> ${datanodeSlaveDirs[$i]}/postgresql.conf" <<EOF
+#=================================================
+# Added to promote, $datetime
+gtm_host = '$targetGTMhost'
+gtm_port = $targetGTMport
+# End of addition
+EOF
+ vecho done
+ # doit ssh $pgxcUser@${datanodeSlaveServers[$i]} pg_ctl stop -w -Z datanode -D ${datanodeSlaveDirs[$i]} -o -i -m immediate
+ vecho Restarting ${datanodeNames[$i]} at ${datanodeSlaveServers[$i]}
+ log_echo ssh $pgxcUser@${datanodeSlaveServers[$i]} pg_ctl restart -w -Z datanode -D ${datanodeSlaveDirs[$i]} -w -o -i '>' $localTmpDir/cmd.out '2>&1 &'
+ ssh $pgxcUser@${datanodeSlaveServers[$i]} pg_ctl restart -w -Z datanode -D ${datanodeSlaveDirs[$i]} -w -o -i > $localTmpDir/cmd.out 2>&1 &
+ sleep 2 # do we need it?
+ doit cat $localTmpDir/cmd.out
+ doit rm -f $localTmpDir/cmd.out
+
+ # Update other coordinators with this new one ---> first, clean connection for all the users for all the databases
+ # Get all the available users --> Use this coordinator to get usernam
+ # It may be better to clean connections. However, we found that clean connection is not stable enough when some node
+ # is gone. We will wait until it is more stable.
+ # It is not clean but I'd like to leave these code for future improvement.
+ vecho Update coordinators with new datanode configuration.
+ # Clean all the pooler connections and update the node configuration
+ vecho Clean all the pooler connections and update the node configuration
+ for ((j=0; j< ${#coordMasterServers[@]}; j++)); do
+ if [ "${coordMasterServers[$j]}" != none ] && [ "${coordMasterServers[$j]}" != N/A ]; then
+ doit pgxc_monitor -Z node -p ${coordPorts[$j]} -h ${coordMasterServers[$j]} -U $pgxcOwner -d postgres
+ # The following block is needed because a coordinator may leave failed when a server fails and
+ # the datanode may fail-over first. --> In this case, the coordinator should be given a chance
+ # to reconfigure itself with this datanode. Coordinator should failover first.
+ if [ $? -ne 0 ]; then
+ eecho ERROR: coordinator ${coordNames[$j]} is not running. Skip reconfiguration for this.
+ continue;
+ fi
+ # Issue ALTER NODE. If target coordinator is not running, we have no way to do this now. May need to run this afterwords.
+ # Store the script to elsewhere? --> Now we don't do this. May be for further work because we expect everything except
+ # for the current coordinator is running healthy.
+ log_echo cat '>>' $localTmpDir/cmd.sql
+ cat >> $localTmpDir/cmd.sql <<EOF
+ALTER NODE ${datanodeNames[$i]} WITH (HOST='${datanodeSlaveServers[$i]}', PORT=${datanodePorts[$i]});
+select pgxc_pool_reload();
+\q
+EOF
+ cat $localTmpDir/cmd.sql
+ psql -p ${coordPorts[$j]} -h ${coordMasterServers[$j]} postgres $pgxcOwner -f $localTmpDir/cmd.sql
+ rm -f $localTmpDir/cmd.sql
+ fi
+ done
+ # Update the configuration variable
+ datanodeMasterServers[$i]="${datanodeSlaveServers[$i]}"
+ datanodeMasterDirs[$i]="${datanodeSlaveDirs[$i]}"
+ datanodeSlaveServers[$i]="none"
+ datanodeSlaveDirs[$i]="none"
+ # Then update the configuration file with this new configuration
+ log_echo cat '>>' $configFile
+ cat >> $configFile <<EOF
+#=====================================================
+# Updated due to the datanode failover, $1, $datetime
+datanodeMasterServers=( ${datanodeMasterServers[@]} )
+datanodeMasterDirs=( ${datanodeMasterDirs[@]} )
+datanodeSlaveServers=( ${datanodeSlaveServers[@]} )
+datanodeSlaveDirs=( ${datanodeSlaveDirs[@]} )
+# End of the update
+EOF
+ # Backup configuration file
+ pgxc_backup_config_file
+ return;
+ fi
+ done
+ eecho ERROR: specified datanode $1 not configured.
+ return 2
+}
+
+
+function pgxc_configure_nodes
+{
+ # Populate pgxc_node on coordinator $1: CREATE NODE for every other
+ # coordinator and every datanode (ALTER NODE for itself), marking the
+ # primary and the locally-preferred datanode, then reload the pooler
+ # by running the generated script through psql.
+ log_echo pgxc_configure_nodes'('$*')'
+ # First argument is a coordinator name
+ vecho ================================================================
+ vecho Configure nodes for coordinator $1
+
+ if [ $# -ne 1 ]; then
+ eecho ERROR: specify coordinator name.
+ return 2
+ fi
+ local i
+ local j
+ for ((i=0; i<${#coordNames[@]}; i++));do
+ if [ $1 == ${coordNames[$i]} ] && [ ${coordNames[$i]} != none ] && [ ${coordNames[$i]} != N/A ]; then
+ rm -rf $localTmpDir/cmd.sql
+ touch $localTmpDir/cmd.sql
+ for ((j=0; j<${#coordNames[@]}; j++));do
+ vecho Setup pgxc_node for ${coordNames[$j]} at ${coordNames[$i]}.
+ # Setup coordinators
+ decho i=$i, j=$j
+ if [ $i != $j ]; then
+ #echo CREATE NODE
+ cat >> $localTmpDir/cmd.sql <<EOF
+CREATE NODE ${coordNames[$j]} WITH (TYPE='coordinator', HOST='${coordMasterServers[$j]}', PORT=${coordPorts[$j]});
+EOF
+ else
+ #echo ALTER NODE
+ cat >> $localTmpDir/cmd.sql <<EOF
+ALTER NODE ${coordNames[$j]} WITH (HOST='${coordMasterServers[$j]}', PORT=${coordPorts[$j]});
+EOF
+ fi
+ done
+ for ((j=0; j<${#datanodeNames[@]}; j++)); do
+ vecho Setup pgxc_node for ${datanodeNames[$j]} at ${coordNames[$i]}.
+ # Setup datanodes
+ cat >> $localTmpDir/cmd.sql <<EOF
+CREATE NODE ${datanodeNames[$j]} WITH (TYPE='datanode', HOST='${datanodeMasterServers[$j]}', PORT=${datanodePorts[$j]});
+EOF
+ if [ ${datanodeNames[$j]} == $primaryDatanode ]; then
+ # Primary node
+ cat >> $localTmpDir/cmd.sql <<EOF
+ALTER NODE ${datanodeNames[$j]} WITH (PRIMARY);
+EOF
+ fi
+ if [ ${datanodeMasterServers[$j]} == ${coordMasterServers[$i]} ]; then
+ # Preferred node
+ cat >> $localTmpDir/cmd.sql <<EOF
+ALTER NODE ${datanodeNames[$j]} WITH (PREFERRED);
+EOF
+ fi
+ done
+ cat >> $localTmpDir/cmd.sql <<EOF
+\q
+EOF
+ ddo cat $localTmpDir/cmd.sql
+ # In verbose/logging mode echo the psql run as well.
+ if [ "$verbose" == "y" ] || [ $logOpt == "y" ]; then
+ doit psql -h ${coordMasterServers[$i]} -p ${coordPorts[$i]} -a -f $localTmpDir/cmd.sql postgres $pgxcOwner
+ else
+ psql -h ${coordMasterServers[$i]} -p ${coordPorts[$i]} -f $localTmpDir/cmd.sql postgres $pgxcOwner
+ fi
+ rm -rf $localTmpDir/cmd.sql
+ return
+ fi
+ done
+ eecho Coordinator $1 is not configured.
+ return 1
+}
+
+function pgxc_configure_nodes_all
+{
+ # Run pgxc_configure_nodes on every coordinator that has a master.
+ log_echo pgxc_configure_nodes_all'('$*')'
+ vecho ================================================================
+ vecho Configure nodes for all the coordinators
+
+ local i
+ for ((i=0;i<${#coordNames[@]};i++)); do
+ if [ "${coordMasterServers[$i]}" != "none" ] && [ "${coordMasterServers[$i]}" != "N/A" ]; then
+ pgxc_configure_nodes ${coordNames[$i]}
+ fi
+ done
+}
+
+
+# Start the whole cluster in dependency order: GTM (master, then slave),
+# gtm_proxies, datanodes (masters then slaves), coordinators (masters then
+# slaves).  Slave tiers are started only when configured.
+function start_all
+{
+ log_echo start_all'('$*')'
+ pgxc_start_gtm_master
+ if [ $gtmSlave == y ]; then
+ pgxc_start_gtm_slave
+ fi
+ if [ $gtmProxy == y ]; then
+ pgxc_start_gtm_proxy_all
+ fi
+ pgxc_start_datanode_master_all
+ if [ $datanodeSlave == y ]; then
+ pgxc_start_datanode_slave_all
+ fi
+ pgxc_start_coordinator_master_all
+ if [ $coordSlave == y ]; then
+ pgxc_start_coordinator_slave_all
+ fi
+}
+
+# Initialize the whole cluster from scratch.  GTM/proxies are started
+# temporarily because coordinator/datanode initialization needs a running
+# GTM; everything is then stopped and brought up cleanly via start_all,
+# and finally pgxc_node catalogs are populated on all coordinators.
+function init_all
+{
+ log_echo init_all'('$*')'
+ pgxc_init_gtm_master
+ if [ $gtmSlave == y ]; then
+ pgxc_init_gtm_slave
+ fi
+ if [ $gtmProxy == y ]; then
+ pgxc_init_gtm_proxy_all
+ fi
+ pgxc_init_datanode_master_all
+ # GTM (and proxies) must be running before slaves/coordinators initialize.
+ pgxc_start_gtm_master
+ if [ $gtmProxy == y ]; then
+ pgxc_start_gtm_proxy_all
+ fi
+ if [ $datanodeSlave == y ]; then
+ pgxc_init_datanode_slave_all
+ fi
+ pgxc_init_coordinator_master_all
+ if [ $coordSlave == y ]; then
+ pgxc_init_coordinator_slave_all
+ fi
+ # Tear the temporary GTM stack down before the clean, full start.
+ if [ $gtmProxy == y ]; then
+ pgxc_stop_gtm_proxy_all
+ fi
+ if [ $gtmSlave == y ]; then
+ pgxc_stop_gtm_slave
+ fi
+ pgxc_stop_gtm_master
+ start_all
+ pgxc_configure_nodes_all
+}
+
+# Stop the whole cluster in reverse dependency order: coordinators
+# (slaves first), datanodes, gtm_proxies, GTM slave, GTM master.
+function stop_all
+{
+ log_echo stop_all'('$*')'
+ if [ $coordSlave == y ]; then
+ pgxc_stop_coordinator_slave_all
+ fi
+ pgxc_stop_coordinator_master_all
+ if [ $datanodeSlave == y ]; then
+ pgxc_stop_datanode_slave_all
+ fi
+ pgxc_stop_datanode_master_all
+ if [ $gtmProxy == y ]; then
+ pgxc_stop_gtm_proxy_all
+ fi
+ if [ $gtmSlave == y ]; then
+ pgxc_stop_gtm_slave
+ fi
+ pgxc_stop_gtm_master
+}
+
+# Stop everything with immediate shutdown and remove all node data.
+# The global $immediate is saved/restored around the forced stop.
+function clean_all
+{
+ log_echo clean_all'('$*')'
+ local immediate_bk
+ immediate_bk="$immediate"
+ immediate="-m immediate"
+ stop_all
+ pgxc_clean_node_all
+ immediate="$immediate_bk"
+}
+
+# Start --- all | gtm [master|slave|all] | gtm_proxy | coordinator [master|slave|all] | datanode [master|slave|all] | nodename [master|slave|all]
+function pgxc_start_something
+{
+ # Dispatch a "start" command: $1 selects what to start (a component class
+ # or a configured node name), $2 optionally selects master/slave/all.
+ log_echo pgxc_start_something'('$*')'
+ local i
+
+ if [ "$1" == "all" ] || [ "$1" == "" ]; then
+ start_all
+ return
+ fi
+ if [ "$1" == "gtm" ] || [ "$1" == "$gtmName" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_monitor -Z gtm -p $gtmMasterPort -h $gtmMasterServer
+ if [ $? -eq 0 ]; then
+ eecho GTM master is already running.
+ return 1
+ fi
+ pgxc_start_gtm_master
+ return;
+ fi
+ if [ "$2" == "slave" ]; then
+ if [ $gtmSlave != y ]; then
+ eecho GTM Slave is not configured. Cannot start.
+ return;
+ fi
+ # The slave can only follow a running master.
+ pgxc_monitor -Z gtm -p $gtmMasterPort -h $gtmMasterServer
+ if [ $? -eq 0 ]; then
+ pgxc_start_gtm_slave
+ else
+ eecho GTM master is not running. Cannot start the slave.
+ fi
+ return;
+ fi
+ if [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_monitor -Z gtm -p $gtmMasterPort -h $gtmMasterServer
+ if [ $? -eq 0 ]; then
+ eecho GTM master is already running.
+ return 1
+ fi
+ pgxc_start_gtm_master
+ if [ $gtmSlave == y ]; then
+ pgxc_start_gtm_slave
+ fi
+ fi
+ return
+ fi
+ if [ "$1" == "gtm_proxy" ]; then
+ pgxc_start_gtm_proxy_all
+ return
+ fi
+ if [ "$1" == "coordinator" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_start_coordinator_master_all
+ elif [ "$2" == "slave" ]; then
+ pgxc_start_coordinator_slave_all
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_start_coordinator_master_all
+ pgxc_start_coordinator_slave_all
+ else
+ eecho Invalid argument for start coordinator command, $2
+ fi
+ return
+ fi
+ if [ "$1" == "datanode" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_start_datanode_master_all
+ elif [ "$2" == "slave" ]; then
+ pgxc_start_datanode_slave_all
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_start_datanode_master_all
+ pgxc_start_datanode_slave_all
+ else
+ eecho Invalid argument for start datanode command, $2
+ fi
+ return
+ fi
+ # General nodename specification
+ # Have done GTM
+ # GTM proxy?
+ for ((i=0; i<${#gtmProxyNames[@]}; i++)); do
+ if [ "$1" == "${gtmProxyNames[$i]}" ]; then
+ pgxc_start_gtm_proxy $1
+ return
+ fi
+ done
+ # Coordinator?
+ for ((i=0; i<${#coordNames[@]}; i++)); do
+ if [ "$1" == "${coordNames[$i]}" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_start_coordinator_master $1
+ elif [ "$2" == "slave" ]; then
+ pgxc_start_coordinator_slave $1
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_start_coordinator_master $1
+ pgxc_start_coordinator_slave $1
+ else
+ eecho Invalid start coordinator command option, $2
+ fi
+ return
+ fi
+ done
+ # Datanode?
+ for ((i=0; i<${#datanodeNames[@]}; i++)); do
+ if [ "$1" == "${datanodeNames[$i]}" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_start_datanode_master $1
+ elif [ "$2" == "slave" ]; then
+ pgxc_start_datanode_slave $1
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_start_datanode_master $1
+ pgxc_start_datanode_slave $1
+ else
+ eecho Invalid start datanode command option, $2
+ fi
+ return
+ fi
+ done
+ eecho No component named $1 found
+ return 1
+}
+
+# Stop --- all | gtm [master|slave|all] | gtm_proxy | coordinator [master|slave|all] | datanode [master|slave|all] | nodename [master|slave|all]
+function pgxc_stop_something
+{
+ # Dispatch a "stop" command: $1 selects what to stop (a component class
+ # or a configured node name), $2 optionally selects master/slave/all.
+ local i
+
+ log_echo pgxc_stop_something'('$*')'
+ if [ "$1" == "all" ] || [ "$1" == "" ]; then
+ stop_all
+ return
+ fi
+ if [ "$1" == "gtm" ] || [ "$1" == "$gtmName" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_monitor -Z gtm -p $gtmMasterPort -h $gtmMasterServer
+ if [ $? -ne 0 ]; then
+ eecho GTM master is not running. Cannot stop it.
+ return 1
+ fi
+ pgxc_stop_gtm_master
+ return;
+ fi
+ if [ "$2" == "slave" ]; then
+ if [ $gtmSlave != y ]; then
+ eecho GTM Slave is not configured. Cannot stop.
+ return;
+ fi
+ pgxc_monitor -Z gtm -p $gtmSlavePort -h $gtmSlaveServer
+ if [ $? -eq 0 ]; then
+ pgxc_stop_gtm_slave
+ else
+ eecho GTM slave is not running. Cannot stop it.
+ fi
+ return;
+ fi
+ if [ "$2" == "all" ] || [ "$2" == "" ]; then
+ # Stop master and (when configured) slave, skipping whichever
+ # is not actually running.
+ pgxc_monitor -Z gtm -p $gtmMasterPort -h $gtmMasterServer
+ if [ $? -ne 0 ]; then
+ eecho GTM master is not running.
+ else
+ pgxc_stop_gtm_master
+ fi
+ if [ $gtmSlave == y ]; then
+ pgxc_monitor -Z gtm -p $gtmSlavePort -h $gtmSlaveServer
+ if [ $? -eq 0 ]; then
+ pgxc_stop_gtm_slave
+ else
+ eecho GTM slave is not running
+ fi
+ fi
+ return
+ else
+ eecho Invalid stop gtm option, $2
+ return 2
+ fi
+ return
+ fi
+ if [ "$1" == "gtm_proxy" ]; then
+ pgxc_stop_gtm_proxy_all
+ return
+ fi
+ if [ "$1" == "coordinator" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_stop_coordinator_master_all
+ elif [ "$2" == "slave" ]; then
+ pgxc_stop_coordinator_slave_all
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_stop_coordinator_master_all
+ pgxc_stop_coordinator_slave_all
+ else
+ eecho Invalid argument for stop coordinator command, $2
+ fi
+ return
+ fi
+ if [ "$1" == "datanode" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_stop_datanode_master_all
+ elif [ "$2" == "slave" ]; then
+ pgxc_stop_datanode_slave_all
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_stop_datanode_master_all
+ pgxc_stop_datanode_slave_all
+ else
+ eecho Invalid argument for stop datanode command, $2
+ fi
+ return
+ fi
+ # General nodename specification
+ # GTM case: has been handled
+ # GTM proxy?
+ for ((i=0; i<${#gtmProxyNames[@]}; i++)); do
+ if [ "$1" == "${gtmProxyNames[$i]}" ]; then
+ pgxc_stop_gtm_proxy $1
+ return
+ fi
+ done
+ # Coordinator?
+ for ((i=0; i<${#coordNames[@]}; i++)); do
+ if [ "$1" == "${coordNames[$i]}" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_stop_coordinator_master $1
+ elif [ "$2" == "slave" ]; then
+ pgxc_stop_coordinator_slave $1
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_stop_coordinator_master $1
+ pgxc_stop_coordinator_slave $1
+ else
+ eecho Invalid stop coordinator command option, $2
+ fi
+ return
+ fi
+ done
+ # Datanode?
+ for ((i=0; i<${#datanodeNames[@]}; i++)); do
+ if [ "$1" == "${datanodeNames[$i]}" ]; then
+ if [ "$2" == "master" ]; then
+ pgxc_stop_datanode_master $1
+ elif [ "$2" == "slave" ]; then
+ pgxc_stop_datanode_slave $1
+ elif [ "$2" == "all" ] || [ "$2" == "" ]; then
+ pgxc_stop_datanode_master $1
+ pgxc_stop_datanode_slave $1
+ else
+ eecho Invalid stop datanode command option, $2
+ fi
+ return
+ fi
+ done
+ eecho No component named $1 found
+ return 1
+}
+
+# failover --- gtm | component name
+function pgxc_failover_something
+{
+ # Dispatch a "failover" command to the GTM, a coordinator or a datanode
+ # depending on which configured component matches $1.
+ local i
+
+ log_echo pgxc_failover_something'('$*')'
+ if [ "$1" == "gtm" ] || [ "$1" == "$gtmName" ]; then
+ pgxc_failover_gtm
+ # Proxies must reconnect to the promoted GTM.
+ pgxc_reconnect_gtm_proxy_all
+ return
+ fi
+ for ((i=0; i<${#coordNames[@]};i++)); do
+ if [ "$1" == ${coordNames[$i]} ]; then
+ pgxc_failover_coordinator $1
+ return
+ fi
+ done
+ for ((i=0; i<${#datanodeNames[@]}; i++)); do
+ if [ "$1" == ${datanodeNames[$i]} ]; then
+ pgxc_failover_datanode $1
+ return
+ fi
+ done
+ eecho No such component found to failover $1
+ return 1
+}
+
+function pgxc_kill_something
+{
+ # Dispatch a "kill" command: $1 names a gtm_proxy (no role needed), or the
+ # GTM / a coordinator / a datanode, in which case $2 must be master|slave.
+ local n
+
+ log_echo pgxc_kill_something'('$*')'
+ if [ "$1" == "" ]; then
+ return 2
+ fi
+ for ((n=0; n<${#gtmProxyNames[@]}; n++)); do
+ if [ "$1" == "${gtmProxyNames[$n]}" ]; then
+ pgxc_kill_gtm_proxy $1
+ return
+ fi
+ done
+ if [ "$2" != "master" ] && [ "$2" != "slave" ]; then
+ eecho Specify master or slave.
+ return 2
+ fi
+ if [ $1 == $gtmName ]; then
+ if [ $2 == master ]; then
+ pgxc_kill_gtm_master
+ else
+ pgxc_kill_gtm_slave
+ fi
+ return;
+ fi
+ for ((n=0; n<${#coordNames[@]}; n++)); do
+ if [ $1 == ${coordNames[$n]} ]; then
+ if [ $2 == master ]; then
+ pgxc_kill_coordinator_master $1
+ else
+ pgxc_kill_coordinator_slave $1
+ fi
+ return;
+ fi
+ done
+ for ((n=0; n<${#datanodeNames[@]}; n++)); do
+ if [ $1 == ${datanodeNames[$n]} ]; then
+ if [ $2 == master ]; then
+ pgxc_kill_datanode_master $1
+ else
+ pgxc_kill_datanode_slave $1
+ fi
+ return;
+ fi
+ done
+ # Consistent with pgxc_start_something/pgxc_stop_something: report
+ # unknown component names instead of returning silently.
+ eecho No component named $1 found
+ return 1
+}
+
+# Brute-force cleanup: SIGKILL every postgres/gtm_proxy/gtm process owned
+# by $pgxcUser on every configured server.
+function kill_all
+{
+ log_echo kill_all'('$*')'
+ doall killall -u $pgxcUser -9 postgres gtm_proxy gtm
+}
+
+# Developer helper: rebuild the cluster from scratch, then exercise a
+# coordinator failover by killing coord1's master and failing it over.
+function test_failover
+{
+ kill_all
+ init_all
+ start_all
+ pgxc_kill_coordinator_master coord1
+ pgxc_failover_coordinator coord1
+}
+
+##############################################################################
+#
+# Help commands
+#
+##############################################################################
+
+# Print the list of interactive commands accepted by the main loop below.
+function help_command
+{
+ echo Command you can type:
+ echo " " - clean Createdb Createuser deploy Dropdb Dropuser end failover
+ echo " " - init kill monitor prepare Psql q start stop Vacuumdb xcset xcshow
+ echo For details, type cmdname ?
+
+ echo
+ echo If you are familiar with internal functions, you can type "function" name and its argument directory.
+ echo pgxc_ctl will also accept any command for bash.
+}
+
+###############################################################################
+#
+# EXECUTING SECTION
+#
+###############################################################################
+
+#=======================================================
+# Things to be done at first
+#=======================================================
+
+# Handle options
+progname=$0
+moretodo=y
+cmd_with_log=null
+# Consume leading command-line options; the first non-option token and
+# everything after it are left in $@ as the initial interactive command.
+while [ $moretodo == y ]; do
+ if [ $# -gt 0 ]; then
+ case $1 in
+ -v )
+ shift;
+ verbose=y;
+ continue;;
+ --verbose )
+ shift;
+ verbose=y;
+ continue;;
+ --silent )
+ verbose=n;
+ continue;;
+ -d ) # debug option
+ shift;
+ DEBUG=y;
+ continue;;
+ --debug )
+ shift;
+ DEBUG=y;
+ continue;;
+ -c ) # Configuration file
+ shift;
+ if [ $# -le 0 ]; then
+ echo ERROR: no -c option value found
+ exit 1
+ else
+ configFile=$1
+ shift
+ fi;
+ continue;;
+ --configuration ) # Configuration file
+ shift;
+ if [ $# -le 0 ]; then
+ echo ERROR: no --configuration option value found
+ exit 1
+ else
+ configFile=$1
+ shift
+ fi;
+ continue;;
+ -f ) # input_file
+ shift;
+ if [ "$1" == "" ]; then
+ echo Specify input file
+ else
+ # Re-run this script reading commands from the file.
+ $progname < $1
+ fi
+ continue;;
+ --interactive ) # interactive
+ shift;
+ interactive=y
+ continue;;
+ --batch )
+ shift
+ interactive=n
+ continue;;
+ --with-log )
+ shift
+ cmd_with_log=y
+ continue;;
+ --without-log )
+ shift
+ cmd_with_log=n
+ continue;;
+ * )
+ moretodo=n
+ continue;;
+ esac
+ else
+ moretodo=n
+ fi
+done
+
+# Read configuration file --> Should be activated only when debug option is off
+if [ -f $configFile ]; then
+ source $configFile
+fi
+# Log option can be overridden by command-line option
+if [ "$cmd_with_log" != "null" ]; then
+ logOpt=$cmd_with_log
+fi
+if [ "$logOpt" == "y" ]; then
+ set_log_file
+fi
+
+log_echo '===== Start PGXC_CTL ==========' `date`
+
+# Check configuration -- May need more detailed check.
+
+# Check if slaves are configured and makeup each server to N/A if needed
+handle_no_slaves
+# Construct the server list
+makeServerList
+
+# For interactive operation ---> Finally this should be a part of pgxc_ctl interactive command
+# interactive=y --> should be set by options
+# Main command loop: the first iteration consumes any command left on the
+# command line (then exits after one pass); subsequent iterations prompt
+# and read from stdin.  Each command is split into name + up to four
+# positional parameters plus the remainder.
+firstline=y
+lastline=n
+while [ 1 ]; do
+ if [ $lastline = "y" ]; then
+ break
+ fi
+ if [ $firstline == "y" ] && [ "$1" != "" ]; then
+ cmdname=$1; shift
+ p1=$1; shift
+ p2=$1; shift
+ p3=$1; shift
+ p4=$1; shift
+ cmdparam=$*
+ firstline=n
+ lastline=y
+ set_interactive off
+ else
+ echo -n "$xc_prompt"
+ read cmdname p1 p2 p3 p4 cmdparam
+ fi
+ log_echo_with_date '======' USER COMMAND '=====' $cmdname $p1 $p2 $p3 $p4 $cmdparam
+
+ if [ "$cmdname" == "" ]; then
+ continue
+ fi
+ if [ "$cmdname" == "exit" ] || [ "$cmdname" == "q" ]; then
+ if [ "$p1" == '?' ]; then
+ echo Exit command finishes pgxc_ctl.
+ continue
+ fi
+ break
+ fi
+ if [ "$cmdname" == "?" ] || [ "$cmdname" == "help" ]; then
+ help_command
+ continue
+ fi
+
+ # Init --- Now accepts only all or default to initialize everything here.
+ # If you'd like to initialize individual component, use internal function
+ # directly.
+ if [ "$cmdname" == "init" ]; then
+ if [ "$p1" != "" ] && [ "$p1" != "all" ]; then
+ eecho Invalid init command argument, $p1
+ continue
+ fi
+ init_all
+ continue
+ fi
+
+ # Start --- all | gtm [master|slave|all] | gtm_proxy | coordinator [master|slave|all] | datanode [master|slave|all] | nodename [master|slave|all]
+ if [ "$cmdname" == "start" ]; then
+ pgxc_start_something $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ fi
+
+ # Stop --- all | gtm [master|slave|all] | gtm_proxy | coordinator [master|slave|all] | datanode [master|slave|all] | nodename [master|slave|all]
+ if [ "$cmdname" == "stop" ]; then
+ pgxc_stop_something $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ fi
+ # Kill -- kills processes with the kill command (all, or one component).
+ if [ "$cmdname" == "kill" ]; then
+ if [ "$p1" == "" ]; then
+ eecho Specify what to kill "("all/component name")"
+ elif [ "$p1" == "all" ]; then
+ kill_all
+ else
+ pgxc_kill_something $p1 $p2 $p3 $p4 $cmdparam
+ fi
+ # Always restart the loop: the command has been fully handled here.
+ continue
+ fi
+ if [ "$cmdname" == "Psql" ]; then
+ if [ "$p1" != "-" ]; then
+ pgxc_select_coordinator
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ psql -p $selected_coord_port -h $selected_coord_host $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ else
+ pgxc_find_coordinator $p2
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ psql -p $selected_coord_port -h $selected_coord_host $p3 $p4 $cmdparam
+ continue
+ fi
+ continue
+ fi
+ if [ "$cmdname" == "clean" ]; then
+ if [ "$p1" == "all" ] || [ "$p1" == "" ]; then
+ clean_all
+ else
+ pgxc_clean_node $p1 $p2 $p3 $p4 $cmdparam
+ fi
+ continue
+ fi
+
+ # failover --- gtm | component name
+ if [ "$cmdname" == "failover" ]; then
+ pgxc_failover_something $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ fi
+ if [ "$cmdname" == "Createdb" ]; then
+ if [ "$p1" == "" ]; then
+ eecho Specify database name to create
+ continue
+ fi
+ if [ "$p1" == "-" ]; then
+ pgxc_find_coordinator $p2
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ createdb -p $selected_coord_port -h $selected_coord_host $p3 $p4 $cmdparam
+ continue
+ else
+ pgxc_select_coordinator
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ createdb -p $selected_coord_port -h $selected_coord_host $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ fi
+ fi
+ if [ "$cmdname" == "Dropdb" ]; then
+ if [ "$p1" == "" ]; then
+ eecho Specify database name to drop
+ continue
+ fi
+ if [ "$p1" == "-" ]; then
+ pgxc_find_coordinator $p2
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ if [ "$p3" == "" ]; then
+ echo Specify database name to drop
+ continue
+ fi
+ pgxc_clean_connection_all $p3
+ dropdb -p $selected_coord_port -h $selected_coord_host $p3 $p4 $cmdparam
+ continue
+ else
+ pgxc_select_coordinator
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ pgxc_clean_connection_all $p1
+ dropdb -p $selected_coord_port -h $selected_coord_host $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ fi
+ fi
+ if [ "$cmdname" == "Createuser" ]; then
+ if [ "$p1" == "" ]; then
+ eecho Specify coordinator user name to create
+ continue
+ fi
+ if [ "$p1" == "-" ]; then
+ pgxc_find_coordinator $p2
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ createuser -p $selected_coord_port -h $selected_coord_host $p3 $p4 $cmdparam
+ continue
+ else
+ pgxc_select_coordinator
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ createuser -p $selected_coord_port -h $selected_coord_host $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ fi
+ continue
+ fi
+ if [ "$cmdname" == "Dropuser" ]; then
+ if [ "$p1" == "" ]; then
+ eecho Specify user name to drop
+ continue
+ fi
+ if [ "$p1" == "-" ]; then
+ pgxc_find_coordinator $p2
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ # we may need to clean connection for this user
+ dropuser -p $selected_coord_port -h $selected_coord_host $p3 $p4 $cmdparam
+ continue
+ else
+ pgxc_select_coordinator
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ # we may need to clean connection for this user
+ dropuser -p $selected_coord_port -h $selected_coord_host $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ fi
+ continue
+ fi
+ if [ "$cmdname" == "Vacuumdb" ]; then
+ if [ "$p1" != "-" ]; then
+ pgxc_select_coordinator
+ continue
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ vacuumdb -p $selected_coord_port -h $selected_coord_host $p1 $p2 $p3 $p4 $cmdparam
+ continue
+ else
+ pgxc_find_coordinator $p2
+ if [ "$selected_coord" == "none" ]; then
+ eecho No available coordinator found
+ continue;
+ fi
+ vacuumdb -p $selected_coord_port -h $selected_coord_host $p3 $p4 $cmdparam
+ continue
+ fi
+ fi
+ if [ "$cmdname" == "prepare" ]; then
+ if [ "$p1" == "config" ]; then
+ create_config_file_template
+ else
+ eecho Type prepare config
+ fi
+ continue
+ fi
+ if [ "$cmdname" == "monitor" ]; then
+ if [ "$p1" == "all" ]; then
+ monitor_components
+ else
+ pgxc_ctl_monitor $p1 $p2 $p3 $p4 $cmdparam
+ fi
+ continue
+ fi
+ if [ "$cmdname" == "xcshow" ]; then
+ if [ "$p1" == "" ]; then
+ echo Specify item to show
+ elif [ "$p1" == "config" ]; then
+ print_config
+ elif [ "$p1" == "verbose" ]; then
+ echo verbose=$verbose
+ elif [ "$p1" == "interactive" ]; then
+ echo interactive=$interactive
+ elif [ "$p1" == "log" ]; then
+ echo -n log option = $logOpt
+ if [ "$logOpt" == "y" ]; then
+ echo , logDir: $logDir
+ else
+ echo ""
+ fi
+ elif [ "$p1" == "backup" ]; then
+ echo -n config backup = $configBackup
+ if [ "$configBackup" == "y" ]; then
+ echo "backup host:" $configBackupHost, "backup file:" $configBackupFile
+ else
+ echo ""
+ fi
+ elif [ "$p1" == "gtm" ]; then
+ echo GTM list
+ pgxc_ctl_show_component $gtmName
+ elif [ "$p1" == "gtm_proxy" ]; then
+ pgxc_ctl_show_gtm_proxy_all
+ elif [ "$p1" == "coordinator" ]; then
+ pgxc_ctl_show_coordinator_all
+ elif [ "$p1" == "datanode" ]; then
+ pgxc_ctl_show_datanode_all
+ else
+ pgxc_ctl_show_component $p1 $p2 $p3 $p4 $cmdparam
+ fi
+ continue
+ fi
+ if [ "$cmdname" == "xcset" ]; then
+ if [ "$p1" == "verbose" ]; then
+ set_verbose $p2
+ elif [ "$p1" == "log" ]; then
+ if [ "$p2" == "" ]; then
+ oeecho Specify log file name.
+ else
+ change_log_file $p2
+ fi
+ elif [ "$p1" == "interactive" ]; then
+ set_interactive $p2
+ else
+ echo "Invalid xcset option $p1"
+ fi
+ continue
+ fi
+ if [ "$cmdname" == "deploy" ]; then
+ if [ "$p1" == "all" ]; then
+ pgxc_deploy_all
+ elif [ "$p1" == "" ]; then
+ eecho Specify server name to deploy.
+ else
+ pgxc_deploy_individual $p1
+ fi
+ continue
+ fi
+ # Need the following trick to handle variable value assignment and echo.
+ #
+ # Note: we need to write the command output to the log too.
+ cat > $localTmpDir/wk.cmd <<EOF
+$cmdname $p1 $p2 $p3 $p4 $cmdparam
+EOF
+ source $localTmpDir/wk.cmd
+ rm $localTmpDir/wk.cmd
+ continue
+done
diff --git a/contrib/pgxc_ctl/pgxc_ctl.c b/contrib/pgxc_ctl/pgxc_ctl.c
new file mode 100644
index 0000000000..5e78f493f9
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl.c
@@ -0,0 +1,605 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgxc_ctl.c
+ *
+ * Main module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * PXC_CTL Postgres-XC configurator and operation tool
+ *
+ *
+ * Command line options
+ *
+ * -c --configuration file : configuration file. Relative path
+ * start at $HOME/.pgxc_ctl or homedir if
+ * specified by --home option
+ * --home homedir : home directory of pgxc_ctl. Default is
+ * $HOME/.pgxc_ctl. You can override this
+ * with PGXC_CTL_HOME environment or option.
+ * Command argument has the highest priority.
+ *
+ * -v | --verbose: verbose mode. You can set your default in
+ * pgxc_ctl_rc file at home.
+ *
+ * --silent: Opposite to --verbose.
+ *
+ * -V | --version: prints out the version
+ *
+ * -l | --logdir dir: Log directory. Default is $home/pgxc_log
+ *
+ * -L | --logfile file: log file. Default is the timestamp.
+ * Relative path starts with --logdir.
+ *
+ */
+
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <getopt.h>
+
+#include "config.h"
+#include "variables.h"
+#include "pgxc_ctl.h"
+#include "bash_handler.h"
+#include "signature.h"
+#include "pgxc_ctl_log.h"
+#include "varnames.h"
+#include "do_command.h"
+#include "utils.h"
+
+/*
+ * Common global variable
+ */
+char pgxc_ctl_home[MAXPATH+1];
+char pgxc_ctl_bash_path[MAXPATH+1];
+char pgxc_ctl_config_path[MAXPATH+1];
+char progname[MAXPATH+1];
+char *myName;
+char *defaultDatabase;
+#ifdef XCP
+#define versionString "V9.2 for Postgres-XL 9.2"
+#else
+#define versionString "V1.0 for Postgres-XC 1.1"
+#endif
+
+FILE *inF;
+FILE *outF;
+
+static void build_pgxc_ctl_home(char *home);
+static void trim_trailing_slash(char *path);
+static void startLog(char *path, char *logFileNam);
+static void print_version(void);
+static void print_help(void);
+
/*
 * Remove trailing '/' characters from path, in place.
 *
 * The first character of the string is never removed, so a path
 * consisting only of slashes is reduced to "/" rather than "".
 * An empty string is left untouched.
 *
 * BUGFIX: the original wrote "last = 0; last--;" which NULLs the
 * pointer itself (and then decrements a null pointer -- undefined
 * behavior) instead of terminating the string at *last.
 */
static void trim_trailing_slash(char *path)
{
    char *curr = path;
    char *last = path;

    /* Advance last to the final character of the string. */
    while (*curr)
    {
        last = curr;
        curr++;
    }
    /* Strip '/' characters from the end, never touching path[0]. */
    while (last != path)
    {
        if (*last == '/')
        {
            *last = '\0';
            last--;
            continue;
        }
        else
            return;
    }
}
+
+
+static void build_pgxc_ctl_home(char *home)
+{
+ char *env_pgxc_ctl_home = getenv(PGXC_CTL_HOME);
+ char *env_home = getenv(HOME); /* We assume this is always available */
+
+ if (home)
+ {
+ if (home[0] == '/')
+ {
+ /* Absolute path */
+ strncpy(pgxc_ctl_home, home, MAXPATH);
+ goto set_bash;
+ }
+ else
+ {
+ /* Relative path */
+ trim_trailing_slash(home);
+ snprintf(pgxc_ctl_home, MAXPATH, "%s/%s", env_home, home);
+ goto set_bash;
+ }
+ }
+ if ((env_pgxc_ctl_home = getenv(PGXC_CTL_HOME)) == NULL)
+ {
+ snprintf(pgxc_ctl_home, MAXPATH, "%s/%s", env_home, pgxc_ctl_home_def);
+ goto set_bash;
+ }
+ if (env_pgxc_ctl_home[0] == '/') /* Absoute path */
+ {
+ strncpy(pgxc_ctl_home, env_pgxc_ctl_home, MAXPATH);
+ goto set_bash;
+ }
+ trim_trailing_slash(env_pgxc_ctl_home);
+ if (env_pgxc_ctl_home[0] == '\0' || env_pgxc_ctl_home[0] == ' ' || env_pgxc_ctl_home[0] == '\t')
+ {
+ /* Null environment */
+ snprintf(pgxc_ctl_home, MAXPATH, "%s/%s", env_home, pgxc_ctl_home_def);
+ goto set_bash;
+ }
+ snprintf(pgxc_ctl_home, MAXPATH, "%s/%s", env_home, home);
+ goto set_bash;
+
+set_bash:
+ snprintf(pgxc_ctl_bash_path, MAXPATH, "%s/%s", pgxc_ctl_home, PGXC_CTL_BASH);
+ /*
+ * Create home dir if necessary and change current directory to it.
+ */
+ {
+ struct stat buf;
+ char cmd[MAXLINE+1];
+
+ if (stat(pgxc_ctl_home, &buf) ==0)
+ {
+ if (S_ISDIR(buf.st_mode))
+ {
+ Chdir(pgxc_ctl_home, TRUE);
+ return;
+ }
+ else
+ {
+ fprintf(stderr, "%s is not directory. Check your configurfation\n", pgxc_ctl_home);
+ exit(1);
+ }
+ }
+ snprintf(cmd, MAXLINE, "mkdir -p %s", pgxc_ctl_home);
+ system(cmd);
+ if (stat(pgxc_ctl_home, &buf) ==0)
+ {
+ if (S_ISDIR(buf.st_mode))
+ {
+ Chdir(pgxc_ctl_home, TRUE);
+ return;
+ }
+ else
+ {
+ fprintf(stderr, "Creating %s directory failed. Check your configuration\n", pgxc_ctl_home);
+ exit(1);
+ }
+ }
+ fprintf(stderr, "Creating directory %s failed. %s\n", pgxc_ctl_home, strerror(errno));
+ exit(1);
+ }
+ return;
+}
+
+
/*
 * Resolve the configuration file location into pgxc_ctl_config_path.
 *
 * path: optional -c/--configuration command-line value; when given it
 * overrides the configFile variable before resolution.  An absolute
 * value is used as-is; a relative value is taken under pgxc_ctl_home;
 * when no value is set at all, DEFAULT_CONF_FILE_NAME under
 * pgxc_ctl_home is tried.
 *
 * On the no-value path a missing default is tolerated: the error is
 * logged and pgxc_ctl_config_path is cleared to "" so the caller can
 * proceed without a configuration file.  On the explicit-value paths a
 * missing/non-regular file is only logged (elog(ERROR) here does not
 * terminate the process -- NOTE(review): confirm against pgxc_ctl_log.c).
 */
static void build_configuration_path(char *path)
{
    struct stat statbuf;
    int rr;

    /* Command-line value wins over anything read from rc files. */
    if (path)
        reset_var_val(VAR_configFile, path);
    if (!find_var(VAR_configFile) || !sval(VAR_configFile) || (sval(VAR_configFile)[0] == 0))
    {
        /* Default */
        snprintf(pgxc_ctl_config_path, MAXPATH, "%s/%s", pgxc_ctl_home, DEFAULT_CONF_FILE_NAME);
        rr = stat(pgxc_ctl_config_path, &statbuf);
        if (rr || !S_ISREG(statbuf.st_mode))
        {
            /* No configuration specified and the default does not apply --> simply ignore */
            elog(ERROR, "ERROR: Default configuration file \"%s\" was not found while no configuration file was specified\n",
                 pgxc_ctl_config_path);
            pgxc_ctl_config_path[0] = 0;
            return;
        }
    }
    else if (sval(VAR_configFile)[0] == '/')
    {
        /* Absolute path */
        strncpy(pgxc_ctl_config_path, sval(VAR_configFile), MAXPATH);
    }
    else
    {
        /* Relative path from $pgxc_ctl_home */
        snprintf(pgxc_ctl_config_path, MAXPATH, "%s/%s", pgxc_ctl_home, sval(VAR_configFile));
    }
    /* Final sanity check on whatever path was chosen above. */
    rr = stat(pgxc_ctl_config_path, &statbuf);
    if (rr || !S_ISREG(statbuf.st_mode))
    {
        if (rr)
            elog(ERROR, "ERROR: File \"%s\" not found or not a regular file. %s\n",
                 pgxc_ctl_config_path, strerror(errno));
        else
            elog(ERROR, "ERROR: File \"%s\" not found or not a regular file",
                 pgxc_ctl_config_path);
    }
    return;
}
+
+
+static void read_configuration(void)
+{
+ FILE *conf;
+ char cmd[MAXPATH+1];
+
+ install_pgxc_ctl_bash(pgxc_ctl_bash_path);
+ if (pgxc_ctl_config_path[0])
+ snprintf(cmd, MAXPATH, "%s --home %s --configuration %s",
+ pgxc_ctl_bash_path, pgxc_ctl_home, pgxc_ctl_config_path);
+ else
+ snprintf(cmd, MAXPATH, "%s --home %s", pgxc_ctl_bash_path, pgxc_ctl_home);
+ elog(NOTICE, "Reading configuration using %s\n", cmd);
+ conf = popen(cmd, "r");
+ if (conf == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot execute %s, %s", cmd, strerror(errno));
+ return;
+ }
+ read_vars(conf);
+ fclose(conf);
+ uninstall_pgxc_ctl_bash(pgxc_ctl_bash_path);
+ elog(INFO, "Finished to read configuration.\n");
+}
+
/*
 * Ensure the bash helper script exists as a regular file at path,
 * installing it if missing.  Exits the process if the script still
 * cannot be found after installation.
 *
 * BUGFIX: the original tested S_ISREG(buf.st_mode) after the second
 * stat() without checking its return value, reading an uninitialized
 * struct stat when the call failed.  Also fixed the "caould" typo in
 * the error message.
 */
static void prepare_pgxc_ctl_bash(char *path)
{
    struct stat buf;
    int rc;

    rc = stat(path, &buf);
    if (rc)
        install_pgxc_ctl_bash(path);
    else if (S_ISREG(buf.st_mode))
        return;
    /* Either just installed, or path existed but was not a regular file. */
    rc = stat(path, &buf);
    if (rc == 0 && S_ISREG(buf.st_mode))
        return;
    fprintf(stderr, "Error: could not install bash script %s\n", path);
    exit(1);
}
+
+static void pgxcCtlMkdir(char *path)
+{
+ char cmd[MAXPATH+1];
+
+ snprintf(cmd, MAXPATH, "mkdir -p %s", path);
+ system(cmd);
+}
+
+static void startLog(char *path, char *logFileNam)
+{
+ char logFilePath[MAXPATH+1];
+
+ if(path)
+ {
+ trim_trailing_slash(path);
+ pgxcCtlMkdir(path);
+ if(logFileNam)
+ {
+ if (logFileNam[0] == '/')
+ {
+ fprintf(stderr, "ERROR: both --logdir and --logfile are specified and logfile was abosolute path.\n");
+ exit(1);
+ }
+ if (path[0] == '/')
+ snprintf(logFilePath, MAXPATH, "%s/%s", path, logFileNam);
+ else
+ snprintf(logFilePath, MAXPATH, "%s/%s/%s", pgxc_ctl_home, path, logFileNam);
+ initLog(NULL, logFilePath);
+ }
+ else
+ {
+ if (path[0] == '/')
+ initLog(path, NULL);
+ else
+ {
+ snprintf(logFilePath, MAXPATH, "%s/%s", pgxc_ctl_home, path);
+ initLog(logFilePath, NULL);
+ }
+ }
+ }
+ else
+ {
+ if (logFileNam && logFileNam[0] == '/')
+ {
+ /* This is used as log file path */
+ initLog(NULL, logFileNam);
+ return;
+ }
+ else
+ {
+ snprintf(logFilePath, MAXPATH, "%s/pgxc_log", pgxc_ctl_home);
+ pgxcCtlMkdir(logFilePath);
+ initLog(logFilePath, NULL);
+ }
+ }
+ return;
+}
+
/*
 * Assign a default to variable name unless it already has a value.
 * A non-NULL val becomes the variable's value; a NULL val clears the
 * variable to an empty state via reset_var().
 */
static void setDefaultIfNeeded(char *name, char *val)
{
    /* Already present with a value: nothing to do. */
    if (find_var(name) && sval(name))
        return;
    if (val)
        reset_var_val(name, val);
    else
        reset_var(name);
}
+
+static void setup_my_env(void)
+{
+ char path[MAXPATH+1];
+ char *home;
+ FILE *ini_env;
+
+ char *selectVarList[] = {
+ VAR_pgxc_ctl_home,
+ VAR_xc_prompt,
+ VAR_verbose,
+ VAR_logDir,
+ VAR_logFile,
+ VAR_tmpDir,
+ VAR_localTmpDir,
+ VAR_configFile,
+ VAR_echoAll,
+ VAR_debug,
+ VAR_printMessage,
+ VAR_logMessage,
+ VAR_defaultDatabase,
+ VAR_pgxcCtlName,
+ VAR_printLocation,
+ VAR_logLocation,
+ NULL
+ };
+
+ ini_env = fopen("/etc/pgxc_ctl", "r");
+ if (ini_env)
+ {
+ read_selected_vars(ini_env, selectVarList);
+ fclose(ini_env);
+ }
+ if ((home = getenv("HOME")))
+ {
+ snprintf(path, MAXPATH, "%s/.pgxc_ctl", getenv("HOME"));
+ if ((ini_env = fopen(path, "r")))
+ {
+ read_selected_vars(ini_env, selectVarList);
+ fclose(ini_env);
+ }
+ }
+ /*
+ * Setup defaults
+ */
+ snprintf(path, MAXPATH, "%s/pgxc_ctl", getenv("HOME"));
+ setDefaultIfNeeded(VAR_pgxc_ctl_home, path);
+ setDefaultIfNeeded(VAR_xc_prompt, "PGXC ");
+ snprintf(path, MAXPATH, "%s/pgxc_ctl/pgxc_log", getenv("HOME"));
+ setDefaultIfNeeded(VAR_logDir, path);
+ setDefaultIfNeeded(VAR_logFile, NULL);
+ setDefaultIfNeeded(VAR_tmpDir, "/tmp");
+ setDefaultIfNeeded(VAR_localTmpDir, "/tmp");
+ setDefaultIfNeeded(VAR_configFile, "pgxc_ctl.conf");
+ setDefaultIfNeeded(VAR_echoAll, "n");
+ setDefaultIfNeeded(VAR_debug, "n");
+ setDefaultIfNeeded(VAR_printMessage, "info");
+ setDefaultIfNeeded(VAR_logMessage, "info");
+ setDefaultIfNeeded(VAR_pgxcCtlName, DefaultName);
+ myName = Strdup(sval(VAR_pgxcCtlName));
+ setDefaultIfNeeded(VAR_defaultDatabase, DefaultDatabase);
+ defaultDatabase = Strdup(sval(VAR_defaultDatabase));
+ setDefaultIfNeeded(VAR_printLocation, "n");
+ setDefaultIfNeeded(VAR_logLocation, "n");
+}
+
+int main(int argc, char *argv[])
+{
+ char *configuration = NULL;
+ char *infile = NULL;
+ char *outfile = NULL;
+ char *verbose = NULL;
+ int version_opt = 0;
+ char *logdir = NULL;
+ char *logfile = NULL;
+ char *home = NULL;
+ int help_opt = 0;
+
+ int c;
+
+ static struct option long_options[] = {
+ {"configuration", required_argument, 0, 'c'},
+ {"silent", no_argument, 0, 1},
+ {"verbose", no_argument, 0, 'v'},
+ {"version", no_argument, 0, 'V'},
+ {"logdir", required_argument, 0, 'l'},
+ {"logfile", required_argument, 0, 'L'},
+ {"home", required_argument, 0, 2},
+ {"infile", required_argument, 0, 'i'},
+ {"outfile", required_argument, 0, 'o'},
+ {"help", no_argument, 0, 'h'},
+ {0, 0, 0, 0}
+ };
+
+ strcpy(progname, argv[0]);
+ init_var_hash();
+
+ while(1) {
+ int option_index = 0;
+
+ c = getopt_long(argc, argv, "i:o:c:vVl:L:h", long_options, &option_index);
+
+ if (c == -1)
+ break;
+ switch(c)
+ {
+ case 1:
+ verbose = "n";
+ break;
+ case 2:
+ if (home)
+ free (home);
+ home = strdup(optarg);
+ break;
+ case 'i':
+ if (infile)
+ free(infile);
+ infile = strdup(optarg);
+ break;
+ case 'o':
+ if (outfile)
+ free(outfile);
+ outfile = strdup(optarg);
+ break;
+ case 'v':
+ verbose = "y";
+ break;
+ case 'V':
+ version_opt = 1;
+ break;
+ case 'l':
+ if (logdir)
+ free(logdir);
+ logdir = strdup(optarg);
+ break;
+ case 'L':
+ if (logfile)
+ free(logfile);
+ logfile = strdup(optarg);
+ break;
+ case 'c':
+ if (configuration)
+ free(configuration);
+ configuration = strdup(optarg);
+ break;
+ case 'h':
+ help_opt = 1;
+ break;
+ default:
+ fprintf(stderr, "Invalid optin value, received code 0%o\n", c);
+ exit(1);
+ }
+ }
+ if (version_opt || help_opt)
+ {
+ if (version_opt)
+ print_version();
+ if (help_opt)
+ print_help();
+ exit(0);
+ }
+ setup_my_env(); /* Read $HOME/.pgxc_ctl */
+ build_pgxc_ctl_home(home);
+ if (infile)
+ reset_var_val(VAR_configFile, infile);
+ if (logdir)
+ reset_var_val(VAR_logDir, logdir);
+ if (logfile)
+ reset_var_val(VAR_logFile, logfile);
+ startLog(sval(VAR_logDir), sval(VAR_logFile));
+ prepare_pgxc_ctl_bash(pgxc_ctl_bash_path);
+ build_configuration_path(configuration);
+ read_configuration();
+ check_configuration();
+ /*
+ * Setop output
+ */
+ if (outfile)
+ {
+ elog(INFO, "Output file: %s\n", outfile);
+ if ((outF = fopen(outfile, "w")))
+ dup2(fileno(outF),2);
+ else
+ elog(ERROR, "ERROR: Cannot open output file %s, %s\n", outfile, strerror(errno));
+ }
+ else
+ outF = stdout;
+ /*
+ * Startup Message
+ */
+ elog(NOTICE, " ******** PGXC_CTL START ***************\n\n");
+ elog(NOTICE, "Current directory: %s\n", pgxc_ctl_home);
+ /*
+ * Setup input
+ */
+ if (infile)
+ {
+ elog(INFO, "Input file: %s\n", infile);
+ inF = fopen(infile, "r");
+ if(inF == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open input file %s, %s\n", infile, strerror(errno));
+ exit(1);
+ }
+ }
+ else
+ inF = stdin;
+ /*
+ * If we have remaing arguments, they will be treated as a command to do. Do this
+ * first, then handle the input from input file specified by -i option.
+ * If it is not found, then exit.
+ */
+#if 0
+ print_vars();
+#endif
+ if (optind < argc)
+ {
+ char orgBuf[MAXLINE + 1];
+ char wkBuf[MAXLINE + 1];
+ orgBuf[0] = 0;
+ while (optind < argc)
+ {
+ strncat(orgBuf, argv[optind++], MAXLINE);
+ strncat(orgBuf, " ", MAXLINE);
+ }
+ strncpy(wkBuf, orgBuf, MAXLINE);
+ do_singleLine(orgBuf, wkBuf);
+ if (infile)
+ do_command(inF, outF);
+ }
+ else
+ do_command(inF, outF);
+ exit(0);
+}
+
+static void print_version(void)
+{
+ printf("Pgxc_ctl %s\n", versionString);
+}
+
/*
 * Print command-line usage to stdout.
 * Fixed user-facing typos: configruration, direcotry, director,
 * inptut, deatils.
 */
static void print_help(void)
{
    printf(
        "pgxc_ctl [option ...] [command]\n"
        "option:\n"
        "   -c or --configuration conf_file: Specify configuration file.\n"
        "   -v or --verbose: Specify verbose output.\n"
        "   -V or --version: Print version and exit.\n"
        "   -l or --logdir log_directory: specifies what directory to write logs.\n"
        "   -L or --logfile log_file: Specifies log file.\n"
        "   --home home_directory: Specifies pgxc_ctl work directory.\n"
        "   -i or --infile input_file: Specifies input file.\n"
        "   -o or --outfile output_file: Specifies output file.\n"
        "   -h or --help: Prints this message and exits.\n"
        "For more details, refer to pgxc_ctl reference manual included in\n"
        "postgres-xc reference manual.\n");
}
diff --git a/contrib/pgxc_ctl/pgxc_ctl.h b/contrib/pgxc_ctl/pgxc_ctl.h
new file mode 100644
index 0000000000..2802da7f66
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl.h
@@ -0,0 +1,58 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgxc_ctl.h
+ *
+ * Configuration module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
#ifndef PGXC_CTL_H
#define PGXC_CTL_H

#include <stdio.h>

/* Common macros */
#define MAXPATH (512-1)
#define PGXC_CTL_HOME "PGXC_CTL_HOME"
#define HOME "HOME"
#define PGXC_CTL_BASH "pgxc_ctl_bash"

#define MAXLINE (8192-1)
#define DEFAULT_CONF_FILE_NAME "pgxc_ctl.conf"

#define pgxc_ctl_home_def "pgxc_ctl"

#define MAXTOKEN (64-1)

#define true 1
#define false 0
#define TRUE 1
#define FALSE 0

/* Global variable definition */
extern char pgxc_ctl_home[];
/*
 * BUGFIX: was declared "pgxc_bash_path", which matches no definition;
 * the array defined in pgxc_ctl.c is pgxc_ctl_bash_path.
 */
extern char pgxc_ctl_bash_path[];
extern char pgxc_ctl_config_path[];
extern char progname[];

/* Important files */
extern FILE *inF;
extern FILE *outF;

/* pg_ctl stop option */
#define IMMEDIATE "immediate"
#define FAST "fast"
#define SMART "smart"

/* My nodename default --> used to ping */
#define DefaultName "pgxc_ctl"
extern char *myName;        /* pgxc_ctl name used to ping */
#define DefaultDatabase "postgres"
extern char *defaultDatabase;

extern void print_simple_node_info(char *nodeName, char *port, char *dir,
                                   char *extraConfig, char *specificExtraConfig);

#endif /* PGXC_CTL_H */
diff --git a/contrib/pgxc_ctl/pgxc_ctl_bash.c b/contrib/pgxc_ctl/pgxc_ctl_bash.c
new file mode 100644
index 0000000000..1e40e66149
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_bash.c
@@ -0,0 +1,671 @@
+/*
+ *-----------------------------------------------------------------------
+ *
+ * pgxc_ctl_bash.c
+ *
+ * Bash script body for Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *------------------------------------------------------------------------
+ *
+ * This file was created by make_signature utility when pgxc_ctl was built.
+ *
+ * pgxc_ctl uses this bash script to configure postgres-xc and read
+ * configuration.
+ *
+ * This provides users very flexible way to configure their own
+ * postgres-xc cluster. For example, by using extra variables and script,
+ * you can save most of your specific hours typing same (or similar)
+ * variable values again and again.
+ */
+
+#include <stddef.h>
+
+/*
+ * Bash script to read pgxc_ctl configuration parameters and write
+ * back to itself.
+ *
+ * This part is written to pgxc_ctl work directory and reads
+ * configuration file, which is also written in bash script.
+ */
+
/*
 * One bash line per array element, NULL-terminated.  Installed to
 * $pgxc_ctl_home at runtime and executed to print configuration
 * variables back to pgxc_ctl (see read_configuration in pgxc_ctl.c).
 *
 * BUGFIX in the embedded WAL-archive loop: the array name was
 * misspelled ("walArchvieSet", so the loop never iterated) and $el was
 * never assigned inside the loop (unlike the coordinator/datanode
 * additional-slave loops); both are fixed below.
 */
char *pgxc_ctl_bash_script[] = {
"#!/bin/bash",
"# Common variables ######################################################################",
"xc_prompt='PGXC$ '",
"interactive=n",
"verbose=n",
"progname=$0",
"",
"bin=pgxc_ctl # Just in case. Never touch this",
"logfile=none",
"",
"#===========================================================",
"#",
"# Extract parsed configuration values",
"#",
"#===========================================================",
"",
"# $1 is variable name of the array to print",
"function print_array",
"{",
" echo -n $1 \" \"",
" eval echo '$'{$1[@]}",
"}",
"",
"",
"function print_values",
"{",
" local i",
" declare -i i",
" local el",
"",
" # Install Directory",
" echo pgxcInstallDir $pgxcInstallDir",
"",
" # Overall",
" echo pgxcOwner $pgxcOwner",
" echo pgxcUser $pgxcUser",
" echo tmpDir $tmpDir",
" echo localTmpDir $localTmpDir",
" echo configBackup $configBackup",
" echo configBackupHost $configBackupHost",
" echo configBackupDir $configBackupDir",
" echo configBackupFile $configBackupFile",
"",
" # GTM overall",
" echo gtmName $gtmName",
"",
" # GTM master",
" echo gtmMasterServer $gtmMasterServer",
" echo gtmMasterPort $gtmMasterPort",
" echo gtmMasterDir $gtmMasterDir",
" echo gtmExtraConfig $gtmExtraConfig",
" echo gtmMasterSpecificExtraConfig $gtmMasterSpecificExtraConfig",
"",
" # GTM slave",
" echo gtmSlave $gtmSlave",
" echo gtmSlaveServer $gtmSlaveServer",
" echo gtmSlavePort $gtmSlavePort",
" echo gtmSlaveDir $gtmSlaveDir",
" echo gtmSlaveSpecificExtraConfig $gtmSlaveSpecificExtraConfig",
"",
" # GTM Proxy",
" echo gtmProxy $gtmProxy",
" print_array gtmProxyNames",
" print_array gtmProxyServers",
" print_array gtmProxyPorts",
" print_array gtmProxyDirs",
" echo gtmPxyExtraConfig $gtmPxyExtraConfig",
" print_array gtmPxySpecificExtraConfig",
"",
" # Coordinators overall",
" print_array coordNames",
" print_array coordPorts",
" print_array poolerPorts",
" print_array coordPgHbaEntries",
"",
" # Coordinators master",
" print_array coordMasterServers",
" print_array coordMasterDirs",
" print_array coordMaxWALSenders",
"",
" # Coordinators slave",
" echo coordSlave $coordSlave",
" echo coordSlaveSync $coordSlaveSync",
" print_array coordSlaveServers",
" print_array coordSlaveDirs",
" print_array coordArchLogDirs",
"",
" # Coordinator Configuration files",
" echo coordExtraConfig $coordExtraConfig",
" print_array coordSpecificExtraConfig",
" echo coordExtraPgHba $coordExtraPgHba",
" print_array coordSpecificExtraPgHba",
"",
" # Coordinator Additional Slaves",
" echo coordAdditionalSlaves $coordAdditionalSlaves",
" if [ \"$coordAdditionalSlaves\" == \"y\" ]; then",
" print_array coordAdditionalSlaveSet",
" for ((i=0; i<${#coordAdditionalSlaveSet[@]}; i++)); do",
" el=${coordAdditionalSlaveSet[$i]}",
" echo -n ${el}_Sync \" \"",
" eval echo '$'\"$el\"_Sync",
" print_array ${el}_Servers",
" print_array ${el}_Dirs",
" print_array ${el}_ArchLogDirs",
" done",
" fi",
"",
" # Datanodes overall",
" echo primaryDatanode $primaryDatanode",
" print_array datanodeNames",
" print_array datanodePorts",
#ifdef XCP
" print_array datanodePoolerPorts",
#endif
" print_array datanodePgHbaEntries",
" ",
" # Datanodes masters",
" print_array datanodeMasterServers",
" print_array datanodeMasterDirs",
" print_array datanodeMaxWALSenders",
" ",
" # Datanodes slaves",
" echo datanodeSlave $datanodeSlave",
" echo datanodeSlaveSync $datanodeSlaveSync",
" print_array datanodeSlaveServers",
" print_array datanodeSlaveDirs",
" print_array datanodeArchLogDirs",
"",
" # Datanode configuration files",
" echo datanodeExtraConfig $datanodeExtraConfig",
" print_array datanodeSpecificExtraConfig",
" echo datanodeExtraPgHba $datanodeExtraPgHba",
" print_array datanodeSpecificExtraPgHba",
"",
" # Datanodes additional slaves",
" echo datanodeAdditionalSlaves $datanodeAdditionalSlaves",
" if [ \"$datanodeAdditionalSlaves\" == \"y\" ]; then",
" print_array datanodeAdditionalSlaveSet",
" for ((i=0; i<${#datanodeAdditionalSlaveSet[@]}; i++)); do",
" el=${datanodeAdditionalSlaveSet[$i]}",
" echo -n ${el}_Sync \" \"",
" eval echo '$'\"$el\"_Sync",
" print_array ${el}_Servers",
" print_array ${el}_Dirs",
" print_array ${el}_ArchLogDirs",
" done",
" fi",
" ",
" # WAL Archives",
" echo walArchive $walArchive",
" print_array walArchiveSet",
" if [ \"$walArchive\" == \"y\" ]; then",
/* BUGFIX: was ${#walArchvieSet[@]} (typo), and el was never assigned */
" for ((i=0; i<${#walArchiveSet[@]}; i++)); do",
" el=${walArchiveSet[$i]}",
" print_array ${el}_source",
" echo -n ${el}_host",
" eval echo '$'\"$el\"_host",
" echo -n ${el}_backupdir",
" eval echo '$'\"$el\"_backupdir",
" done",
" fi",
"}",
"",
"",
"",
"#============================================================",
"#",
"# Common functions",
"#",
"#============================================================",
"",
"# Optionally $1 will be $PGXC_CTL_HOME settings.",
"function set_home",
"{",
" if [ $# > 1 ]; then",
" echo \"Invalid set_home function call\"",
" return 1",
" fi",
" if [ $# == 1 ]; then",
" if [ -d $1 ]; then",
" pgxc_ctl_home=$1",
" else",
" eecho \"set_home: $1 is not a directory.\"",
" return 1",
" fi",
" elif [ $PGXC_CTL_HOME != \"\" ]; then",
" if [ -d $PGXC_CTL_HOME ]; then",
" pgxc_ctl_home=$PGXC_CTL_HOME",
" else",
" eecho \"set_home: env PGXC_CTL_HOME($PGXC_CTL_HOME) is not a directory.\"",
" return 1;",
" fi",
" fi",
" cd $pgxc_ctl_home;",
"}",
"",
"###############################################################################",
"#",
"# EXECUTING SECTION",
"#",
"###############################################################################",
"",
"#=======================================================",
"# Things to be done at first",
"#=======================================================",
"",
"# Handle options",
"progname=$0",
"moretodo=y",
"cmd_with_log=null",
"#set_home",
"if [ -f $pgxc_ctl_home/.pgxc_ctl_rc ]; then",
" source $pgxc_ctl_home/.pgxc_ctl_rc",
"fi",
"",
"configFile=\"\"",
"",
"while [ $moretodo == y ]; do",
" if [ $# -gt 0 ]; then",
" case $1 in",
" -v )",
" shift;",
" verbose=y;",
" continue;;",
" --verbose )",
" shift;",
" verbose=y;",
" continue;;",
" --silent )",
" verbose=n;",
" continue;;",
" -d ) # debug option",
" shift;",
" DEBUG=y;",
" continue;;",
" --debug )",
" shift;",
" DEBUG=y;",
" continue;;",
" -c ) # Configuraton file",
" shift;",
" if [ $# -le 0 ]; then",
" echo ERROR: no -c option value found",
" exit 1",
" else",
" configFile=$1",
" shift",
" fi;",
" continue;;",
" --configuration ) # Configuraion file",
" shift;",
" if [ $# -le 0 ]; then",
" echo ERROR: no --configuration option value found",
" exit 1",
" else",
" configFile=$1",
" shift",
" fi;",
" continue;;",
" --home ) # PGXC_CTL_HOME",
" shift;",
" if [ $# -le 0 ]; then",
" echo ERROR: no pgxc_ctl_home specified",
" exit 1",
" else",
" pgxc_ctl_home=$1",
" cd $pgxc_ctl_home",
" shift",
" fi;",
" continue;;",
" --signature ) # Check signature",
" shift;",
" if [ $# -le 0 ]; then",
" echo ERROR: Signature does not match",
" exit 1",
" fi",
" if [ \"$1\" != \"$signature\" ]; then",
" echo ERROR: Signature does not match",
" exit 1",
" fi",
" shift",
" continue;;",
" * )",
" moretodo=n",
" continue;;",
" esac",
" else",
" moretodo=n",
" fi",
"done",
"",
"echo $signature",
"# Read configuration file --> Should be activated only when debug option is off",
"",
"if [ -f $pgxc_ctl_home/pgxc_ctl_rc ]; then",
" source $pgxc_ctl_home/pgxc_ctl_rc",
"fi",
"",
"if [ \"$configFile\" != \"\" ] && [ -f \"$configFile\" ]; then",
" source $configFile",
"fi",
"# Log option can be overriden by command-line option",
"",
"print_values",
"",
"",
NULL
};
+
+/*
+ * Prototype of pgxc_ctl configuration file.
+ *
+ * It should be self descripting. Can be extracted to your pgxc_ctl
+ * work directory with 'prepare config' command.
+ */
+
+char *pgxc_ctl_conf_prototype[] = {
+"#!/bin/bash",
+"#",
+"# Postgres-XC Configuration file for pgxc_ctl utility. ",
+"#",
+"# Configuration file can be specified as -c option from pgxc_ctl command. Default is",
+"# $PGXC_CTL_HOME/pgxc_ctl.org.",
+"#",
+"# This is bash script so you can make any addition for your convenience to configure",
+"# your Postgres-XC cluster.",
+"#",
+"# Please understand that pgxc_ctl provides only a subset of configuration which pgxc_ctl",
+"# provide. Here's several several assumptions/restrictions pgxc_ctl depends on.",
+"#",
+"# 1) All the resources of pgxc nodes has to be owned by the same user. Same user means",
+"# user with the same user name. User ID may be different from server to server.",
+"# This must be specified as a variable $pgxcOwner.",
+"#",
+"# 2) All the servers must be reacheable via ssh without password. It is highly recommended",
+"# to setup key-based authentication among all the servers.",
+"#",
+"# 3) All the databases in coordinator/datanode has at least one same superuser. Pgxc_ctl",
+"# uses this user to connect to coordinators and datanodes. Again, no password should",
+"# be used to connect. You have many options to do this, pg_hba.conf, pg_ident.conf and",
+"# others. Pgxc_ctl provides a way to configure pg_hba.conf but not pg_ident.conf. This",
+"# will be implemented in the later releases.",
+"#",
+"# 4) Gtm master and slave can have different port to listen, while coordinator and datanode",
+"# slave should be assigned the same port number as master.",
+"#",
+"# 5) Port nuber of a coordinator slave must be the same as its master.",
+"#",
+"# 6) Master and slave are connected using synchronous replication. Asynchronous replication",
+"# have slight (almost none) chance to bring total cluster into inconsistent state.",
+"# This chance is very low and may be negligible. Support of asynchronous replication",
+"# may be supported in the later release.",
+"#",
+"# 7) Each coordinator and datanode can have only one slave each. Cascaded replication and",
+"# multiple slave are not supported in the current pgxc_ctl.",
+"#",
+"# 8) Killing nodes may end up with IPC resource leak, such as semafor and shared memory.",
+"# Only listening port (socket) will be cleaned with clean command.",
+"#",
+"# 9) Backup and restore are not supported in pgxc_ctl at present. This is a big task and",
+"# may need considerable resource.",
+"#",
+"#========================================================================================",
+"#",
+"#",
+"# pgxcInstallDir variable is needed if you invoke \"deploy\" command from pgxc_ctl utility.",
+"# If don't you don't need this variable.",
+"pgxcInstallDir=$HOME/pgxc",
+"#---- OVERALL -----------------------------------------------------------------------------",
+"#",
+"pgxcOwner=koichi # owner of the Postgres-XC databaseo cluster. Here, we use this",
+" # both as linus user and database user. This must be",
+" # the super user of each coordinator and datanode.",
+"pgxcUser=$pgxcOwner # OS user of Postgres-XC owner",
+"",
+"tmpDir=/tmp # temporary dir used in XC servers",
+"localTmpDir=$tmpDir # temporary dir used here locally",
+"",
+"configBackup=n # If you want config file backup, specify y to this value.",
+"configBackupHost=pgxc-linker # host to backup config file",
+"configBackupDir=$HOME/pgxc # Backup directory",
+"configBackupFile=pgxc_ctl.bak # Backup file name --> Need to synchronize when original changed.",
+"",
+"#---- GTM ------------------------------------------------------------------------------------",
+"",
+"# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.",
+"# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update",
+"# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command",
+"# will not stop the current GTM. It is up to the operator.",
+"",
+"#---- Overall -------",
+"gtmName=gtm",
+"",
+"#---- GTM Master -----------------------------------------------",
+"",
+"#---- Overall ----",
+"gtmMasterServer=node13",
+"gtmMasterPort=20001",
+"gtmMasterDir=$HOME/pgxc/nodes/gtm",
+"",
+"#---- Configuration ---",
+"gtmExtraConfig=none # Will be added gtm.conf for both Master and Slave (done at initilization only)",
+"gtmMasterSpecificExtraConfig=none # Will be added to Master's gtm.conf (done at initialization only)",
+"",
+"#---- GTM Slave -----------------------------------------------",
+"",
+"# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave",
+"# for backup.",
+"",
+"#---- Overall ------",
+"gtmSlave=y # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and",
+" # all the following variables will be reset.",
+"gtmSlaveServer=node12 # value none means GTM slave is not available. Give none if you don't configure GTM Slave.",
+"gtmSlavePort=20001 # Not used if you don't configure GTM slave.",
+"gtmSlaveDir=$HOME/pgxc/nodes/gtm # Not used if you don't configure GTM slave.",
+"# Please note that when you have GTM failover, then there will be no slave available until you configure the slave",
+"# again. (pgxc_add_gtm_slave function will handle it)",
+"",
+"#---- Configuration ----",
+"gtmSlaveSpecificExtraConfig=none # Will be added to Slave's gtm.conf (done at initialization only)",
+"",
+"#---- GTM Proxy -------------------------------------------------------------------------------------------------------",
+"# GTM proxy will be selected based upon which server each component runs on.",
+"# When fails over to the slave, the slave inherits its master's gtm proxy. It should be",
+"# reconfigured based upon the new location.",
+"#",
+"# To do so, slave should be restarted. So pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart",
+"#",
+"# You don't have to configure GTM Proxy if you dont' configure GTM slave or you are happy if every component connects",
+"# to GTM Master directly. If you configure GTL slave, you must configure GTM proxy too.",
+"",
+"#---- Shortcuts ------",
+"gtmProxyDir=$HOME/pgxc/nodes/gtm_pxy",
+"",
+"#---- Overall -------",
+"gtmProxy=y # Specify y if you conifugre at least one GTM proxy. You may not configure gtm proxies",
+" # only when you dont' configure GTM slaves.",
+" # If you specify this value not to y, the following parameters will be set to default empty values.",
+" # If we find there're no valid Proxy server names (means, every servers are specified",
+" # as none), then gtmProxy value will be set to \"n\" and all the entries will be set to",
+" # empty values.",
+"gtmProxyNames=(gtm_pxy1 gtm_pxy2 gtm_pxy3 gtm_pxy4) # No used if it is not configured",
+"gtmProxyServers=(node06 node07 node08 node09) # Specify none if you dont' configure it.",
+"gtmProxyPorts=(20001 20001 20001 20001) # Not used if it is not configured.",
+"gtmProxyDirs=($gtmProxyDir $gtmProxyDir $gtmProxyDir $gtmProxyDir) # Not used if it is not configured.",
+"",
+"#---- Configuration ----",
+"gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy. Coordinator section has an example.",
+"gtmPxySpecificExtraConfig=(none none none none)",
+"",
+"#---- Coordinators ----------------------------------------------------------------------------------------------------",
+"",
+"#---- shortcuts ----------",
+"coordMasterDir=$HOME/pgxc/nodes/coord",
+"coordSlaveDir=$HOME/pgxc/nodes/coord_slave",
+"coordArchLogDir=$HOME/pgxc/nodes/coord_archlog",
+"",
+"#---- Overall ------------",
+"coordNames=(coord1 coord2 coord3 coord4) # Master and slave use the same name",
+"coordPorts=(20004 20005 20004 20005) # Master and slave use the same port",
+"poolerPorts=(20010 20011 20010 20011) # Master and slave use the same pooler port",
+"coordPgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts",
+" # the same connection",
+" # This entry allows only $pgxcOwner to connect.",
+" # If you'd like to setup another connection, you should",
+" # supply these entries through files specified below.",
+"# Note: The above parameter is extracted as \"host all all 0.0.0.0/0 trust\". If you don't want",
+"# such setups, specify the value () to this variable and suplly what you want using coordExtraPgHba",
+"# and/or coordSpecificExtraPgHba variables.",
+"",
+"#---- Master -------------",
+"coordMasterServers=(node06 node07 node08 node09) # none means this master is not available",
+"coordMasterDirs=($coordMasterDir $coordMasterDir $coordMasterDir $coordMasterDir)",
+"coordMaxWALsernder=5 # max_wal_senders: needed to configure slave. If zero value is specified,",
+" # it is expected to supply this parameter explicitly by external files",
+" # specified in the following. If you don't configure slaves, leave this value to zero.",
+"coordMaxWALSenders=($coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder)",
+" # max_wal_senders configuration for each coordinator.",
+"",
+"#---- Slave -------------",
+"coordSlave=y # Specify y if you configure at least one coordiantor slave. Otherwise, the following",
+" # configuration parameters will be set to empty values.",
+" # If no effective server names are found (that is, every servers are specified as none),",
+" # then coordSlave value will be set to n and all the following values will be set to",
+" # empty values.",
+"coordSlaveSync=y # Specify to connect with synchronized mode.",
+"coordSlaveServers=(node07 node08 node09 node06) # none means this slave is not available",
+"coordSlaveDirs=($coordSlaveDir $coordSlaveDir $coordSlaveDir $coordSlaveDir)",
+"coordArchLogDirs=($coordArchLogDir $coordArchLogDir $coordArchLogDir $coordArchLogDir)",
+"",
+"#---- Configuration files---",
+"# Need these when you'd like setup specific non-default configuration ",
+"# These files will go to corresponding files for the master.",
+"# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries ",
+"# Or you may supply these files manually.",
+"coordExtraConfig=coordExtraConfig # Extra configuration file for coordinators. ",
+" # This file will be added to all the coordinators'",
+" # postgresql.conf",
+"# Pleae note that the following sets up minimum parameters which you may want to change.",
+"# You can put your postgresql.conf lines here.",
+"cat > $coordExtraConfig <<EOF",
+"#================================================",
+"# Added to all the coordinator postgresql.conf",
+"# Original: $coordExtraConfig",
+"log_destination = 'stderr'",
+"logging_collector = on",
+"log_directory = 'pg_log'",
+"listen_addresses = '*'",
+"max_connections = 100",
+"EOF",
+"",
+"# Additional Configuration file for specific coordinator master.",
+"# You can define each setting by similar means as above.",
+"coordSpecificExtraConfig=(none none none none)",
+"coordExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the coordinators' pg_hba.conf",
+"coordSpecificExtraPgHba=(none none none none)",
+"",
+"#----- Additional Slaves -----",
+"#",
+"# Please note that this section is just a suggestion how we extend the configuration for",
+"# multiple and cascaded replication. They're not used in the current version.",
+"#",
+"coordAdditionalSlaves=n # Additional slave can be specified as follows: where you",
+"coordAdditionalSlaveSet=(cad1) # Each specifies set of slaves. This case, two set of slaves are",
+" # configured",
+"cad1_Sync=n # All the slaves at \"cad1\" are connected with asynchronous mode.",
+" # If not, specify \"y\"",
+" # The following lines specifies detailed configuration for each",
+" # slave tag, cad1. You can define cad2 similarly.",
+"cad1_Servers=(node08 node09 node06 node07) # Hosts",
+"cad1_dir=$HOME/pgxc/nodes/coord_slave_cad1",
+"cad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)",
+"cad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1",
+"cad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)",
+"",
+"",
+"#---- Datanodes -------------------------------------------------------------------------------------------------------",
+"",
+"#---- Shortcuts --------------",
+"datanodeMasterDir=$HOME/pgxc/nodes/dn_master",
+"datanodeSlaveDir=$HOME/pgxc/nodes/dn_slave",
+"datanodeArchLogDir=$HOME/pgxc/nodes/datanode_archlog",
+"",
+"#---- Overall ---------------",
+"#primaryDatanode=datanode1 # Primary Node.",
+"# At present, xc has a priblem to issue ALTER NODE against the primay node. Until it is fixed, the test will be done",
+"# without this feature.",
+"primaryDatanode=datanode1 # Primary Node.",
+"datanodeNames=(datanode1 datanode2 datanode3 datanode4)",
+"datanodePorts=(20008 20009 20008 20009) # Master and slave use the same port!",
+#ifdef XCP
+"datanodePoolerPorts=(20012 20013 20012 20013) # Master and slave use the same port!",
+#endif
+"datanodePgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts",
+" # the same connection",
+" # This list sets up pg_hba.conf for $pgxcOwner user.",
+" # If you'd like to setup other entries, supply them",
+" # through extra configuration files specified below.",
+"# Note: The above parameter is extracted as \"host all all 0.0.0.0/0 trust\". If you don't want",
+"# such setups, specify the value () to this variable and suplly what you want using datanodeExtraPgHba",
+"# and/or datanodeSpecificExtraPgHba variables.",
+"",
+"#---- Master ----------------",
+"datanodeMasterServers=(node06 node07 node08 node09) # none means this master is not available.",
+" # This means that there should be the master but is down.",
+" # The cluster is not operational until the master is",
+" # recovered and ready to run. ",
+"datanodeMasterDirs=($datanodeMasterDir $datanodeMasterDir $datanodeMasterDir $datanodeMasterDir)",
+"datanodeMaxWalSender=5 # max_wal_senders: needed to configure slave. If zero value is ",
+" # specified, it is expected this parameter is explicitly supplied",
+" # by external configuration files.",
+" # If you don't configure slaves, leave this value zero.",
+"datanodeMaxWALSenders=($datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender)",
+" # max_wal_senders configuration for each datanode",
+"",
+"#---- Slave -----------------",
+"datanodeSlave=y # Specify y if you configure at least one coordiantor slave. Otherwise, the following",
+" # configuration parameters will be set to empty values.",
+" # If no effective server names are found (that is, every servers are specified as none),",
+" # then datanodeSlave value will be set to n and all the following values will be set to",
+" # empty values.",
+"datanodeSlaveServers=(node07 node08 node09 node06) # value none means this slave is not available",
+"datanodeSlaveSync=y # If datanode slave is connected in synchronized mode",
+"datanodeSlaveDirs=($datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir)",
+"datanodeArchLogDirs=( $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir )",
+"",
+"# ---- Configuration files ---",
+"# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.",
+"# These files will go to corresponding files for the master.",
+"# Or you may supply these files manually.",
+"datanodeExtraConfig=none # Extra configuration file for datanodes. This file will be added to all the ",
+" # datanodes' postgresql.conf",
+"datanodeSpecificExtraConfig=(none none none none)",
+"datanodeExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the datanodes' postgresql.conf",
+"datanodeSpecificExtraPgHba=(none none none none)",
+"",
+"#----- Additional Slaves -----",
+"datanodeAdditionalSlaves=n # Additional slave can be specified as follows: where you",
+"# datanodeAdditionalSlaveSet=(dad1 dad2) # Each specifies set of slaves. This case, two set of slaves are",
+" # configured",
+"# dad1_Sync=n # All the slaves at \"cad1\" are connected with asynchronous mode.",
+" # If not, specify \"y\"",
+" # The following lines specifies detailed configuration for each",
+" # slave tag, cad1. You can define cad2 similarly.",
+"# dad1_Servers=(node08 node09 node06 node07) # Hosts",
+"# dad1_dir=$HOME/pgxc/nodes/coord_slave_cad1",
+"# dad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)",
+"# dad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1",
+"# dad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)",
+"",
+"#---- WAL archives -------------------------------------------------------------------------------------------------",
+"walArchive=n # If you'd like to configure WAL archive, edit this section.",
+" # Pgxc_ctl assumes that if you configure WAL archive, you configure it",
+" # for all the coordinators and datanodes.",
+" # Default is \"no\". Please specify \"y\" here to turn it on.",
+"#",
+"# End of Configuration Section",
+"#",
+"#==========================================================================================================================",
+"",
+"#========================================================================================================================",
+"# The following is for extension. Just demonstrate how to write such extension. There's no code",
+"# which takes care of them so please ignore the following lines. They are simply ignored by pgxc_ctl.",
+"# No side effects.",
+"#=============<< Beginning of future extension demonistration >> ========================================================",
+"# You can setup more than one backup set for various purposes, such as disaster recovery.",
+"walArchiveSet=(war1 war2)",
+"war1_source=(master) # you can specify master, slave or ano other additional slaves as a source of WAL archive.",
+" # Default is the master",
+"wal1_source=(slave)",
+"wal1_source=(additiona_coordinator_slave_set additional_datanode_slave_set)",
+"war1_host=node10 # All the nodes are backed up at the same host for a given archive set",
+"war1_backupdir=$HOME/pgxc/backup_war1",
+"wal2_source=(master)",
+"war2_host=node11",
+"war2_backupdir=$HOME/pgxc/backup_war2",
+"#=============<< End of future extension demonistration >> ========================================================",
+NULL
+};
diff --git a/contrib/pgxc_ctl/pgxc_ctl_bash.org b/contrib/pgxc_ctl/pgxc_ctl_bash.org
new file mode 100755
index 0000000000..f1c142d298
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_bash.org
@@ -0,0 +1,885 @@
+#!/bin/bash
+#
+# TODO
+# * Backup configuration file (at update, too) to some remote site for pgxc_ctl HA feature too.
+# * Write output of *_ctl, intdb and intgtm result to log files
+# * Write every operation to the log. Log file can be specified with --l file or --log file
+# * Configure log level
+# * Switch log log file
+# * Log option to the configuration file so that this can be failed over.
+# * Log to a remote server?
+# * Multiple log?
+#
+# Configuration file. Configuration file can be specified as -c option of
+# the command like, or PGXCCONFIG environment variable. If both are
+# not specified, the following configuration file will be used.
+#
+# Change in the cluster status due to failover will be added to the configuration file so that
+# new master can be invoked as the master when restarted.
+#
+# All such addition will be tagged with proper comment and date/time info. If you'd like to
+# cancel such changes, you can remove or comment-out such additional lines.
+#
+#
+#==========================================================================================================================
+#
+# Configuration Section
+#
+# This section should be in the file $configFile for
+# user's configuration.
+#
+# Several assumptons:
+# 1) configuration file will be set to data directory.
+# configuration file name is fixed to postgresql.conf
+# 2) pg_hba.conf will be set to data directory. File name is
+# fixed to pg_hba.conf
+#
+#================================================================
+# MEMO
+#
+# max_connections, min_pool_size, max_pool_size --> should be configurable!
+# They're not cluster specific. So we may give a chance to include
+# these specific options to be included from external files.
+# They should not change by failover so they just have to be
+# configured at first time only.
+#===============================================================
+#
+#
+#---- Home dir of pgxc_ctl
+pgxc_ctl_home=$HOME/.pgxc_ctl
+#---- Configuration File
+pgxcInstallDir=$HOME/pgxc
+configFile=$pgxcInstallDir/pgxcConf
+#---- OVERALL -----------------------------------------------------------------------------------------------------------
+#
+pgxcOwner=pgxc # owner of the Postgres-XC database cluster. Here, we use this
+ # both as linus user and database user. This must be
+ # the super user of each coordinator and datanode.
+pgxcUser=$pgxcOwner # OS user of Postgres-XC owner
+
+tmpDir=/tmp # temporary dir used in XC servers
+localTmpDir=$tmpDir # temporary dir used here locally
+
+logOpt=y # If you want log
+logDir=$pgxc_ctl_home/pgxc_ctl_log # Directory to write pgxc_ctl logs
+
+configBackup=y # If you want config file backup
+configBackupHost=pgxc-linker # host to backup config file
+configBackupDir=$pgxcInstallDir
+configBackupFile=`basename $configFile` # Backup file name --> Need to synchronize when original changed.
+
+#---- GTM --------------------------------------------------------------------------------------------------------------
+
+# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.
+# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update
+# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command
+# will not stop the current GTM. It is up to the operator.
+
+#---- Overall -------
+gtmName=gtm
+
+#---- GTM Master -----------------------------------------------
+
+#---- Overall ----
+gtmMasterServer=node13
+gtmMasterPort=20001
+gtmMasterDir=$HOME/pgxc/nodes/gtm
+
+#---- Configuration ---
+gtmExtraConfig=none # Will be added gtm.conf for both Master and Slave (done at initilization only)
+gtmMasterSpecificExtraConfig=none # Will be added to Master's gtm.conf (done at initialization only)
+
+#---- GTM Slave -----------------------------------------------
+
+# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave
+# for backup.
+
+#---- Overall ------
+gtmSlave=y # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and
+ # all the following variables will be reset.
+gtmSlaveServer=node12 # value none means GTM slave is not available. Give none if you don't configure GTM Slave.
+gtmSlavePort=20001 # Not used if you don't configure GTM slave.
+gtmSlaveDir=$HOME/pgxc/nodes/gtm # Not used if you don't configure GTM slave.
+# Please note that when you have GTM failover, then there will be no slave available until you configure the slave
+# again. (pgxc_add_gtm_slave function will handle it)
+
+#---- Configuration ----
+gtmSlaveSpecificExtraConfig=none # Will be added to Slave's gtm.conf (done at initialization only)
+
+#---- GTM Proxy -------------------------------------------------------------------------------------------------------
+# GTM proxy will be selected based upon which server each component runs on.
+# When fails over to the slave, the slave inherits its master's gtm proxy. It should be
+# reconfigured based upon the new location.
+#
+# To do so, slave should be restarted. So pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart
+#
+# You don't have to configure GTM Proxy if you don't configure GTM slave or you are happy if every component connects
+# to GTM Master directly. If you configure GTM slave, you must configure GTM proxy too.
+
+#---- Shortcuts ------
+gtmProxyDir=$HOME/pgxc/nodes/gtm_pxy
+
+#---- Overall -------
+gtmProxy=y	# Specify y if you configure at least one GTM proxy. You may not configure gtm proxies
+					# only when you don't configure GTM slaves.
+ # If you specify this value not to y, the following parameters will be set to default empty values.
+ # If we find there're no valid Proxy server names (means, every servers are specified
+ # as none), then gtmProxy value will be set to "n" and all the entries will be set to
+ # empty values.
+gtmProxyNames=(gtm_pxy1 gtm_pxy2 gtm_pxy3 gtm_pxy4)	# Not used if it is not configured
+gtmProxyServers=(node06 node07 node08 node09)			# Specify none if you don't configure it.
+gtmProxyPorts=(20001 20001 20001 20001) # Not used if it is not configured.
+gtmProxyDirs=($gtmProxyDir $gtmProxyDir $gtmProxyDir $gtmProxyDir) # Not used if it is not configured.
+
+#---- Configuration ----
+gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy
+gtmPxySpecificExtraConfig=(none none none none)
+
+#---- Coordinators ----------------------------------------------------------------------------------------------------
+
+#---- shortcuts ----------
+coordMasterDir=$HOME/pgxc/nodes/coord
+coordSlaveDir=$HOME/pgxc/nodes/coord_slave
+coordArchLogDir=$HOME/pgxc/nodes/coord_archlog
+
+#---- Overall ------------
+coordNames=(coord1 coord2 coord3 coord4) # Master and slave use the same name
+coordPorts=(20004 20005 20004 20005) # Master and slave use the same port
+poolerPorts=(20010 20011 20010 20011) # Master and slave use the same pooler port
+coordPgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This entry allows only $pgxcOwner to connect.
+ # If you'd like to setup another connection, you should
+ # supply these entries through files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and suplly what you want using coordExtraPgHba
+# and/or coordSpecificExtraPgHba variables.
+
+#---- Master -------------
+coordMasterServers=(node06 node07 node08 node09) # none means this master is not available
+coordMasterDirs=($coordMasterDir $coordMasterDir $coordMasterDir $coordMasterDir)
+coordMaxWALsernder=5 # max_wal_senders: needed to configure slave. If zero value is specified,
+ # it is expected to supply this parameter explicitly by external files
+ # specified in the following. If you don't configure slaves, leave this value to zero.
+coordMaxWALSenders=($coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder)
+ # max_wal_senders configuration for each coordinator.
+
+#---- Slave -------------
+coordSlave=y	# Specify y if you configure at least one coordinator slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then coordSlave value will be set to n and all the following values will be set to
+ # empty values.
+coordSlaveSync=y # Specify to connect with synchronized mode.
+coordSlaveServers=(node07 node08 node09 node06) # none means this slave is not available
+coordSlaveDirs=($coordSlaveDir $coordSlaveDir $coordSlaveDir $coordSlaveDir)
+coordArchLogDirs=($coordArchLogDir $coordArchLogDir $coordArchLogDir $coordArchLogDir)
+
+#---- Configuration files---
+# Need these when you'd like setup specific non-default configuration
+# These files will go to corresponding files for the master.
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries
+# Or you may supply these files manually.
+coordExtraConfig=none # Extra configuration file for coordinators. This file will be added to all the coordinators'
+ # postgresql.conf
+coordSpecificExraConfig=(none none none none)
+coordExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the coordinators' pg_hba.conf
+coordSpecificExtraPgHba=(none none none none)
+
+#----- Additional Slaves -----
+coordAdditionalSlaves=n # Additional slave can be specified as follows: where you
+coordAdditionalSlaveSet=(cad1) # Each specifies set of slaves. This case, two set of slaves are
+ # configured
+cad1_Sync=n # All the slaves at "cad1" are connected with asynchronous mode.
+ # If not, specify "y"
+ # The following lines specifies detailed configuration for each
+ # slave tag, cad1. You can define cad2 similarly.
+cad1_Servers=(node08 node09 node06 node07) # Hosts
+cad1_dir=$HOME/pgxc/nodes/coord_slave_cad1
+cad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)
+cad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1
+cad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)
+
+
+#---- Datanodes -------------------------------------------------------------------------------------------------------
+
+#---- Shortcuts --------------
+datanodeMasterDir=$HOME/pgxc/nodes/dn_master
+datanodeSlaveDir=$HOME/pgxc/nodes/dn_slave
+datanodeArchLogDir=$HOME/pgxc/nodes/datanode_archlog
+
+#---- Overall ---------------
+#primaryDatanode=datanode1 # Primary Node.
+# At present, xc has a problem to issue ALTER NODE against the primary node. Until it is fixed, the test will be done
+# without this feature.
+primaryDatanode=datanode1 # Primary Node.
+datanodeNames=(datanode1 datanode2 datanode3 datanode4)
+datanodePorts=(20008 20009 20008 20009) # Master and slave use the same port!
+datanodePgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This list sets up pg_hba.conf for $pgxcOwner user.
+ # If you'd like to setup other entries, supply them
+ # through extra configuration files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and suplly what you want using datanodeExtraPgHba
+# and/or datanodeSpecificExtraPgHba variables.
+
+#---- Master ----------------
+datanodeMasterServers=(node06 node07 node08 node09)	# none means this master is not available.
+													# This means that there should be the master but is down.
+													# The cluster is not operational until the master is
+													# recovered and ready to run.
+datanodeMasterDirs=($datanodeMasterDir $datanodeMasterDir $datanodeMasterDir $datanodeMasterDir)
+datanodeMaxWalSender=5	# max_wal_senders: needed to configure slave. If zero value is
+						# specified, it is expected this parameter is explicitly supplied
+						# by external configuration files.
+						# If you don't configure slaves, leave this value zero.
+# Fixed casing: was "datanodeMaxWalSenders"; pgxc_ctl's variable name is
+# "datanodeMaxWALSenders" (as in the embedded configuration template), so the
+# lowercase spelling would not be picked up.
+datanodeMaxWALSenders=($datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender)
+						# max_wal_senders configuration for each datanode
+
+#---- Slave -----------------
+datanodeSlave=y	# Specify y if you configure at least one datanode slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then datanodeSlave value will be set to n and all the following values will be set to
+ # empty values.
+datanodeSlaveServers=(node07 node08 node09 node06) # value none means this slave is not available
+datanodeSlaveDirs=($datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir)
+datanodeArchLogDirs=( $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir )
+
+# ---- Configuration files ---
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.
+# These files will go to corresponding files for the master.
+# Or you may supply these files manually.
+datanodeExtraConfig=none # Extra configuration file for datanodes. This file will be added to all the
+ # datanodes' postgresql.conf
+datanodeSpecificExtraConfig=(none none none none)
+datanodeExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the datanodes' postgresql.conf
+datanodeSpecificExtraPgHba=(none none none none)
+
+#----- Additional Slaves -----
+datanodeAdditionalSlaves=n # Additional slave can be specified as follows: where you
+# datanodeAdditionalSlaveSet=(dad1 dad2) # Each specifies set of slaves. This case, two set of slaves are
+ # configured
+# dad1_Sync=n # All the slaves at "cad1" are connected with asynchronous mode.
+ # If not, specify "y"
+ # The following lines specifies detailed configuration for each
+ # slave tag, cad1. You can define cad2 similarly.
+# dad1_Servers=(node08 node09 node06 node07) # Hosts
+# dad1_dir=$HOME/pgxc/nodes/coord_slave_cad1
+# dad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)
+# dad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1
+# dad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)
+
+#---- WAL archives -------------------------------------------------------------------------------------------------
+walArchive=n	# If you'd like to configure WAL archive, edit this section.
+				# Pgxc_ctl assumes that if you configure WAL archive, you configure it
+				# for all the coordinators and datanodes.
+				# Default is "no". Please specify "y" here to turn it on.
+# You can setup more than one backup set for various purposes, such as disaster recovery.
+walArchiveSet=(war1 war2)
+# Fixed prefix: the source variables were spelled "wal1_"/"wal2_" while every other
+# member of each archive set uses the "war1_"/"war2_" prefix declared in walArchiveSet.
+war1_source=(master)	# you can specify master, slave or any other additional slaves as a source of WAL archive.
+						# Default is the master
+war1_source=(slave)
+war1_source=(additional_coordinator_slave_set additional_datanode_slave_set)
+war1_host=node10	# All the nodes are backed up at the same host for a given archive set
+war1_backupdir=$HOME/pgxc/backup_war1
+war2_source=(master)
+war2_host=node11
+war2_backupdir=$HOME/pgxc/backup_war2
+#
+# End of Configuration Section
+#
+#==========================================================================================================================
+
+# Common variables ######################################################################
+xc_prompt='PGXC$ '
+interactive=n
+verbose=n
+progname=$0
+bin=pgxc_ctl # Just in case. Never touch this
+logfile=none
+
+# Create config file template
+#
+# If you change the structure of configuration file section, you must reflect the change in the part below.
+#
+function create_config_file_template
+{
+ cat > $configFile <<EOF
+#!/bin/bash
+#
+# TODO
+# * Backup configuration file (at update, too) to some remote site for pgxc_ctl HA feature too.
+# * Write output of *_ctl, intdb and intgtm result to log files
+# * Write every operation to the log. Log file can be specified with --l file or --log file
+# * Configure log level
+# * Switch log log file
+# * Log option to the configuration file so that this can be failed over.
+# * Log to a remote server?
+# * Multiple log?
+#
+# Configuration file. Configuration file can be specified as -c option of
+# the command like, or PGXCCONFIG environment variable. If both are
+# not specified, the following configuration file will be used.
+#
+# Change in the cluster status due to failover will be added to the configuration file so that
+# new master can be invoked as the master when restarted.
+#
+# All such addition will be tagged with proper comment and date/time info. If you'd like to
+# cancel such changes, you can remove or comment-out such additional lines.
+#
+#
+#==========================================================================================================================
+#
+# Configuration Section
+#
+# This section should be in the file $configFile for
+# user's configuration.
+#
+# Several assumptons:
+# 1) configuration file will be set to data directory.
+# configuration file name is fixed to postgresql.conf
+# 2) pg_hba.conf will be set to data directory. File name is
+# fixed to pg_hba.conf
+#
+#================================================================
+# MEMO
+#
+# max_connections, min_pool_size, max_pool_size --> should be configurable!
+# They're not cluster specific. So we may give a chance to include
+# these specific options to be included from external files.
+# They should not change by failover so they just have to be
+# configured at first time only.
+#===============================================================
+#
+#
+#---- Home dir of pgxc_ctl
+pgxc_ctl_home=$HOME/.pgxc_ctl
+#---- Configuration File
+pgxcInstallDir=$HOME/pgxc
+configFile=$pgxcInstallDir/pgxcConf
+#---- OVERALL -----------------------------------------------------------------------------------------------------------
+#
+pgxcOwner=pgxc # owner of the Postgres-XC database cluster. Here, we use this
+ # both as linus user and database user. This must be
+ # the super user of each coordinator and datanode.
+pgxcUser=$pgxcOwner # OS user of Postgres-XC owner
+
+tmpDir=/tmp # temporary dir used in XC servers
+localTmpDir=$tmpDir # temporary dir used here locally
+
+logOpt=y # If you want log
+logDir=$pgxc_ctl_home/pgxc_ctl_log # Directory to write pgxc_ctl logs
+
+configBackup=y # If you want config file backup
+configBackupHost=pgxc-linker # host to backup config file
+configBackupDir=$pgxcInstallDir
+configBackupFile=`basename $configFile` # Backup file name --> Need to synchronize when original changed.
+
+#---- GTM --------------------------------------------------------------------------------------------------------------
+
+# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.
+# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update
+# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command
+# will not stop the current GTM. It is up to the operator.
+
+#---- Overall -------
+gtmName=gtm
+
+#---- GTM Master -----------------------------------------------
+
+#---- Overall ----
+gtmMasterServer=node13
+gtmMasterPort=20001
+gtmMasterDir=$HOME/pgxc/nodes/gtm
+
+#---- Configuration ---
+gtmExtraConfig=none # Will be added gtm.conf for both Master and Slave (done at initilization only)
+gtmMasterSpecificExtraConfig=none # Will be added to Master's gtm.conf (done at initialization only)
+
+#---- GTM Slave -----------------------------------------------
+
+# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave
+# for backup.
+
+#---- Overall ------
+gtmSlave=y # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and
+ # all the following variables will be reset.
+gtmSlaveServer=node12 # value none means GTM slave is not available. Give none if you don't configure GTM Slave.
+gtmSlavePort=20001 # Not used if you don't configure GTM slave.
+gtmSlaveDir=$HOME/pgxc/nodes/gtm # Not used if you don't configure GTM slave.
+# Please note that when you have GTM failover, then there will be no slave available until you configure the slave
+# again. (pgxc_add_gtm_slave function will handle it)
+
+#---- Configuration ----
+gtmSlaveSpecificExtraConfig=none # Will be added to Slave's gtm.conf (done at initialization only)
+
+#---- GTM Proxy -------------------------------------------------------------------------------------------------------
+# GTM proxy will be selected based upon which server each component runs on.
+# When fails over to the slave, the slave inherits its master's gtm proxy. It should be
+# reconfigured based upon the new location.
+#
+# To do so, slave should be restarted. So pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart
+#
+# You don't have to configure GTM Proxy if you don't configure GTM slave or you are happy if every component connects
+# to GTM Master directly. If you configure GTM slave, you must configure GTM proxy too.
+
+#---- Shortcuts ------
+gtmProxyDir=$HOME/pgxc/nodes/gtm_pxy
+
+#---- Overall -------
+gtmProxy=y	# Specify y if you configure at least one GTM proxy. You may not configure gtm proxies
+		# only when you don't configure GTM slaves.
+ # If you specify this value not to y, the following parameters will be set to default empty values.
+ # If we find there're no valid Proxy server names (means, every servers are specified
+ # as none), then gtmProxy value will be set to "n" and all the entries will be set to
+ # empty values.
+gtmProxyNames=(gtm_pxy1 gtm_pxy2 gtm_pxy3 gtm_pxy4)	# Not used if it is not configured
+gtmProxyServers=(node06 node07 node08 node09)			# Specify none if you don't configure it.
+gtmProxyPorts=(20001 20001 20001 20001) # Not used if it is not configured.
+gtmProxyDirs=($gtmProxyDir $gtmProxyDir $gtmProxyDir $gtmProxyDir) # Not used if it is not configured.
+
+#---- Configuration ----
+gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy
+gtmPxySpecificExtraConfig=(none none none none)
+
+#---- Coordinators ----------------------------------------------------------------------------------------------------
+
+#---- shortcuts ----------
+coordMasterDir=$HOME/pgxc/nodes/coord
+coordSlaveDir=$HOME/pgxc/nodes/coord_slave
+coordArchLogDir=$HOME/pgxc/nodes/coord_archlog
+
+#---- Overall ------------
+coordNames=(coord1 coord2 coord3 coord4) # Master and slave use the same name
+coordPorts=(20004 20005 20004 20005) # Master and slave use the same port
+poolerPorts=(20010 20011 20010 20011) # Master and slave use the same pooler port
+coordPgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This entry allows only $pgxcOwner to connect.
+ # If you'd like to setup another connection, you should
+ # supply these entries through files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and supply what you want using coordExtraPgHba
+# and/or coordSpecificExtraPgHba variables.
+
+#---- Master -------------
+coordMasterServers=(node06 node07 node08 node09) # none means this master is not available
+coordMasterDirs=($coordMasterDir $coordMasterDir $coordMasterDir $coordMasterDir)
+coordMaxWALsernder=5 # max_wal_senders: needed to configure slave. If zero value is specified,
+ # it is expected to supply this parameter explicitly by external files
+ # specified in the following. If you don't configure slaves, leave this value to zero.
+coordMaxWALSenders=($coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder)
+ # max_wal_senders configuration for each coordinator.
+
+#---- Slave -------------
+coordSlave=y	# Specify y if you configure at least one coordinator slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then coordSlave value will be set to n and all the following values will be set to
+ # empty values.
+coordSlaveSync=y # Specify to connect with synchronized mode.
+coordSlaveServers=(node07 node08 node09 node06) # none means this slave is not available
+coordSlaveDirs=($coordSlaveDir $coordSlaveDir $coordSlaveDir $coordSlaveDir)
+coordArchLogDirs=($coordArchLogDir $coordArchLogDir $coordArchLogDir $coordArchLogDir)
+
+#---- Configuration files---
+# Need these when you'd like setup specific non-default configuration
+# These files will go to corresponding files for the master.
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries
+# Or you may supply these files manually.
+coordExtraConfig=none # Extra configuration file for coordinators. This file will be added to all the coordinators'
+ # postgresql.conf
+coordSpecificExraConfig=(none none none none)
+coordExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the coordinators' pg_hba.conf
+coordSpecificExtraPgHba=(none none none none)
+
+#----- Additional Slaves -----
+coordAdditionalSlaveSet=n # Additional slave can be specified as follows: where you
+# coordAdditionalSlaveSet=(cad1 cad2) # Each specifies set of slaves. This case, two set of slaves are
+ # configured
+# cad1_Sync=n # All the slaves at "cad1" are connected with asynchronous mode.
+ # If not, specify "y"
+ # The following lines specifies detailed configuration for each
+ # slave tag, cad1. You can define cad2 similarly.
+# cad1_Servers=(node08 node09 node06 node07) # Hosts
+# cad1_dir=$HOME/pgxc/nodes/coord_slave_cad1
+# cad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)
+# cad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1
+# cad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)
+
+
+#---- Datanodes -------------------------------------------------------------------------------------------------------
+
+#---- Shortcuts --------------
+datanodeMasterDir=$HOME/pgxc/nodes/dn_master
+datanodeSlaveDir=$HOME/pgxc/nodes/dn_slave
+datanodeArchLogDir=$HOME/pgxc/nodes/datanode_archlog
+
+#---- Overall ---------------
+#primaryDatanode=datanode1 # Primary Node.
+# At present, xc has a problem to issue ALTER NODE against the primary node. Until it is fixed, the test will be done
+# without this feature.
+primaryDatanode=datanode1 # Primary Node.
+datanodeNames=(datanode1 datanode2 datanode3 datanode4)
+datanodePorts=(20008 20009 20008 20009) # Master and slave use the same port!
+datanodePgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This list sets up pg_hba.conf for $pgxcOwner user.
+ # If you'd like to setup other entries, supply them
+ # through extra configuration files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and supply what you want using datanodeExtraPgHba
+# and/or datanodeSpecificExtraPgHba variables.
+
+#---- Master ----------------
+datanodeMasterServers=(node06 node07 node08 node09) # none means this master is not available.
+ # This means that there should be the master but is down.
+ # The cluster is not operational until the master is
+ # recovered and ready to run.
+datanodeMasterDirs=($datanodeMasterDir $datanodeMasterDir $datanodeMasterDir $datanodeMasterDir)
+datanodeMaxWalSender=5 # max_wal_senders: needed to configure slave. If zero value is
+ # specified, it is expected this parameter is explicitly supplied
+ # by external configuration files.
+ # If you don't configure slaves, leave this value zero.
+datanodeMaxWalSenders=($datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender)
+ # max_wal_senders configuration for each datanode
+
+#---- Slave -----------------
+datanodeSlave=y	# Specify y if you configure at least one datanode slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then datanodeSlave value will be set to n and all the following values will be set to
+ # empty values.
+datanodeSlaveServers=(node07 node08 node09 node06) # value none means this slave is not available
+datanodeSlaveDirs=($datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir)
+datanodeArchLogDirs=( $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir )
+
+# ---- Configuration files ---
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.
+# These files will go to corresponding files for the master.
+# Or you may supply these files manually.
+datanodeExtraConfig=none # Extra configuration file for datanodes. This file will be added to all the
+ # datanodes' postgresql.conf
+datanodeSpecificExtraConfig=(none none none none)
+datanodeExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the datanodes' postgresql.conf
+datanodeSpecificExtraPgHba=(none none none none)
+
+#----- Additional Slaves -----
+datanodeAdditionalSlaveSet=n # Additional slave can be specified as follows: where you
+# datanodeAdditionalSlaveSet=(dad1 dad2) # Each specifies set of slaves. This case, two set of slaves are
+ # configured
+# dad1_Sync=n # All the slaves at "cad1" are connected with asynchronous mode.
+ # If not, specify "y"
+ # The following lines specifies detailed configuration for each
+ # slave tag, cad1. You can define cad2 similarly.
+# dad1_Servers=(node08 node09 node06 node07) # Hosts
+# dad1_dir=$HOME/pgxc/nodes/coord_slave_cad1
+# dad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)
+# dad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1
+# dad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)
+
+#---- WAL archives -------------------------------------------------------------------------------------------------
+walArchive=n # If you'd like to configure WAL archive, edit this section.
+ # Pgxc_ctl assumes that if you configure WAL archive, you configure it
+ # for all the coordinators and datanodes.
+ # Default is "no". Please specify "y" here to turn it on.
+# You can setup more than one backup set for various purposes, such as disaster recovery.
+walArchiveSet=(war1 war2)
+war1_source=master # you can specify master, slave or any other additional slave set as a source of WAL archive.
+                   # Default is the master
+# war1_source=slave # example: use the slave as the source instead
+# war1_source=(additional_coordinator_slave_set additional_datanode_slave_set)
+war1_host=node10 # All the nodes are backed up at the same host for a given archive set
+war1_backupdir=$HOME/pgxc/backup_war1
+war2_source=master
+war2_host=node11
+war2_backupdir=$HOME/pgxc/backup_war2
+#
+# End of Configuration Section
+#
+#==========================================================================================================================
+EOF
+ chmod +x $configFile
+}
+
+
+#===========================================================
+#
+# Extract parsed configuration values
+#
+#===========================================================
+
+# $1 is variable name of the array to print
+function print_array
+{
+    echo -n $1 " "	# print the array's name followed by a space (no newline)
+    eval echo '$'{$1[@]}	# then expand the named array and print all its elements
+}
+
+
+function print_values
+{
+    # Dump every parsed configuration value, one "name value(s)" pair per line,
+    # for consumption by the pgxc_ctl C front end.
+    local i
+    declare -i i
+    local el
+
+    # Home
+    echo pgxc_ctl_home $pgxc_ctl_home
+    echo pgxcInstallDir $pgxcInstallDir
+    echo configFile $configFile
+
+    # Overall
+    echo pgxcOwner $pgxcOwner
+    echo pgxcUser $pgxcUser
+    echo tmpDir $tmpDir
+    echo localTmpDir $localTmpDir
+    echo logOpt $logOpt
+    echo logDir $logDir
+    echo configBackup $configBackup
+    echo configBackupHost $configBackupHost
+    echo configBackupDir $configBackupDir
+    echo configBackupFile $configBackupFile
+
+    # GTM overall
+    echo gtmName $gtmName
+
+    # GTM master
+    echo gtmMasterServer $gtmMasterServer
+    echo gtmMasterPort $gtmMasterPort
+    echo gtmMasterDir $gtmMasterDir
+    echo gtmExtraConfig $gtmExtraConfig
+    echo gtmMasterSpecificExtraConfig $gtmMasterSpecificExtraConfig
+
+    # GTM slave
+    echo gtmSlave $gtmSlave
+    echo gtmSlaveServer $gtmSlaveServer
+    echo gtmSlavePort $gtmSlavePort
+    echo gtmSlaveDir $gtmSlaveDir
+    echo gtmSlaveSpecificExtraConfig $gtmSlaveSpecificExtraConfig
+
+    # GTM Proxy
+    echo gtmProxy $gtmProxy
+    print_array gtmProxyNames
+    print_array gtmProxyServers
+    print_array gtmProxyPorts
+    print_array gtmProxyDirs
+    echo gtmPxyExtraConfig $gtmPxyExtraConfig
+    print_array gtmPxySpecificExtraConfig
+
+    # Coordinators overall
+    print_array coordNames
+    print_array coordPorts
+    print_array poolerPorts
+    print_array coordPgHbaEntries
+
+    # Coordinators master
+    print_array coordMasterServers
+    print_array coordMasterDirs
+    print_array coordMaxWALSenders
+
+    # Coordinators slave
+    echo coordSlave $coordSlave
+    echo coordSlaveSync $coordSlaveSync
+    print_array coordSlaveDirs	# NOTE(review): coordSlaveServers is not printed here, unlike pgxc_ctl_bash_2 -- confirm intended
+    print_array coordArchLogDirs
+
+    # Coordinator Configuration files
+    echo coordExtraConfig $coordExtraConfig
+    print_array coordSpecificExraConfig	# spelling matches the conf template's variable name
+    echo coordExtraPgHba $coordExtraPgHba
+    print_array coordSpecificExtraPgHba
+
+    # Coordinator Additional Slaves
+    echo coordAdditionalSlaves $coordAdditionalSlaves
+    if [ "$coordAdditionalSlaves" == "y" ]; then	# quoted: unset variable would otherwise break the test
+	print_array coordAdditionalSlaveSet
+	for ((i=0; i<${#coordAdditionalSlaveSet[@]}; i++)); do
+	    el=${coordAdditionalSlaveSet[$i]}
+	    echo -n ${el}_Sync " "
+	    eval echo '$'"$el"_Sync
+	    print_array ${el}_Servers
+	    print_array ${el}_Dirs
+	    print_array ${el}_ArchLogDirs
+	done
+    fi
+
+    # Datanodes overall
+    echo primaryDatanode $primaryDatanode
+    print_array datanodeNames
+    print_array datanodePorts
+    print_array datanodePgHbaEntries
+
+    # Datanodes masters
+    print_array datanodeMasterServers
+    print_array datanodeMasterDirs
+    print_array datanodeMaxWalSenders
+
+    # Datanodes slaves
+    echo datanodeSlave $datanodeSlave
+    print_array datanodeSlaveServers
+    print_array datanodeSlaveDirs
+    print_array datanodeArchLogDirs
+
+    # Datanode configuration files
+    echo datanodeExtraConfig $datanodeExtraConfig
+    print_array datanodeSpecificExtraConfig
+    echo datanodeExtraPgHba $datanodeExtraPgHba
+    print_array datanodeSpecificExtraPgHba
+
+    # Datanodes additional slaves
+    echo datanodeAdditionalSlaves $datanodeAdditionalSlaves
+    if [ "$datanodeAdditionalSlaves" == "y" ]; then	# quoted: unset variable would otherwise break the test
+	print_array datanodeAdditionalSlaveSet
+	for ((i=0; i<${#datanodeAdditionalSlaveSet[@]}; i++)); do
+	    el=${datanodeAdditionalSlaveSet[$i]}
+	    echo -n ${el}_Sync " "
+	    eval echo '$'"$el"_Sync
+	    print_array ${el}_Servers
+	    print_array ${el}_Dirs
+	    print_array ${el}_ArchLogDirs
+	done
+    fi
+
+    # WAL Archives
+    echo walArchive $walArchive
+    print_array walArchiveSet
+    if [ "$walArchive" == "y" ]; then
+	for ((i=0; i<${#walArchiveSet[@]}; i++)); do	# was misspelled walArchvieSet, making the loop a no-op
+	    el=${walArchiveSet[$i]}; print_array ${el}_source	# el was never assigned here before
+	    echo -n ${el}_host " "
+	    eval echo '$'"$el"_host
+	    echo -n ${el}_backupdir " "
+	    eval echo '$'"$el"_backupdir
+	done
+    fi
+
+    # Other options
+    echo xc_prompt $xc_prompt
+    echo verbose $verbose
+    echo bin $bin
+}
+
+
+
+#============================================================
+#
+# Common functions
+#
+#============================================================
+
+# Optionally $1 will be $PGXC_CTL_HOME settings.
+function set_home
+{
+    if [ $# -gt 1 ]; then	# was "[ $# > 1 ]": ">" inside [ ] is a redirection, so the test was always true
+	echo "Invalid set_home function call"
+	return 1
+    fi
+    if [ $# == 1 ]; then
+	if [ -d "$1" ]; then
+	    pgxc_ctl_home=$1
+	else
+	    eecho "set_home: $1 is not a directory."
+	    return 1
+	fi
+    elif [ "$PGXC_CTL_HOME" != "" ]; then	# quoted: unset PGXC_CTL_HOME would otherwise be a syntax error here
+	if [ -d "$PGXC_CTL_HOME" ]; then
+	    pgxc_ctl_home=$PGXC_CTL_HOME
+	else
+	    eecho "set_home: env PGXC_CTL_HOME($PGXC_CTL_HOME) is not a directory."
+	    return 1;
+	fi
+    fi
+    cd $pgxc_ctl_home;
+}
+
+###############################################################################
+#
+# EXECUTING SECTION
+#
+###############################################################################
+
+#=======================================================
+# Things to be done at first
+#=======================================================
+
+# Handle options
+progname=$0
+moretodo=y
+cmd_with_log=null
+#set_home
+if [ -f $pgxc_ctl_home/.pgxc_ctl_rc ]; then
+    source $pgxc_ctl_home/.pgxc_ctl_rc
+fi
+
+while [ $moretodo == y ]; do
+    if [ $# -gt 0 ]; then
+	case $1 in
+	    -v )
+		shift;
+		verbose=y;
+		continue;;
+	    --verbose )
+		shift;
+		verbose=y;
+		continue;;
+	    --silent )
+		shift; verbose=n;	# shift was missing: without it this option looped forever
+		continue;;
+	    -d ) # debug option
+		shift;
+		DEBUG=y;
+		continue;;
+	    --debug )
+		shift;
+		DEBUG=y;
+		continue;;
+	    -c ) # Configuration file
+		shift;
+		if [ $# -le 0 ]; then
+		    echo ERROR: no -c option value found
+		    exit 1
+		else
+		    configFile=$1
+		    shift
+		fi;
+		continue;;
+	    --configuration ) # Configuration file
+		shift;
+		if [ $# -le 0 ]; then
+		    echo ERROR: no --configuration option value found
+		    exit 1
+		else
+		    configFile=$1
+		    shift
+		fi;
+		continue;;
+	    --home ) # PGXC_CTL_HOME
+		shift;
+		if [ $# -le 0 ]; then
+		    echo ERROR: no pgxc_ctl_home specified
+		    exit 1
+		else
+		    pgxc_ctl_home=$1
+		    cd $pgxc_ctl_home
+		    shift
+		fi;
+		continue;;
+	    * )
+		moretodo=n
+		continue;;
+	esac
+    else
+	moretodo=n
+    fi
+done
+
+# Read configuration file --> Should be activated only when debug option is off
+if [ -f "$configFile" ]; then	# quoted: with configFile unset, bare [ -f ] would evaluate to true
+    source $configFile
+fi
+# Log option can be overridden by command-line option
+
+print_values
+
diff --git a/contrib/pgxc_ctl/pgxc_ctl_bash_2 b/contrib/pgxc_ctl/pgxc_ctl_bash_2
new file mode 100755
index 0000000000..4c47568f08
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_bash_2
@@ -0,0 +1,302 @@
+#!/bin/bash
+# Common variables ######################################################################
+xc_prompt='PGXC$ '	# prompt string shown in interactive mode
+interactive=n		# run non-interactively by default
+verbose=n		# verbose output off by default
+progname=$0		# name this script was invoked as
+
+bin=pgxc_ctl # Just in case. Never touch this
+logfile=none	# log file name; none means no log file
+
+#===========================================================
+#
+# Extract parsed configuration values
+#
+#===========================================================
+
+# $1 is variable name of the array to print
+function print_array
+{
+    echo -n $1 " "	# print the array's name followed by a space (no newline)
+    eval echo '$'{$1[@]}	# then expand the named array and print all its elements
+}
+
+
+function print_values
+{
+    # Dump every parsed configuration value, one "name value(s)" pair per line,
+    # for consumption by the pgxc_ctl C front end.
+    local i
+    declare -i i
+    local el
+
+    # Install Directory
+    echo pgxcInstallDir $pgxcInstallDir
+
+    # Overall
+    echo pgxcOwner $pgxcOwner
+    echo pgxcUser $pgxcUser
+    echo tmpDir $tmpDir
+    echo localTmpDir $localTmpDir
+    echo configBackup $configBackup
+    echo configBackupHost $configBackupHost
+    echo configBackupDir $configBackupDir
+    echo configBackupFile $configBackupFile
+
+    # GTM overall
+    echo gtmName $gtmName
+
+    # GTM master
+    echo gtmMasterServer $gtmMasterServer
+    echo gtmMasterPort $gtmMasterPort
+    echo gtmMasterDir $gtmMasterDir
+    echo gtmExtraConfig $gtmExtraConfig
+    echo gtmMasterSpecificExtraConfig $gtmMasterSpecificExtraConfig
+
+    # GTM slave
+    echo gtmSlave $gtmSlave
+    echo gtmSlaveServer $gtmSlaveServer
+    echo gtmSlavePort $gtmSlavePort
+    echo gtmSlaveDir $gtmSlaveDir
+    echo gtmSlaveSpecificExtraConfig $gtmSlaveSpecificExtraConfig
+
+    # GTM Proxy
+    echo gtmProxy $gtmProxy
+    print_array gtmProxyNames
+    print_array gtmProxyServers
+    print_array gtmProxyPorts
+    print_array gtmProxyDirs
+    echo gtmPxyExtraConfig $gtmPxyExtraConfig
+    print_array gtmPxySpecificExtraConfig
+
+    # Coordinators overall
+    print_array coordNames
+    print_array coordPorts
+    print_array poolerPorts
+    print_array coordPgHbaEntries
+
+    # Coordinators master
+    print_array coordMasterServers
+    print_array coordMasterDirs
+    print_array coordMaxWALSenders
+
+    # Coordinators slave
+    echo coordSlave $coordSlave
+    echo coordSlaveSync $coordSlaveSync
+    print_array coordSlaveServers
+    print_array coordSlaveDirs
+    print_array coordArchLogDirs
+
+    # Coordinator Configuration files
+    echo coordExtraConfig $coordExtraConfig
+    print_array coordSpecificExtraConfig	# NOTE(review): conf template spells this coordSpecificExraConfig -- confirm which the parser expects
+    echo coordExtraPgHba $coordExtraPgHba
+    print_array coordSpecificExtraPgHba
+
+    # Coordinator Additional Slaves
+    echo coordAdditionalSlaves $coordAdditionalSlaves
+    if [ "$coordAdditionalSlaves" == "y" ]; then
+	print_array coordAdditionalSlaveSet
+	for ((i=0; i<${#coordAdditionalSlaveSet[@]}; i++)); do
+	    el=${coordAdditionalSlaveSet[$i]}
+	    echo -n ${el}_Sync " "
+	    eval echo '$'"$el"_Sync
+	    print_array ${el}_Servers
+	    print_array ${el}_Dirs
+	    print_array ${el}_ArchLogDirs
+	done
+    fi
+
+    # Datanodes overall
+    echo primaryDatanode $primaryDatanode
+    print_array datanodeNames
+    print_array datanodePorts
+    print_array datanodePgHbaEntries
+
+    # Datanodes masters
+    print_array datanodeMasterServers
+    print_array datanodeMasterDirs
+    print_array datanodeMaxWALSenders	# NOTE(review): conf template defines datanodeMaxWalSenders (lowercase 'al') -- confirm against varnames.h
+
+    # Datanodes slaves
+    echo datanodeSlave $datanodeSlave
+    echo datanodeSlaveSync $datanodeSlaveSync	# NOTE(review): not defined in the visible conf template -- confirm
+    print_array datanodeSlaveServers
+    print_array datanodeSlaveDirs
+    print_array datanodeArchLogDirs
+
+    # Datanode configuration files
+    echo datanodeExtraConfig $datanodeExtraConfig
+    print_array datanodeSpecificExtraConfig
+    echo datanodeExtraPgHba $datanodeExtraPgHba
+    print_array datanodeSpecificExtraPgHba
+
+    # Datanodes additional slaves
+    echo datanodeAdditionalSlaves $datanodeAdditionalSlaves
+    if [ "$datanodeAdditionalSlaves" == "y" ]; then
+	print_array datanodeAdditionalSlaveSet
+	for ((i=0; i<${#datanodeAdditionalSlaveSet[@]}; i++)); do
+	    el=${datanodeAdditionalSlaveSet[$i]}
+	    echo -n ${el}_Sync " "
+	    eval echo '$'"$el"_Sync
+	    print_array ${el}_Servers
+	    print_array ${el}_Dirs
+	    print_array ${el}_ArchLogDirs
+	done
+    fi
+
+    # WAL Archives
+    echo walArchive $walArchive
+    print_array walArchiveSet
+    if [ "$walArchive" == "y" ]; then
+	for ((i=0; i<${#walArchiveSet[@]}; i++)); do	# was misspelled walArchvieSet, making the loop a no-op
+	    el=${walArchiveSet[$i]}; print_array ${el}_source	# el was never assigned here before
+	    echo -n ${el}_host " "
+	    eval echo '$'"$el"_host
+	    echo -n ${el}_backupdir " "
+	    eval echo '$'"$el"_backupdir
+	done
+    fi
+}
+
+
+
+#============================================================
+#
+# Common functions
+#
+#============================================================
+
+# Optionally $1 will be $PGXC_CTL_HOME settings.
+function set_home
+{
+    if [ $# -gt 1 ]; then	# was "[ $# > 1 ]": ">" inside [ ] is a redirection, so the test was always true
+	echo "Invalid set_home function call"
+	return 1
+    fi
+    if [ $# == 1 ]; then
+	if [ -d "$1" ]; then
+	    pgxc_ctl_home=$1
+	else
+	    eecho "set_home: $1 is not a directory."
+	    return 1
+	fi
+    elif [ "$PGXC_CTL_HOME" != "" ]; then	# quoted: unset PGXC_CTL_HOME would otherwise be a syntax error here
+	if [ -d "$PGXC_CTL_HOME" ]; then
+	    pgxc_ctl_home=$PGXC_CTL_HOME
+	else
+	    eecho "set_home: env PGXC_CTL_HOME($PGXC_CTL_HOME) is not a directory."
+	    return 1;
+	fi
+    fi
+    cd $pgxc_ctl_home;
+}
+
+###############################################################################
+#
+# EXECUTING SECTION
+#
+###############################################################################
+
+#=======================================================
+# Things to be done at first
+#=======================================================
+
+# Handle options
+progname=$0
+moretodo=y
+cmd_with_log=null
+#set_home
+if [ -f $pgxc_ctl_home/.pgxc_ctl_rc ]; then
+    source $pgxc_ctl_home/.pgxc_ctl_rc
+fi
+
+configFile=""
+
+while [ $moretodo == y ]; do
+    if [ $# -gt 0 ]; then
+	case $1 in
+	    -v )
+		shift;
+		verbose=y;
+		continue;;
+	    --verbose )
+		shift;
+		verbose=y;
+		continue;;
+	    --silent )
+		shift; verbose=n;	# shift was missing: without it this option looped forever
+		continue;;
+	    -d ) # debug option
+		shift;
+		DEBUG=y;
+		continue;;
+	    --debug )
+		shift;
+		DEBUG=y;
+		continue;;
+	    -c ) # Configuration file
+		shift;
+		if [ $# -le 0 ]; then
+		    echo ERROR: no -c option value found
+		    exit 1
+		else
+		    configFile=$1
+		    shift
+		fi;
+		continue;;
+	    --configuration ) # Configuration file
+		shift;
+		if [ $# -le 0 ]; then
+		    echo ERROR: no --configuration option value found
+		    exit 1
+		else
+		    configFile=$1
+		    shift
+		fi;
+		continue;;
+	    --home ) # PGXC_CTL_HOME
+		shift;
+		if [ $# -le 0 ]; then
+		    echo ERROR: no pgxc_ctl_home specified
+		    exit 1
+		else
+		    pgxc_ctl_home=$1
+		    cd $pgxc_ctl_home
+		    shift
+		fi;
+		continue;;
+	    --signature ) # Check signature
+		shift;
+		if [ $# -le 0 ]; then
+		    echo ERROR: Signature does not match
+		    exit 1
+		fi
+		if [ "$1" != "$signature" ]; then
+		    echo ERROR: Signature does not match
+		    exit 1
+		fi
+		shift
+		continue;;
+	    * )
+		moretodo=n
+		continue;;
+	esac
+    else
+	moretodo=n
+    fi
+done
+
+echo $signature
+# Read configuration file --> Should be activated only when debug option is off
+
+if [ -f $pgxc_ctl_home/pgxc_ctl_rc ]; then	# NOTE(review): no leading dot here, unlike .pgxc_ctl_rc above -- confirm intended
+    source $pgxc_ctl_home/pgxc_ctl_rc
+fi
+
+if [ "$configFile" != "" ] && [ -f "$configFile" ]; then
+    source $configFile
+fi
+# Log option can be overridden by command-line option
+
+print_values
+
+
diff --git a/contrib/pgxc_ctl/pgxc_ctl_conf_part b/contrib/pgxc_ctl/pgxc_ctl_conf_part
new file mode 100755
index 0000000000..11431ae2da
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_conf_part
@@ -0,0 +1,318 @@
+#!/bin/bash
+#
+# Postgres-XC Configuration file for pgxc_ctl utility.
+#
+# Configuration file can be specified as -c option from pgxc_ctl command. Default is
+# $PGXC_CTL_HOME/pgxc_ctl.org.
+#
+# This is bash script so you can make any addition for your convenience to configure
+# your Postgres-XC cluster.
+#
+# Please understand that pgxc_ctl supports only a subset of the configuration which
+# Postgres-XC provides. Here are several assumptions/restrictions pgxc_ctl depends on.
+#
+# 1) All the resources of pgxc nodes has to be owned by the same user. Same user means
+# user with the same user name. User ID may be different from server to server.
+# This must be specified as a variable $pgxcOwner.
+#
+# 2) All the servers must be reacheable via ssh without password. It is highly recommended
+# to setup key-based authentication among all the servers.
+#
+# 3) All the databases in coordinator/datanode has at least one same superuser. Pgxc_ctl
+# uses this user to connect to coordinators and datanodes. Again, no password should
+# be used to connect. You have many options to do this, pg_hba.conf, pg_ident.conf and
+# others. Pgxc_ctl provides a way to configure pg_hba.conf but not pg_ident.conf. This
+# will be implemented in the later releases.
+#
+# 4) Gtm master and slave can have different port to listen, while coordinator and datanode
+# slave should be assigned the same port number as master.
+#
+# 5) Port nuber of a coordinator slave must be the same as its master.
+#
+# 6) Master and slave are connected using synchronous replication. Asynchronous replication
+# have slight (almost none) chance to bring total cluster into inconsistent state.
+# This chance is very low and may be negligible. Support of asynchronous replication
+# may be supported in the later release.
+#
+# 7) Each coordinator and datanode can have only one slave each. Cascaded replication and
+# multiple slave are not supported in the current pgxc_ctl.
+#
+# 8) Killing nodes may end up with IPC resource leak, such as semaphores and shared memory.
+# Only listening port (socket) will be cleaned with clean command.
+#
+# 9) Backup and restore are not supported in pgxc_ctl at present. This is a big task and
+# may need considerable resource.
+#
+#========================================================================================
+#
+#
+# pgxcInstallDir variable is needed if you invoke "deploy" command from pgxc_ctl utility.
+# If don't you don't need this variable.
+pgxcInstallDir=$HOME/pgxc
+#---- OVERALL -----------------------------------------------------------------------------
+#
+pgxcOwner=koichi			# owner of the Postgres-XC database cluster. Here, we use this
+					# both as Linux user and database user. This must be
+ # the super user of each coordinator and datanode.
+pgxcUser=$pgxcOwner # OS user of Postgres-XC owner
+
+tmpDir=/tmp # temporary dir used in XC servers
+localTmpDir=$tmpDir # temporary dir used here locally
+
+configBackup=n # If you want config file backup, specify y to this value.
+configBackupHost=pgxc-linker # host to backup config file
+configBackupDir=$HOME/pgxc # Backup directory
+configBackupFile=pgxc_ctl.bak # Backup file name --> Need to synchronize when original changed.
+
+#---- GTM ------------------------------------------------------------------------------------
+
+# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.
+# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update
+# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command
+# will not stop the current GTM. It is up to the operator.
+
+#---- Overall -------
+gtmName=gtm
+
+#---- GTM Master -----------------------------------------------
+
+#---- Overall ----
+gtmMasterServer=node13
+gtmMasterPort=20001
+gtmMasterDir=$HOME/pgxc/nodes/gtm
+
+#---- Configuration ---
+gtmExtraConfig=none # Will be added gtm.conf for both Master and Slave (done at initilization only)
+gtmMasterSpecificExtraConfig=none # Will be added to Master's gtm.conf (done at initialization only)
+
+#---- GTM Slave -----------------------------------------------
+
+# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave
+# for backup.
+
+#---- Overall ------
+gtmSlave=y # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and
+ # all the following variables will be reset.
+gtmSlaveServer=node12 # value none means GTM slave is not available. Give none if you don't configure GTM Slave.
+gtmSlavePort=20001 # Not used if you don't configure GTM slave.
+gtmSlaveDir=$HOME/pgxc/nodes/gtm # Not used if you don't configure GTM slave.
+# Please note that when you have GTM failover, then there will be no slave available until you configure the slave
+# again. (pgxc_add_gtm_slave function will handle it)
+
+#---- Configuration ----
+gtmSlaveSpecificExtraConfig=none # Will be added to Slave's gtm.conf (done at initialization only)
+
+#---- GTM Proxy -------------------------------------------------------------------------------------------------------
+# GTM proxy will be selected based upon which server each component runs on.
+# When fails over to the slave, the slave inherits its master's gtm proxy. It should be
+# reconfigured based upon the new location.
+#
+# To do so, slave should be restarted. So pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart
+#
+# You don't have to configure GTM Proxy if you dont' configure GTM slave or you are happy if every component connects
+# to GTM Master directly. If you configure GTL slave, you must configure GTM proxy too.
+
+#---- Shortcuts ------
+gtmProxyDir=$HOME/pgxc/nodes/gtm_pxy
+
+#---- Overall -------
+gtmProxy=y # Specify y if you conifugre at least one GTM proxy. You may not configure gtm proxies
+ # only when you dont' configure GTM slaves.
+ # If you specify this value not to y, the following parameters will be set to default empty values.
+ # If we find there're no valid Proxy server names (means, every servers are specified
+ # as none), then gtmProxy value will be set to "n" and all the entries will be set to
+ # empty values.
+gtmProxyNames=(gtm_pxy1 gtm_pxy2 gtm_pxy3 gtm_pxy4) # No used if it is not configured
+gtmProxyServers=(node06 node07 node08 node09) # Specify none if you dont' configure it.
+gtmProxyPorts=(20001 20001 20001 20001) # Not used if it is not configured.
+gtmProxyDirs=($gtmProxyDir $gtmProxyDir $gtmProxyDir $gtmProxyDir) # Not used if it is not configured.
+
+#---- Configuration ----
+gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy. Coordinator section has an example.
+gtmPxySpecificExtraConfig=(none none none none)
+
+#---- Coordinators ----------------------------------------------------------------------------------------------------
+
+#---- shortcuts ----------
+coordMasterDir=$HOME/pgxc/nodes/coord
+coordSlaveDir=$HOME/pgxc/nodes/coord_slave
+coordArchLogDir=$HOME/pgxc/nodes/coord_archlog
+
+#---- Overall ------------
+coordNames=(coord1 coord2 coord3 coord4) # Master and slave use the same name
+coordPorts=(20004 20005 20004 20005) # Master and slave use the same port
+poolerPorts=(20010 20011 20010 20011) # Master and slave use the same pooler port
+coordPgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This entry allows only $pgxcOwner to connect.
+ # If you'd like to setup another connection, you should
+ # supply these entries through files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and suplly what you want using coordExtraPgHba
+# and/or coordSpecificExtraPgHba variables.
+
+#---- Master -------------
+coordMasterServers=(node06 node07 node08 node09) # none means this master is not available
+coordMasterDirs=($coordMasterDir $coordMasterDir $coordMasterDir $coordMasterDir)
+coordMaxWALsernder=5 # max_wal_senders: needed to configure slave. If zero value is specified,
+ # it is expected to supply this parameter explicitly by external files
+ # specified in the following. If you don't configure slaves, leave this value to zero.
+coordMaxWALSenders=($coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder $coordMaxWALsernder)
+ # max_wal_senders configuration for each coordinator.
+
+#---- Slave -------------
+coordSlave=y # Specify y if you configure at least one coordiantor slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then coordSlave value will be set to n and all the following values will be set to
+ # empty values.
+coordSlaveSync=y # Specify to connect with synchronized mode.
+coordSlaveServers=(node07 node08 node09 node06) # none means this slave is not available
+coordSlaveDirs=($coordSlaveDir $coordSlaveDir $coordSlaveDir $coordSlaveDir)
+coordArchLogDirs=($coordArchLogDir $coordArchLogDir $coordArchLogDir $coordArchLogDir)
+
+#---- Configuration files---
+# Need these when you'd like setup specific non-default configuration
+# These files will go to corresponding files for the master.
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries
+# Or you may supply these files manually.
+coordExtraConfig=coordExtraConfig # Extra configuration file for coordinators.
+ # This file will be added to all the coordinators'
+ # postgresql.conf
+# Pleae note that the following sets up minimum parameters which you may want to change.
+# You can put your postgresql.conf lines here.
+cat > $coordExtraConfig <<EOF
+#================================================
+# Added to all the coordinator postgresql.conf
+# Original: $coordExtraConfig
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+listen_addresses = '*'
+max_connections = 100
+EOF
+
+# Additional Configuration file for specific coordinator master.
+# You can define each setting by similar means as above.
+coordSpecificExtraConfig=(none none none none)
+coordExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the coordinators' pg_hba.conf
+coordSpecificExtraPgHba=(none none none none)
+
+#----- Additional Slaves -----
+#
+# Please note that this section is just a suggestion how we extend the configuration for
+# multiple and cascaded replication. They're not used in the current version.
+#
+coordAdditionalSlaves=n # Additional slave can be specified as follows: where you
+coordAdditionalSlaveSet=(cad1) # Each specifies set of slaves. This case, two set of slaves are
+ # configured
+cad1_Sync=n # All the slaves at "cad1" are connected with asynchronous mode.
+ # If not, specify "y"
+ # The following lines specifies detailed configuration for each
+ # slave tag, cad1. You can define cad2 similarly.
+cad1_Servers=(node08 node09 node06 node07) # Hosts
+cad1_dir=$HOME/pgxc/nodes/coord_slave_cad1
+cad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)
+cad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1
+cad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)
+
+
+#---- Datanodes -------------------------------------------------------------------------------------------------------
+
+#---- Shortcuts --------------
+datanodeMasterDir=$HOME/pgxc/nodes/dn_master
+datanodeSlaveDir=$HOME/pgxc/nodes/dn_slave
+datanodeArchLogDir=$HOME/pgxc/nodes/datanode_archlog
+
+#---- Overall ---------------
+#primaryDatanode=datanode1 # Primary Node.
+# At present, xc has a priblem to issue ALTER NODE against the primay node. Until it is fixed, the test will be done
+# without this feature.
+primaryDatanode=datanode1 # Primary Node.
+datanodeNames=(datanode1 datanode2 datanode3 datanode4)
+datanodePorts=(20008 20009 20008 20009) # Master and slave use the same port!
+datanodePoolerPorts=(20012 20013 20012 20013) # Master and slave use the same port!
+datanodePgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinator (master/slave) accepts
+ # the same connection
+ # This list sets up pg_hba.conf for $pgxcOwner user.
+ # If you'd like to setup other entries, supply them
+ # through extra configuration files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and suplly what you want using datanodeExtraPgHba
+# and/or datanodeSpecificExtraPgHba variables.
+
+#---- Master ----------------
+datanodeMasterServers=(node06 node07 node08 node09) # none means this master is not available.
+ # This means that there should be the master but is down.
+ # The cluster is not operational until the master is
+ # recovered and ready to run.
+datanodeMasterDirs=($datanodeMasterDir $datanodeMasterDir $datanodeMasterDir $datanodeMasterDir)
+datanodeMaxWalSender=5 # max_wal_senders: needed to configure slave. If zero value is
+ # specified, it is expected this parameter is explicitly supplied
+ # by external configuration files.
+ # If you don't configure slaves, leave this value zero.
+datanodeMaxWALSenders=($datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender)
+ # max_wal_senders configuration for each datanode
+
+#---- Slave -----------------
+datanodeSlave=y # Specify y if you configure at least one coordiantor slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every servers are specified as none),
+ # then datanodeSlave value will be set to n and all the following values will be set to
+ # empty values.
+datanodeSlaveServers=(node07 node08 node09 node06) # value none means this slave is not available
+datanodeSlaveSync=y # If datanode slave is connected in synchronized mode
+datanodeSlaveDirs=($datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir)
+datanodeArchLogDirs=( $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir )
+
+# ---- Configuration files ---
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.
+# These files will go to corresponding files for the master.
+# Or you may supply these files manually.
+datanodeExtraConfig=none # Extra configuration file for datanodes. This file will be added to all the
+ # datanodes' postgresql.conf
+datanodeSpecificExtraConfig=(none none none none)
+datanodeExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the datanodes' postgresql.conf
+datanodeSpecificExtraPgHba=(none none none none)
+
+#----- Additional Slaves -----
+datanodeAdditionalSlaves=n # Additional slave can be specified as follows: where you
+# datanodeAdditionalSlaveSet=(dad1 dad2) # Each specifies set of slaves. This case, two set of slaves are
+ # configured
+# dad1_Sync=n # All the slaves at "cad1" are connected with asynchronous mode.
+ # If not, specify "y"
+ # The following lines specifies detailed configuration for each
+ # slave tag, cad1. You can define cad2 similarly.
+# dad1_Servers=(node08 node09 node06 node07) # Hosts
+# dad1_dir=$HOME/pgxc/nodes/coord_slave_cad1
+# dad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)
+# dad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1
+# dad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)
+
+#---- WAL archives -------------------------------------------------------------------------------------------------
+walArchive=n # If you'd like to configure WAL archive, edit this section.
+ # Pgxc_ctl assumes that if you configure WAL archive, you configure it
+ # for all the coordinators and datanodes.
+ # Default is "no". Please specify "y" here to turn it on.
+#
+# End of Configuration Section
+#
+#==========================================================================================================================
+
+#========================================================================================================================
+# The following is for extension. Just demonstrate how to write such extension. There's no code
+# which takes care of them so please ignore the following lines. They are simply ignored by pgxc_ctl.
+# No side effects.
+#=============<< Beginning of future extension demonistration >> ========================================================
+# You can setup more than one backup set for various purposes, such as disaster recovery.
+walArchiveSet=(war1 war2)
+war1_source=(master) # you can specify master, slave or ano other additional slaves as a source of WAL archive.
+ # Default is the master
+war1_source=(slave)
+war1_source=(additional_coordinator_slave_set additional_datanode_slave_set)
+war1_host=node10 # All the nodes are backed up at the same host for a given archive set
+war1_backupdir=$HOME/pgxc/backup_war1
+war2_source=(master)
+war2_host=node11
+war2_backupdir=$HOME/pgxc/backup_war2
+#=============<< End of future extension demonistration >> ========================================================
diff --git a/contrib/pgxc_ctl/pgxc_ctl_log.c b/contrib/pgxc_ctl/pgxc_ctl_log.c
new file mode 100644
index 0000000000..8934dbfa3f
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_log.c
@@ -0,0 +1,333 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgxc_ctl_log.c
+ *
+ * Logging module of Postgres-XC configuration and operation tool.
+ *
+ *
+ * Portions Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * To allow mutiple pgxc_ctl to run in parallel and write a log to the same file,
+ * this module uses fctl to lock log I/O. You can lock/unlock in stack. Anyway
+ * actual lock will be captured/released at the bottom level of this stack.
+ * If you'd like to have a block of the logs to be in a single block, not interrupted
+ * bo other pgxc_ctl log, you should be careful to acquire the lock and release it
+ * reasonablly.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+
+#include "pgxc_ctl.h"
+#include "pgxc_ctl_log.h"
+#include "varnames.h"
+#include "variables.h"
+#include "config.h"
+#include "utils.h"
+
+FILE *logFile = NULL;
+char logFileName[MAXPATH+1];
+static char *pgxcCtlGetTime(void);
+static int lockStack = 0;
+#define lockStackLimit 8
+
+int logMsgLevel = INFO;
+int printMsgLevel = WARNING;
+int printLocation = FALSE;
+int logLocation = FALSE;
+
+
+/*
+ * Path is NULL if name is effective.
+ * Path is valid if name is NULL
+ */
+static void set_msgLogLevel(void)
+{
+ if (sval(VAR_logMessage) == NULL)
+ logMsgLevel = WARNING;
+ else if (strcasecmp(sval(VAR_logMessage), "panic") == 0)
+ logMsgLevel = PANIC;
+ else if (strcasecmp(sval(VAR_logMessage), "error") == 0)
+ logMsgLevel = ERROR;
+ else if (strcasecmp(sval(VAR_logMessage), "warning") == 0)
+ logMsgLevel = WARNING;
+ else if (strcasecmp(sval(VAR_logMessage), "notice") == 0)
+ logMsgLevel = NOTICE;
+ else if (strcasecmp(sval(VAR_logMessage), "info") == 0)
+ logMsgLevel = INFO;
+ else if (strcasecmp(sval(VAR_logMessage), "debug1") == 0)
+ logMsgLevel = DEBUG1;
+ else if (strcasecmp(sval(VAR_logMessage), "debug2") == 0)
+ logMsgLevel = DEBUG2;
+ else if (strcasecmp(sval(VAR_logMessage), "debug3") == 0)
+ logMsgLevel = DEBUG3;
+ else
+ logMsgLevel = INFO;
+}
+
+static void set_printLogLevel(void)
+{
+ if (sval(VAR_printMessage) == NULL)
+ printMsgLevel = ERROR;
+ else if (strcasecmp(sval(VAR_printMessage), "panic") == 0)
+ printMsgLevel = PANIC;
+ else if (strcasecmp(sval(VAR_printMessage), "error") == 0)
+ printMsgLevel = ERROR;
+ else if (strcasecmp(sval(VAR_printMessage), "warning") == 0)
+ printMsgLevel = WARNING;
+ else if (strcasecmp(sval(VAR_printMessage), "notice") == 0)
+ printMsgLevel = NOTICE;
+ else if (strcasecmp(sval(VAR_printMessage), "info") == 0)
+ printMsgLevel = INFO;
+ else if (strcasecmp(sval(VAR_printMessage), "debug1") == 0)
+ printMsgLevel = DEBUG1;
+ else if (strcasecmp(sval(VAR_printMessage), "debug2") == 0)
+ printMsgLevel = DEBUG2;
+ else if (strcasecmp(sval(VAR_printMessage), "debug3") == 0)
+ printMsgLevel = DEBUG3;
+ else
+ printMsgLevel = WARNING;
+}
+
+void initLog(char *path, char *name)
+{
+ if(logFile)
+ return;
+ if(name)
+ strncat(logFileName, name, MAXPATH);
+ else
+ snprintf(logFileName, MAXPATH, "%s/%d_pgxc_ctl.log", path, getpid());
+ if ((logFile = fopen(logFileName, "a")) == NULL)
+ fprintf(stderr, "Could not open log file %s, %s\n", logFileName, strerror(errno));
+ /* Setup log/print message level */
+ set_msgLogLevel();
+ set_printLogLevel();
+ printLocation = (isVarYes(VAR_printLocation)) ? TRUE : FALSE;
+ logLocation = (isVarYes(VAR_logLocation)) ? TRUE : FALSE;
+ lockStack = 0;
+}
+
+void closeLog()
+{
+ fclose(logFile);
+ logFile = NULL;
+}
+
/* Call-site information stashed by elog_start() and consumed (then freed)
 * by elogMsgRaw0() via clean_location().  Single static slot: not
 * thread-safe, and only valid between an elog_start() and the message
 * that follows it. */
static char *fname;
static char *funcname;
static int lineno;

/*
 * First half of the elog/elogMsg/elogFile macros: record the call site.
 * The strings are Strdup()ed here and released by clean_location().
 * NOTE(review): if elog_start() runs twice without an intervening emit
 * (e.g. the message is filtered by level), the previous strings leak --
 * confirm against elogFinish()'s level checks.
 */
void elog_start(const char *file, const char *func, int line)
{
    fname = Strdup(file);
    funcname = Strdup(func);
    lineno = line;
}

/* Release the stashed call-site strings and reset the line number. */
static void clean_location(void)
{
    freeAndReset(fname);
    freeAndReset(funcname);
    lineno = -1;
}
+
+
/*
 * Core emitter.  Writes msg to the log file when level passes
 * logMsgLevel, and to the console (outF, or stderr when outF is unset)
 * when it passes printMsgLevel.  flag selects whether the
 * file:function(line) location recorded by elog_start() is prepended
 * (it is suppressed when echoing file contents, see elogFileRaw()).
 * Always releases the stashed location afterwards.  Callers must hold
 * the log-file lock.
 */
static void elogMsgRaw0(int level, const char *msg, int flag)
{
    if (logFile && level >= logMsgLevel)
    {
	if (logLocation && flag)
	    fprintf(logFile, "%s(%d):%s %s:%s(%d) %s", progname, getpid(), pgxcCtlGetTime(),
		    fname, funcname, lineno, msg);
	else
	    fprintf(logFile, "%s(%d):%s %s", progname, getpid(), pgxcCtlGetTime(), msg);
	fflush(logFile);
    }
    if (level >= printMsgLevel)
    {
	if (printLocation && flag)
	    fprintf(((outF) ? outF : stderr), "%s:%s(%d) %s", fname, funcname, lineno, msg);
	else
	    fputs(msg, (outF) ? outF : stderr);
	fflush((outF) ? outF : stderr);
    }
    clean_location();
}
+
/*
 * Emit a preformatted message with call-site decoration, taking the
 * log-file lock around the write so the entry is not interleaved with
 * other pgxc_ctl processes.
 */
void elogMsgRaw(int level, const char *msg)
{
    lockLogFile();
    elogMsgRaw0(level, msg, TRUE);
    unlockLogFile();
}
+
+void elogFinish(int level, const char *fmt, ...)
+{
+ char msg[MAXLINE+1];
+ va_list arg;
+
+ lockLogFile();
+ if ((level >= logMsgLevel) || (level >= printMsgLevel))
+ {
+ va_start(arg, fmt);
+ vsnprintf(msg, MAXLINE, fmt, arg);
+ va_end(arg);
+ elogMsgRaw(level, msg);
+ }
+ unlockLogFile();
+}
+
+void elogFileRaw(int level, char *path)
+{
+ FILE *f;
+ char s[MAXLINE+1];
+
+ lockLogFile();
+ if ((f = fopen(path, "r")))
+ {
+ while(fgets(s, MAXLINE, f))
+ elogMsgRaw0(level, s, FALSE);
+ fclose(f);
+ }
+ else
+ elog(ERROR, "ERROR: Cannot open \"%s\" for read, %s\n", path, strerror(errno));
+ unlockLogFile();
+}
+
/* Static buffer returned by pgxcCtlGetTime(). */
static char timebuf[MAXTOKEN+1];

/*
 * Please note that this routine is not reentrant
 */
/*
 * Format the current local time into the static buffer as
 * "YYMMDDHHMM_SS" (the underscore sits before the seconds field).
 * For years >= 2000 the last two digits of the year are printed.
 * NOTE(review): for years before 2000 the raw tm_year offset-from-1900
 * would be printed instead -- presumably unreachable in practice, but
 * confirm if log timestamps are parsed anywhere.
 */
static char *pgxcCtlGetTime(void)
{
    struct tm *tm_s;
    time_t now;

    now = time(NULL);
    tm_s = localtime(&now);
/*	tm_s = gmtime(&now); */

    snprintf(timebuf, MAXTOKEN, "%02d%02d%02d%02d%02d_%02d",
	     ((tm_s->tm_year+1900) >= 2000) ? (tm_s->tm_year + (1900 - 2000)) : tm_s->tm_year,
	     tm_s->tm_mon+1, tm_s->tm_mday, tm_s->tm_hour, tm_s->tm_min, tm_s->tm_sec);
    return timebuf;
}
+
+void writeLogRaw(const char *fmt, ...)
+{
+ char msg[MAXLINE+1];
+ va_list arg;
+
+ va_start(arg, fmt);
+ vsnprintf(msg, MAXLINE, fmt, arg);
+ va_end(arg);
+ if (logFile)
+ {
+ lockLogFile();
+ fprintf(logFile, "%s(%d):%s %s", progname, getpid(), pgxcCtlGetTime(), msg);
+ fflush(logFile);
+ unlockLogFile();
+ }
+ fputs(msg, logFile ? logFile : stderr);
+ fflush(outF ? outF : stderr);
+}
+
+void writeLogOnly(const char *fmt, ...)
+{
+ char msg[MAXLINE+1];
+ va_list arg;
+
+ if (logFile)
+ {
+ va_start(arg, fmt);
+ vsnprintf(msg, MAXLINE, fmt, arg);
+ va_end(arg);
+ lockLogFile();
+ fprintf(logFile, "%s(%d):%s %s", progname, getpid(), pgxcCtlGetTime(), msg);
+ fflush(logFile);
+ unlockLogFile();
+ }
+}
+
+int setLogMsgLevel(int newLevel)
+{
+ int rc;
+
+ rc = logMsgLevel;
+ logMsgLevel = newLevel;
+ return rc;
+}
+
+int getLogMsgLevel(void)
+{
+ return logMsgLevel;
+}
+
+int setPrintMsgLevel(int newLevel)
+{
+ int rc;
+
+ rc = printMsgLevel;
+ printMsgLevel = newLevel;
+ return rc;
+}
+
+int getPrintMsgLevel(void)
+{
+ return printMsgLevel;
+}
+
/*
 * Acquire an advisory fcntl() write lock on the whole log file so that
 * multiple pgxc_ctl processes writing the same log interleave whole
 * entries rather than bytes.  Calls nest: only the outermost call
 * (lockStack == 0) takes the OS-level lock; inner calls just bump the
 * counter.  No-op when no log file is open.
 * NOTE(review): the fcntl() return value is unchecked, and when the
 * stack limit is exceeded this returns without incrementing, so a paired
 * unlockLogFile() still decrements -- confirm callers never nest 8 deep.
 */
void lockLogFile(void)
{
    struct flock lock1;

    if (logFile == NULL)
	return;
    if (lockStack > lockStackLimit)
    {
	fprintf(stderr, "Log file lock stack exceeded the limit %d. Something must be wrong.\n", lockStackLimit);
	return;
    }
    if (lockStack == 0)
    {
	lock1.l_type = F_WRLCK;
	lock1.l_start = 0;
	lock1.l_len = 0;		/* zero length locks the entire file */
	lock1.l_whence = SEEK_SET;
	fcntl(fileno(logFile), F_SETLKW, &lock1);	/* blocks until granted */
    }
    lockStack++;
}
+
+
/*
 * Release one level of the nested log-file lock; the OS-level fcntl()
 * lock is dropped only when the outermost level (lockStack back to 0)
 * is released.  Warns and bails out on an unbalanced unlock.  No-op
 * when no log file is open.
 */
void unlockLogFile(void)
{
    struct flock lock1;

    if (logFile == NULL)
	return;
    lockStack--;
    if (lockStack < 0)
    {
	fprintf(stderr, "Log file stack is below zero. Something must be wrong.\n");
	return;
    }
    if (lockStack == 0)
    {
	lock1.l_type = F_UNLCK;
	lock1.l_start = 0;
	lock1.l_len = 0;		/* unlock the entire file */
	lock1.l_whence = SEEK_SET;
	fcntl(fileno(logFile), F_SETLKW, &lock1);
    }
}
diff --git a/contrib/pgxc_ctl/pgxc_ctl_log.h b/contrib/pgxc_ctl/pgxc_ctl_log.h
new file mode 100644
index 0000000000..790b258a1c
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_log.h
@@ -0,0 +1,63 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgxc_ctl_log.h
+ *
+ * Logging module of Postgres-XC configuration and operation tool.
+ *
+ * Portions Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
#ifndef LOG_H
#define LOG_H
#include "pgxc_ctl.h"

#define MAXMSG 4096

/* Control verbosity */

/* Severity levels, ascending: a message is emitted when its level is >=
 * the configured threshold (logMsgLevel for the file, printMsgLevel for
 * the console).
 * NOTE(review): ERROR/WARNING/NOTICE collide with identically named
 * macros in PostgreSQL backend headers -- confirm this header is never
 * mixed with them. */
#define DEBUG3 10
#define DEBUG2 11
#define DEBUG1 12
#define INFO 13 /* Default for logMsgLevel */
#define NOTICE2 14
#define NOTICE 15 /* Default for printMsgLevel */
#define WARNING 16
#define ERROR 17
#define PANIC 18
#define MANDATORY 19

extern FILE *logFile;
extern void elog_start(const char *file, const char *func, int line);
extern void elogFinish(int level, const char *fmt,...) __attribute__((format(printf, 2, 3)));
extern void elogMsgRaw(int level, const char *msg);
extern void elogFileRaw(int level, char *fn);
extern void initLog(char *path, char *name);
extern void closeLog(void);
extern void writeLogRaw(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
extern void writeLogOnly(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
extern int setLogMsgLevel(int newLevel);
extern int getLogMsgLevel(void);
extern int setPrintMsgLevel(int newLevel);
extern int getPrintMsgLevel(void);
extern void lockLogFile(void);
extern void unlockLogFile(void);

/* elog() expands to two expressions: elog_start() records the call site,
 * then elogFinish() formats and emits the message. */
#define elog elog_start(__FILE__, __FUNCTION__, __LINE__), elogFinish
#define elogMsg elog_start(__FILE__, __FUNCTION__, __LINE__), elogMsgRaw
#define elogFile elog_start(__FILE__, __FUNCTION__, __LINE__), elogFileRaw
/*
#define elog elogFinish
#define elogMsg elogMsgRaw
#define elogFile elogFileRaw
*/

extern char logFileName[MAXPATH+1];


extern int logMsgLevel;
extern int printMsgLevel;
extern int printLocation;
extern int logLocation;

#endif /* LOG_H */
diff --git a/contrib/pgxc_ctl/signature.h b/contrib/pgxc_ctl/signature.h
new file mode 100644
index 0000000000..3998195cc4
--- /dev/null
+++ b/contrib/pgxc_ctl/signature.h
@@ -0,0 +1,15 @@
/*-------------------------------------------------------------------------
 *
 * signature.h
 *
 * Signature of module of Postgres-XC configuration and operation tool.
 *
 * Copyright (c) 2013 Postgres-XC Development Group
 *
 *-------------------------------------------------------------------------
 */
#ifndef SIGNATURE_H
#define SIGNATURE_H
/* Signature file to identify the make */
/* NOTE(review): presumably regenerated by the make_signature script at
 * build time; the embedded bash script checks this same value via its
 * --signature option -- keep the two in sync. */
#define signature "140308_0826_521531776"
#endif /* SIGNATURE_H */
diff --git a/contrib/pgxc_ctl/utils.c b/contrib/pgxc_ctl/utils.c
new file mode 100644
index 0000000000..9d691f2ea9
--- /dev/null
+++ b/contrib/pgxc_ctl/utils.c
@@ -0,0 +1,381 @@
+/*-------------------------------------------------------------------------
+ *
+ * utils.c
+ *
+ * Utility module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * Variable useful tools/small routines.
+ */
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+#include <stdio.h>
+
+#include "../../src/interfaces/libpq/libpq-fe.h"
+#include "utils.h"
+#include "pgxc_ctl.h"
+#include "pgxc_ctl_log.h"
+#include "do_shell.h"
+#include "config.h"
+#include "variables.h"
+#include "varnames.h"
+
+static int Malloc_ed = 0;
+static int Strdup_ed = 0;
+static int Freed = 0;
+
+void *Malloc(size_t size)
+{
+ void *rv = malloc(size);
+
+ Malloc_ed++;
+ if (rv == NULL)
+ {
+ elog(PANIC, "PANIC: No more memory. See core file for details.\n");
+ abort();
+ }
+ return(rv);
+}
+
/*
 * Append val to a NULL-terminated list of strings, growing it with
 * Realloc().  Returns the (possibly moved) list; val is stored as-is,
 * not copied.
 *
 * BUGFIX: the original reallocated to the wrong size, overwrote the last
 * element with NULL (writing rv[-1] for an empty list), and never stored
 * val at all.
 */
char **addToList(char **List, char *val)
{
    char **rv;
    int ii;

    for (ii = 0; List[ii]; ii++);
    /* ii existing entries + the new entry + the NULL terminator */
    rv = Realloc(List, sizeof(char *) * (ii + 2));
    rv[ii] = val;
    rv[ii + 1] = NULL;
    return rv;
}
+
+void *Malloc0(size_t size)
+{
+ void *rv = malloc(size);
+
+ Malloc_ed++;
+ if (rv == NULL)
+ {
+ elog(PANIC, "PANIC: No more memory. See core file for details.\n");
+ abort();
+ }
+ memset(rv, 0, size);
+ return(rv);
+}
+
+void *Realloc(void *ptr, size_t size)
+{
+ void *rv = realloc(ptr, size);
+
+ if (rv == NULL)
+ {
+ elog(PANIC, "PANIC: No more memory. See core file for details.\n");
+ abort();
+ }
+ return(rv);
+}
+
+void Free(void *ptr)
+{
+ Freed++;
+ if (ptr)
+ free(ptr);
+}
+
+/*
+ * If flag is TRUE and chdir fails, then exit(1)
+ */
+int Chdir(char *path, int flag)
+{
+ if (chdir(path))
+ {
+ elog(ERROR, "ERROR: Could not change work directory to \"%s\". %s%s\n",
+ path,
+ flag == TRUE ? "Exiting. " : "",
+ strerror(errno));
+ if (flag == TRUE)
+ exit(1);
+ else
+ return -1;
+ }
+ return 0;
+}
+
+FILE *Fopen(char *path, char *mode)
+{
+ FILE *rv;
+
+ if ((rv = fopen(path, mode)) == NULL)
+ elog(ERROR, "ERROR: Could not open the file \"%s\" in \"%s\", %s\n", path, mode, strerror(errno));
+ return(rv);
+}
+
+
+char *Strdup(const char *s)
+{
+ char *rv;
+
+ Strdup_ed++;
+ rv = strdup(s);
+ if (rv == NULL)
+ {
+ elog(PANIC, "PANIC: No more memory. See core file for details.\n");
+ abort();
+ }
+ return(rv);
+}
+
/*
 * Append the contents of every file in the NULL-terminated fileList to f,
 * skipping "none" placeholders.  A file that cannot be opened is reported
 * via elog() and skipped; the remaining files are still copied.
 * fileList may be NULL, in which case nothing happens.
 */
void appendFiles(FILE *f, char **fileList)
{
    FILE *src;
    int ii;
    char buf[MAXLINE+1];

    if (fileList)
	for (ii = 0; fileList[ii]; ii++)
	{
	    if (!is_none(fileList[ii]))
	    {
		if ((src = fopen(fileList[ii], "r")) == 0)
		{
		    elog(ERROR, "ERROR: could not open file %s for read, %s\n", fileList[ii], strerror(errno));
		    continue;
		}
		while (fgets(buf, MAXLINE, src))
		    fputs(buf, f);
		fclose(src);
	    }
	}
}
+
/*
 * Create (truncating) the local stdin work file -- its path is written
 * into buf by createLocalFileName() -- and pre-load it with the contents
 * of fileList (see appendFiles).  Returns the open FILE* for further
 * writing, or NULL on open failure.
 */
FILE *prepareLocalStdin(char *buf, int len, char **fileList)
{
    FILE *f;
    if ((f = fopen(createLocalFileName(STDIN, buf, len), "w")) == NULL)
    {
	elog(ERROR, "ERROR: could not open file %s for write, %s\n", buf, strerror(errno));
	return(NULL);
    }
    appendFiles(f, fileList);
    return(f);
}
+
/*
 * Write the current local time into buf as "YYYYMMDD_HH:MM:SS" and
 * return buf.  len is the size of buf including the terminator.
 */
char *timeStampString(char *buf, int len)
{
    time_t now = time(NULL);
    struct tm local;

    localtime_r(&now, &local);
    snprintf(buf, len, "%04d%02d%02d_%02d:%02d:%02d",
	     local.tm_year + 1900, local.tm_mon + 1, local.tm_mday,
	     local.tm_hour, local.tm_min, local.tm_sec);
    return buf;
}
+
/*
 * Build a newly allocated, NULL-terminated copy of nodeList with every
 * "none" placeholder removed.  Each surviving name is Strdup()ed; the
 * caller owns the result.
 */
char **makeActualNodeList(char **nodeList)
{
    char **result;
    int ii, count, out;

    count = 0;
    for (ii = 0; nodeList[ii]; ii++)
	if (!is_none(nodeList[ii]))
	    count++;
    result = Malloc0(sizeof(char *) * (count + 1));
    out = 0;
    for (ii = 0; nodeList[ii]; ii++)
	if (!is_none(nodeList[ii]))
	    result[out++] = Strdup(nodeList[ii]);
    return result;
}
+
+int gtmProxyIdx(char *gtmProxyName)
+{
+ int ii;
+
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ {
+ if (strcmp(aval(VAR_gtmProxyNames)[ii], gtmProxyName) == 0)
+ return ii;
+ }
+ return -1;
+}
+
+int coordIdx(char *coordName)
+{
+ int ii;
+
+ if (is_none(coordName))
+ return -1;
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if (strcmp(aval(VAR_coordNames)[ii], coordName) == 0)
+ return ii;
+ }
+ return -1;
+}
+
+int datanodeIdx(char *datanodeName)
+{
+ int ii;
+
+ if (is_none(datanodeName))
+ return -1;
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if (strcmp(aval(VAR_datanodeNames)[ii], datanodeName) == 0)
+ return ii;
+ }
+ return -1;
+}
+
/*
 * Return the index of the gtm proxy configured to run on serverName, or
 * -1 when there is none (or serverName is NULL).  The loop is bounded by
 * the proxy *name* list but matches against the parallel
 * VAR_gtmProxyServers array -- the two arrays are maintained in lockstep
 * by the configuration reader.
 */
int getEffectiveGtmProxyIdxFromServerName(char *serverName)
{
    int ii;

    if (serverName == NULL)
	return (-1);
    for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
    {
	if (strcmp(aval(VAR_gtmProxyServers)[ii], serverName) == 0)
	    return ii;
    }
    return -1;
}
+
+
+
+/*
+ * Please note that this function deeply depend upon
+ * the environment.
+ *
+ * It works find with CentOS/Ubuntu/ReadHat Linux but
+ * may need another tweak for other operation systems
+ * such as Solaris, FreeBSD, MacOS.
+ */
+pid_t get_prog_pid(char *host, char *progname, char *dir)
+{
+ char cmd[MAXLINE+1];
+ char pid_s[MAXLINE+1];
+ int ii;
+ FILE *wkf;
+ char *token;
+ char *line;
+
+ snprintf(cmd, MAXLINE,
+ "ssh %s@%s "
+ "\"ps -f -C %s | grep %s\"",
+ sval(VAR_pgxcUser), host, progname, dir);
+ wkf = popen(cmd, "r");
+ if (wkf == NULL)
+ {
+ elog(ERROR, "ERROR: cannot obtain pid value of the remote postmaster, host \"%s\" dir \"%s\", %s\n",
+ host, dir, strerror(errno));
+ return(-1);
+ }
+ fgets(pid_s, MAXLINE, wkf);
+ fclose(wkf);
+ /* Get the second token */
+ line = pid_s;
+ if ((line = get_word(line, &token)) == NULL)
+ return 0;
+ get_word(line, &token);
+ if (token == NULL)
+ return 0;
+ for (ii = 0; token[ii]; ii++)
+ if (token[ii] < '0' || token[ii] > '9')
+ return 0;
+ return(atoi(token));
+}
+
+int pingNode(char *host, char *port)
+{
+ PGPing status;
+ char conninfo[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ conninfo[0] = 0;
+ if (host)
+ {
+ snprintf(editBuf, MAXPATH, "host = '%s' ", host);
+ strncat(conninfo, editBuf, MAXLINE);
+ }
+ if (port)
+ {
+ snprintf(editBuf, MAXPATH, "port = %d ", atoi(port));
+ strncat(conninfo, editBuf, MAXLINE);
+ }
+ if (conninfo[0])
+ {
+ status = PQping(conninfo);
+ if (status == PQPING_OK)
+ return 0;
+ else
+ return 1;
+ }
+ else
+ return -1;
+}
+
/*
 * Truncate s at its first newline, if any.
 */
void trimNl(char *s)
{
    char *nl = strchr(s, '\n');

    if (nl)
	*nl = '\0';
}
+
+char *getChPidList(char *host, pid_t ppid)
+{
+ FILE *wkf;
+ char cmd[MAXLINE+1];
+ char line[MAXLINE+1];
+ char *rv = Malloc(MAXLINE+1);
+
+ rv[0] = 0;
+ snprintf(cmd, MAXLINE, "ssh %s@%s pgrep -P %d",
+ sval(VAR_pgxcUser), host, ppid);
+ wkf = popen(cmd, "r");
+ if (wkf == NULL)
+ return NULL;
+ while (fgets(line, MAXLINE, wkf))
+ {
+ trimNl(line);
+ strncat(rv, line, MAXLINE);
+ strncat(rv, " ", MAXLINE);
+ }
+ return rv;
+}
+
+char *getIpAddress(char *hostName)
+{
+ char command[MAXLINE+1];
+ char *ipAddr;
+ FILE *f;
+
+ snprintf(command, MAXLINE, "ping -c1 %s | head -n 1 | sed 's/^[^(]*(\\([^)]*\\).*$/\\1/'", hostName);
+ if ((f = popen(command, "r")) == NULL)
+ {
+ elog(ERROR, "ERROR: could not open the command, \"%s\", %s\n", command, strerror(errno));
+ return NULL;
+ }
+ ipAddr = Malloc(MAXTOKEN+1);
+ fgets(ipAddr, MAXTOKEN, f);
+ fclose(f);
+ trimNl(ipAddr);
+ return ipAddr;
+}
+
diff --git a/contrib/pgxc_ctl/utils.h b/contrib/pgxc_ctl/utils.h
new file mode 100644
index 0000000000..0f3089354f
--- /dev/null
+++ b/contrib/pgxc_ctl/utils.h
@@ -0,0 +1,48 @@
/*-------------------------------------------------------------------------
 *
 * utils.h
 *
 * Utility module of Postgres-XC configuration and operation tool.
 *
 * Copyright (c) 2013 Postgres-XC Development Group
 *
 *-------------------------------------------------------------------------
 */
#include <stdlib.h>
#include <stdio.h>

/* Memory-allocation wrappers (implemented elsewhere in this module). */
extern void *Malloc(size_t size);
extern void *Malloc0(size_t size);
extern void *Realloc(void *ptr, size_t size);
extern void Free(void *ptr);
/* chdir/fopen wrappers. */
extern int Chdir(char *path, int flag);
extern FILE *Fopen(char *path, char *mode);
extern char *Strdup(const char *s);
/* Helpers for NULL-terminated string lists and file concatenation. */
extern char **addToList(char **List, char *val);
extern void appendFiles(FILE *f, char **fileList);
extern FILE *prepareLocalStdin(char *buf, int len, char **fileList);
extern char *timeStampString(char *buf, int len);
extern char **makeActualNodeList(char **nodeList);
/* Node-name to configuration-index lookups. */
extern int gtmProxyIdx(char *gtmProxyName);
extern int coordIdx(char *coordName);
extern int datanodeIdx(char *datanodeName);
extern int getEffectiveGtmProxyIdxFromServerName(char *serverName);
/* Remote-process helpers (ssh based). */
extern pid_t get_prog_pid(char *host, char *progname, char *dir);
extern int pingNode(char *host, char *port);
extern void trimNl(char *s);
extern char *getChPidList(char *host, pid_t ppid);
extern char *getIpAddress(char *hostName);

/* Read the pid of a specific server program running under dir on host. */
#define get_postmaster_pid(host, dir) get_prog_pid(host, "postgres", dir)
#define get_gtm_pid(host, dir) get_prog_pid(host, "gtm", dir)
#define get_gtmProxy_pid(host, dir) get_prog_pid(host, "gtm_proxy", dir)
/* Free x and clear the pointer so it cannot dangle. */
#define freeAndReset(x) do{Free(x);(x)=NULL;}while(0)
#define myWEXITSTATUS(rc) ((rc) & 0x000000FF)

/* Printout variable in bash format */
#define svalFormat "%s=%s\n"
#define expandSval(name) name, sval(name)
#define avalFormat "%s=( %s )\n"
#define expandAval(name) name, listValue(name)
#define fprintAval(f, name) do{fprintf(f, avalFormat, expandAval(name));}while(0)
#define fprintSval(f, name) do{fprintf(f, svalFormat, expandSval(name));}while(0)
diff --git a/contrib/pgxc_ctl/variables.c b/contrib/pgxc_ctl/variables.c
new file mode 100644
index 0000000000..09f01e7f8f
--- /dev/null
+++ b/contrib/pgxc_ctl/variables.c
@@ -0,0 +1,453 @@
+/*-------------------------------------------------------------------------
+ *
+ * variables.c
+ *
+ * Variable handling module of Postgres-XC configuration and operation tool.
+ *
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdlib.h>
+#include <string.h>
+#include "variables.h"
+#include "utils.h"
+#include "pgxc_ctl_log.h"
+
+pgxc_ctl_var *var_head = NULL;
+pgxc_ctl_var *var_tail = NULL;
+
+static void clear_var(pgxc_ctl_var *var);
+/*
+ * Hash bucket size is up to 256
+ */
+static int hash_val(char *name)
+{
+ unsigned char *name_u = (unsigned char *)name;
+ unsigned char v;
+
+ for(v = 0; *name_u; name_u++)
+ v += *name_u;
+ return (v%NUM_HASH_BUCKET);
+}
+
#define LIMIT_TO_DOUBLE 128
#define INCR_OVER_DOUBLE 10
/*
 * Growth policy for dynamically sized arrays: double small sizes, grow
 * large ones by a fixed increment; a non-positive size becomes 1.
 */
static int next_size(int sz)
{
	if (sz <= 0)
		return 1;
	return (sz <= LIMIT_TO_DOUBLE) ? sz * 2 : sz + INCR_OVER_DOUBLE;
}
+
+void init_var_hash()
+{
+ int i;
+
+ for (i = 0; i < NUM_HASH_BUCKET; i++)
+ {
+ var_hash[i].el_size = 1;
+ var_hash[i].el_used = 0;
+ var_hash[i].el = (pgxc_ctl_var **)Malloc(sizeof(pgxc_ctl_var *));
+ var_hash[i].el[0] = NULL;
+ }
+}
+
+static void remove_from_hash(pgxc_ctl_var *var)
+{
+ int hash_v = hash_val(var->varname);
+ int ii, jj;
+
+ for(ii = 0; var_hash[hash_v].el[ii]; ii++)
+ {
+ if (var_hash[hash_v].el[ii] != var)
+ continue;
+ else
+ {
+ for(jj = ii; var_hash[hash_v].el[jj]; jj++)
+ var_hash[hash_v].el[jj] = var_hash[hash_v].el[jj + 1];
+ var_hash[hash_v].el_used--;
+ return;
+ }
+ }
+ return;
+}
+
+void add_var_hash(pgxc_ctl_var *var)
+{
+ int hash_v = hash_val(var->varname);
+ if (var_hash[hash_v].el_used + 1 >= var_hash[hash_v].el_size)
+ {
+ var_hash[hash_v].el_size = next_size(var_hash[hash_v].el_size);
+ var_hash[hash_v].el = (pgxc_ctl_var **)Realloc(var_hash[hash_v].el, sizeof(pgxc_ctl_var *) * var_hash[hash_v].el_size);
+ }
+ var_hash[hash_v].el[var_hash[hash_v].el_used++] = var;
+ var_hash[hash_v].el[var_hash[hash_v].el_used] = NULL;
+}
+
+pgxc_ctl_var *new_var(char *name)
+{
+ pgxc_ctl_var *newv;
+
+ if (find_var(name))
+ {
+ elog(ERROR, "ERROR: Variable %s already defined. Check your configuration.\n", name);
+ return NULL;
+ }
+
+ newv = (pgxc_ctl_var *)Malloc(sizeof(pgxc_ctl_var));
+ if (var_head == NULL)
+ {
+ var_head = var_tail = newv;
+ newv->prev = NULL;
+ }
+ else
+ {
+ newv->prev = var_tail;
+ var_tail->next = newv;
+ var_tail = newv;
+ }
+ newv->next = NULL;
+ newv->varname = Strdup(name);
+ newv->val_size = 1;
+ newv->val_used = 0;
+ newv->val = (char **)Malloc(sizeof(char *));
+ newv->val[0] = NULL;
+ add_var_hash(newv);
+ return(newv);
+}
+
+void remove_var(pgxc_ctl_var *var)
+{
+ if ((var_head == var_tail) && (var_head == var))
+ var_head = var_tail = NULL;
+ else if (var_head == var)
+ {
+ var_head = var_head->next;
+ var_head->prev = NULL;
+ }
+ else if (var_tail == var)
+ {
+ var_tail->next = NULL;
+ var_tail = var_tail->prev;
+ }
+ else
+ {
+ var->prev->next = var->next;
+ var->next->prev = var->prev;
+ }
+ clear_var(var);
+}
+
+static void clear_var(pgxc_ctl_var *var)
+{
+ int ii;
+
+ remove_from_hash(var);
+ for (ii = 0; var->val[ii]; ii++)
+ free(var->val[ii]);
+ free(var->varname);
+ free(var);
+
+}
+
+void add_val(pgxc_ctl_var *var, char *val)
+{
+ if (var->val_size <= var->val_used+1)
+ {
+ var->val_size = next_size(var->val_size);
+ var->val = (char **)Realloc(var->val, sizeof(char *)*var->val_size);
+ }
+ var->val[var->val_used++] = Strdup(val);
+ var->val[var->val_used] = NULL;
+}
+
+void add_val_name(char *name, char *val)
+{
+ pgxc_ctl_var *var;
+ if (!(var = find_var(name)))
+ return;
+ add_val(var, name);
+ return;
+}
+
+
+pgxc_ctl_var *find_var(char *name)
+{
+ pgxc_var_hash *hash = &var_hash[hash_val(name)];
+ int i;
+
+ for (i = 0; i < hash->el_used; i++)
+ {
+ if (strcmp(hash->el[i]->varname, name) == 0)
+ return hash->el[i];
+ }
+ return NULL;
+}
+
+char *sval(char *name)
+{
+ pgxc_ctl_var *var = find_var(name);
+ if (!var)
+ return NULL;
+ return var->val[0];
+}
+
+char **aval(char *name)
+{
+ pgxc_ctl_var *var = find_var(name);
+ if (!var)
+ return NULL;
+ return var->val;
+}
+
+void reset_value(pgxc_ctl_var *var)
+{
+ int i;
+ for (i = 0; var->val[i]; i++)
+ {
+ Free (var->val[i]);
+ var->val[i] = NULL;
+ }
+ var->val_used = 0;
+}
+
+void assign_val(char *destName, char *srcName)
+{
+ pgxc_ctl_var *dest = find_var(destName);
+ pgxc_ctl_var *src = find_var(srcName);
+ int ii;
+
+ reset_value(dest);
+ for (ii = 0; ii < src->val_used; ii++)
+ add_val(dest, src->val[ii]);
+}
+
+void assign_sval(char *destName, char *val)
+{
+ pgxc_ctl_var *dest = find_var(destName);
+
+ reset_value(dest);
+ add_val(dest, val);
+}
+
/*
 * Ensure the named variable exists, then clear all of its values.
 */
void reset_var(char *name)
{
	reset_value(confirm_var(name));
}
+
+void reset_var_val(char *name, char *val)
+{
+ reset_var(name);
+ add_val(find_var(name), val);
+}
+
+pgxc_ctl_var *confirm_var(char *name)
+{
+ pgxc_ctl_var *rc;
+ if ((rc = find_var(name)))
+ return rc;
+ return new_var(name);
+}
+
+void print_vars(void)
+{
+ pgxc_ctl_var *cur;
+
+ lockLogFile();
+ for(cur = var_head; cur; cur=cur->next)
+ print_var(cur->varname);
+ unlockLogFile();
+}
+
+void print_var(char *vname)
+{
+ pgxc_ctl_var *var;
+ char outBuf[MAXLINE + 1];
+
+ outBuf[0] = 0;
+ if ((var = find_var(vname)) == NULL)
+ {
+ elog(ERROR, "ERROR: Variable %s not found.\n", vname);
+ return;
+ }
+ else
+ {
+ char **curv;
+ char editbuf[MAXPATH];
+
+ snprintf(editbuf, MAXPATH, "%s (", vname);
+ strncat(outBuf, editbuf, MAXLINE);
+ for (curv=var->val; *curv; curv++)
+ {
+ snprintf(editbuf, MAXPATH, " \"%s\" ", *curv);
+ strncat(outBuf, editbuf, MAXLINE);
+ }
+ strncat(outBuf, ")", MAXLINE);
+ elog(NOTICE, "%s\n", outBuf);
+ }
+
+}
+
+void log_var(char *varname)
+{
+ if (logFile)
+ print_var(varname);
+}
+
+int arraySizeName(char *name)
+{
+ pgxc_ctl_var *var;
+
+ if ((var = find_var(name)) == NULL)
+ return -1;
+ return(arraySize(var));
+}
+
+int arraySize(pgxc_ctl_var *var)
+{
+ return var->val_used;
+}
+
+char **add_member(char **array, char *val)
+{
+ char **rv;
+ int ii;
+
+ for (ii = 0; array[ii]; ii++);
+ rv = Realloc(array, sizeof(char *) * (ii + 2));
+ rv[ii] = Strdup(val);
+ rv[ii+1] = NULL;
+ return(rv);
+}
+
+void clean_array(char **array)
+{
+ int ii;
+ if (array)
+ {
+ for(ii = 0; array[ii]; ii++)
+ Free(array[ii]);
+ Free(array);
+ }
+}
+
/*
 * Replace *dest with src, freeing the previous string.  src is stored
 * as-is (not copied), so ownership transfers to *dest.
 */
void var_assign(char **dest, char *src)
{
	char *old = *dest;

	*dest = src;
	Free(old);
}
+
+char *listValue(char *name)
+{
+ pgxc_ctl_var *dest;
+ int ii;
+ char *buf;
+
+ if ((dest = find_var(name)) == NULL)
+ return Strdup("");
+ buf = Malloc(MAXLINE+1);
+ buf[0]=0;
+ for(ii = 0; ii < dest->val_used; ii++)
+ {
+ strncat(buf, dest->val[ii], MAXLINE);
+ strncat(buf, " ", MAXLINE);
+ }
+ return buf;
+}
+
+int ifExists(char *name, char *value)
+{
+ pgxc_ctl_var *var = find_var(name);
+ int ii;
+
+ if (!var)
+ return FALSE;
+ for (ii = 0; ii < var->val_used; ii++)
+ if (strcmp((var->val)[ii], value) == 0)
+ return TRUE;
+ return FALSE;
+}
+
+int IfExists(char *name, char *value)
+{
+ pgxc_ctl_var *var = find_var(name);
+ int ii;
+
+ if (!var)
+ return FALSE;
+ for (ii = 0; ii < var->val_used; ii++)
+ if (strcasecmp((var->val)[ii], value) == 0)
+ return TRUE;
+ return FALSE;
+}
+
/*
 * Grow the named variable so it holds at least newSize values, padding
 * any new slots with copies of def_value ("none" when NULL).  Existing
 * values are preserved and the variable is never shrunk.  Returns 0 on
 * success, -1 when the variable is undefined.
 */
int extendVar(char *name, int newSize, char *def_value)
{
	pgxc_ctl_var *target;
	char **old_val;
	int old_size;
	int ii;

	if ((target = find_var(name)) == NULL)
		return -1;
	if (def_value == NULL)
		def_value = "none";
	if (target->val_size < newSize)
	{
		/* Not enough capacity: move to a larger, zero-filled array. */
		old_val = target->val;
		old_size = target->val_size;
		target->val = Malloc0(sizeof(char *) * (newSize +1));
		memcpy(target->val, old_val, sizeof(char *) * old_size);
		target->val_size = newSize;
		Free(old_val);
		/* Pad the slots beyond the previously used ones. */
		for (ii = target->val_used; ii < newSize; ii++)
			(target->val)[ii] = Strdup(def_value);
		target->val_used = newSize;
	}
	else if (target->val_used < newSize)
	{
		/* Capacity is sufficient: just pad up to newSize. */
		for (ii = target->val_used; ii < newSize; ii++)
			(target->val)[ii] = Strdup(def_value);
		target->val_used = newSize;
	}
	return 0;
}
+
+
+/*
+ * Set the value at index idx of the named variable, creating the
+ * variable and padding missing slots (with pad, or "none" when pad is
+ * NULL) as needed.
+ */
+void assign_arrayEl(char *name, int idx, char *val, char *pad)
+{
+ pgxc_ctl_var *var = confirm_var(name);
+
+ if (pad == NULL)
+ pad = "none";
+ /*
+ * Pad if needed
+ */
+ extendVar(name, idx+1, pad);
+ Free(var->val[idx]);
+ var->val[idx] = Strdup(val);
+}
+
+
+int doesExist(char *name, int idx)
+{
+ pgxc_ctl_var *var;
+
+ if (name == NULL)
+ return 0;
+ if ((var = find_var(name)) == NULL)
+ return 0;
+ if (var->val_used <= idx)
+ return 0;
+ return 1;
+}
diff --git a/contrib/pgxc_ctl/variables.h b/contrib/pgxc_ctl/variables.h
new file mode 100644
index 0000000000..6998277e04
--- /dev/null
+++ b/contrib/pgxc_ctl/variables.h
@@ -0,0 +1,76 @@
/*-------------------------------------------------------------------------
 *
 * variables.h
 *
 * Variable handling module of Postgres-XC configuration and operation tool.
 *
 * Copyright (c) 2013 Postgres-XC Development Group
 *
 *-------------------------------------------------------------------------
 */
#ifndef VARIABLES_H
#define VARIABLES_H

#include <stdio.h>
#include <stdlib.h>
#define NUM_HASH_BUCKET 128

/* One pgxc_ctl variable: a name bound to a NULL-terminated value array. */
typedef struct pgxc_ctl_var {
	struct pgxc_ctl_var *next;
	struct pgxc_ctl_var *prev;
	char *varname;
	int val_size;		/* allocated slots in val */
	int val_used;		/* slots currently in use */
	char **val;			/* NULL-terminated array of value strings */
} pgxc_ctl_var;


/* Global doubly-linked list of all defined variables. */
extern pgxc_ctl_var *var_head;
extern pgxc_ctl_var *var_tail;

/* Hash bucket: NULL-terminated array of pointers into the variable list. */
typedef struct pgxc_var_hash {
	int el_size;
	int el_used;
	pgxc_ctl_var **el;
} pgxc_var_hash;


/*
 * NOTE(review): this is a tentative definition in a header, so every
 * translation unit including this file defines var_hash; it links only via
 * common-symbol folding (-fcommon).  It should be declared `extern` here
 * with one definition in variables.c -- left unchanged to avoid a
 * link-level change.
 */
pgxc_var_hash var_hash[NUM_HASH_BUCKET];

void init_var_hash(void);
void add_var_hash(pgxc_ctl_var *var);
pgxc_ctl_var *new_var(char *name);
void add_val(pgxc_ctl_var *var, char *val);
void add_val_name(char *name, char *val);
pgxc_ctl_var *find_var(char *name);
char *sval(char *name);
char **aval(char *name);
int arraySizeName(char *name);
int arraySize(pgxc_ctl_var *var);
void print_vars(void);
void print_var(char *vname);
void reset_value(pgxc_ctl_var *var);	/* was declared twice */
void assign_val(char *dest, char *src);
void assign_sval(char *name, char *val);
void assign_arrayEl(char *name, int idx, char *val, char *pad);
pgxc_ctl_var *confirm_var(char *name);
void reset_var_val(char *name, char *val);
void reset_var(char *name);
void remove_var(pgxc_ctl_var *var);
void log_var(char *name);
char **add_member(char **array, char *val);
void var_assign(char **dest, char *src);
char *listValue(char *name);
int extendVar(char *name, int newSize, char *def_value);
int doesExist(char *name, int idx);

/* Append b to the (possibly NULL) NULL-terminated array a. */
#define AddMember(a, b) do{if((a) == NULL) (a) = Malloc0(sizeof(char *)); (a) = add_member((a), (b));}while(0)
void clean_array(char **array);
/* Free the array and clear the pointer so it cannot dangle. */
#define CleanArray(a) do{clean_array(a); (a) = NULL;}while(0)
#define VAR(a) find_var(a)

int ifExists(char *name, char *value);
int IfExists(char *name, char *value);

#endif /* VARIABLES_H */
diff --git a/contrib/pgxc_ctl/varnames.h b/contrib/pgxc_ctl/varnames.h
new file mode 100644
index 0000000000..9494b76b70
--- /dev/null
+++ b/contrib/pgxc_ctl/varnames.h
@@ -0,0 +1,148 @@
+/*-------------------------------------------------------------------------
+ *
+ * varnames.h
+ *
+* Variable name definition of Postgres-XC configuration and operation tool.
+ *
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2010-2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef VARNAMES_H
+#define VAR_VARNAMES_H
+
+/* Install Directory */
+#define VAR_pgxcInstallDir "pgxcInstallDir" /* Not mandatory */
+
+/* Overall */
+#define VAR_pgxcOwner "pgxcOwner"
+#define VAR_pgxcUser "pgxcUser"
+#define VAR_tmpDir "tmpDir"
+#define VAR_localTmpDir "localTmpDir"
+#define VAR_logOpt "logOpt"
+#define VAR_logDir "logDir"
+#define VAR_configBackup "configBackup"
+#define VAR_configBackupHost "configBackupHost"
+#define VAR_configBackupDir "configBackupDir"
+#define VAR_configBackupFile "configBackupFile"
+#define VAR_allServers "allServers"
+
+/* GTM overall */
+#define VAR_gtmName "gtmName"
+
+/* GTM master */
+#define VAR_gtmMasterServer "gtmMasterServer"
+#define VAR_gtmMasterPort "gtmMasterPort"
+#define VAR_gtmMasterDir "gtmMasterDir"
+#define VAR_gtmExtraConfig "gtmExtraConfig"
+#define VAR_gtmMasterSpecificExtraConfig "gtmMasterSpecificExtraConfig"
+
+/* GTM slave */
+#define VAR_gtmSlave "gtmSlave"
+#define VAR_gtmSlaveServer "gtmSlaveServer"
+#define VAR_gtmSlavePort "gtmSlavePort"
+#define VAR_gtmSlaveDir "gtmSlaveDir"
+#define VAR_gtmSlaveSpecificExtraConfig "gtmSlaveSpecificExtraConfig"
+
+/* GTM Proxy */
+#define VAR_gtmProxy "gtmProxy"
+#define VAR_gtmProxyNames "gtmProxyNames"
+#define VAR_gtmProxyServers "gtmProxyServers"
+#define VAR_gtmProxyPorts "gtmProxyPorts"
+#define VAR_gtmProxyDirs "gtmProxyDirs"
+#define VAR_gtmPxyExtraConfig "gtmPxyExtraConfig"
+#define VAR_gtmPxySpecificExtraConfig "gtmPxySpecificExtraConfig"
+
+/* Coordinators overall */
+#define VAR_coordNames "coordNames"
+#define VAR_coordPorts "coordPorts"
+#define VAR_poolerPorts "poolerPorts"
+#define VAR_coordPgHbaEntries "coordPgHbaEntries"
+
+/* Coordinators master */
+#define VAR_coordMasterServers "coordMasterServers"
+#define VAR_coordMasterDirs "coordMasterDirs"
+#define VAR_coordMaxWALSenders "coordMaxWALSenders"
+
+/* Coordinators slave */
+#define VAR_coordSlave "coordSlave"
+#define VAR_coordSlaveServers "coordSlaveServers"
+#define VAR_coordSlaveSync "coordSlaveSync"
+#define VAR_coordSlaveDirs "coordSlaveDirs"
+#define VAR_coordArchLogDirs "coordArchLogDirs"
+
+/* Coordinator configuration files */
+#define VAR_coordExtraConfig "coordExtraConfig"
+#define VAR_coordSpecificExtraConfig "coordSpecificExtraConfig"
+#define VAR_coordExtraPgHba "coordExtraPgHba"
+#define VAR_coordSpecificExtraPgHba "coordSpecificExtraPgHba"
+
+/* Coordinators additional slaves */
+/* Actual additional slave configuration will be obtained from coordAdditionalSlaveSet */
+#define VAR_coordAdditionalSlaves "coordAdditionalSlaves"
+#define VAR_coordAdditionalSlaveSet "coordAdditionalSlaveSet"
+
+
+/* Datanodes overall */
+#define VAR_coordAdditionalSlaveSet "coordAdditionalSlaveSet"
+#define VAR_datanodeNames "datanodeNames"
+#define VAR_datanodePorts "datanodePorts"
+#ifdef XCP
+#define VAR_datanodePoolerPorts "datanodePoolerPorts"
+#endif
+#define VAR_datanodePgHbaEntries "datanodePgHbaEntries"
+#define VAR_primaryDatanode "primaryDatanode"
+
+/* Datanode masters */
+#define VAR_datanodeMasterServers "datanodeMasterServers"
+#define VAR_datanodeMasterDirs "datanodeMasterDirs"
+#define VAR_datanodeMaxWALSenders "datanodeMaxWALSenders"
+
+/* Datanode slaves */
+#define VAR_datanodeSlave "datanodeSlave"
+#define VAR_datanodeSlaveServers "datanodeSlaveServers"
+#define VAR_datanodeSlaveSync "datanodeSlaveSync"
+#define VAR_datanodeSlaveDirs "datanodeSlaveDirs"
+#define VAR_datanodeArchLogDirs "datanodeArchLogDirs"
+
+/* Datanode configuration files */
+#define VAR_datanodeExtraConfig "datanodeExtraConfig"
+#define VAR_datanodeSpecificExtraConfig "datanodeSpecificExtraConfig"
+#define VAR_datanodeExtraPgHba "datanodeExtraPgHba"
+#define VAR_datanodeSpecificExtraPgHba "datanodeSpecificExtraPgHba"
+
+/* Datanode additional slaves */
+/* Actual additional slave configuration will be obtained from datanodeAdditionalSlaveSet */
+#define VAR_datanodeAdditionalSlaves "datanodeAdditionalSlaves"
+#define VAR_datanodeAdditionalSlaveSet "datanodeAdditionalSlaveSet"
+
+/* WAL Archives */
+/* Actual wal archive will be obtained from walArchiveSet */
+#define VAR_walArchive "walArchive"
+#define VAR_walArchiveSet "walArchiveSet"
+
+/* Connection to datanode/coordinator */
+
+#define VAR_pgxcCtlName "pgxcCtlName"
+#define VAR_defaultDatabase "defaultDatabase"
+
+/* Other Options */
+
+#define VAR_pgxc_ctl_home "pgxc_ctl_home"
+#define VAR_xc_prompt "xc_prompt"
+#define VAR_verbose "verbose"
+#define VAR_logDir "logDir"
+#define VAR_logFile "logFile"
+#define VAR_tmpDir "tmpDir"
+#define VAR_localTmpDir "localTmpDir"
+#define VAR_configFile "configFile"
+#define VAR_echoAll "echoAll"
+#define VAR_debug "debug"
+#define VAR_logMessage "logMessage"
+#define VAR_printMessage "printMessage"
+#define VAR_logLocation "logLocation"
+#define VAR_printLocation "printLocation"
+
+#endif /* VARNAMES_H */