author     Martin Pihlak    2012-08-16 09:47:10 +0000
committer  Martin Pihlak    2012-08-16 09:47:10 +0000
commit     e1c22bd6d598dcf86a2a930bd19904e330b085b6 (patch)
tree       c32d23315afe3d3c15b24f62dcc212df03e34671
parent     37e91db8fb3a1fcd9a7a54934a2e882a5a953d0c (diff)
parent     f8d90af0912db0a2ed29aa5e8fb912be3ab309a6 (diff)
Merge branch 'master' of internal-git:/git/dba/skytools-3
179 files changed, 5017 insertions, 617 deletions
@@ -7,19 +7,36 @@ Contributors
 ------------
 André Malo
 Andrew Dunstan
+Artyom Nosov
 Asko Oja
 Asko Tiidumaa
+Cédric Villemain
 Charles Duffy
 Devrim Gündüz
 Dimitri Fontaine
 Dmitriy V'jukov
+Egon Valdmees
 Erik Jones
 Götz Lange
+Hannu Krosing
 Hans-Juergen Schoenig
+Jason Buberel
+Juta Vaks
 Kristo Kaiv
 Luc Van Hoeylandt
+Lukáš Lalinský
 Marcin Stępnicki
+Mark Kirkwood
+Martin Otto
 Martin Pihlak
+Nico Mandery
 Pierre-Emmanuel André
+Priit Kustala
+Sébastien Lardière
+Sergey Burladyan
+Sergey Konoplev
+Shoaib Mir
+Steve Singer
+Tarvi Pillessaar
 Zoltán Böszörményi

@@ -10,7 +10,7 @@ SUBDIRS = sql doc

 # modules that use doctest for regtests
 DOCTESTMODS = skytools.quoting skytools.parsing skytools.timeutil \
 	skytools.sqltools skytools.querybuilder skytools.natsort \
-	skytools.utf8 skytools.sockutil
+	skytools.utf8 skytools.sockutil skytools.fileutil

 all: python-all sub-all config.mak

@@ -1,3 +1,140 @@
+
+2012-07-19 - SkyTools 3.1 - "Carnival-on-a-Stick"
+
+  = Features =
+
+  * londiste: Wait commands:
+      > londiste $ini add-table .. --wait-sync
+      > londiste $ini wait-sync
+    Wait until all tables are fully copied over.
+      > londiste $ini wait-root
+    Wait until the next tick on root reaches the local node.
+      > londiste $ini wait-provider
+    Wait until the next tick on provider reaches the local node.
+
+  * londiste execute: support meta attributes in SQL.
+    This allows executing ADD COLUMN from the root over the whole
+    cascade, even when not all nodes contain the table:
+
+      --*-- Local-Table: mytable
+      ALTER TABLE mytable ADD COLUMN foo text;
+
+  * Convert DB modules to extensions (9.1+).  The following modules
+    are now available as extensions: pgq, pgq_node, pgq_coop, pgq_ext,
+    londiste.  All the old files are kept in their old locations to
+    avoid any breakage.
+
+    Known problems in the Postgres extensions infrastructure (as of 9.1.4):
+
+    - pg_dump crashes when an extension has a sequence whose value needs
+      to be dumped.  Thus pgq.batch_id_seq cannot be made dumpable, and
+      its value gets lost during dump+reload.
+
+    - CREATE EXTENSION pgq_coop FROM unpackaged;
+      DROP EXTENSION pgq_coop;
+      will not drop the schema.  Plain CREATE+DROP works fine.
+
+  = Minor Features =
+
+  * londiste.applied_execute: drop queue_name from pkey
+  * pgq.LocalConsumer: consumer that tracks processed ticks in a local file
+  * simple_local_consumer: LocalConsumer that runs a query for each event
+  * pgq.Consumer: 'table_filter' config param, filters on ev_extra1,
+    where PgQ triggers put the source table name
+  * londiste.periodic_maintenance: cleanup for applied_execute
+  * pgq.maint_operations: include londiste.periodic_maintenance
+  * skytools.exists_view(): new function
+  * skytools.fileutil: new module, contains write_atomic()
+  * londiste.handler: make encoding= param available to part & londiste handlers
+  * debian: build modules for all installed server-dev versions
+
+  = Fixes =
+
+  * CascadedConsumer: re-initialize provider connection when location changes
+  * pgq_node.drop_node: mention node name in info message
+  * drop-node: move find_root before local drop
+  * pgq.maint_rotate_tables: check txid sanity, fail if bad
+  * sql_tokenizer: allow idents starting with underscore
+  * BaseScript: write pidfile atomically to avoid corrupt pidfiles
+  * londiste replay: skip provider checks if copy_node is used
+  * CascadedWorker: don't move watermark on source further than incoming batch
+
+2012-05-30 - SkyTools 3.0.3 - "Biometric Authentication by Yodeling"
+
+  = Features =
+
+  * londiste copy: copy table from another node (add-table --copy-node=NODE)
+  * londiste remove-table: drop sequence too
+  * public.create_partition: move under londiste schema, it's now generic enough
+  * londiste.create_partitions: support copying rules from master table
+  * handlers.dispatch: use londiste.create_partitions, use old func as fallback
+  * walmgr: add option for init-slave to add password from file to .pgpass
+  * walmgr: add command synch-standby
+
+  = Fixes =
+
+  * CascadedWorker: merge-leaf-to-branch needs to publish wm info
+  * pgq_node.create_node: create branch nodes with disable_insert set
+  * pgq.insert_event: ignore disable_insert if in 'replica' role
+  * create_partition: public grant needs special casing
+  * skytools.dbstruct: better rule name redirection
+  * debian: add build-deps, force debian/control rebuild
+  * pgq_node.unregister_location: do automatic unsubscribe
+  * pgq_node.drop_node: drop queue data even if node_info is empty
+  * londiste add-table: --expect-sync does not require table existence on provider
+
+  = Cleanups =
+
+  * skytools_upgrade: show old version before upgrade
+  * CascadeAdmin: add node name prefix to messages from db
+  * handlers.dispatch: add comment about differences from part_func
+  * londiste.find_table_oid: drop old version - 9.1+ panics when argument name changes
+  * dbservice.get_record_list: do not use parse_pgarray on python lists
+
+  = Win32 =
+
+  * skytools.scripting: catch EINTR from sleep()
+  * signal_pidfile: support sig=0 on win32
+  * skytools.scripting: detect if signal name exists
+
+  = Schema version changes =
+
+  * pgq_node.version(): 3.0.0.18
+  * londiste.version(): 3.0.0.16
+
+2012-05-10 - SkyTools 3.0.2 - "Consumes 30% Less Brain Than Competing Products"
+
+  = Features =
+
+  * Draft implementation of fine-grained grants for SQL
+    functions.  See doc/sql-grants.txt for details.
+    New roles are created during upgrade, but new grants
+    need to be applied manually.
+
+  * pgq.create_queue: instead of pre-determined grants,
+    copy grants for event tables from pgq.event_template.
+
+  * simple_consumer.py script, for simple event processing:
+    it launches a SQL function for each event.
+
+  * londiste.handlers.part: make hash function configurable
+
+  * psycopgwrapper: allow name= argument to .cursor()
+
+  = Fixes =
+
+  * londiste: always use TRUNCATE CASCADE
+
+  * pgq.cascade.CascadedWorker: publish_local_wm() was missing a commit
+
+  * walmgr: fix backup from slave (Sergey Burladyan)
+
+  = Cleanups =
+
+  * pgq.insert_event(): outer wrapper does not need to be secdef
+
+  * source.cfg: minor improvements
+
+  * sql/pgq: use terse verbosity in regtests
+
 2012-04-18 - SkyTools 3.0.1 - "All The Snow You Can Eat"
 
   = Features =
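The EXECUTE meta-attributes introduced above are parsed by the new
londiste.exec_attrs module further down in this patch.  As a rough sketch
of the flow (Python 2, assuming an installed skytools tree; the table name
and the queue-name to local-name mapping are made up, not taken from the
patch):

    # Hypothetical usage of londiste.exec_attrs from this patch.
    from londiste.exec_attrs import ExecAttrs

    sql = "--*-- Local-Table: mytable\n" \
          "ALTER TABLE @mytable@ ADD COLUMN foo text;\n"

    a = ExecAttrs(sql = sql)
    # queue-side fq name -> local table name (illustrative)
    local_tables = {'public.mytable': 'public.mytable'}
    if a.need_execute(None, local_tables, {}):
        # @mytable@ is replaced with the quoted local table name
        print a.process_sql(sql, local_tables, {})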
diff --git a/configure.ac b/configure.ac
index c5f6da66..6a244cc7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,6 +1,6 @@
 dnl Process this file with autoconf to produce a configure script.
-AC_INIT(skytools, 3.0.1)
+AC_INIT(skytools, 3.1)
 AC_CONFIG_SRCDIR(python/londiste.py)
 AC_CONFIG_HEADER(lib/usual/config.h)
 AC_PREREQ([2.59])
diff --git a/debian/changelog b/debian/changelog
index 83808cf5..a7719045 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,21 @@
+skytools3 (3.1) experimental; urgency=low
+
+  * v3.1
+
+ -- Marko Kreen <[email protected]>  Thu, 19 Jul 2012 11:04:58 +0300
+
+skytools3 (3.0.3) experimental; urgency=low
+
+  * v3.0.3
+
+ -- Marko Kreen <[email protected]>  Wed, 30 May 2012 15:57:04 +0300
+
+skytools3 (3.0.2) experimental; urgency=low
+
+  * v3.0.2
+
+ -- Marko Kreen <[email protected]>  Thu, 10 May 2012 21:29:16 +0300
+
 skytools3 (3.0.1) experimental; urgency=low
 
   * v3.0.1
diff --git a/debian/control b/debian/control
index b64afcff..b5ffb67d 100644
--- a/debian/control
+++ b/debian/control
@@ -2,7 +2,9 @@ Source: skytools3
 Section: database
 Priority: extra
 Maintainer: Dimitri Fontaine <[email protected]>
-Build-Depends: debhelper (>= 7.0.50~), autotools-dev, autoconf (>= 2.65), automake, libtool, libevent-dev, python-all-dev, python-support, python-psycopg2, xmlto, asciidoc, libpq-dev, postgresql-server-dev-all
+Build-Depends: debhelper (>= 7.0.50~), autotools-dev, autoconf (>= 2.65), automake, libtool, libevent-dev,
+ python-all-dev, python-support, python-psycopg2, xmlto, asciidoc, libpq-dev, postgresql-server-dev-all,
+ postgresql-server-dev-8.4 | postgresql-server-dev-9.0 | postgresql-server-dev-9.1 | postgresql-server-dev-9.2
 Standards-Version: 3.9.1
 Homepage: http://wiki.postgresql.org/wiki/Skytools
 Vcs-Git: http://github.com/markokr/skytools.git
diff --git a/debian/control.in b/debian/control.in
index 4e7b2057..df265cbc 100644
--- a/debian/control.in
+++ b/debian/control.in
@@ -2,7 +2,9 @@ Source: skytools3
 Section: database
 Priority: extra
 Maintainer: Dimitri Fontaine <[email protected]>
-Build-Depends: debhelper (>= 7.0.50~), autotools-dev, autoconf (>= 2.65), automake, libtool, libevent-dev, python-all-dev, python-support, python-psycopg2, xmlto, asciidoc, libpq-dev, postgresql-server-dev-all
+Build-Depends: debhelper (>= 7.0.50~), autotools-dev, autoconf (>= 2.65), automake, libtool, libevent-dev,
+ python-all-dev, python-support, python-psycopg2, xmlto, asciidoc, libpq-dev, postgresql-server-dev-all,
+ postgresql-server-dev-8.4 | postgresql-server-dev-9.0 | postgresql-server-dev-9.1 | postgresql-server-dev-9.2
 Standards-Version: 3.9.1
 Homepage: http://wiki.postgresql.org/wiki/Skytools
 Vcs-Git: http://github.com/markokr/skytools.git
diff --git a/debian/genpg.sh b/debian/genpg.sh
new file mode 100755
index 00000000..35514926
--- /dev/null
+++ b/debian/genpg.sh
@@ -0,0 +1,52 @@
+#! /bin/sh
+
+# generate per-version files
+
+for v in 8.3 8.4 9.0 9.1 9.2; do
+
+echo "usr/share/doc/postgresql-$v" > "postgresql-$v-pgq3.dirs"
+
+cat > "postgresql-$v-pgq3.docs" <<EOF
+sql/pgq/README.pgq
+sql/pgq_ext/README.pgq_ext
+EOF
+
+cat > "postgresql-$v-pgq3.install" <<EOF
+usr/lib/postgresql/$v/lib/pgq_triggers.so
+usr/lib/postgresql/$v/lib/pgq_lowlevel.so
+usr/share/postgresql/$v/contrib/pgq.upgrade.sql
+usr/share/postgresql/$v/contrib/pgq_triggers.sql
+usr/share/postgresql/$v/contrib/pgq_lowlevel.sql
+usr/share/postgresql/$v/contrib/pgq_node.sql
+usr/share/postgresql/$v/contrib/pgq_coop.upgrade.sql
+usr/share/postgresql/$v/contrib/pgq_ext.sql
+usr/share/postgresql/$v/contrib/londiste.sql
+usr/share/postgresql/$v/contrib/pgq_node.upgrade.sql
+usr/share/postgresql/$v/contrib/pgq.sql
+usr/share/postgresql/$v/contrib/pgq_coop.sql
+usr/share/postgresql/$v/contrib/londiste.upgrade.sql
+usr/share/postgresql/$v/contrib/uninstall_pgq.sql
+usr/share/postgresql/$v/contrib/newgrants_londiste.sql
+usr/share/postgresql/$v/contrib/newgrants_pgq_coop.sql
+usr/share/postgresql/$v/contrib/newgrants_pgq_ext.sql
+usr/share/postgresql/$v/contrib/newgrants_pgq_node.sql
+usr/share/postgresql/$v/contrib/newgrants_pgq.sql
+usr/share/postgresql/$v/contrib/oldgrants_londiste.sql
+usr/share/postgresql/$v/contrib/oldgrants_pgq_coop.sql
+usr/share/postgresql/$v/contrib/oldgrants_pgq_ext.sql
+usr/share/postgresql/$v/contrib/oldgrants_pgq_node.sql
+usr/share/postgresql/$v/contrib/oldgrants_pgq.sql
+EOF
+
+if test "$v" = "9.1" -o "$v" = "9.2"; then
+  modver="3.1"
+  for mod in pgq pgq_node pgq_coop pgq_ext londiste; do
+    (
+      echo "usr/share/postgresql/$v/extension/${mod}.control"
+      echo "usr/share/postgresql/$v/extension/${mod}--${modver}.sql"
+      echo "usr/share/postgresql/$v/extension/${mod}--unpackaged--${modver}.sql"
+    ) >> "postgresql-$v-pgq3.install"
+  done
+fi
+
+done
diff --git a/debian/pg_buildext b/debian/pg_buildext
new file mode 100755
index 00000000..02f29c44
--- /dev/null
+++ b/debian/pg_buildext
@@ -0,0 +1,105 @@
+#!/bin/sh
+#
+# build a PostgreSQL module based on PGXS for a given list of supported
+# major versions
+#
+# Author: Dimitri Fontaine <[email protected]>
+
+set -e
+
+action="$1"
+srcdir="${2:-`pwd`}"
+target="$3"
+opt="$4"
+
+die() {
+    echo "`basename $0`: error: $*" >&2
+    exit 1
+}
+
+prepare_env() {
+    version=$1
+    vtarget=`echo $target | sed -e "s:%v:$version:g"`
+    pgc="/usr/lib/postgresql/$version/bin/pg_config"
+    [ -e "$pgc" ] || die "$pgc does not exist"
+}
+
+configure() {
+    prepare_env $1
+    confopts=`echo $opt | sed -e "s:%v:$version:g"`
+
+    mkdir -p $vtarget
+    ( echo "calling configure in $vtarget" &&
+      cd $vtarget && $srcdir/configure $confopts PG_CONFIG="$pgc" )
+}
+
+build() {
+    prepare_env $1
+    cflags="`$pgc --cflags` `echo $opt | sed -e "s:%v:$version:g"`"
+
+    mkdir -p $vtarget
+    # if a Makefile was created by configure, use it, else the top level Makefile
+    [ -f $vtarget/Makefile ] || makefile="-f $srcdir/Makefile"
+    make -C $vtarget $makefile CFLAGS="$cflags" PG_CONFIG="$pgc" VPATH="$srcdir"
+}
+
+install() {
+    prepare_env $1
+    package=`echo $opt | sed -e "s:%v:$version:g"`
+
+    mkdir -p $vtarget
+    # if a Makefile was created by configure, use it, else the top level Makefile
+    [ -f $vtarget/Makefile ] || makefile="-f $srcdir/Makefile"
+    make -C $vtarget $makefile install DESTDIR="$srcdir/debian/$package" PG_CONFIG="$pgc" VPATH="$srcdir"
+}
+
+clean() {
+    prepare_env $1
+
+    # if a Makefile was created by configure, use it, else the top level Makefile
+    [ -f $vtarget/Makefile ] || makefile="-f $srcdir/Makefile"
+    [ -d $vtarget ] && make -C $vtarget clean $makefile PG_CONFIG="$pgc" VPATH="$srcdir"
+    rm -rf $vtarget
+}
+
+versions() {
+    if true; then
+	# print all installed versions. assume deps take care of them.
+	for v in `cat $srcdir/debian/pgversions`; do
+	    test -x /usr/lib/postgresql/$v/bin/pg_config && echo $v || true
+	done
+    else
+	# print *only* officially supported versions ???
+	[ -e /usr/share/postgresql-common/supported-versions ] ||
+	    die "/usr/share/postgresql-common/supported-versions not found"
+	[ -e $srcdir/debian/pgversions ] || die "$srcdir/debian/pgversions not found"
+	for v in `/usr/share/postgresql-common/supported-versions`
+	do
+	    grep -q "^$v" $srcdir/debian/pgversions && echo $v
+	done
+    fi
+}
+
+[ "$srcdir" ] || die "syntax: pg_buildext <action> <srcdir> ..."
+[ -d $srcdir ] || die "no such directory '$srcdir'"
+
+VERSIONS=`versions`
+
+for v in $VERSIONS
+do
+    case "$action" in
+	"supported-versions")
+	    echo $v
+	    ;;
+
+	configure|build|install|clean)
+	    [ "$target" ] || die "syntax: pg_buildext $action <srcdir> <target> ..."
+	    # be verbose?
+	    $action $v
+	    ;;
+
+	*)
+	    die "unsupported $action."
+	    ;;
+    esac
+done
diff --git a/debian/postgresql-8.3-pgq3.install b/debian/postgresql-8.3-pgq3.install
index b5f8603f..5953b6a2 100644
--- a/debian/postgresql-8.3-pgq3.install
+++ b/debian/postgresql-8.3-pgq3.install
@@ -12,3 +12,13 @@ usr/share/postgresql/8.3/contrib/pgq.sql
 usr/share/postgresql/8.3/contrib/pgq_coop.sql
 usr/share/postgresql/8.3/contrib/londiste.upgrade.sql
 usr/share/postgresql/8.3/contrib/uninstall_pgq.sql
+usr/share/postgresql/8.3/contrib/newgrants_londiste.sql
+usr/share/postgresql/8.3/contrib/newgrants_pgq_coop.sql
+usr/share/postgresql/8.3/contrib/newgrants_pgq_ext.sql
+usr/share/postgresql/8.3/contrib/newgrants_pgq_node.sql
+usr/share/postgresql/8.3/contrib/newgrants_pgq.sql
+usr/share/postgresql/8.3/contrib/oldgrants_londiste.sql
+usr/share/postgresql/8.3/contrib/oldgrants_pgq_coop.sql
+usr/share/postgresql/8.3/contrib/oldgrants_pgq_ext.sql
+usr/share/postgresql/8.3/contrib/oldgrants_pgq_node.sql
+usr/share/postgresql/8.3/contrib/oldgrants_pgq.sql
diff --git a/debian/postgresql-8.4-pgq3.install b/debian/postgresql-8.4-pgq3.install
index d9aa6bd2..27120cc4 100644
--- a/debian/postgresql-8.4-pgq3.install
+++ b/debian/postgresql-8.4-pgq3.install
@@ -12,3 +12,13 @@ usr/share/postgresql/8.4/contrib/pgq.sql
 usr/share/postgresql/8.4/contrib/pgq_coop.sql
 usr/share/postgresql/8.4/contrib/londiste.upgrade.sql
 usr/share/postgresql/8.4/contrib/uninstall_pgq.sql
+usr/share/postgresql/8.4/contrib/newgrants_londiste.sql
+usr/share/postgresql/8.4/contrib/newgrants_pgq_coop.sql
+usr/share/postgresql/8.4/contrib/newgrants_pgq_ext.sql
+usr/share/postgresql/8.4/contrib/newgrants_pgq_node.sql
+usr/share/postgresql/8.4/contrib/newgrants_pgq.sql
+usr/share/postgresql/8.4/contrib/oldgrants_londiste.sql
+usr/share/postgresql/8.4/contrib/oldgrants_pgq_coop.sql
+usr/share/postgresql/8.4/contrib/oldgrants_pgq_ext.sql
+usr/share/postgresql/8.4/contrib/oldgrants_pgq_node.sql
+usr/share/postgresql/8.4/contrib/oldgrants_pgq.sql
diff --git a/debian/postgresql-9.0-pgq3.install b/debian/postgresql-9.0-pgq3.install
index 44754dd0..6f77a460 100644
--- a/debian/postgresql-9.0-pgq3.install
+++ b/debian/postgresql-9.0-pgq3.install
@@ -12,3 +12,13 @@ usr/share/postgresql/9.0/contrib/pgq.sql
 usr/share/postgresql/9.0/contrib/pgq_coop.sql
 usr/share/postgresql/9.0/contrib/londiste.upgrade.sql
 usr/share/postgresql/9.0/contrib/uninstall_pgq.sql
+usr/share/postgresql/9.0/contrib/newgrants_londiste.sql
+usr/share/postgresql/9.0/contrib/newgrants_pgq_coop.sql
+usr/share/postgresql/9.0/contrib/newgrants_pgq_ext.sql
+usr/share/postgresql/9.0/contrib/newgrants_pgq_node.sql
+usr/share/postgresql/9.0/contrib/newgrants_pgq.sql
+usr/share/postgresql/9.0/contrib/oldgrants_londiste.sql
+usr/share/postgresql/9.0/contrib/oldgrants_pgq_coop.sql
+usr/share/postgresql/9.0/contrib/oldgrants_pgq_ext.sql
+usr/share/postgresql/9.0/contrib/oldgrants_pgq_node.sql
+usr/share/postgresql/9.0/contrib/oldgrants_pgq.sql
diff --git a/debian/postgresql-9.1-pgq3.install b/debian/postgresql-9.1-pgq3.install
index e8115b5e..f9708aa5 100644
--- a/debian/postgresql-9.1-pgq3.install
+++ b/debian/postgresql-9.1-pgq3.install
@@ -12,3 +12,28 @@ usr/share/postgresql/9.1/contrib/pgq.sql
 usr/share/postgresql/9.1/contrib/pgq_coop.sql
 usr/share/postgresql/9.1/contrib/londiste.upgrade.sql
 usr/share/postgresql/9.1/contrib/uninstall_pgq.sql
+usr/share/postgresql/9.1/contrib/newgrants_londiste.sql
+usr/share/postgresql/9.1/contrib/newgrants_pgq_coop.sql
+usr/share/postgresql/9.1/contrib/newgrants_pgq_ext.sql
+usr/share/postgresql/9.1/contrib/newgrants_pgq_node.sql
+usr/share/postgresql/9.1/contrib/newgrants_pgq.sql
+usr/share/postgresql/9.1/contrib/oldgrants_londiste.sql
+usr/share/postgresql/9.1/contrib/oldgrants_pgq_coop.sql
+usr/share/postgresql/9.1/contrib/oldgrants_pgq_ext.sql
+usr/share/postgresql/9.1/contrib/oldgrants_pgq_node.sql
+usr/share/postgresql/9.1/contrib/oldgrants_pgq.sql
+usr/share/postgresql/9.1/extension/pgq.control
+usr/share/postgresql/9.1/extension/pgq--3.1.sql
+usr/share/postgresql/9.1/extension/pgq--unpackaged--3.1.sql
+usr/share/postgresql/9.1/extension/pgq_node.control
+usr/share/postgresql/9.1/extension/pgq_node--3.1.sql
+usr/share/postgresql/9.1/extension/pgq_node--unpackaged--3.1.sql
+usr/share/postgresql/9.1/extension/pgq_coop.control
+usr/share/postgresql/9.1/extension/pgq_coop--3.1.sql
+usr/share/postgresql/9.1/extension/pgq_coop--unpackaged--3.1.sql
+usr/share/postgresql/9.1/extension/pgq_ext.control
+usr/share/postgresql/9.1/extension/pgq_ext--3.1.sql
+usr/share/postgresql/9.1/extension/pgq_ext--unpackaged--3.1.sql
+usr/share/postgresql/9.1/extension/londiste.control
+usr/share/postgresql/9.1/extension/londiste--3.1.sql
+usr/share/postgresql/9.1/extension/londiste--unpackaged--3.1.sql
diff --git a/debian/postgresql-9.2-pgq3.install b/debian/postgresql-9.2-pgq3.install
index 3e6361fe..50bc2e38 100644
--- a/debian/postgresql-9.2-pgq3.install
+++ b/debian/postgresql-9.2-pgq3.install
@@ -12,3 +12,28 @@ usr/share/postgresql/9.2/contrib/pgq.sql
 usr/share/postgresql/9.2/contrib/pgq_coop.sql
 usr/share/postgresql/9.2/contrib/londiste.upgrade.sql
 usr/share/postgresql/9.2/contrib/uninstall_pgq.sql
+usr/share/postgresql/9.2/contrib/newgrants_londiste.sql
+usr/share/postgresql/9.2/contrib/newgrants_pgq_coop.sql
+usr/share/postgresql/9.2/contrib/newgrants_pgq_ext.sql
+usr/share/postgresql/9.2/contrib/newgrants_pgq_node.sql
+usr/share/postgresql/9.2/contrib/newgrants_pgq.sql
+usr/share/postgresql/9.2/contrib/oldgrants_londiste.sql
+usr/share/postgresql/9.2/contrib/oldgrants_pgq_coop.sql
+usr/share/postgresql/9.2/contrib/oldgrants_pgq_ext.sql
+usr/share/postgresql/9.2/contrib/oldgrants_pgq_node.sql
+usr/share/postgresql/9.2/contrib/oldgrants_pgq.sql
+usr/share/postgresql/9.2/extension/pgq.control
+usr/share/postgresql/9.2/extension/pgq--3.1.sql
+usr/share/postgresql/9.2/extension/pgq--unpackaged--3.1.sql
+usr/share/postgresql/9.2/extension/pgq_node.control
+usr/share/postgresql/9.2/extension/pgq_node--3.1.sql
+usr/share/postgresql/9.2/extension/pgq_node--unpackaged--3.1.sql
+usr/share/postgresql/9.2/extension/pgq_coop.control
+usr/share/postgresql/9.2/extension/pgq_coop--3.1.sql
+usr/share/postgresql/9.2/extension/pgq_coop--unpackaged--3.1.sql
+usr/share/postgresql/9.2/extension/pgq_ext.control
+usr/share/postgresql/9.2/extension/pgq_ext--3.1.sql
+usr/share/postgresql/9.2/extension/pgq_ext--unpackaged--3.1.sql
+usr/share/postgresql/9.2/extension/londiste.control
+usr/share/postgresql/9.2/extension/londiste--3.1.sql
+usr/share/postgresql/9.2/extension/londiste--unpackaged--3.1.sql
diff --git a/debian/rules b/debian/rules
index 6596b382..42f6c149 100755
--- a/debian/rules
+++ b/debian/rules
@@ -1,20 +1,11 @@
 #!/usr/bin/make -f
 # -*- makefile -*-
-# Sample debian/rules that uses debhelper.
-#
-# This file was originally written by Joey Hess and Craig Small.
-# As a special exception, when this file is copied by dh-make into a
-# dh-make output file, you may use that output file without restriction.
-# This special exception was added by Craig Small in version 0.37 of dh-make.
-#
-# Modified to make a template file for a multi-binary package with separated
-# build-arch and build-indep targets by Bill Allombert 2001
 
 # Uncomment this to turn on verbose mode.
 #export DH_VERBOSE=1
 
 # This has to be exported to make some magic below work.
-export DH_OPTIONS
+#export DH_OPTIONS
 
 # The build system is averse to VPATH building, so we can't properly use
 # postgresql-server-dev-all
@@ -22,30 +13,43 @@ SRCDIR = $(CURDIR)
 PKGVERS = $(shell dpkg-parsechangelog | awk -F '[:-]' '/^Version:/ { print substr($$2, 2) }')
 ORIG_EXCLUDE=--exclude-vcs --exclude=debian
 
-include /usr/share/postgresql-common/pgxs_debian_control.mk
+PG_BUILDEXT = ./debian/pg_buildext
 
-config.mak:
+## include /usr/share/postgresql-common/pgxs_debian_control.mk
+# produce a debian/control file from a debian/control.in
+debian/control-pgstamp: debian/control.in debian/pgversions
+	( set -e; \
+	  VERSIONS=`$(PG_BUILDEXT) supported-versions $(CURDIR)`; \
+	  grep-dctrl -vP PGVERSION debian/control.in > debian/control.tmp; \
+	  for v in $$VERSIONS; do \
+	    grep -q "^$$v" debian/pgversions \
+	      && grep-dctrl -P PGVERSION debian/control.in \
+	         | sed -e "s:PGVERSION:$$v:" >> debian/control.tmp; \
+	  done; \
+	  mv debian/control.tmp debian/control ) || (rm -f debian/control.tmp; exit 1)
+	touch $@
+
+config.mak: debian/control-pgstamp
 	#./autogen.sh
 	./configure --prefix=/usr --with-asciidoc --with-sk3-subdir
 
 override_dh_auto_configure: config.mak
 
-override_dh_auto_clean: config.mak debian/control
+override_dh_auto_clean: config.mak
 	$(MAKE) -C doc realclean
 	dh_auto_clean -- distclean
-	for version in $$(pg_buildext supported-versions .) ; do \
-		rm -rf debian/postgresql-$${version}-pgq3; \
+	for version in `cat $(CURDIR)/debian/pgversions`; do \
+		rm -rf "debian/postgresql-$${version}-pgq3"; \
 	done
+	rm -f debian/control-pgstamp
 
-# upstream build system will build fine for one given PostgreSQL version,
-# then we build the PostgreSQL module and files for yet another version of
-# it so that we have binary packages for postgresql-8.4-pgq3 and 9.0.
-override_dh_auto_install:
+# build sql modules for several postgres versions
+override_dh_auto_install: config.mak
 	mkdir -p $(CURDIR)/debian/tmp
 	dh_auto_install
 	$(MAKE) -C doc htmlinstall DESTDIR=$(CURDIR)/debian/tmp
 
 	# now care about any previous supported versions
-	for version in $$(pg_buildext supported-versions .) ; do \
+	for version in $$($(PG_BUILDEXT) supported-versions $(CURDIR)); do \
 		echo "### Building for PostgreSQL $$version" && \
 		make -C sql clean install \
 			PG_CONFIG=/usr/lib/postgresql/$$version/bin/pg_config \
diff --git a/debian/skytools3.docs b/debian/skytools3.docs
index b5dc5622..b68b9842 100644
--- a/debian/skytools3.docs
+++ b/debian/skytools3.docs
@@ -5,3 +5,5 @@ debian/tmp/usr/share/doc/skytools3/skytools3.html
 debian/tmp/usr/share/doc/skytools3/queue_splitter.html
 debian/tmp/usr/share/doc/skytools3/queue_mover.html
 debian/tmp/usr/share/doc/skytools3/londiste3.html
+debian/tmp/usr/share/doc/skytools3/simple_consumer3.html
+debian/tmp/usr/share/doc/skytools3/simple_local_consumer3.html
diff --git a/debian/skytools3.install b/debian/skytools3.install
index 6dd0067e..7c6145d5 100644
--- a/debian/skytools3.install
+++ b/debian/skytools3.install
@@ -3,6 +3,8 @@ usr/bin/londiste3
 usr/bin/scriptmgr3
 usr/bin/queue_mover3
 usr/bin/queue_splitter3
+usr/bin/simple_consumer3
+usr/bin/simple_local_consumer3
 debian/skytools.ini /etc
 usr/lib/python*/site-packages/skytools-3.0/londiste
 usr/share/skytools3
diff --git a/debian/skytools3.manpages b/debian/skytools3.manpages
index a92296f5..5e3caae3 100644
--- a/debian/skytools3.manpages
+++ b/debian/skytools3.manpages
@@ -3,3 +3,5 @@ debian/tmp/usr/share/man/man1/qadmin.1
 debian/tmp/usr/share/man/man1/londiste3.1
 debian/tmp/usr/share/man/man1/queue_mover3.1
 debian/tmp/usr/share/man/man1/queue_splitter3.1
+debian/tmp/usr/share/man/man1/simple_consumer3.1
+debian/tmp/usr/share/man/man1/simple_local_consumer3.1
diff --git a/doc/Makefile b/doc/Makefile
index d62a7355..f1a972af 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -12,7 +12,9 @@ DOCHTML = \
 	TODO.html pgq-sql.html pgq-nodupes.html \
 	faq.html set.notes.html skytools3.html devnotes.html pgqd.html \
 	londiste3.html walmgr3.html qadmin.html scriptmgr.html \
+	sql-grants.html \
 	skytools_upgrade.html queue_mover.html queue_splitter.html \
+	simple_consumer3.html simple_local_consumer3.html \
 	howto/londiste3_cascaded_rep_howto.html \
 	howto/londiste3_merge_howto.html \
 	howto/londiste3_partitioning_howto.html \
@@ -22,7 +24,7 @@ DOCHTML = \
 
 MAN5 =
 MAN1_SFX = scriptmgr.1 skytools_upgrade.1 queue_mover.1 queue_splitter.1
-MAN1 = qadmin.1 pgqd.1 walmgr3.1 londiste3.1
+MAN1 = qadmin.1 pgqd.1 walmgr3.1 londiste3.1 simple_consumer3.1 simple_local_consumer3.1
 
 FQHTML = $(addprefix html/doc/, $(DOCHTML)) $(addprefix html/, $(TOPHTML))
 FQMAN1 = $(addprefix man/, $(MAN1))
diff --git a/doc/common.config.txt b/doc/common.config.txt
index 7a74623f..1575d8cb 100644
--- a/doc/common.config.txt
+++ b/doc/common.config.txt
@@ -26,11 +26,11 @@ ifdef::pgq[]
 
 === Common PgQ consumer parameters ===
 
-pgq_queue_name::
+queue_name::
   Queue name to attach to.
   No default.
 
-pgq_consumer_id::
+consumer_name::
   Consumers ID to use when registering.
   Default: %(job_name)s
diff --git a/doc/common.switches.txt b/doc/common.switches.txt
index 72da7bc3..fe5360da 100644
--- a/doc/common.switches.txt
+++ b/doc/common.switches.txt
@@ -14,6 +14,9 @@ Python programs.
 -d, --daemon::
   make program go background
 
+--ini::
+  show commented template config file.
+
 Following switches are used to control already running process.
 The pidfile is read from config then signal is sent to process
 id specified there.
diff --git a/doc/londiste3.txt b/doc/londiste3.txt
index 16865920..89351651 100644
--- a/doc/londiste3.txt
+++ b/doc/londiste3.txt
@@ -240,6 +240,9 @@ Do full copy of the table, again.
 --all::
   Include all possible tables.
 
+--wait-sync::
+  Wait until newly added tables are fully synced.
+
 --dest-table='table'::
   Redirect changes to different table.
 
@@ -273,6 +276,11 @@ Do full copy of the table, again.
 --handler-arg='handler_arg'::
   Argument to custom handler.
 
+--copy-node='NODE_NAME'::
+  Do initial copy from that node instead of from the provider.
+  Useful if the provider does not contain the table data locally,
+  or is simply under load.
+
 --copy-condition='copy_condition'::
   Set WHERE expression for copy.
 
@@ -319,12 +327,86 @@ Repair data on subscriber.
 
 === execute [filepath] ===
 
-Execute SQL files on each nodes of the set.
+Execute SQL files on each node of the cascaded queue.  The SQL file is
+executed locally in a single transaction and inserted into the queue in
+the same transaction, guaranteeing that it will be replayed in subscriber
+databases at the correct position.
+
+The filename is stored in the `londiste.applied_execute` table and checked
+before execution.  If the same filename already exists, the SQL execution
+is skipped.
+
+==== SQL meta-data attributes ====
+
+The SQL file can contain attributes that limit where the SQL is executed:
+
+  --*--
+  --*-- Local-Table: mytable, othertable,
+  --*--     thirdtable
+  --*-- Local-Sequence: thisseq
+  --*--
+
+The magic comments are searched for only at the start of the file, before
+any actual SQL statement is seen.  Empty lines and lines with regular SQL
+comments are ignored.
+
+Supported keys:
+
+  Local-Table::
+    Table must be added to local node with `add-table`.
+
+  Local-Sequence::
+    Sequence must be added to local node with `add-seq`.
+
+  Local-Destination::
+    Table must be added to local node and the actual destination table
+    must exist.  This is for cases where a table is added to some nodes
+    with a handler that does not need the actual table to exist.
+
+  Need-Table::
+    Physical table must exist in the database.  It does not matter
+    whether it is replicated or not.
+
+  Need-Sequence::
+    Sequence must exist in the database.
+
+  Need-Function::
+    Database function must exist.  The function name is in the form
+    `function_name(nargs)`.  If the `(nargs)` portion is omitted,
+    nargs is taken as 0.
+
+  Need-View::
+    A view must exist in the database.
+
+  Need-Schema::
+    Schema must exist in the database.
+
+Londiste supports table renaming, where a table is attached to the queue
+with one name but events are applied to a local table with a different
+name.  To make this work with EXECUTE, Local-Table and Local-Destination
+support tag replacement, where the queue's table name mentioned in the
+attribute is replaced with the actual table name in the local database:
+
+  --*-- Local-Table: mytable
+  ALTER TABLE @mytable@ ...;
+
 
 === show-handlers ['handler'] ===
 
 Show info about all or a specific handler.
 
+=== wait-sync ===
+
+Wait until all added tables are copied over.
+
+=== wait-provider ===
+
+Wait until the local node passes the latest queue position on the provider.
+
+=== wait-root ===
+
+Wait until the local node passes the latest queue position on the root.
+
 
 == INTERNAL COMMAND ==
 
 === copy ===
diff --git a/doc/qadmin.txt b/doc/qadmin.txt
index e406cad2..f66e97d9 100644
--- a/doc/qadmin.txt
+++ b/doc/qadmin.txt
@@ -13,12 +13,12 @@ qadmin - Easy to use admin console to examine and administer PgQ queues.
 
 This is a psql-like console for queue administration.
 
-The console offers a large number of command to setup, control and
+The console offers a large number of commands to setup, control and
 manage PgQ queueing system. It also offers a non-interactive mode
 to run one or more commands.
 
-qadmin keeps an history file in the home of the user (`~/.qadmin_history`).
+qadmin keeps its history file in the home of the user (`~/.qadmin_history`).
 
 == GENERAL OPTIONS ==
 
@@ -31,7 +31,7 @@ qadmin keeps an history file in the home of the user (`~/.qadmin_history`).
 == CONNECTION OPTIONS ==
 
 -h host::
-  Sspecify host to connect to (default: localhost via unix socket).
+  Specify host to connect to (default: localhost via unix socket).
 
 -p port::
   Specify port.
@@ -79,13 +79,13 @@ Create the specified queue.
 
 === alter queue <qname | *> set param =<foo=1>,<bar=2>; ===
 
-Set one or more parameters on one or all queue at once.
+Set one or more parameters on one or all queues at once.
 
 === drop queue <qname>; ===
 
 Drop the named queue.
 
-=== register consumer <consumer> [on <qname> | at <tick_id> | copy <consumer> ]; ===
+=== register consumer <consumer> [on <qname> | at <tick_id> | copy <consumer>]; ===
 
 Register a consumer on a queue, or at a specified tick or based on
 another consumer.
@@ -129,6 +129,10 @@ Show DDL for the specified sequence.
 
 Show details of one or all consumers on one or all queues.
 
+=== show node [ <node | *> [on <qname>] ]; ===
+
+Show details of one or all nodes on one or all queues.
+
 === show batch <batch_id>; ===
 
 Show details of the batch, default queue must be set (see `connect queue`)
@@ -140,7 +144,7 @@ queue must be set (see `connect queue`)
 
 == LONDISTE COMMANDS ==
 
-All this commands are applyed on the node where the console is connected
+All these commands are applied on the node where the console is connected
 to.
 
 === londiste add table <tbl> [with ... ] ===
@@ -164,7 +168,7 @@ to.
   Create skip trigger. Same as S flag.
 
 with when='expr'::
-  If 'expr' returns false, don't insert event.
+  If 'expr' returns false, do not insert event.
 
 with ev_XX='EXPR'::
   Overwrite default ev_* columns (see below).
@@ -187,7 +191,6 @@ Queue event fields:
 - ev_extra1 - table name
 - ev_extra2 - optional urlencoded backup
 
-
 === londiste add sequence <seq>; ===
 
 Add the specified sequence to Londiste replication.
@@ -224,7 +227,7 @@ Catching-up: if dropped_ddl is not NULL, restore them.
 
 ==== copy_role = wait-copy ====
 
-On copy start wait, until role changes (to wait-replay).
+On copy start, wait until role changes (to wait-replay).
 
 ==== copy_role = wait-replay ====
 
@@ -279,7 +282,6 @@ TODO : is it up-to-date ?
 
 - drop node <name> [on <qname>];
 - takeover <oldnode>;
-- show node [ <node | *> [on <qname>] ];
 - show cascade;
 - show_queue_stats <q>;
 - status
diff --git a/doc/scriptmgr.txt b/doc/scriptmgr.txt
index ac73ed0f..370f820c 100644
--- a/doc/scriptmgr.txt
+++ b/doc/scriptmgr.txt
@@ -6,14 +6,14 @@ scriptmgr - utility for controlling other skytools scripts.
 
 == SYNOPSIS ==
 
-  scriptmgr.py [switches] config.ini <command> [-a | job_name ... ]
+  scriptmgr.py [switches] config.ini <command> [-a | -t service | job_name...]
 
 == DESCRIPTION ==
 
 scriptmgr is used to manage several scripts together. It discovers
-potential jobs based on config file glob expression. From config
-file it gets both job_name and service type (that is the main section
-name eg [cube_dispatcher]). For each service type there is subsection
+potential jobs based on config file glob expression. From config file
+it gets both job_name and service type (that is the main section name,
+e.g. [cube_dispatcher]). For each service type there is subsection
 in the config how to handle it. Unknown services are ignored.
 
 == COMMANDS ==
@@ -27,27 +27,31 @@ Show status for all known jobs.
 === start ===
 
   scriptmgr config.ini start -a
+  scriptmgr config.ini start -t service
   scriptmgr config.ini start job_name1 job_name2 ...
 
-launch script(s) that are not running.
+Launch script(s) that are not running.
 
 === stop ===
 
   scriptmgr config.ini stop -a
+  scriptmgr config.ini stop -t service
   scriptmgr config.ini stop job_name1 job_name2 ...
 
-stop script(s) that are running.
+Stop script(s) that are running.
 
 === restart ===
 
   scriptmgr config.ini restart -a
+  scriptmgr config.ini restart -t service
  scriptmgr config.ini restart job_name1 job_name2 ...
 
-restart scripts.
+Restart scripts.
 
 === reload ===
 
   scriptmgr config.ini reload -a
+  scriptmgr config.ini reload -t service
   scriptmgr config.ini reload job_name1 job_name2 ...
 
 Send SIGHUP to scripts that are running.
@@ -59,8 +63,8 @@ include::common.config.txt[]
 === scriptmgr parameters ===
 
 config_list::
-  List of glob patters for finding config files. Example:
-
+  List of glob patterns for finding config files. Example:
+
     config_list = ~/dbscripts/conf/*.ini, ~/random/conf/*.ini
 
 === Service section parameters ===
@@ -111,3 +115,8 @@ Options specific to scriptmgr:
 
 -a, --all::
   Operate on all non-disabled scripts.
+
+-t 'service', --type='service'::
+  Operate on all non-disabled scripts of this service type.
+
+-w, --wait::
+  Wait for script(s) after signalling.
diff --git a/doc/simple_consumer3.txt b/doc/simple_consumer3.txt
new file mode 100644
index 00000000..c746f5a1
--- /dev/null
+++ b/doc/simple_consumer3.txt
@@ -0,0 +1,28 @@
+
+= simple_consumer3(1) =
+
+== NAME ==
+
+simple_consumer3 - PgQ consumer that executes a query for each event
+
+== SYNOPSIS ==
+
+  simple_consumer3.py [switches] config.ini
+
+== DESCRIPTION ==
+
+For each event in a batch it will execute a query, filling event
+values into it.
+
+Transactionality: the query is executed in autocommit mode and no
+batch tracking is done.  That means that on failure the whole batch
+is fetched and all events are processed again.
+
+== CONFIG ==
+
+Run `simple_consumer3 --ini` to see a commented config template.
+
+== COMMAND LINE SWITCHES ==
+
+include::common.switches.txt[]
+
diff --git a/doc/simple_local_consumer3.txt b/doc/simple_local_consumer3.txt
new file mode 100644
index 00000000..04c4db5d
--- /dev/null
+++ b/doc/simple_local_consumer3.txt
@@ -0,0 +1,28 @@
+
+= simple_local_consumer3(1) =
+
+== NAME ==
+
+simple_local_consumer3 - PgQ consumer that executes a query for each row
+
+== SYNOPSIS ==
+
+  simple_local_consumer3.py [switches] config.ini
+
+== DESCRIPTION ==
+
+For each event in a batch it will execute a query, filling event
+values into it.
+
+Transactionality: the query is executed in autocommit mode and the
+completed batch is tracked in a local file.  The consumer can be
+switched between nodes in a cascaded queue.
+
+== CONFIG ==
+
+Run `simple_local_consumer3 --ini` to see a commented config template.
+
+== COMMAND LINE SWITCHES ==
+
+include::common.switches.txt[]
+
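For orientation, the per-event loop these two scripts document boils down
to something like the following sketch.  This is not the scripts' actual
source: pgq.Consumer is the base class from this tree, but the job name,
database names and the destination query are illustrative.

    # Rough sketch of a query-per-event PgQ consumer (Python 2).
    import sys
    import pgq

    class QueryConsumer(pgq.Consumer):
        def process_batch(self, src_db, batch_id, ev_list):
            # autocommit connection: each statement commits on its own,
            # so a failed batch is simply fetched and replayed again
            dst_db = self.get_database('dst_db', autocommit = 1)
            curs = dst_db.cursor()
            for ev in ev_list:
                # hypothetical destination function
                curs.execute("select log_event(%s, %s)", [ev.type, ev.data])

    if __name__ == '__main__':
        script = QueryConsumer('simple_consumer3', 'src_db', sys.argv[1:])
        script.start()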
diff --git a/doc/skytools_upgrade.txt b/doc/skytools_upgrade.txt
index 3189cb78..91dda01e 100644
--- a/doc/skytools_upgrade.txt
+++ b/doc/skytools_upgrade.txt
@@ -6,7 +6,7 @@ skytools_upgrade - utility for upgrading Skytools code in databases.
 
 == SYNOPSIS ==
 
-  skytools_upgrade.py connstr [connstr ..]
+  skytools_upgrade.py [switches] connstr [connstr ...]
 
 == DESCRIPTION ==
 
@@ -20,14 +20,21 @@ It connects to given database, then looks for following schemas:
 
   Londiste replication.
 
 If schema exists, its version is detected by querying .version()
-function under schema. If the function does not exists, there
-is some heiristics built in to differentiate between 2.1.4 and
-2.1.5 version of ther schemas.
+function under schema. If the function does not exist, there
+are some heuristics built in to differentiate between the 2.1.4
+and 2.1.5 versions of the schemas.
 
-If detected that version is older that current, it is upgraded
+If the detected version is older than current, it is upgraded
 by applying upgrade scripts in order.
 
 == COMMAND LINE SWITCHES ==
 
 include::common.switches.txt[]
 
+Options specific to skytools_upgrade:
+
+--all::
+  Upgrade all databases.
+
+--not-really::
+  Don't actually do anything.
diff --git a/doc/sql-grants.txt b/doc/sql-grants.txt
new file mode 100644
index 00000000..f388910e
--- /dev/null
+++ b/doc/sql-grants.txt
@@ -0,0 +1,40 @@
+
+= SQL permissions (draft) =
+
+== Setup ==
+
+Currently the following no-login roles are created during upgrade:
+`pgq_reader`, `pgq_writer`, `pgq_admin`, `londiste_reader`, `londiste_writer`.
+
+Actual grants are not applied to functions; instead the default
+`public:execute` grants are kept.  New grants can be applied
+manually:
+
+newgrants_<schema>.sql::
+  applies new rights, drops old public access
+
+oldgrants_<schema>.sql::
+  restores old rights - public execute privilege on all functions
+
+== New roles ==
+
+pgq_reader::
+  Can consume queues (source-side).
+
+pgq_writer::
+  Can write into queues (source-side / dest-side).
+  Can use `pgq_node`/`pgq_ext` schema as regular
+  consumer (dest-side).
+
+pgq_admin::
+  Admin operations on queues, required for CascadedWorker on dest-side.
+  Member of `pgq_reader` and `pgq_writer`.
+
+londiste_reader::
+  Member of `pgq_reader`, needs additional read access to tables
+  (source-side).
+
+londiste_writer::
+  Member of `pgq_admin`, needs additional write access to tables
+  (dest-side).
+
diff --git a/doc/walmgr3.txt b/doc/walmgr3.txt
index 5961be07..257ff2db 100644
--- a/doc/walmgr3.txt
+++ b/doc/walmgr3.txt
@@ -79,6 +79,15 @@ listed below.
   Provide the connection string to the streaming replication Master
   (used in Slave).
 
+--add-password='plaintext_file'::
+  Add password for streaming replication from a plain text file to .pgpass.
+  Additional fields for the password file entry will be extracted from
+  primary-conninfo (used in Slave).
+
+--remove-password::
+  Remove the .pgpass entry that was used for streaming replication
+  (used in Slave).
+
 == DAEMON OPTIONS ==
 
 -r, --reload::
@@ -123,6 +132,11 @@ Runs periodic command if configured. This enables to execute arbitrary
 commands on interval, useful for synchronizing scripts, config files,
 crontabs etc.
 
+=== synch-standby ===
+
+Enables/disables synchronous streaming replication for the given
+application name(s).  Does an additional check before enabling
+synchronous mode.
+
 == SLAVE COMMANDS ==
 
 === boot ===
diff --git a/python/londiste.py b/python/londiste.py
index b7bc99e0..5d46bbdc 100755
--- a/python/londiste.py
+++ b/python/londiste.py
@@ -30,6 +30,7 @@ Replication Administration:
   seqs                     show all sequences on provider
   missing                  list tables subscriber has not yet attached to
   resync TBL ...           do full copy again
+  wait-sync                wait until all tables are in sync
 
 Replication Extra:
   check                    compare table structure on both sides
@@ -48,7 +49,8 @@ cmd_handlers = (
      'change-provider', 'rename-node', 'status', 'pause', 'resume', 'node-info',
      'drop-node', 'takeover'), londiste.LondisteSetup),
     (('add-table', 'remove-table', 'add-seq', 'remove-seq', 'tables', 'seqs',
-      'missing', 'resync', 'check', 'fkeys', 'execute'), londiste.LondisteSetup),
+      'missing', 'resync', 'wait-sync', 'wait-root', 'wait-provider',
+      'check', 'fkeys', 'execute'), londiste.LondisteSetup),
     (('show-handlers',), londiste.LondisteSetup),
     (('worker',), londiste.Replicator),
    (('compare',), londiste.Comparator),
@@ -110,6 +112,8 @@ class Londiste(skytools.DBScript):
 
         g = optparse.OptionGroup(p, "options for add")
         g.add_option("--all", action="store_true",
                 help = "add: include add possible tables")
+        g.add_option("--wait-sync", action="store_true",
+                help = "add: wait until all tables are in sync"),
         g.add_option("--dest-table",
                 help = "add: redirect changes to different table")
         g.add_option("--expect-sync", action="store_true", dest="expect_sync",
@@ -130,6 +134,10 @@ class Londiste(skytools.DBScript):
                 help="add: Custom handler for table")
         g.add_option("--handler-arg", action="append",
                 help="add: Argument to custom handler")
+        g.add_option("--find-copy-node", dest="find_copy_node", action="store_true",
+                help = "add: walk upstream to find node to copy from")
+        g.add_option("--copy-node", dest="copy_node",
+                help = "add: use NODE as source for initial COPY")
         g.add_option("--copy-condition", dest="copy_condition",
                 help = "add: set WHERE expression for copy")
         g.add_option("--merge-all", action="store_true",
diff --git a/python/londiste/exec_attrs.py b/python/londiste/exec_attrs.py
new file mode 100644
index 00000000..ce0f8221
--- /dev/null
+++ b/python/londiste/exec_attrs.py
@@ -0,0 +1,367 @@
+"""Custom parser for EXECUTE attributes.
+
+The values are parsed from the SQL file given to EXECUTE.
+
+Format rules:
+ * Only lines starting with the meta-comment prefix will be parsed: --*--
+ * Empty or regular SQL comment lines are ignored.
+ * Parsing stops on the first SQL statement.
+ * Meta-line format: "--*-- Key: value1, value2"
+ * If a line ends with ',' the next line is taken as continuation.
+
+Supported keys:
+ * Local-Table:
+ * Local-Sequence:
+ * Local-Destination:
+
+ * Need-Table
+ * Need-Sequence
+ * Need-Function
+ * Need-Schema
+ * Need-View
+
+Sample file::
+  --*-- Local-Sequence: myseq
+  --*--
+  --*-- Local-Table: table1,
+  --*--     table2, table3
+  --*--
+
+Tests:
+
+>>> a = ExecAttrs()
+>>> a.add_value("Local-Table", "mytable")
+>>> a.add_value("Local-Sequence", "seq1")
+>>> a.add_value("Local-Sequence", "seq2")
+>>> a.to_urlenc()
+'local-table=mytable&local-sequence=seq1%2Cseq2'
+>>> a.add_value("Local-Destination", "mytable-longname-more1")
+>>> a.add_value("Local-Destination", "mytable-longname-more2")
+>>> a.add_value("Local-Destination", "mytable-longname-more3")
+>>> a.add_value("Local-Destination", "mytable-longname-more4")
+>>> a.add_value("Local-Destination", "mytable-longname-more5")
+>>> a.add_value("Local-Destination", "mytable-longname-more6")
+>>> a.add_value("Local-Destination", "mytable-longname-more7")
+>>> print a.to_sql()
+--*-- Local-Table: mytable
+--*-- Local-Sequence: seq1, seq2
+--*-- Local-Destination: mytable-longname-more1, mytable-longname-more2,
+--*--     mytable-longname-more3, mytable-longname-more4, mytable-longname-more5,
+--*--     mytable-longname-more6, mytable-longname-more7
+>>> a = ExecAttrs(sql = '''
+...
+... --
+...
+... --*-- Local-Table: foo ,
+... --
+... --*--   bar ,
+... --*--
+... --*--   zoo
+... --*--
+... --*-- Local-Sequence: goo
+... --*--
+... --
+...
+... create fooza;
+... ''')
+>>> print a.to_sql()
+--*-- Local-Table: foo, bar, zoo
+--*-- Local-Sequence: goo
+>>> seqs = {'public.goo': 'public.goo'}
+>>> tables = {}
+>>> tables['public.foo'] = 'public.foo'
+>>> tables['public.bar'] = 'other.Bar'
+>>> tables['public.zoo'] = 'Other.Foo'
+>>> a.need_execute(None, tables, seqs)
+True
+>>> a.need_execute(None, [], [])
+False
+>>> sql = '''alter table @foo@;
+... alter table @bar@;
+... alter table @zoo@;'''
+>>> print a.process_sql(sql, tables, seqs)
+alter table public.foo;
+alter table other."Bar";
+alter table "Other"."Foo";
+"""
+
+import skytools
+
+META_PREFIX = "--*--"
+
+class Matcher:
+    nice_name = None
+    def match(self, objname, curs, tables, seqs):
+        pass
+    def get_key(self):
+        return self.nice_name.lower()
+    def local_rename(self):
+        return False
+
+class LocalTable(Matcher):
+    nice_name = "Local-Table"
+    def match(self, objname, curs, tables, seqs):
+        return objname in tables
+    def local_rename(self):
+        return True
+
+class LocalSequence(Matcher):
+    nice_name = "Local-Sequence"
+    def match(self, objname, curs, tables, seqs):
+        return objname in seqs
+    def local_rename(self):
+        return True
+
+class LocalDestination(Matcher):
+    nice_name = "Local-Destination"
+    def match(self, objname, curs, tables, seqs):
+        if objname not in tables:
+            return False
+        dest_name = tables[objname]
+        return skytools.exists_table(curs, dest_name)
+    def local_rename(self):
+        return True
+
+class NeedTable(Matcher):
+    nice_name = "Need-Table"
+    def match(self, objname, curs, tables, seqs):
+        return skytools.exists_table(curs, objname)
+
+class NeedSequence(Matcher):
+    nice_name = "Need-Sequence"
+    def match(self, objname, curs, tables, seqs):
+        return skytools.exists_sequence(curs, objname)
+
+class NeedSchema(Matcher):
+    nice_name = "Need-Schema"
+    def match(self, objname, curs, tables, seqs):
+        return skytools.exists_schema(curs, objname)
+
+class NeedFunction(Matcher):
+    nice_name = "Need-Function"
+    def match(self, objname, curs, tables, seqs):
+        nargs = 0
+        pos1 = objname.find('(')
+        if pos1 > 0:
+            pos2 = objname.find(')')
+            if pos2 > 0:
+                s = objname[pos1+1 : pos2]
+                objname = objname[:pos1]
+                nargs = int(s)
+        return skytools.exists_function(curs, objname, nargs)
+
+class NeedView(Matcher):
+    nice_name = "Need-View"
+    def match(self, objname, curs, tables, seqs):
+        return skytools.exists_view(curs, objname)
+
+META_SPLITLINE = 70
+
+# list of matchers, in the order they need to be probed
+META_MATCHERS = [
+    LocalTable(), LocalSequence(), LocalDestination(),
+    NeedTable(), NeedSequence(), NeedFunction(),
+    NeedSchema(), NeedView()
+]
+
+# key to nice key
+META_KEYS = {}
+for m in META_MATCHERS:
+    k = m.nice_name.lower()
+    META_KEYS[k] = m
+
+class ExecAttrsException(skytools.UsageError):
+    """Some parsing problem."""
+
+class ExecAttrs:
+    """Container and parser for EXECUTE attributes."""
+
+    def __init__(self, sql=None, urlenc=None):
+        """Create container and parse either sql or urlenc string."""
+
+        self.attrs = {}
+        if sql and urlenc:
+            raise Exception("Both sql and urlenc set.")
+        if urlenc:
+            self.parse_urlenc(urlenc)
+        elif sql:
+            self.parse_sql(sql)
+
+    def add_value(self, k, v):
+        """Add a single value to a key."""
+
+        xk = k.lower().strip()
+        if xk not in META_KEYS:
+            raise ExecAttrsException("Invalid key: %s" % k)
+        if xk not in self.attrs:
+            self.attrs[xk] = []
+
+        xv = v.strip()
+        self.attrs[xk].append(xv)
+
+    def to_urlenc(self):
+        """Convert container to urlencoded string."""
+        sdict = {}
+        for k, v in self.attrs.items():
+            sdict[k] = ','.join(v)
+        return skytools.db_urlencode(sdict)
+
+    def parse_urlenc(self, ustr):
+        """Parse urlencoded string, adding values to current container."""
+        sdict = skytools.db_urldecode(ustr)
+        for k, v in sdict.items():
+            for v1 in v.split(','):
+                self.add_value(k, v1)
+
+    def to_sql(self):
+        """Convert container to SQL meta-comments."""
+        lines = []
+        for m in META_MATCHERS:
+            k = m.get_key()
+            if k not in self.attrs:
+                continue
+            vlist = self.attrs[k]
+            ln = "%s %s: " % (META_PREFIX, m.nice_name)
+            start = 0
+            for nr, v in enumerate(vlist):
+                if nr > start:
+                    ln = ln + ", " + v
+                else:
+                    ln = ln + v
+                if len(ln) >= META_SPLITLINE and nr < len(vlist) - 1:
+                    ln += ','
+                    lines.append(ln)
+                    ln = META_PREFIX + "     "
+                    start = nr + 1
+            lines.append(ln)
+        return '\n'.join(lines)
+
+    def parse_sql(self, sql):
+        """Parse SQL meta-comments."""
+
+        cur_key = None
+        cur_continued = False
+        lineno = 1
+        for nr, ln in enumerate(sql.splitlines()):
+            lineno = nr+1
+
+            # skip empty lines
+            ln = ln.strip()
+            if not ln:
+                continue
+
+            # stop at non-comment
+            if ln[:2] != '--':
+                break
+
+            # parse only meta-comments
+            if ln[:len(META_PREFIX)] != META_PREFIX:
+                continue
+
+            # cut prefix, skip empty comments
+            ln = ln[len(META_PREFIX):].strip()
+            if not ln:
+                continue
+
+            # continuation of previous key
+            if cur_continued:
+                # collect values
+                for v in ln.split(','):
+                    v = v.strip()
+                    if not v:
+                        continue
+                    self.add_value(cur_key, v)
+
+                # does this key continue?
+                if ln[-1] != ',':
+                    cur_key = None
+                    cur_continued = False
+
+                # go to next line
+                continue
+
+            # parse key
+            pos = ln.find(':')
+            if pos < 0:
+                continue
+            k = ln[:pos].strip()
+
+            # collect values
+            for v in ln[pos+1:].split(','):
+                v = v.strip()
+                if not v:
+                    continue
+                self.add_value(k, v)
+
+            # check if the current key's values will continue
+            if ln[-1] == ',':
+                cur_key = k
+                cur_continued = True
+            else:
+                cur_key = None
+                cur_continued = False
+
+    def need_execute(self, curs, local_tables, local_seqs):
+        # if no attrs, always execute
+        if not self.attrs:
+            return True
+
+        matched = 0
+        missed = 0
+        good_list = []
+        miss_list = []
+        for m in META_MATCHERS:
+            k = m.get_key()
+            if k not in self.attrs:
+                continue
+            for v in self.attrs[k]:
+                fqname = skytools.fq_name(v)
+                if m.match(fqname, curs, local_tables, local_seqs):
+                    matched += 1
+                    good_list.append(v)
+                else:
+                    missed += 1
+                    miss_list.append(v)
+                # should we drop out early?
+        if matched > 0 and missed == 0:
+            return True
+        elif missed > 0 and matched == 0:
+            return False
+        elif missed == 0 and matched == 0:
+            # should not happen, but let's restore old behaviour?
+            return True
+        else:
+            raise Exception("SQL only partially matches local setup: matches=%r misses=%r" % (good_list, miss_list))
+
+    def get_attr(self, k):
+        k = k.lower().strip()
+        if k not in META_KEYS:
+            raise Exception("Bug: invalid key requested: " + k)
+        if k not in self.attrs:
+            return []
+        return self.attrs[k]
+
+    def process_sql(self, sql, local_tables, local_seqs):
+        """Replace replacement tags in sql with actual local names."""
+        for k, vlist in self.attrs.items():
+            m = META_KEYS[k]
+            if not m.local_rename():
+                continue
+            for v in vlist:
+                repname = '@%s@' % v
+                fqname = skytools.fq_name(v)
+                if fqname in local_tables:
+                    localname = local_tables[fqname]
+                elif fqname in local_seqs:
+                    localname = local_seqs[fqname]
+                else:
+                    # should not happen
+                    raise Exception("bug: lost table: " + v)
+                qdest = skytools.quote_fqident(localname)
+                sql = sql.replace(repname, qdest)
+        return sql
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
+
diff --git a/python/londiste/handler.py b/python/londiste/handler.py
index ecce1cba..87a16b62 100644
--- a/python/londiste/handler.py
+++ b/python/londiste/handler.py
@@ -34,7 +34,7 @@ import logging
 import skytools
 import londiste.handlers
 
-__all__ = ['RowCache', 'BaseHandler', 'build_handler',
+__all__ = ['RowCache', 'BaseHandler', 'build_handler', 'EncodingValidator',
            'load_handler_modules', 'create_handler_string']
 
 class RowCache:
@@ -120,7 +120,12 @@ class BaseHandler:
         return True
 
 class TableHandler(BaseHandler):
-    """Default Londiste handler, inserts events into tables with plain SQL."""
+    """Default Londiste handler, inserts events into tables with plain SQL.
+
+    Parameters:
+      encoding=ENC - Validate and fix incoming data from encoding.
+                     Only 'utf8' is supported at the moment.
+    """
     handler_name = 'londiste'
 
     sql_command = {
@@ -129,16 +134,27 @@ class TableHandler(BaseHandler):
         'D': "delete from only %s where %s;",
     }
 
+    allow_sql_event = 1
+
+    def __init__(self, table_name, args, dest_table):
+        BaseHandler.__init__(self, table_name, args, dest_table)
+
+        enc = args.get('encoding')
+        if enc:
+            self.enc = EncodingValidator(self.log, enc)
+        else:
+            self.enc = None
+
     def process_event(self, ev, sql_queue_func, arg):
+        row = self.parse_row_data(ev)
         if len(ev.type) == 1:
             # sql event
             fqname = self.fq_dest_table
             fmt = self.sql_command[ev.type]
-            sql = fmt % (fqname, ev.data)
+            sql = fmt % (fqname, row)
         else:
             # urlenc event
             pklist = ev.type[2:].split(',')
-            row = skytools.db_urldecode(ev.data)
             op = ev.type[0]
             tbl = self.dest_table
             if op == 'I':
@@ -150,6 +166,108 @@ class TableHandler(BaseHandler):
 
         sql_queue_func(sql, arg)
 
+    def parse_row_data(self, ev):
+        """Extract row data from event, with optional encoding fixes.
+
+        Returns either a string (sql event) or a dict (urlenc event).
+        """
+
+        if len(ev.type) == 1:
+            if not self.allow_sql_event:
+                raise Exception('SQL events not supported by this handler')
+            if self.enc:
+                return self.enc.validate_string(ev.data, self.table_name)
+            return ev.data
+        else:
+            row = skytools.db_urldecode(ev.data)
+            if self.enc:
+                return self.enc.validate_dict(row, self.table_name)
+            return row
+
+    def real_copy(self, src_tablename, src_curs, dst_curs, column_list, cond_list):
+        """Do the actual table copy and return a tuple with the number
+        of bytes and rows copied.
+        """
+
+        condition = ' and '.join(cond_list)
+
+        if self.enc:
+            def _write_hook(obj, data):
+                return self.enc.validate_copy(data, column_list, src_tablename)
+        else:
+            _write_hook = None
+
+        return skytools.full_copy(src_tablename, src_curs, dst_curs,
+                                  column_list, condition,
+                                  dst_tablename = self.dest_table,
+                                  write_hook = _write_hook)
+
+
+#------------------------------------------------------------------------------
+# ENCODING VALIDATOR
+#------------------------------------------------------------------------------
+
+class EncodingValidator:
+    def __init__(self, log, encoding = 'utf-8', replacement = u'\ufffd'):
+        """Validates the correctness of the given encoding.  When data
+        contains illegal symbols, replaces them with <replacement> and
+        logs the incident.
+        """
+
+        if encoding.lower() not in ('utf8', 'utf-8'):
+            raise Exception('only utf8 supported')
+
+        self.encoding = encoding
+        self.log = log
+        self.columns = None
+        self.error_count = 0
+
+    def show_error(self, col, val, pfx, unew):
+        if pfx:
+            col = pfx + '.' + col
+        self.log.info('Fixed invalid UTF8 in column <%s>', col)
+        self.log.debug('<%s>: old=%r new=%r', col, val, unew)
+
+    def validate_copy(self, data, columns, pfx=""):
+        """Validate tab-separated fields"""
+
+        ok, _unicode = skytools.safe_utf8_decode(data)
+        if ok:
+            return data
+
+        # log error
+        vals = data.split('\t')
+        for i, v in enumerate(vals):
+            ok, tmp = skytools.safe_utf8_decode(v)
+            if not ok:
+                self.show_error(columns[i], v, pfx, tmp)
+
+        # return safe data
+        return _unicode.encode('utf8')
+
+    def validate_dict(self, data, pfx=""):
+        """Validates data in a dict"""
+        for k, v in data.items():
+            if v:
+                ok, u = skytools.safe_utf8_decode(v)
+                if not ok:
+                    self.show_error(k, v, pfx, u)
+                    data[k] = u.encode('utf8')
+        return data
+
+    def validate_string(self, value, pfx=""):
+        """Validate a string"""
+        ok, u = skytools.safe_utf8_decode(value)
+        if ok:
+            return value
+        _pfx = pfx and (pfx+': ') or ""
+        self.log.info('%sFixed invalid UTF8 in string <%s>', _pfx, value)
+        return u.encode('utf8')
+
+#
+# handler management
+#
+
 _handler_map = {
     'londiste': TableHandler,
 }
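A quick sketch of what the relocated EncodingValidator does with a broken
value; this is not part of the patch, and the logger and table names are
arbitrary (Python 2, assuming an installed skytools tree):

    # Minimal sketch: EncodingValidator fixing invalid UTF-8 in a row dict.
    import logging
    from londiste.handler import EncodingValidator

    logging.basicConfig()
    enc = EncodingValidator(logging.getLogger('demo'), 'utf8')

    row = {'id': '1', 'note': 'ok\xff'}            # '\xff' is invalid UTF-8
    row = enc.validate_dict(row, 'public.mytable')  # logs, replaces with U+FFFD
    print repr(row['note'])                         # 'ok\xef\xbf\xbd'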
diff --git a/python/londiste/handlers/dispatch.py b/python/londiste/handlers/dispatch.py
index 6505736a..dcfede52 100644
--- a/python/londiste/handlers/dispatch.py
+++ b/python/londiste/handlers/dispatch.py
@@ -86,8 +86,8 @@ part_template:
 row_mode:
     how rows are applied to target table
     * plain - each event creates SQL statement to run (default)
-    * keep_all - change updates to DELETE + INSERT
-    * keep_latest - change updates to inserts, ignore deletes
+    * keep_latest - change updates to DELETE + INSERT, ignore deletes
+    * keep_all - change updates to inserts, ignore deletes
 
 event_types:
     event types to process, separated by comma. Other events are ignored.
@@ -149,7 +149,7 @@ import datetime
 import codecs
 import re
 import skytools
-from londiste.handler import BaseHandler
+from londiste.handler import BaseHandler, EncodingValidator
 from skytools import quote_ident, quote_fqident, UsageError
 from skytools.dbstruct import *
 from skytools.utf8 import safe_utf8_decode
@@ -180,9 +180,9 @@ METHODS = [METH_CORRECT, METH_DELETE, METH_MERGED, METH_INSERT]
 
 EVENT_TYPES = ['I', 'U', 'D']
 
-PART_FUNC = 'public.create_partition'
-PART_FUNC_ARGS = ['parent', 'part', 'pkeys', 'part_field', 'part_time',
-                  'period']
+PART_FUNC_OLD = 'public.create_partition'
+PART_FUNC_NEW = 'londiste.create_partition'
+PART_FUNC_ARGS = ['parent', 'part', 'pkeys', 'part_field', 'part_time', 'period']
 
@@ -601,54 +601,6 @@ ROW_HANDLERS = {'plain': RowHandler,
                 'keep_latest': KeepLatestRowHandler}
 
-
-#------------------------------------------------------------------------------
-# ENCODING VALIDATOR
-#------------------------------------------------------------------------------
-
-class EncodingValidator:
-    def __init__(self, log, encoding = 'utf-8', replacement = u'\ufffd'):
-        """validates the correctness of given encoding. when data contains
-        illegal symbols, replaces them with <replacement> and logs the
-        incident"""
-        self.log = log
-        self.columns = None
-        self.error_count = 0
-
-    def show_error(self, col, val, pfx, unew):
-        if pfx:
-            col = pfx + '.' + col
-        self.log.info('Fixed invalid UTF8 in column <%s>', col)
-        self.log.debug('<%s>: old=%r new=%r', col, val, unew)
-
-    def validate_copy(self, data, columns, pfx=""):
-        """Validate tab-separated fields"""
-
-        ok, _unicode = safe_utf8_decode(data)
-        if ok:
-            return data
-
-        # log error
-        vals = data.split('\t')
-        for i, v in enumerate(vals):
-            ok, tmp = safe_utf8_decode(v)
-            if not ok:
-                self.show_error(columns[i], v, pfx, tmp)
-
-        # return safe data
-        return _unicode.encode('utf8')
-
-    def validate_dict(self, data, pfx=""):
-        """validates data in dict"""
-        for k, v in data.items():
-            if v:
-                ok, u = safe_utf8_decode(v)
-                if not ok:
-                    self.show_error(k, v, pfx, u)
-                    data[k] = u.encode('utf8')
-        return data
-
-
 #------------------------------------------------------------------------------
 # DISPATCHER
 #------------------------------------------------------------------------------
@@ -700,7 +652,7 @@ class Dispatcher(BaseHandler):
         conf.part_template = self.args.get('part_template')
         conf.pre_part = self.args.get('pre_part')
         conf.post_part = self.args.get('post_part')
-        conf.part_func = self.args.get('part_func', PART_FUNC)
+        conf.part_func = self.args.get('part_func', PART_FUNC_NEW)
 
         # set row mode and event types to process
         conf.row_mode = self.get_arg('row_mode', ROW_MODES)
         event_types = self.args.get('event_types', '*')
@@ -879,19 +831,38 @@ class Dispatcher(BaseHandler):
                 curs.execute(sql)
                 return True
             return False
+
         exec_with_vals(self.conf.pre_part)
+
         if not exec_with_vals(self.conf.part_template):
             self.log.debug('part_template not provided, using part func')
             # if part func exists call it with val arguments
             pfargs = ', '.join('%%(%s)s' % arg for arg in PART_FUNC_ARGS)
+
+            # set up configured function
             pfcall = 'select %s(%s)' % (self.conf.part_func, pfargs)
-            if skytools.exists_function(curs, self.conf.part_func, len(PART_FUNC_ARGS)):
+            have_func = skytools.exists_function(curs, self.conf.part_func, len(PART_FUNC_ARGS))
+
+            # backwards compat
+            if not have_func and self.conf.part_func == PART_FUNC_NEW:
+                pfcall = 'select %s(%s)' % (PART_FUNC_OLD, pfargs)
+                have_func = skytools.exists_function(curs, PART_FUNC_OLD, len(PART_FUNC_ARGS))
+                                                     len(PART_FUNC_ARGS))
+
+            if have_func:
                 self.log.debug('check_part.exec: func:%s, args: %s' % (pfcall, vals))
                 curs.execute(pfcall, vals)
             else:
+                #
+                # Otherwise create simple clone.
+                #
+                # FixMe: differences from create_partitions():
+                # - check constraints
+                # - inheritance
+                #
                 self.log.debug('part func %s not found, cloning table' % self.conf.part_func)
                 struct = TableStruct(curs, self.dest_table)
                 struct.create(curs, T_ALL, dst)
+
         exec_with_vals(self.conf.post_part)
         self.log.info("Created table: %s" % dst)
diff --git a/python/londiste/handlers/part.py b/python/londiste/handlers/part.py
index 6e644027..8f0eb378 100644
--- a/python/londiste/handlers/part.py
+++ b/python/londiste/handlers/part.py
@@ -1,5 +1,24 @@
-"""
-Experimental event filtering by hash.
+"""Event filtering by hash, for partitioned databases.
+
+Parameters:
+  key=COLUMN: column name to use for hashing
+  hashfunc=FUNCNAME: function to use for hashing. (default: partconf.get_hash_raw)
+  hashexpr=EXPR: full expression to use for hashing (deprecated)
+  encoding=ENC: validate and fix incoming data (only utf8 supported atm)
+
+On root node:
+* Hash of key field will be added to ev_extra3.
+  This is implemented by adding additional trigger argument:
+
+      ev_extra3='hash='||partconf.get_hash_raw(key_column)
+
+On branch/leaf node:
+* On COPY time, the SELECT on provider side gets filtered by hash.
+* On replay time, the events get filtered by looking at hash in ev_extra3.
+
+Local config:
+* Local hash value and mask are loaded from partconf.conf table.
+
 """
 
 import skytools
@@ -8,16 +27,29 @@ from londiste.handler import TableHandler
 
 __all__ = ['PartHandler']
 
 class PartHandler(TableHandler):
+    __doc__ = __doc__
     handler_name = 'part'
 
+    DEFAULT_HASHFUNC = "partconf.get_hash_raw"
+    DEFAULT_HASHEXPR = "%s(%s)"
+
     def __init__(self, table_name, args, dest_table):
         TableHandler.__init__(self, table_name, args, dest_table)
         self.max_part = None   # max part number
         self.local_part = None # part number of local node
-        self.key = args.get('key')
+
+        # primary key columns
+        self.key = args.get('key')
         if self.key is None:
             raise Exception('Specify key field as key argument')
 
+        # hash function & full expression
+        hashfunc = args.get('hashfunc', self.DEFAULT_HASHFUNC)
+        self.hashexpr = self.DEFAULT_HASHEXPR % (
+            skytools.quote_fqident(hashfunc),
+            skytools.quote_ident(self.key))
+        self.hashexpr = args.get('hashexpr', self.hashexpr)
+
     def reset(self):
         """Forget config info."""
         self.max_part = None
@@ -27,7 +59,7 @@ class PartHandler(TableHandler):
 
     def add(self, trigger_arg_list):
         """Let trigger put hash into extra3"""
-        arg = "ev_extra3='hash='||hashtext(%s)" % skytools.quote_ident(self.key)
+        arg = "ev_extra3='hash='||%s" % self.hashexpr
         trigger_arg_list.append(arg)
         TableHandler.add(self, trigger_arg_list)
 
@@ -52,8 +84,7 @@ class PartHandler(TableHandler):
     def real_copy(self, tablename, src_curs, dst_curs, column_list, cond_list):
         """Copy only slots needed locally."""
         self.load_part_info(dst_curs)
-        fn = 'hashtext(%s)' % skytools.quote_ident(self.key)
-        w = "%s & %d = %d" % (fn, self.max_part, self.local_part)
+        w = "(%s & %d) = %d" % (self.hashexpr, self.max_part, self.local_part)
         self.log.debug('part: copy_condition=%s' % w)
         cond_list.append(w)
 
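With the parameters documented above, the part handler is attached at add-table time. A hypothetical invocation in the style of the changelog examples (ini path, table and column names invented; --handler-arg passes the key= and hashfunc= parameters through to the handler):

    > londiste $ini add-table public.orders --handler=part \
          --handler-arg=key=order_id \
          --handler-arg=hashfunc=partconf.get_hash_raw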
diff --git a/python/londiste/handlers/qtable.py b/python/londiste/handlers/qtable.py
index cd8cb03d..b93543e9 100644
--- a/python/londiste/handlers/qtable.py
+++ b/python/londiste/handlers/qtable.py
@@ -20,7 +20,11 @@ __all__ = ['QueueTableHandler', 'QueueSplitterHandler']
 
 class QueueTableHandler(BaseHandler):
-    """Queue table handler. Do nothing"""
+    """Queue table handler. Do nothing.
+
+    Trigger: before-insert, skip trigger.
+    Event-processing: do nothing.
+    """
     handler_name = 'qtable'
 
     def add(self, trigger_arg_list):
@@ -37,6 +41,11 @@ class QueueTableHandler(BaseHandler):
         return False
 
 class QueueSplitterHandler(BaseHandler):
+    """Send events for one table to another queue.
+
+    Parameters:
+      queue=QUEUE  Queue name.
+    """
     handler_name = 'qsplitter'
 
     def __init__(self, table_name, args, dest_table):
diff --git a/python/londiste/handlers/vtable.py b/python/londiste/handlers/vtable.py
index 2f5fd551..19e5df0f 100644
--- a/python/londiste/handlers/vtable.py
+++ b/python/londiste/handlers/vtable.py
@@ -8,6 +8,7 @@ __all__ = ['VirtualTableHandler', 'FakeLocalHandler']
 
 class VirtualTableHandler(BaseHandler):
     """Virtual Table.
+
     Hack to get local=t for a table, but without processing any events.
     """
     handler_name = 'vtable'
diff --git a/python/londiste/playback.py b/python/londiste/playback.py
index 03a45665..e5fc83a7 100644
--- a/python/londiste/playback.py
+++ b/python/londiste/playback.py
@@ -8,6 +8,7 @@ import skytools
 
 from pgq.cascade.worker import CascadedWorker
 from londiste.handler import *
+from londiste.exec_attrs import ExecAttrs
 
 __all__ = ['Replicator', 'TableState',
            'TABLE_MISSING', 'TABLE_IN_COPY', 'TABLE_CATCHING_UP',
@@ -472,13 +473,18 @@ class Replicator(CascadedWorker):
         pmap = self.get_state_map(src_db.cursor())
         src_db.commit()
         for t in self.get_tables_in_state(TABLE_MISSING):
-            if t.name not in pmap:
-                self.log.warning("Table %s not available on provider" % t.name)
-                continue
-            pt = pmap[t.name]
-            if pt.state != TABLE_OK: # or pt.custom_snapshot: # FIXME: does snapsnot matter?
-                self.log.info("Table %s not OK on provider, waiting" % t.name)
-                continue
+            if 'copy_node' in t.table_attrs:
+                # should we go and check this node?
+                pass
+            else:
+                # regular provider is used
+                if t.name not in pmap:
+                    self.log.warning("Table %s not available on provider" % t.name)
+                    continue
+                pt = pmap[t.name]
+                if pt.state != TABLE_OK: # or pt.custom_snapshot: # FIXME: does snapshot matter?
+                    self.log.info("Table %s not OK on provider, waiting" % t.name)
+                    continue
 
             # dont allow more copies than configured
             if npossible == 0:
@@ -634,10 +640,13 @@ class Replicator(CascadedWorker):
             return
 
         fqname = skytools.quote_fqident(t.dest_table)
-        if dst_curs.connection.server_version >= 80400:
-            sql = "TRUNCATE ONLY %s;" % fqname
-        else:
-            sql = "TRUNCATE %s;" % fqname
+
+        #
+        # Always use CASCADE, because without it the
+        # operation cannot work with FKeys, on both
+        # slave and master.
+        #
+        sql = "TRUNCATE %s CASCADE;" % fqname
 
         self.flush_sql(dst_curs)
         dst_curs.execute(sql)
@@ -650,20 +659,40 @@ class Replicator(CascadedWorker):
 
         # parse event
         fname = ev.extra1
+        s_attrs = ev.extra2
+        exec_attrs = ExecAttrs(urlenc = s_attrs)
         sql = ev.data
 
         # fixme: curs?
         pgver = dst_curs.connection.server_version
         if pgver >= 80300:
             dst_curs.execute("set local session_replication_role = 'local'")
-        q = "select * from londiste.execute_start(%s, %s, %s, false)"
-        res = self.exec_cmd(dst_curs, q, [self.queue_name, fname, sql], commit = False)
+
+        seq_map = {}
+        q = "select seq_name, local from londiste.get_seq_list(%s) where local"
+        dst_curs.execute(q, [self.queue_name])
+        for row in dst_curs.fetchall():
+            seq_map[row['seq_name']] = row['seq_name']
+
+        tbl_map = {}
+        for tbl, t in self.table_map.items():
+            tbl_map[t.name] = t.dest_table
+
+        q = "select * from londiste.execute_start(%s, %s, %s, false, %s)"
+        res = self.exec_cmd(dst_curs, q, [self.queue_name, fname, sql, s_attrs], commit = False)
         ret = res[0]['ret_code']
-        if ret >= 300:
-            self.log.warning("Skipping execution of '%s'", fname)
+        if ret > 200:
+            self.log.info("Skipping execution of '%s'", fname)
             return
-        for stmt in skytools.parse_statements(sql):
-            dst_curs.execute(stmt)
+
+        if exec_attrs.need_execute(dst_curs, tbl_map, seq_map):
+            self.log.info("%s: executing sql", fname)
+            xsql = exec_attrs.process_sql(sql, tbl_map, seq_map)
+            for stmt in skytools.parse_statements(xsql):
+                dst_curs.execute(stmt)
+        else:
+            self.log.info("%s: execution not needed on this node", fname)
+
         q = "select * from londiste.execute_finish(%s, %s)"
         self.exec_cmd(dst_curs, q, [self.queue_name, fname], commit = False)
         if pgver >= 80300:
diff --git a/python/londiste/setup.py b/python/londiste/setup.py
index bb4b152f..b8ca4a0c 100644
--- a/python/londiste/setup.py
+++ b/python/londiste/setup.py
@@ -6,6 +6,7 @@ import sys, os, re, skytools
 
 from pgq.cascade.admin import CascadeAdmin
+from londiste.exec_attrs import ExecAttrs
 
 import londiste.handler
 
@@ -45,12 +46,18 @@ class LondisteSetup(CascadeAdmin):
                      help = "no copy needed", default=False)
         p.add_option("--skip-truncate", action="store_true", dest="skip_truncate",
                      help = "dont delete old data", default=False)
+        p.add_option("--find-copy-node", action="store_true", dest="find_copy_node",
+                     help = "add: find table source for copy by walking upwards")
+        p.add_option("--copy-node", dest="copy_node",
+                     help = "add: use NODE as source for initial copy")
         p.add_option("--copy-condition", dest="copy_condition",
                      help = "copy: where expression")
         p.add_option("--force", action="store_true",
                      help="force", default=False)
         p.add_option("--all", action="store_true",
                      help="include all tables", default=False)
+        p.add_option("--wait-sync", action="store_true",
+                     help = "add: wait until all tables are in sync")
         p.add_option("--create", action="store_true",
                      help="create, minimal", default=False)
         p.add_option("--create-full", action="store_true",
@@ -130,8 +137,17 @@ class LondisteSetup(CascadeAdmin):
         needs_tbl = self.handler_needs_table()
         args = self.expand_arg_list(dst_db, 'r', False, args, needs_tbl)
 
+        # search for usable copy node if requested
+        if (self.options.find_copy_node
+                and not self.is_root()
+                and needs_tbl):
+            src_db = self.find_copy_node(dst_db, args)
+            src_curs = src_db.cursor()
+            src_tbls = self.fetch_set_tables(src_curs)
+            src_db.commit()
+
         # dont check for exist/not here (root handling)
-        if not self.is_root():
+        if not self.is_root() and not self.options.expect_sync:
             problems = False
             for tbl in args:
                 tbl = skytools.fq_name(tbl)
@@ -159,6 +175,11 @@ class LondisteSetup(CascadeAdmin):
         for tbl in args:
             self.add_table(src_db, dst_db, tbl, create_flags, src_tbls)
 
+        # wait
+        if self.options.wait_sync:
+            self.wait_for_sync(dst_db)
+
     def add_table(self, src_db, dst_db, tbl, create_flags, src_tbls):
         # use full names
         tbl = skytools.fq_name(tbl)
@@ -181,7 +202,7 @@ class LondisteSetup(CascadeAdmin):
             src_dest_table = src_tbls[tbl]['dest_table']
             if not skytools.exists_table(src_curs, src_dest_table):
                 # table not present on provider - nowhere to get the DDL from
-                self.log.warning('Table %s missing on provider, skipping' % desc)
+                self.log.warning('Table %s missing on provider, cannot create, skipping' % desc)
                 return
             schema = skytools.fq_name_parts(dest_table)[0]
             if not skytools.exists_schema(dst_curs, schema):
@@ -217,6 +238,9 @@ class LondisteSetup(CascadeAdmin):
             attrs['handler'] = hstr
             p.add(tgargs)
 
+        if self.options.copy_node:
+            attrs['copy_node'] = self.options.copy_node
+
         if self.options.expect_sync:
             tgargs.append('expect_sync')
 
@@ -247,6 +271,15 @@ class LondisteSetup(CascadeAdmin):
             return p.needs_table()
         return True
 
+    def handler_allows_copy(self, table_attrs):
+        """Decide if table is copyable based on attrs."""
+        if not table_attrs:
+            return True
+        attrs = skytools.db_urldecode(table_attrs)
+        hstr = attrs['handler']
+        p = londiste.handler.build_handler('unused.string', hstr, None)
+        return p.needs_table()
+
     def sync_table_list(self, dst_curs, src_tbls, dst_tbls):
         for tbl in src_tbls.keys():
             q = "select * from londiste.global_add_table(%s, %s)"
@@ -410,6 +443,19 @@ class LondisteSetup(CascadeAdmin):
         db = self.get_database('db')
         curs = db.cursor()
 
+        tables = self.fetch_set_tables(curs)
+        seqs = self.fetch_seqs(curs)
+
+        # generate local maps
+        local_tables = {}
+        local_seqs = {}
+        for tbl in tables.values():
+            if tbl['local']:
+                local_tables[tbl['table_name']] = tbl['dest_table']
+        for seq in seqs.values():
+            if seq['local']:
+                local_seqs[seq['seq_name']] = seq['seq_name']
+
         # set replica role for EXECUTE transaction
         if db.server_version >= 80300:
             curs.execute("set local session_replication_role = 'local'")
@@ -417,19 +463,90 @@ class LondisteSetup(CascadeAdmin):
         for fn in files:
             fname = os.path.basename(fn)
             sql = open(fn, "r").read()
-            q = "select * from londiste.execute_start(%s, %s, %s, true)"
-            res = self.exec_cmd(db, q, [self.queue_name, fname, sql], commit = False)
+            attrs = ExecAttrs(sql = sql)
+            q = "select * from londiste.execute_start(%s, %s, %s, true, %s)"
+            res = self.exec_cmd(db, q, [self.queue_name, fname, sql, attrs.to_urlenc()], commit = False)
             ret = res[0]['ret_code']
             if ret >= 300:
                 self.log.warning("Skipping execution of '%s'" % fname)
                 continue
-            for stmt in skytools.parse_statements(sql):
-                curs.execute(stmt)
+            if attrs.need_execute(curs, local_tables, local_seqs):
+                self.log.info("%s: executing sql", fname)
+                xsql = attrs.process_sql(sql, local_tables, local_seqs)
+                for stmt in skytools.parse_statements(xsql):
+                    curs.execute(stmt)
+            else:
+                self.log.info("%s: This SQL does not need to run on this node.", fname)
             q = "select * from londiste.execute_finish(%s, %s)"
             self.exec_cmd(db, q, [self.queue_name, fname], commit = False)
         db.commit()
 
+    def find_copy_node(self, dst_db, args):
+        src_db = self.get_provider_db()
+
+        need = {}
+        for t in args:
+            need[t] = 1
+
+        while 1:
+            src_curs = src_db.cursor()
+
+            q = "select * from pgq_node.get_node_info(%s)"
+            src_curs.execute(q, [self.queue_name])
+            info = src_curs.fetchone()
+            if info['ret_code'] >= 400:
+                raise UsageError("Node does not exist")
+
+            self.log.info("Checking if %s can be used for copy", info['node_name'])
+
+            q = "select table_name, local, table_attrs from londiste.get_table_list(%s)"
+            src_curs.execute(q, [self.queue_name])
+            got = {}
+            for row in src_curs.fetchall():
+                tbl = row['table_name']
+                if tbl not in need:
+                    continue
+                if not row['local']:
+                    self.log.debug("Problem: %s is not local", tbl)
+                    continue
+                if not self.handler_allows_copy(row['table_attrs']):
+                    self.log.debug("Problem: %s handler does not store data [%s]", tbl, row['table_attrs'])
+                    continue
+                self.log.debug("Good: %s is usable", tbl)
+                got[row['table_name']] = 1
+
+            ok = 1
+            for t in args:
+                if t not in got:
+                    self.log.info("Node %s does not have all tables", info['node_name'])
+                    ok = 0
+                    break
+
+            if ok:
+                self.options.copy_node = info['node_name']
+                self.log.info("Node %s seems a good source, using it", info['node_name'])
+                break
+
+            if info['node_type'] == 'root':
+                raise skytools.UsageError("Reached root, no usable source found")
+
+            self.close_database('provider_db')
+            src_db = self.get_database('provider_db', connstr = info['provider_location'])
+
+        return src_db
+
     def get_provider_db(self):
+
+        # use custom node for copy
+        if self.options.copy_node:
+            source_node = self.options.copy_node
+            m = self.queue_info.get_member(source_node)
+            if not m:
+                raise skytools.UsageError("Cannot find node <%s>" % source_node)
+            if source_node == self.local_node:
+                raise skytools.UsageError("Cannot use itself as provider")
+            self.provider_location = m.location
+
         if not self.provider_location:
             db = self.get_database('db')
             q = 'select * from pgq_node.get_node_info(%s)'
@@ -535,3 +652,52 @@ class LondisteSetup(CascadeAdmin):
                 n_half += 1
         node.add_info_line('Tables: %d/%d/%d' % (n_ok, n_half, n_ign))
 
+    def cmd_wait_sync(self):
+        self.load_local_info()
+
+        dst_db = self.get_database('db')
+        self.wait_for_sync(dst_db)
+
+    def wait_for_sync(self, dst_db):
+        self.log.info("Waiting until all tables are in sync")
+        q = "select table_name, merge_state, local"\
+            " from londiste.get_table_list(%s) where local"
+        dst_curs = dst_db.cursor()
+
+        partial = {}
+        done_pos = 1
+        startup_info = 0
+        while 1:
+            dst_curs.execute(q, [self.queue_name])
+            rows = dst_curs.fetchall()
+            dst_db.commit()
+
+            cur_count = 0
+            done_list = []
+            for row in rows:
+                if not row['local']:
+                    continue
+                tbl = row['table_name']
+                if row['merge_state'] != 'ok':
+                    partial[tbl] = 0
+                    cur_count += 1
+                elif tbl in partial:
+                    if partial[tbl] == 0:
+                        partial[tbl] = 1
+                        done_list.append(tbl)
+
+            if not startup_info:
+                self.log.info("%d table(s) to copy", len(partial))
+                startup_info = 1
+
+            for done in done_list:
+                self.log.info("%s: finished (%d/%d)", done, done_pos, len(partial))
+                done_pos += 1
+
+            if cur_count == 0:
+                break
+
+            self.sleep(2)
+
+        self.log.info("All done")
+
diff --git a/python/londiste/table_copy.py b/python/londiste/table_copy.py
index 3c137ae6..65a702fb 100644
--- a/python/londiste/table_copy.py
+++ b/python/londiste/table_copy.py
@@ -239,14 +239,38 @@ class CopyTable(Replicator):
         return Replicator.work(self)
 
     def register_copy_consumer(self):
-        # fetch parent consumer state
         dst_db = self.get_database('db')
-        q = "select * from pgq_node.get_consumer_state(%s, %s)"
-        rows = self.exec_cmd(dst_db, q, [ self.queue_name, self.old_consumer_name ])
-        state = rows[0]
-        loc = state['provider_location']
+        dst_curs = dst_db.cursor()
 
-        self.register_consumer(loc)
+        # fetch table attrs
+        q = "select * from londiste.get_table_list(%s) where table_name = %s"
+        dst_curs.execute(q, [ self.queue_name, self.copy_table_name ])
+        rows = dst_curs.fetchall()
+        attrs = {}
+        if len(rows) > 0:
+            v_attrs = rows[0]['table_attrs']
+            if v_attrs:
+                attrs = skytools.db_urldecode(v_attrs)
+
+        # do we have node here?
+        if 'copy_node' in attrs:
+            # take node from attrs
+            source_node = attrs['copy_node']
+            q = "select * from pgq_node.get_queue_locations(%s) where node_name = %s"
+            dst_curs.execute(q, [ self.queue_name, source_node ])
+            rows = dst_curs.fetchall()
+            if len(rows):
+                source_location = rows[0]['node_location']
+        else:
+            # fetch parent consumer state
+            q = "select * from pgq_node.get_consumer_state(%s, %s)"
+            rows = self.exec_cmd(dst_db, q, [ self.queue_name, self.old_consumer_name ])
+            state = rows[0]
+            source_node = state['provider_node']
+            source_location = state['provider_location']
+
+        self.log.info("Using '%s' as source node", source_node)
+        self.register_consumer(source_location)
 
 if __name__ == '__main__':
     script = CopyTable(sys.argv[1:])
diff --git a/python/pgq/__init__.py b/python/pgq/__init__.py
index 638ee372..dc6ece29 100644
--- a/python/pgq/__init__.py
+++ b/python/pgq/__init__.py
@@ -21,6 +21,7 @@ from pgq.event import *
 from pgq.consumer import *
 from pgq.coopconsumer import *
 from pgq.remoteconsumer import *
+from pgq.localconsumer import *
 from pgq.producer import *
 from pgq.ticker import *
@@ -37,6 +38,7 @@ __all__ = (
     pgq.consumer.__all__ +
     pgq.coopconsumer.__all__ +
     pgq.remoteconsumer.__all__ +
+    pgq.localconsumer.__all__ +
     pgq.cascade.nodeinfo.__all__ +
     pgq.cascade.admin.__all__ +
     pgq.cascade.consumer.__all__ +
diff --git a/python/pgq/cascade/admin.py b/python/pgq/cascade/admin.py
index 517cac21..f8f294c8 100644
--- a/python/pgq/cascade/admin.py
+++ b/python/pgq/cascade/admin.py
@@ -497,17 +497,20 @@ class CascadeAdmin(skytools.AdminScript):
             pass
 
         try:
-            # drop node info
-            db = self.get_node_database(node_name)
-            q = "select * from pgq_node.drop_node(%s, %s)"
-            self.exec_cmd(db, q, [self.queue_name, node_name])
-
             # unregister node location from root node (event will be added to queue)
             root_db = self.find_root_db()
             q = "select * from pgq_node.unregister_location(%s, %s)"
             self.exec_cmd(root_db, q, [self.queue_name, node_name])
         except skytools.DBError, d:
-            self.log.warning("Removal failure: %s", str(d))
+            self.log.warning("Unregister from root failed: %s", str(d))
+
+        try:
+            # drop node info
+            db = self.get_node_database(node_name)
+            q = "select * from pgq_node.drop_node(%s, %s)"
+            self.exec_cmd(db, q, [self.queue_name, node_name])
+        except skytools.DBError, d:
+            self.log.warning("Local drop failure: %s", str(d))
 
         # brute force removal
         for n in self.queue_info.member_map.values():
@@ -763,6 +766,63 @@ class CascadeAdmin(skytools.AdminScript):
         if n.combined_queue:
             print('Combined Queue: %s  (node type: %s)' % (n.combined_queue, n.combined_type))
 
+    def cmd_wait_root(self):
+        """Wait for next tick from root."""
+
+        self.load_local_info()
+
+        if self.queue_info.local_node.type == 'root':
+            self.log.info("Current node is root, no need to wait")
+            return
+
+        self.log.info("Finding root node")
+        root_node = self.find_root_node()
+        self.log.info("Root is %s", root_node)
+
+        dst_db = self.get_database('db')
+        self.wait_for_node(dst_db, root_node)
+
+    def cmd_wait_provider(self):
+        """Wait for next tick from provider."""
+
+        self.load_local_info()
+
+        if self.queue_info.local_node.type == 'root':
+            self.log.info("Current node is root, no need to wait")
+            return
+
+        dst_db = self.get_database('db')
+        node = self.queue_info.local_node.provider_node
+        self.log.info("Provider is %s", node)
+        self.wait_for_node(dst_db, node)
+
+    def wait_for_node(self, dst_db, node_name):
+        """Core logic for waiting."""
+
+        self.log.info("Fetching last tick for %s", node_name)
+        node_info = self.load_node_info(node_name)
+        tick_id = node_info.last_tick
+
+        self.log.info("Waiting for tick > %d", tick_id)
+
+        q = "select * from pgq_node.get_node_info(%s)"
+        dst_curs = dst_db.cursor()
+
+        while 1:
+            dst_curs.execute(q, [self.queue_name])
+            row = dst_curs.fetchone()
+            dst_db.commit()
+
+            if row['ret_code'] >= 300:
+                self.log.warning("Problem: %s %s", row['ret_code'], row['ret_note'])
+                return
+
+            if row['worker_last_tick'] > tick_id:
+                self.log.info("Got tick %d, exiting", row['worker_last_tick'])
+                break
+
+            self.sleep(2)
+
     #
     # Shortcuts for operating on nodes.
     #
@@ -814,7 +874,7 @@ class CascadeAdmin(skytools.AdminScript):
             self.log.warning("ignoring cmd for dead node '%s': %s" % (
                 node_name, skytools.quote_statement(sql, args)))
             return None
-        return self.exec_cmd(db, sql, args, quiet = quiet)
+        return self.exec_cmd(db, sql, args, quiet = quiet, prefix=node_name)
 
     #
     # Various operation on nodes.
diff --git a/python/pgq/cascade/consumer.py b/python/pgq/cascade/consumer.py
index cebba94a..3d20ba78 100644
--- a/python/pgq/cascade/consumer.py
+++ b/python/pgq/cascade/consumer.py
@@ -228,6 +228,8 @@ class CascadedConsumer(Consumer):
             if self.provider_connstr != loc:
                 self.close_database(PDB)
                 self.provider_connstr = loc
+                # re-initialize provider connection
+                db = self.get_provider_db(state)
 
         return state
 
diff --git a/python/pgq/cascade/worker.py b/python/pgq/cascade/worker.py
index a721eaa0..e53ddfe5 100644
--- a/python/pgq/cascade/worker.py
+++ b/python/pgq/cascade/worker.py
@@ -222,13 +222,18 @@ class CascadedWorker(CascadedConsumer):
         if st.sync_watermark:
             # dont send local watermark upstream
             wm = self.batch_info['prev_tick_id']
+        elif wm > self.batch_info['cur_tick_id']:
+            # in wait-behind-leaf case, the wm from target can be
+            # ahead of the source queue, use current batch then
+            wm = self.batch_info['cur_tick_id']
 
         self.log.debug("Publishing local watermark: %d" % wm)
         src_curs = src_db.cursor()
         q = "select * from pgq_node.set_subscriber_watermark(%s, %s, %s)"
         src_curs.execute(q, [self.pgq_queue_name, st.node_name, wm])
+        src_db.commit()
 
-        # if last part fails, dont repeat it immediately
+        # if next part fails, dont repeat it immediately
         self.local_wm_publish_time = t
 
         if st.sync_watermark:
@@ -331,13 +336,17 @@ class CascadedWorker(CascadedConsumer):
         """
 
         # merge-leaf on branch should not update tick pos
-        wst = self._worker_state
-        if wst.wait_behind:
+        st = self._worker_state
+        if st.wait_behind:
             dst_db.commit()
+
+            # still need to publish wm info
+            if st.local_wm_publish and self.main_worker:
+                self.publish_local_wm(src_db, dst_db)
+
             return
 
         if self.main_worker:
-            st = self._worker_state
             dst_curs = dst_db.cursor()
             self.flush_events(dst_curs)
 
diff --git a/python/pgq/consumer.py b/python/pgq/consumer.py
index 76470b72..77c018c7 100644
--- a/python/pgq/consumer.py
+++ b/python/pgq/consumer.py
@@ -111,6 +111,9 @@ class Consumer(skytools.DBScript):
     # override consumer name
     #consumer_name = %(job_name)s
 
+    # filter out only events for specific tables
+    #table_filter = table1, table2
+
     # whether to use cursor to fetch events (0 disables)
     #pgq_lazy_fetch = 300
 
@@ -198,6 +201,14 @@ class Consumer(skytools.DBScript):
         self.pgq_min_interval = self.cf.get("pgq_batch_collect_interval", '') or None
         self.pgq_min_lag = self.cf.get("pgq_keep_lag", '') or None
 
+        # filter out specific tables only
+        tfilt = []
+        for t in self.cf.getlist('table_filter', ''):
+            tfilt.append(skytools.quote_literal(skytools.fq_name(t)))
+        if len(tfilt) > 0:
+            expr = "ev_extra1 in (%s)" % ','.join(tfilt)
+            self.consumer_filter = expr
+
     def startup(self):
         """Handle commands here.  __init__ does not have error logging."""
         if self.options.register:
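The reload() code above turns table_filter into a SQL condition on ev_extra1, which the PgQ table triggers fill with the source table name. A hypothetical job config using it (section and connection details invented, keys as commonly used by pgq.Consumer jobs):

    [demo_consumer]
    job_name = demo_consumer
    db = dbname=somedb
    queue_name = myqueue
    # only deliver events produced by triggers on these tables
    table_filter = public.orders, public.customers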
diff --git a/python/pgq/localconsumer.py b/python/pgq/localconsumer.py
new file mode 100644
index 00000000..55ea15b2
--- /dev/null
+++ b/python/pgq/localconsumer.py
@@ -0,0 +1,211 @@
+
+"""
+Consumer that stores last applied position in local file.
+
+For cases where the consumer cannot use single database for remote tracking.
+
+"""
+
+import sys
+import os
+import errno
+import skytools
+import pgq
+
+__all__ = ['LocalConsumer']
+
+class LocalConsumer(pgq.Consumer):
+    """Consumer that applies batches sequentially in second database.
+
+    Requirements:
+     - Whole batch in one TX.
+     - Must not use retry queue.
+
+    Features:
+     - Can detect if several batches are already applied to dest db.
+     - If some ticks are lost, allows seeking back on the queue.
+       Whether it succeeds depends on pgq configuration.
+
+    Config options::
+
+        ## Parameters for LocalConsumer ##
+
+        # file location where last applied tick is tracked
+        local_tracking_file = ~/state/%(job_name)s.tick
+    """
+
+    def reload(self):
+        super(LocalConsumer, self).reload()
+
+        self.local_tracking_file = self.cf.getfile('local_tracking_file')
+
+    def init_optparse(self, parser = None):
+        p = super(LocalConsumer, self).init_optparse(parser)
+        p.add_option("--rewind", action = "store_true",
+                     help = "change queue position according to local tick")
+        p.add_option("--reset", action = "store_true",
+                     help = "reset local tick based on queue position")
+        return p
+
+    def startup(self):
+        if self.options.rewind:
+            self.rewind()
+            sys.exit(0)
+        if self.options.reset:
+            self.dst_reset()
+            sys.exit(0)
+        super(LocalConsumer, self).startup()
+
+        self.check_queue()
+
+    def check_queue(self):
+        queue_tick = -1
+        local_tick = self.load_local_tick()
+
+        db = self.get_database(self.db_name)
+        curs = db.cursor()
+        q = "select last_tick from pgq.get_consumer_info(%s, %s)"
+        curs.execute(q, [self.queue_name, self.consumer_name])
+        rows = curs.fetchall()
+        if len(rows) == 1:
+            queue_tick = rows[0]['last_tick']
+        db.commit()
+
+        if queue_tick < 0:
+            if local_tick >= 0:
+                self.log.info("Registering consumer at tick %d", local_tick)
+                q = "select * from pgq.register_consumer_at(%s, %s, %s)"
+                curs.execute(q, [self.queue_name, self.consumer_name, local_tick])
+            else:
+                self.log.info("Registering consumer at queue top")
+                q = "select * from pgq.register_consumer(%s, %s)"
+                curs.execute(q, [self.queue_name, self.consumer_name])
+        elif local_tick < 0:
+            self.log.info("Local tick missing, storing queue tick %d", queue_tick)
+            self.save_local_tick(queue_tick)
+        elif local_tick > queue_tick:
+            self.log.warning("Tracking out of sync: queue=%d local=%d.  Repositioning on queue.  [Database failure?]",
+                             queue_tick, local_tick)
+            q = "select * from pgq.register_consumer_at(%s, %s, %s)"
+            curs.execute(q, [self.queue_name, self.consumer_name, local_tick])
+        elif local_tick < queue_tick:
+            self.log.warning("Tracking out of sync: queue=%d local=%d.  Rewinding queue.  [Lost file data?]",
+                             queue_tick, local_tick)
+            q = "select * from pgq.register_consumer_at(%s, %s, %s)"
+            curs.execute(q, [self.queue_name, self.consumer_name, local_tick])
+        else:
+            self.log.info("Ticks match: Queue=%d Local=%d", queue_tick, local_tick)
+
+    def work(self):
+        if self.work_state < 0:
+            self.check_queue()
+        return super(LocalConsumer, self).work()
+
+    def process_batch(self, db, batch_id, event_list):
+        """Process all events in batch.
+ """ + + # check if done + if self.is_batch_done(): + return + + # actual work + self.process_local_batch(db, batch_id, event_list) + + # finish work + self.set_batch_done() + + def process_local_batch(self, db, batch_id, event_list): + for ev in event_list: + self.process_local_event(db, batch_id, ev) + + def process_local_event(self, db, batch_id, ev): + raise Exception('process_local_event not implemented') + + def is_batch_done(self): + """Helper function to keep track of last successful batch + in external database. + """ + + local_tick = self.load_local_tick() + + cur_tick = self.batch_info['tick_id'] + prev_tick = self.batch_info['prev_tick_id'] + + if local_tick < 0: + # seems this consumer has not run yet? + return False + + if prev_tick == local_tick: + # on track + return False + + if cur_tick == local_tick: + # current batch is already applied, skip it + return True + + # anything else means problems + raise Exception('Lost position: batch %d..%d, dst has %d' % ( + prev_tick, cur_tick, local_tick)) + + def set_batch_done(self): + """Helper function to set last successful batch + in external database. + """ + tick_id = self.batch_info['tick_id'] + self.save_local_tick(tick_id) + + def register_consumer(self): + new = super(LocalConsumer, self).register_consumer() + if new: # fixme + self.dst_reset() + + def unregister_consumer(self): + """If unregistering, also clean completed tick table on dest.""" + + super(LocalConsumer, self).unregister_consumer() + self.dst_reset() + + def rewind(self): + dst_tick = self.load_local_tick() + if dst_tick >= 0: + src_db = self.get_database(self.db_name) + src_curs = src_db.cursor() + + self.log.info("Rewinding queue to tick local tick %d", dst_tick) + q = "select pgq.register_consumer_at(%s, %s, %s)" + src_curs.execute(q, [self.queue_name, self.consumer_name, dst_tick]) + + src_db.commit() + else: + self.log.error('Cannot rewind, no tick found in local file') + + def dst_reset(self): + self.log.info("Removing local tracking file") + try: + os.remove(self.local_tracking_file) + except: + pass + + def load_local_tick(self): + """Reads stored tick or -1.""" + try: + f = open(self.local_tracking_file, 'r') + buf = f.read() + f.close() + data = buf.strip() + if data: + tick_id = int(data) + else: + tick_id = -1 + return tick_id + except IOError, ex: + if ex.errno == errno.ENOENT: + return -1 + raise + + def save_local_tick(self, tick_id): + """Store tick in local file.""" + data = str(tick_id) + skytools.write_atomic(self.local_tracking_file, data) + diff --git a/python/qadmin.py b/python/qadmin.py index 839b3a35..aed41c56 100755 --- a/python/qadmin.py +++ b/python/qadmin.py @@ -6,20 +6,20 @@ connect [ queue=.. ] [ node=.. 
diff --git a/python/qadmin.py b/python/qadmin.py
index 839b3a35..aed41c56 100755
--- a/python/qadmin.py
+++ b/python/qadmin.py
@@ -6,20 +6,20 @@
     connect [ queue=.. ] [ node=.. ];
 
     install pgq | londiste;
 
+    show queue [ <qname | *> ];
     create queue <qname>;
     alter queue <qname | *> set param = , ...;
     drop queue <qname>;
-    show queue [ <qname | *> ];
 
-    show table <tbl>;
-    show sequence <seq>;
-
+    show consumer [ <cname | *> [on <qname>] ];
     register consumer <consumer> [on <qname> | at <tick_id> | copy <consumer> ]* ;
     unregister consumer <consumer | *> [from <qname>];
-
     register subconsumer <subconsumer> for <consumer> [on <qname>];
     unregister subconsumer <subconsumer | *> for <consumer> [from <qname>] [close [batch]];
-    show consumer [ <cname | *> [on <qname>] ];
+
+    show node [ <node | *> [on <qname>] ];
+    show table <tbl>;
+    show sequence <seq>;
 
 Following commands expect default queue:
@@ -79,8 +79,6 @@
 
     show cascade;
 
 """
 
-__version__ = '0.1'
-
 cmdline_usage = '''\
 Usage: qadmin [switches]
 
@@ -106,6 +104,8 @@ import pkgloader
 pkgloader.require('skytools', '3.0')
 import skytools
 
+__version__ = skytools.__version__
+
 script = None
 
 IGNORE_HOSTS = {
@@ -446,6 +446,7 @@ w_show = List(
     Word('node', w_show_node),
     Word('table', w_show_table),
     Word('sequence', w_show_seq),
+    Word('version', w_done),
     name = "cmd2")
 
 w_install = List(
@@ -635,6 +636,9 @@ class AdminConsole:
     cur_queue = None
     cur_database = None
 
+    server_version = None
+    pgq_version = None
+
    cmd_file = None
    cmd_str = None
 
@@ -853,7 +857,7 @@ class AdminConsole:
 
         self.initial_connstr = " ".join(cstr_list)
 
-    def db_connect(self, connstr):
+    def db_connect(self, connstr, quiet=False):
         db = skytools.connect_database(connstr)
         db.set_isolation_level(0) # autocommit
 
@@ -861,17 +865,20 @@ class AdminConsole:
         curs = db.cursor()
         curs.execute(q)
         res = curs.fetchone()
-        print 'Server version', res[1]
         self.cur_database = res[0]
+        self.server_version = res[1]
+        q = "select pgq.version()"
+        try:
+            curs.execute(q)
+            res = curs.fetchone()
+            self.pgq_version = res[0]
+        except psycopg2.ProgrammingError:
+            self.pgq_version = "<none>"
+        if not quiet:
+            print "qadmin (%s, server %s, pgq %s)" % (__version__, self.server_version, self.pgq_version)
+            #print "Connected to %r" % connstr
         return db
 
-        #print res
-        #print dir(self.db)
-        #print dir(self.db.cursor())
-        #print self.db.status
-        #print "connected to", repr(self.initial_connstr)
-
     def run(self, argv):
         self.parse_cmdline(argv)
 
@@ -888,7 +895,7 @@ class AdminConsole:
             cmd_str = open(self.cmd_file, "r").read()
 
         try:
-            self.db = self.db_connect(self.initial_connstr)
+            self.db = self.db_connect(self.initial_connstr, quiet=True)
         except psycopg2.Error, d:
             print str(d).strip()
             sys.exit(1)
@@ -911,6 +918,7 @@ class AdminConsole:
             except IOError:
                 pass
 
+        print "Welcome to qadmin %s (server %s), the PgQ interactive terminal." % (__version__, self.server_version)
         print "Use 'show help;' to see available commands."
         while 1:
             try:
@@ -1109,6 +1117,11 @@ class AdminConsole:
 
         print "CONNECT"
 
+    def cmd_show_version(self, params):
+        print "qadmin version %s" % __version__
+        print "server version %s" % self.server_version
+        print "pgq version %s" % self.pgq_version
+
     def cmd_install(self, params):
         pgq_objs = [
             skytools.DBLanguage("plpgsql"),
@@ -1162,7 +1175,7 @@ class AdminConsole:
         q = pfx + " from pgq.get_queue_info(%s)"
 
         curs.execute(q, [queue])
-        display_result(curs, 'Queue: %s' % queue)
+        display_result(curs, 'Queue "%s":' % queue)
 
     def cmd_show_consumer(self, params):
         """Show consumer status"""
@@ -1176,7 +1189,7 @@ class AdminConsole:
 
         q = "select * from pgq.get_consumer_info(%s, %s)"
         curs.execute(q, [q_queue, q_consumer])
-        display_result(curs, 'Consumer "%s" on queue "%s"' % (consumer, queue))
+        display_result(curs, 'Consumer "%s" on queue "%s":' % (consumer, queue))
 
     def cmd_show_node(self, params):
         """Show node information."""
@@ -1198,7 +1211,7 @@ class AdminConsole:
                  order by 1,2"""
 
         curs.execute(q, [q_node, q_queue])
-        display_result(curs, 'Node "%s" on queue "%s"' % (node, queue))
+        display_result(curs, 'Node "%s" on queue "%s":' % (node, queue))
 
     def cmd_show_batch(self, params):
         batch_id = params.get('batch_id')
@@ -1223,7 +1236,7 @@ class AdminConsole:
 
         q = "select * from pgq.get_batch_events(%s)"
         curs.execute(q, [batch_id])
-        display_result(curs, 'Batch events')
+        display_result(curs, 'Batch events:')
 
     def cmd_register_consumer(self, params):
         queue = params.get("queue", self.cur_queue)
@@ -1389,7 +1402,7 @@ class AdminConsole:
 
         q = """select * from londiste.local_show_missing(%s)"""
         curs.execute(q, [queue])
-        display_result(curs, 'Missing objects queue "%s"' % (queue))
+        display_result(curs, 'Missing objects on queue "%s":' % (queue))
 
     def cmd_londiste_tables(self, params):
         """Show local tables."""
@@ -1400,7 +1413,7 @@ class AdminConsole:
 
         q = """select * from londiste.get_table_list(%s) where local"""
         curs.execute(q, [queue])
-        display_result(curs, 'Local tables on queue "%s"' % (queue))
+        display_result(curs, 'Local tables on queue "%s":' % (queue))
 
     def cmd_londiste_seqs(self, params):
         """Show local seqs."""
@@ -1411,7 +1424,7 @@ class AdminConsole:
 
         q = """select * from londiste.get_seq_list(%s) where local"""
         curs.execute(q, [queue])
-        display_result(curs, 'Sequences on queue "%s"' % (queue))
+        display_result(curs, 'Sequences on queue "%s":' % (queue))
 
     def cmd_londiste_add_table(self, params):
         """Add table."""
@@ -1509,4 +1522,3 @@ def main():
 
 if __name__ == '__main__':
     main()
-
diff --git a/python/skytools/__init__.py b/python/skytools/__init__.py
index 6bdeb51e..67362996 100644
--- a/python/skytools/__init__.py
+++ b/python/skytools/__init__.py
@@ -29,8 +29,18 @@ _symbols = {
     'T_SEQUENCE': 'skytools.dbstruct:T_SEQUENCE',
     'T_TABLE': 'skytools.dbstruct:T_TABLE',
     'T_TRIGGER': 'skytools.dbstruct:T_TRIGGER',
+    # skytools.fileutil
+    'signal_pidfile': 'skytools.fileutil:signal_pidfile',
+    'write_atomic': 'skytools.fileutil:write_atomic',
     # skytools.gzlog
     'gzip_append': 'skytools.gzlog:gzip_append',
+    # skytools.natsort
+    'natsort': 'skytools.natsort:natsort',
+    'natsort_icase': 'skytools.natsort:natsort_icase',
+    'natsorted': 'skytools.natsort:natsorted',
+    'natsorted_icase': 'skytools.natsort:natsorted_icase',
+    'natsort_key': 'skytools.natsort:natsort_key',
+    'natsort_key_icase': 'skytools.natsort:natsort_key_icase',
     # skytools.parsing
     'dedent': 'skytools.parsing:dedent',
     'hsize_to_bytes': 'skytools.parsing:hsize_to_bytes',
@@ -81,7 +91,6 @@ _symbols = {
     'BaseScript': 'skytools.scripting:BaseScript',
     'daemonize': 'skytools.scripting:daemonize',
     'DBScript': 'skytools.scripting:DBScript',
-    'signal_pidfile': 'skytools.scripting:signal_pidfile',
     'UsageError': 'skytools.scripting:UsageError',
     # skytools.skylog
     'getLogger': 'skytools.skylog:getLogger',
@@ -106,6 +115,7 @@ _symbols = {
     'exists_table': 'skytools.sqltools:exists_table',
     'exists_temp_table': 'skytools.sqltools:exists_temp_table',
     'exists_type': 'skytools.sqltools:exists_type',
+    'exists_view': 'skytools.sqltools:exists_view',
     'fq_name': 'skytools.sqltools:fq_name',
     'fq_name_parts': 'skytools.sqltools:fq_name_parts',
     'full_copy': 'skytools.sqltools:full_copy',
diff --git a/python/skytools/dbservice.py b/python/skytools/dbservice.py
index e49576ce..d1ddb666 100755
--- a/python/skytools/dbservice.py
+++ b/python/skytools/dbservice.py
@@ -92,7 +92,11 @@ def get_record_list(array):
     """
     if array is None:
         return []
-    return map(get_record, skytools.parse_pgarray(array))
+
+    if isinstance(array, list):
+        return map(get_record, array)
+    else:
+        return map(get_record, skytools.parse_pgarray(array))
 
 def get_record_lists(tbl, field):
     """ Create dictionary of lists from given list using field as grouping criteria
diff --git a/python/skytools/dbstruct.py b/python/skytools/dbstruct.py
index 5311889a..e1a7012c 100644
--- a/python/skytools/dbstruct.py
+++ b/python/skytools/dbstruct.py
@@ -192,10 +192,16 @@ class TRule(TElem):
             sql = self.defn
             table = self.table_name
         else:
+            idrx = r'''([a-z0-9._]+|"([^"]+|"")+")+'''
             # fixme: broken / quoting
-            rx = r"\bTO[ ][a-z0-9._]+[ ]DO[ ]"
-            pnew = "TO %s DO " % new_table_name
-            sql = rx_replace(rx, self.defn, pnew)
+            rx = r"\bTO[ ]" + idrx
+            rc = re.compile(rx, re.X)
+            m = rc.search(self.defn)
+            if not m:
+                raise Exception('Cannot find table name in rule')
+            old_tbl = m.group(1)
+            new_tbl = quote_fqident(new_table_name)
+            sql = self.defn.replace(old_tbl, new_tbl)
             table = new_table_name
         if self.enabled != 'O':
             # O - rule fires in origin and local modes
diff --git a/python/skytools/fileutil.py b/python/skytools/fileutil.py
new file mode 100644
index 00000000..a88ce88d
--- /dev/null
+++ b/python/skytools/fileutil.py
@@ -0,0 +1,165 @@
+"""File utilities
+
+>>> import tempfile, os
+>>> pidfn = tempfile.mktemp('.pid')
+>>> write_atomic(pidfn, "1")
+>>> write_atomic(pidfn, "2")
+>>> os.remove(pidfn)
+>>> write_atomic(pidfn, "1", '.bak')
+>>> write_atomic(pidfn, "2", '.bak')
+>>> os.remove(pidfn)
+"""
+
+import sys
+import os
+import errno
+
+__all__ = ['write_atomic', 'signal_pidfile']
+
+# non-win32
+def write_atomic(fn, data, bakext=None, mode='b'):
+    """Write file with rename."""
+
+    if mode not in ['', 'b', 't']:
+        raise ValueError("unsupported fopen mode")
+
+    # write new data to tmp file
+    fn2 = fn + '.new'
+    f = open(fn2, 'w' + mode)
+    f.write(data)
+    f.close()
+
+    # link old data to bak file
+    if bakext:
+        if bakext.find('/') >= 0:
+            raise ValueError("invalid bakext")
+        fnb = fn + bakext
+        try:
+            os.unlink(fnb)
+        except OSError, e:
+            if e.errno != errno.ENOENT:
+                raise
+        try:
+            os.link(fn, fnb)
+        except OSError, e:
+            if e.errno != errno.ENOENT:
+                raise
+
+    # win32 does not like replace
+    if sys.platform == 'win32':
+        try:
+            os.remove(fn)
+        except:
+            pass
+
+    # atomically replace file
+    os.rename(fn2, fn)
+
+def signal_pidfile(pidfile, sig):
+    """Send a signal to process whose ID is located in pidfile.
+
+    Read only first line of pidfile to support multiline
+    pidfiles like postmaster.pid.
+
+    Returns True if successful, False if pidfile does not exist
+    or process itself is dead.  Any other errors will be passed
+    as exceptions."""
+
+    ln = ''
+    try:
+        f = open(pidfile, 'r')
+        ln = f.readline().strip()
+        f.close()
+        pid = int(ln)
+        if sig == 0 and sys.platform == 'win32':
+            return win32_detect_pid(pid)
+        os.kill(pid, sig)
+        return True
+    except IOError, ex:
+        if ex.errno != errno.ENOENT:
+            raise
+    except OSError, ex:
+        if ex.errno != errno.ESRCH:
+            raise
+    except ValueError, ex:
+        # this leaves slight race when someone is just creating the file,
+        # but more common case is old empty file.
+        if not ln:
+            return False
+        raise ValueError('Corrupt pidfile: %s' % pidfile)
+    return False
+
+def win32_detect_pid(pid):
+    """Process detection for win32."""
+
+    # avoid pywin32 dependency, use ctypes instead
+    import ctypes
+
+    # win32 constants
+    PROCESS_QUERY_INFORMATION = 1024
+    STILL_ACTIVE = 259
+    ERROR_INVALID_PARAMETER = 87
+    ERROR_ACCESS_DENIED = 5
+
+    # Load kernel32.dll
+    k = ctypes.windll.kernel32
+    OpenProcess = k.OpenProcess
+    OpenProcess.restype = ctypes.c_void_p
+
+    # query pid exit code
+    h = OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid)
+    if h is None:
+        err = k.GetLastError()
+        if err == ERROR_INVALID_PARAMETER:
+            return False
+        if err == ERROR_ACCESS_DENIED:
+            return True
+        raise OSError(errno.EFAULT, "Unknown win32error: " + str(err))
+    code = ctypes.c_int()
+    k.GetExitCodeProcess(h, ctypes.byref(code))
+    k.CloseHandle(h)
+    return code.value == STILL_ACTIVE
+
+def win32_write_atomic(fn, data, bakext=None, mode='b'):
+    """Write file with rename for win32."""
+
+    if mode not in ['', 'b', 't']:
+        raise ValueError("unsupported fopen mode")
+
+    # write new data to tmp file
+    fn2 = fn + '.new'
+    f = open(fn2, 'w' + mode)
+    f.write(data)
+    f.close()
+
+    # move old data to bak file
+    if bakext:
+        if bakext.find('/') >= 0:
+            raise ValueError("invalid bakext")
+        fnb = fn + bakext
+        try:
+            os.remove(fnb)
+        except OSError, e:
+            if e.errno != errno.ENOENT:
+                raise
+        try:
+            os.rename(fn, fnb)
+        except OSError, e:
+            if e.errno != errno.ENOENT:
+                raise
+    else:
+        try:
+            os.remove(fn)
+        except:
+            pass
+
+    # replace file
+    os.rename(fn2, fn)
+
+if sys.platform == 'win32':
+    write_atomic = win32_write_atomic
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
+
diff --git a/python/skytools/parsing.py b/python/skytools/parsing.py
index c7061ed4..decc7e7e 100644
--- a/python/skytools/parsing.py
+++ b/python/skytools/parsing.py
@@ -248,7 +248,7 @@ def parse_tabbed_table(txt):
 
 _extstr = r""" ['] (?: [^'\\]+ | \\. | [']['] )* ['] """
 _stdstr = r""" ['] (?: [^']+ | [']['] )* ['] """
-_name = r""" (?: [a-z][a-z0-9_$]* | " (?: [^"]+ | "" )* " ) """
+_name = r""" (?: [a-z_][a-z0-9_$]* | " (?: [^"]+ | "" )* " ) """
 
 _ident = r""" (?P<ident> %s ) """ % _name
 _fqident = r""" (?P<ident> %s (?: \. %s )* ) """ % (_name, _name)
diff --git a/python/skytools/psycopgwrapper.py b/python/skytools/psycopgwrapper.py
index 8c6d90b6..7cca9e23 100644
--- a/python/skytools/psycopgwrapper.py
+++ b/python/skytools/psycopgwrapper.py
@@ -107,8 +107,14 @@ class _CompatCursor(psycopg2.extras.DictCursor):
 class _CompatConnection(psycopg2.extensions.connection):
     """Connection object that uses _CompatCursor."""
     my_name = '?'
-    def cursor(self):
-        return psycopg2.extensions.connection.cursor(self, cursor_factory = _CompatCursor)
+    def cursor(self, name = None):
+        if name:
+            return psycopg2.extensions.connection.cursor(self,
+                        cursor_factory = _CompatCursor,
+                        name = name)
+        else:
+            return psycopg2.extensions.connection.cursor(self,
+                        cursor_factory = _CompatCursor)
 
 def connect_database(connstr, keepalive = True,
                      tcp_keepidle = 4 * 60,     # 7200
diff --git a/python/skytools/scripting.py b/python/skytools/scripting.py
index 0faa9ef2..5efae8d6 100644
--- a/python/skytools/scripting.py
+++ b/python/skytools/scripting.py
@@ -17,48 +17,10 @@ except ImportError:
 
 __pychecker__ = 'no-badexcept'
 
-__all__ = ['BaseScript', 'signal_pidfile', 'UsageError', 'daemonize',
-           'DBScript']
+__all__ = ['BaseScript', 'UsageError', 'daemonize', 'DBScript']
 
 class UsageError(Exception):
     """User induced error."""
-    pass
-
-#
-# utils
-#
-
-def signal_pidfile(pidfile, sig):
-    """Send a signal to process whose ID is located in pidfile.
-
-    Read only first line of pidfile to support multiline
-    pidfiles like postmaster.pid.
-
-    Returns True is successful, False if pidfile does not exist
-    or process itself is dead. Any other errors will passed
-    as exceptions."""
-
-    ln = ''
-    try:
-        f = open(pidfile, 'r')
-        ln = f.readline().strip()
-        f.close()
-        pid = int(ln)
-        os.kill(pid, sig)
-        return True
-    except IOError, ex:
-        if ex.errno != errno.ENOENT:
-            raise
-    except OSError, ex:
-        if ex.errno != errno.ESRCH:
-            raise
-    except ValueError, ex:
-        # this leaves slight race when someone is just creating the file,
-        # but more common case is old empty file.
-        if not ln:
-            return False
-        raise ValueError('Corrupt pidfile: %s' % pidfile)
-    return False
 
 #
 # daemon mode
@@ -95,7 +57,7 @@ def run_single_process(runnable, daemon, pidfile):
 
     # check if another process is running
     if pidfile and os.path.isfile(pidfile):
-        if signal_pidfile(pidfile, 0):
+        if skytools.signal_pidfile(pidfile, 0):
             print("Pidfile exists, another process running?")
             sys.exit(1)
         else:
@@ -110,10 +72,9 @@ def run_single_process(runnable, daemon, pidfile):
 
     try:
         if pidfile:
-            f = open(pidfile, 'w')
+            data = str(os.getpid())
+            skytools.write_atomic(pidfile, data)
             own_pidfile = True
-            f.write(str(os.getpid()))
-            f.close()
 
         runnable.run()
     finally:
@@ -281,9 +242,9 @@ class BaseScript(object):
     """Script setup.
 
     User class should override work() and optionally __init__(), startup(),
-    reload(), reset() and init_optparse().
+    reload(), reset(), shutdown() and init_optparse().
 
-    NB: in case of daemon, the __init__() and startup()/work() will be
+    NB: In case of daemon, __init__() and startup()/work()/shutdown() will be
     run in different processes.  So nothing fancy should be done in __init__().
 
     @param service_name: unique name for script.
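run_single_process() now writes the pidfile through skytools.write_atomic() from the new fileutil module, so a crash can no longer leave a truncated pidfile behind. A standalone sketch of the semantics (path invented):

    import os
    import skytools

    pidfile = '/tmp/demo.pid'
    # data goes to pidfile + '.new' first, then rename() swaps it in;
    # concurrent readers see either the old file or the new one, never a torn file
    skytools.write_atomic(pidfile, str(os.getpid()))
    os.remove(pidfile)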
@@ -337,7 +298,10 @@ class BaseScript(object):
         self.send_signal(signal.SIGHUP)
 
     def print_version(self):
-        print '%s, Skytools version %s' % (self.service_name, skytools.__version__)
+        service = self.service_name
+        if getattr(self, '__version__', None):
+            service += ' version %s' % self.__version__
+        print '%s, Skytools version %s' % (service, skytools.__version__)
 
     def print_ini(self):
         """Prints out ini file from doc string of the script of default for dbscript
@@ -436,7 +400,7 @@ class BaseScript(object):
         if not self.pidfile:
             self.log.warning("No pidfile in config, nothing to do")
         elif os.path.isfile(self.pidfile):
-            alive = signal_pidfile(self.pidfile, sig)
+            alive = skytools.signal_pidfile(self.pidfile, sig)
             if not alive:
                 self.log.warning("pidfile exists, but process not running")
         else:
@@ -472,6 +436,7 @@ class BaseScript(object):
             self.cf = self.load_config()
         else:
             self.cf.reload()
+            self.log.info("Config reloaded")
         self.job_name = self.cf.get("job_name")
         self.pidfile = self.cf.getfile("pidfile", '')
         self.loop_delay = self.cf.getfloat("loop_delay", 1.0)
@@ -549,6 +514,9 @@ class BaseScript(object):
             else:
                 break
 
+        # run shutdown, safely?
+        self.shutdown()
+
     def run_once(self):
         state = self.run_func_safely(self.work, True)
 
@@ -598,7 +566,11 @@ class BaseScript(object):
 
     def sleep(self, secs):
         """Make script sleep for some amount of time."""
-        time.sleep(secs)
+        try:
+            time.sleep(secs)
+        except IOError, ex:
+            if ex.errno != errno.EINTR:
+                raise
 
     def exception_hook(self, det, emsg):
         """Called on after exception processing.
@@ -627,8 +599,18 @@ class BaseScript(object):
         """
 
         # set signals
-        signal.signal(signal.SIGHUP, self.hook_sighup)
-        signal.signal(signal.SIGINT, self.hook_sigint)
+        if hasattr(signal, 'SIGHUP'):
+            signal.signal(signal.SIGHUP, self.hook_sighup)
+        if hasattr(signal, 'SIGINT'):
+            signal.signal(signal.SIGINT, self.hook_sigint)
+
+    def shutdown(self):
+        """Will be called just after exiting main loop.
+
+        In case of daemon, it will be called in the same process as work(),
+        unlike __init__().
+        """
+        pass
 
     # define some aliases (short-cuts / backward compatibility cruft)
     stat_add = stat_put    # Old, deprecated function.
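The new shutdown() hook gives long-running scripts a defined cleanup point after the main loop exits. A hedged sketch of a subclass using it (service name invented; assumes the BaseScript/DBScript API shown above):

    import sys
    import skytools

    class DemoScript(skytools.DBScript):
        def work(self):
            # one unit of work per loop iteration
            self.log.info('one work cycle')

        def shutdown(self):
            # called after the loop ends, in the same process as work()
            self.log.info('cleaning up')

    if __name__ == '__main__':
        script = DemoScript('demo_script', sys.argv[1:])
        script.start()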
@@ -801,9 +783,14 @@ class DBScript(BaseScript):
         except select.error, d:
             self.log.info('wait canceled')
 
-    def _exec_cmd(self, curs, sql, args, quiet = False):
+    def _exec_cmd(self, curs, sql, args, quiet = False, prefix = None):
         """Internal tool: Run SQL on cursor."""
-        self.log.debug("exec_cmd: %s" % skytools.quote_statement(sql, args))
+        if self.options.verbose:
+            self.log.debug("exec_cmd: %s" % skytools.quote_statement(sql, args))
+
+        _pfx = ""
+        if prefix:
+            _pfx = "[%s] " % prefix
         curs.execute(sql, args)
         ok = True
         rows = curs.fetchall()
@@ -818,32 +805,32 @@ class DBScript(BaseScript):
                 sys.exit(1)
             level = code / 100
             if level == 1:
-                self.log.debug("%d %s" % (code, msg))
+                self.log.debug("%s%d %s" % (_pfx, code, msg))
             elif level == 2:
                 if quiet:
-                    self.log.debug("%d %s" % (code, msg))
+                    self.log.debug("%s%d %s" % (_pfx, code, msg))
                 else:
-                    self.log.info("%s" % (msg,))
+                    self.log.info("%s%s" % (_pfx, msg,))
             elif level == 3:
-                self.log.warning("%s" % (msg,))
+                self.log.warning("%s%s" % (_pfx, msg,))
             else:
-                self.log.error("%s" % (msg,))
+                self.log.error("%s%s" % (_pfx, msg,))
                 self.log.debug("Query was: %s" % skytools.quote_statement(sql, args))
                 ok = False
         return (ok, rows)
 
-    def _exec_cmd_many(self, curs, sql, baseargs, extra_list, quiet = False):
+    def _exec_cmd_many(self, curs, sql, baseargs, extra_list, quiet = False, prefix=None):
         """Internal tool: Run SQL on cursor multiple times."""
         ok = True
         rows = []
         for a in extra_list:
-            (tmp_ok, tmp_rows) = self._exec_cmd(curs, sql, baseargs + [a], quiet=quiet)
+            (tmp_ok, tmp_rows) = self._exec_cmd(curs, sql, baseargs + [a], quiet, prefix)
             if not tmp_ok:
                 ok = False
             rows += tmp_rows
         return (ok, rows)
 
-    def exec_cmd(self, db_or_curs, q, args, commit = True, quiet = False):
+    def exec_cmd(self, db_or_curs, q, args, commit = True, quiet = False, prefix = None):
         """Run SQL on db with code/value error handling."""
         if hasattr(db_or_curs, 'cursor'):
             db = db_or_curs
@@ -851,7 +838,7 @@ class DBScript(BaseScript):
         else:
             db = None
             curs = db_or_curs
-        (ok, rows) = self._exec_cmd(curs, q, args, quiet = quiet)
+        (ok, rows) = self._exec_cmd(curs, q, args, quiet, prefix)
         if ok:
             if commit and db:
                 db.commit()
@@ -864,7 +851,8 @@ class DBScript(BaseScript):
             # error is already logged
             sys.exit(1)
 
-    def exec_cmd_many(self, db_or_curs, sql, baseargs, extra_list, commit = True, quiet = False):
+    def exec_cmd_many(self, db_or_curs, sql, baseargs, extra_list,
+                      commit = True, quiet = False, prefix = None):
         """Run SQL on db multiple times."""
         if hasattr(db_or_curs, 'cursor'):
             db = db_or_curs
@@ -872,7 +860,7 @@ class DBScript(BaseScript):
         else:
             db = None
             curs = db_or_curs
-        (ok, rows) = self._exec_cmd_many(curs, sql, baseargs, extra_list, quiet=quiet)
+        (ok, rows) = self._exec_cmd_many(curs, sql, baseargs, extra_list, quiet, prefix)
         if ok:
             if commit and db:
                 db.commit()
diff --git a/python/skytools/sqltools.py b/python/skytools/sqltools.py
index f8014eb4..38722657 100644
--- a/python/skytools/sqltools.py
+++ b/python/skytools/sqltools.py
@@ -13,7 +13,7 @@ except ImportError:
 
 __all__ = [
     "fq_name_parts", "fq_name", "get_table_oid", "get_table_pkeys",
     "get_table_columns", "exists_schema", "exists_table", "exists_type",
-    "exists_sequence", "exists_temp_table",
+    "exists_sequence", "exists_temp_table", "exists_view",
     "exists_function", "exists_language", "Snapshot", "magic_insert",
     "CopyPipe", "full_copy", "DBObject", "DBSchema", "DBTable", "DBFunction",
    "DBLanguage", "db_install", "installer_find_file", "installer_apply_file",
@@ -21,8 +21,7 @@ __all__ = [
 ]
 
 class dbdict(dict):
-    """Wrapper on actual dict that allows
-    accessing dict keys as attributes."""
+    """Wrapper on actual dict that allows accessing dict keys as attributes."""
     # obj.foo access
     def __getattr__(self, k):
         "Return attribute."
@@ -34,8 +33,12 @@ class dbdict(dict):
         "Set attribute."
         self[k] = v
     def __delattr__(self, k):
-        "Remove attribute"
+        "Remove attribute."
         del self[k]
+    def merge(self, other):
+        for key in other:
+            if key not in self:
+                self[key] = other[key]
 
 #
 # Fully qualified table name
@@ -137,6 +140,16 @@ def exists_sequence(curs, seq_name):
     res = curs.fetchone()
     return res[0]
 
+def exists_view(curs, view_name):
+    """Does view exists?"""
+    schema, name = fq_name_parts(view_name)
+    q = """select count(1) from pg_namespace n, pg_class c
+           where c.relnamespace = n.oid and c.relkind = 'v'
+             and n.nspname = %s and c.relname = %s"""
+    curs.execute(q, [schema, name])
+    res = curs.fetchone()
+    return res[0]
+
 def exists_type(curs, type_name):
     """Does type exists?"""
     schema, name = fq_name_parts(type_name)
@@ -627,4 +640,3 @@ def mk_delete_sql(row, tbl, pkey_list, field_map = None):
 
 if __name__ == '__main__':
     import doctest
     doctest.testmod()
-
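A quick sketch of the new exists_view() helper, which follows the pattern of the other exists_*() functions (connect string and view name invented):

    import skytools

    db = skytools.connect_database('dbname=testdb')
    curs = db.cursor()
    if not skytools.exists_view(curs, 'public.v_demo'):
        curs.execute('create view public.v_demo as select 1 as x')
    db.commit()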
diff --git a/python/walmgr.py b/python/walmgr.py
index e5374705..604ba691 100755
--- a/python/walmgr.py
+++ b/python/walmgr.py
@@ -10,6 +10,7 @@ Master commands:
   syncdaemon         Daemon mode for regular syncing
   stop               Stop archiving - de-configure PostgreSQL
   periodic           Run periodic command if configured.
+  synch-standby      Manage synchronous streaming replication.
 
 Slave commands:
   boot               Stop playback, accept queries
@@ -183,6 +184,74 @@ class BackupLabel:
         if m:
             self.label_string = m.group(1)
 
+class Pgpass:
+    """Manipulate pgpass contents"""
+
+    def __init__(self, passfile):
+        """Load .pgpass contents"""
+        self.passfile = os.path.expanduser(passfile)
+        self.contents = []
+
+        if os.path.isfile(self.passfile):
+            self.contents = open(self.passfile).readlines()
+
+    def split_pgpass_line(self, pgline):
+        """Parses pgpass line, returns dict"""
+        try:
+            (host, port, db, user, pwd) = pgline.rstrip('\n\r').split(":")
+            return {'host': host, 'port': port, 'db': db, 'user': user, 'pwd': pwd}
+        except ValueError:
+            return None
+
+    def ensure_user(self, host, port, user, pwd):
+        """Ensure that line for streaming replication exists in .pgpass"""
+        self.remove_user(host, port, user)
+        self.contents.insert(0, '%s:%s:%s:%s:%s\n' % (host, port, 'replication', user, pwd))
+
+    def remove_user(self, host, port, user):
+        """Remove all matching lines from .pgpass"""
+
+        new_contents = []
+        found = False
+        for l in self.contents:
+            p = self.split_pgpass_line(l)
+            if p and p['host'] == host and p['port'] == port and p['user'] == user and p['db'] == 'replication':
+                found = True
+                continue
+
+            new_contents.append(l)
+
+        self.contents = new_contents
+        return found
+
+    def write(self):
+        """Write contents back to file"""
+        f = open(self.passfile, 'w')
+        os.chmod(self.passfile, 0600)
+        f.writelines(self.contents)
+        f.close()
+
+    def pgpass_fields_from_conninfo(self, conninfo):
+        """Extract host, user and port from primary-conninfo"""
+        m = re.match("^.*\s*host=\s*([^\s]+)\s*.*$", conninfo)
+        if m:
+            host = m.group(1)
+        else:
+            host = 'localhost'
+        m = re.match("^.*\s*user=\s*([^\s]+)\s*.*$", conninfo)
+        if m:
+            user = m.group(1)
+        else:
+            user = os.environ['USER']
+        m = re.match("^.*\s*port=\s*([^\s]+)\s*.*$", conninfo)
+        if m:
+            port = m.group(1)
+        else:
+            port = '5432'
+
+        return host, port, user
+
+
 class PostgresConfiguration:
     """Postgres configuration manipulation"""
 
@@ -209,7 +278,7 @@ class PostgresConfiguration:
     def synchronous_standby_names(self):
         """Return value for specified parameter"""
         # see if explicitly set
-        m = re.search("^\s*synchronous_standby_names\s*=\s*'?([a-zA-Z01]+)'?\s*#?.*$", self.cf_buf, re.M | re.I)
+        m = re.search("^\s*synchronous_standby_names\s*=\s*'([^']*)'\s*#?.*$", self.cf_buf, re.M | re.I)
         if m:
             return m.group(1)
         # also, it could be commented out as initdb leaves it
@@ -273,6 +342,20 @@ class PostgresConfiguration:
         # polite method does not work, as usually not enough perms for it
         open(self.cf_file, "w").write(self.cf_buf)
 
+    def set_synchronous_standby_names(self, param_value):
+        """Helper function to change synchronous_standby_names and signal postmaster"""
+
+        self.log.info("Changing synchronous_standby_names from '%s' to '%s'" % (self.synchronous_standby_names(), param_value))
+        cf_params = dict()
+        cf_params['synchronous_standby_names'] = param_value
+        self.modify(cf_params)
+        self.write()
+
+        data_dir = self.walmgr.cf.getfile("master_data")
+        self.log.info("Sending SIGHUP to postmaster")
+        self.walmgr.signal_postmaster(data_dir, signal.SIGHUP)
+
+
 class WalMgr(skytools.DBScript):
 
     def init_optparse(self, parser=None):
@@ -294,6 +377,10 @@ class WalMgr(skytools.DBScript):
                      help = "slave: add public key to authorized_hosts", default=False)
         p.add_option("", "--ssh-remove-key", action="store", dest="ssh_remove_key",
                      help = "slave: remove master key from authorized_hosts", default=False)
+        p.add_option("", "--add-password", action="store", dest="add_password",
+                     help = "slave: add password from file to .pgpass. Additional fields will be extracted from primary-conninfo", default=False)
+        p.add_option("", "--remove-password", action="store_true", dest="remove_password",
+                     help = "slave: remove previously added line from .pgpass", default=False)
         p.add_option("", "--primary-conninfo", action="store", dest="primary_conninfo", default=None,
                      help = "slave: connect string for streaming replication master")
         p.add_option("", "--init-slave", action="store_true", dest="init_slave",
@@ -402,27 +489,28 @@ class WalMgr(skytools.DBScript):
             self.pidfile = None
 
         cmdtab = {
-            'init_master':   self.walmgr_init_master,
-            'init_slave':    self.walmgr_init_slave,
-            'setup':         self.walmgr_setup,
-            'stop':          self.master_stop,
-            'backup':        self.run_backup,
-            'listbackups':   self.list_backups,
-            'restore':       self.restore_database,
-            'periodic':      self.master_periodic,
-            'sync':          self.master_sync,
-            'syncdaemon':    self.master_syncdaemon,
-            'pause':         self.slave_pause,
-            'continue':      self.slave_continue,
-            'boot':          self.slave_boot,
-            'cleanup':       self.walmgr_cleanup,
-            'xlock':         self.slave_lock_backups_exit,
-            'xrelease':      self.slave_resume_backups,
-            'xrotate':       self.slave_rotate_backups,
-            'xpurgewals':    self.slave_purge_wals,
-            'xarchive':      self.master_xarchive,
-            'xrestore':      self.xrestore,
-            'xpartialsync':  self.slave_append_partial,
+            'init_master':   self.walmgr_init_master,
+            'init_slave':    self.walmgr_init_slave,
+            'setup':         self.walmgr_setup,
+            'stop':          self.master_stop,
+            'backup':        self.run_backup,
+            'listbackups':   self.list_backups,
+            'restore':       self.restore_database,
+            'periodic':      self.master_periodic,
+            'sync':          self.master_sync,
+            'syncdaemon':    self.master_syncdaemon,
+            'pause':         self.slave_pause,
+            'continue':      self.slave_continue,
+            'boot':          self.slave_boot,
+            'cleanup':       self.walmgr_cleanup,
+            'synch-standby': self.master_synch_standby,
+            'xlock':         self.slave_lock_backups_exit,
+            'xrelease':      self.slave_resume_backups,
+            'xrotate':       self.slave_rotate_backups,
+            'xpurgewals':    self.slave_purge_wals,
'xarchive': self.master_xarchive, + 'xrestore': self.xrestore, + 'xpartialsync': self.slave_append_partial, } if not cmdtab.has_key(self.cmd): @@ -670,11 +758,63 @@ class WalMgr(skytools.DBScript): else: self.log.debug("authorized_keys:\n%s" % keys) + # remove password from .pgpass + primary_conninfo = self.cf.get("primary_conninfo", "") + if self.options.remove_password and primary_conninfo and not self.not_really: + pg = Pgpass('~/.pgpass') + host, port, user = pg.pgpass_fields_from_conninfo(primary_conninfo) + if pg.remove_user(host, port, user): + self.log.info("Removing line from .pgpass") + pg.write() + # get rid of the configuration file, both master and slave self.log.info("Removing config file: %s" % self.cfgfile) if not self.not_really: os.remove(self.cfgfile) + def master_synch_standby(self): + """Manage synchronous_standby_names parameter""" + + if len(self.args) < 1: + die(1, "usage: synch-standby SYNCHRONOUS_STANDBY_NAMES") + + names = self.args[0] + cf = PostgresConfiguration(self, self.cf.getfile("master_config")) + + self.assert_is_master(True) + + # list of slaves + db = self.get_database("master_db") + cur = db.cursor() + cur.execute("select application_name from pg_stat_replication") + slave_names = [slave[0] for slave in cur.fetchall()] + self.close_database("master_db") + + if names.strip() == "": + cf.set_synchronous_standby_names("") + return + + if names.strip() == "*": + if slave_names: + cf.set_synchronous_standby_names(names) + return + else: + die(1,"At least one slave must be available when enabling synchronous mode") + + # ensure that at least one slave is available from new parameter value + slave_found = None + for new_synch_slave in re.findall(r"[^\s,]+",names): + if new_synch_slave not in slave_names: + self.log.warning("No slave available with name %s" % new_synch_slave) + else: + slave_found = True + break + + if not slave_found: + die(1,"At least one slave must be available from new list when enabling synchronous mode") + else: + cf.set_synchronous_standby_names(names) + def master_configure_archiving(self, enable_archiving, can_restart): """Turn the archiving on or off""" @@ -1055,6 +1195,21 @@ primary_conninfo = %(primary_conninfo)s af.write(master_pubkey) af.close() + if self.options.add_password and self.options.primary_conninfo: + # add password to pgpass + + self.log.debug("Reading password from file %s" % self.options.add_password) + pwd = open(self.options.add_password).readline().rstrip('\n\r') + + pg = Pgpass('~/.pgpass') + host, port, user = pg.pgpass_fields_from_conninfo(self.options.primary_conninfo) + pg.ensure_user(host, port, user, pwd) + pg.write() + + self.log.info("Added password from %s to .pgpass" % self.options.add_password) + + + def walmgr_setup(self): if self.is_master: self.log.info("Configuring WAL archiving") @@ -1265,7 +1420,7 @@ primary_conninfo = %(primary_conninfo)s stop_time = time.localtime() # Obtain the last restart point information - ctl = PgControlData(self.cf.getfile("slave_bin", ""), dst, False) + ctl = PgControlData(self.cf.getfile("slave_bin", ""), dst, True) # TODO: The newly created backup directory probably still contains # backup_label.old and recovery.conf files. Remove these. diff --git a/scripts/find_sql_functions.py b/scripts/find_sql_functions.py new file mode 100755 index 00000000..55718cd8 --- /dev/null +++ b/scripts/find_sql_functions.py @@ -0,0 +1,103 @@ +#! /usr/bin/env python + +"""Find and print out function signatures from .sql file. 
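Note that master_synch_standby above refuses to enable synchronous mode unless at least one of the requested standbys is actually connected. A compressed, standalone sketch of that validation rule (illustrative only; walmgr itself queries pg_stat_replication for the slave list):

    import re

    def validate_standby_names(names, slave_names):
        # '' is always allowed (disables sync rep), '*' needs any standby,
        # otherwise at least one named standby must be connected
        if names.strip() == '':
            return True
        if names.strip() == '*':
            return bool(slave_names)
        wanted = re.findall(r"[^\s,]+", names)
        return any(s in slave_names for s in wanted)

    print validate_standby_names('walmgr1, walmgr2', ['walmgr1'])   # True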
+ +Usage: + find_sql_functions.py [-h] [-s] [-p PREFIX] FILE ... + +Switches: + -h Show help + -p PREFIX Prefix each line with string + -s Check whether function is SECURITY DEFINER +""" + +import sys, re, getopt + +rx = r""" +^ +create \s+ (?: or \s+ replace \s+ )? +function ( [^(]+ ) +[(] ( [^)]* ) [)] +""" + +rx_secdef = r"""security\s+definer""" + + +rc = re.compile(rx, re.I | re.M | re.X) +sc = re.compile(r"\s+") +rc_sec = re.compile(rx_secdef, re.I | re.X) + +def grep_file(fn, cf_prefix, cf_secdef): + sql = open(fn).read() + pos = 0 + while 1: + m = rc.search(sql, pos) + if not m: + break + pos = m.end() + + m2 = rc.search(sql, pos) + if m2: + xpos = m2.end() + else: + xpos = len(sql) + secdef = False + m2 = rc_sec.search(sql, pos, xpos) + if m2: + secdef = True + + fname = m.group(1).strip() + fargs = m.group(2) + + alist = fargs.split(',') + tlist = [] + for a in alist: + a = a.strip() + toks = sc.split(a.lower()) + if toks[0] == "out": + continue + if toks[0] in ("in", "inout"): + toks = toks[1:] + # just take last item + tlist.append(toks[-1]) + + sig = "%s(%s)" % (fname, ", ".join(tlist)) + + if cf_prefix: + ln = "%s %s;" % (cf_prefix, sig) + else: + ln = " %s(%s)," % (fname, ", ".join(tlist)) + + if cf_secdef and secdef: + ln = "%-72s -- SECDEF" % (ln) + + print ln + +def main(argv): + cf_secdef = 0 + cf_prefix = '' + + try: + opts, args = getopt.getopt(argv, "hsp:") + except getopt.error, d: + print 'getopt:', d + sys.exit(1) + + for o, a in opts: + if o == '-h': + print __doc__ + sys.exit(0) + elif o == '-s': + cf_secdef = 1 + elif o == '-p': + cf_prefix = a + else: + print __doc__ + sys.exit(1) + + for fn in args: + grep_file(fn, cf_prefix, cf_secdef) + +if __name__ == '__main__': + main(sys.argv[1:]) + diff --git a/scripts/grantfu.py b/scripts/grantfu.py new file mode 100755 index 00000000..5d89d559 --- /dev/null +++ b/scripts/grantfu.py @@ -0,0 +1,331 @@ +#! /usr/bin/env python + +# GrantFu - GRANT/REVOKE generator for Postgres +# +# Copyright (c) 2005 Marko Kreen +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +"""Generator for PostgreSQL permissions. + +Loads config where roles, objects and their mapping is described +and generates grants based on them. + +ConfigParser docs: http://docs.python.org/lib/module-ConfigParser.html + +Example: +-------------------------------------------------------------------- +[DEFAULT] +users = user1, user2 # users to handle +groups = group1, group2 # groups to handle +auto_seq = 0 # dont handle seqs (default) + # '!' 
after a table negates this setting for a table
+seq_name = id          # the name for serial field (default: id)
+seq_usage = 0          # should we grant "usage" or "select, update"
+                       # for automatically handled sequences
+
+# section names can be random, but if you want to see them
+# in same order as in config file, then order them alphabetically
+[1.section]
+on.tables = testtbl, testtbl_id_seq,   # here we handle seq by hand
+        table_with_seq!                # handle seq automatically
+                                       # (table_with_seq_id_seq)
+user1 = select
+group1 = select, insert, update
+
+# instead of 'tables', you may use 'functions', 'languages',
+# 'schemas', 'tablespaces'
+---------------------------------------------------------------------
+"""
+
+import sys, os, getopt
+from ConfigParser import SafeConfigParser
+
+__version__ = "1.0"
+
+R_NEW = 0x01
+R_DEFS = 0x02
+G_DEFS = 0x04
+R_ONLY = 0x80
+
+def usage(err):
+    sys.stderr.write("usage: %s [-r|-R] CONF_FILE\n" % sys.argv[0])
+    sys.stderr.write("  -r   Generate also REVOKE commands\n")
+    sys.stderr.write("  -R   Generate only REVOKE commands\n")
+    sys.stderr.write("  -d   Also REVOKE default perms\n")
+    sys.stderr.write("  -D   Only REVOKE default perms\n")
+    sys.stderr.write("  -o   Generate default GRANTS\n")
+    sys.stderr.write("  -v   Print program version\n")
+    sys.stderr.write("  -t   Put everything in one big transaction\n")
+    sys.exit(err)
+
+class PConf(SafeConfigParser):
+    "List support for ConfigParser"
+    def __init__(self, defaults = None):
+        SafeConfigParser.__init__(self, defaults)
+
+    def get_list(self, sect, key):
+        str = self.get(sect, key).strip()
+        res = []
+        if not str:
+            return res
+        for val in str.split(","):
+            res.append(val.strip())
+        return res
+
+class GrantFu:
+    def __init__(self, cf_file, revoke):
+        self.revoke = revoke
+
+        # load config
+        self.cf = PConf()
+        self.cf.read(cf_file)
+        if not self.cf.has_section("GrantFu"):
+            print "Incorrect config file, GrantFu section missing"
+            sys.exit(1)
+
+        # avoid putting grantfu vars into defaults, thus into every section
+        self.group_list = []
+        self.user_list = []
+        self.auto_seq = 0
+        self.seq_name = "id"
+        self.seq_usage = 0
+        if self.cf.has_option('GrantFu', 'groups'):
+            self.group_list = self.cf.get_list('GrantFu', 'groups')
+        if self.cf.has_option('GrantFu', 'users'):
+            self.user_list += self.cf.get_list('GrantFu', 'users')
+        if self.cf.has_option('GrantFu', 'roles'):
+            self.user_list += self.cf.get_list('GrantFu', 'roles')
+        if self.cf.has_option('GrantFu', 'auto_seq'):
+            self.auto_seq = self.cf.getint('GrantFu', 'auto_seq')
+        if self.cf.has_option('GrantFu', 'seq_name'):
+            self.seq_name = self.cf.get('GrantFu', 'seq_name')
+        if self.cf.has_option('GrantFu', 'seq_usage'):
+            self.seq_usage = self.cf.getint('GrantFu', 'seq_usage')
+
+        # make string of all subjects
+        tmp = []
+        for g in self.group_list:
+            tmp.append("group " + g)
+        for u in self.user_list:
+            tmp.append(u)
+        self.all_subjs = ", ".join(tmp)
+
+        # per-section vars
+        self.sect = None
+        self.seq_list = []
+        self.seq_allowed = []
+
+    def process(self):
+        if len(self.user_list) == 0 and len(self.group_list) == 0:
+            return
+
+        sect_list = self.cf.sections()
+        sect_list.sort()
+        for self.sect in sect_list:
+            if self.sect == "GrantFu":
+                continue
+            print "\n-- %s --" % self.sect
+
+            self.handle_tables()
+            self.handle_other('on.databases', 'DATABASE')
+            self.handle_other('on.functions', 'FUNCTION')
+            self.handle_other('on.languages', 'LANGUAGE')
+            self.handle_other('on.schemas', 'SCHEMA')
+            self.handle_other('on.tablespaces', 'TABLESPACE')
+            self.handle_other('on.sequences',
'SEQUENCE') + self.handle_other('on.types', 'TYPE') + self.handle_other('on.domains', 'DOMAIN') + + def handle_other(self, listname, obj_type): + """Handle grants for all objects except tables.""" + + if not self.sect_hasvar(listname): + return + + # don't parse list, as in case of functions it may be complicated + obj_str = obj_type + " " + self.sect_var(listname) + + if self.revoke & R_NEW: + self.gen_revoke(obj_str) + + if self.revoke & R_DEFS: + self.gen_revoke_defs(obj_str, obj_type) + + if not self.revoke & R_ONLY: + self.gen_one_type(obj_str) + + if self.revoke & G_DEFS: + self.gen_defs(obj_str, obj_type) + + def handle_tables(self): + """Handle grants for tables and sequences. + + The tricky part here is the automatic handling of sequences.""" + + if not self.sect_hasvar('on.tables'): + return + + cleaned_list = [] + table_list = self.sect_list('on.tables') + for table in table_list: + if table[-1] == '!': + table = table[:-1] + if not self.auto_seq: + self.seq_list.append("%s_%s_seq" % (table, self.seq_name)) + else: + if self.auto_seq: + self.seq_list.append("%s_%s_seq" % (table, self.seq_name)) + cleaned_list.append(table) + obj_str = "TABLE " + ", ".join(cleaned_list) + + if self.revoke & R_NEW: + self.gen_revoke(obj_str) + if self.revoke & R_DEFS: + self.gen_revoke_defs(obj_str, "TABLE") + if not self.revoke & R_ONLY: + self.gen_one_type(obj_str) + if self.revoke & G_DEFS: + self.gen_defs(obj_str, "TABLE") + + # cleanup + self.seq_list = [] + self.seq_allowed = [] + + def gen_revoke(self, obj_str): + "Generate revoke for one section / subject type (user or group)" + + if len(self.seq_list) > 0: + obj_str += ", " + ", ".join(self.seq_list) + obj_str = obj_str.strip().replace('\n', '\n ') + print "REVOKE ALL ON %s\n FROM %s CASCADE;" % (obj_str, self.all_subjs) + + def gen_revoke_defs(self, obj_str, obj_type): + "Generate revoke defaults for one section" + + # process only things that have default grants to public + if obj_type not in ('FUNCTION', 'DATABASE', 'LANGUAGE', 'TYPE', 'DOMAIN'): + return + + defrole = 'public' + + # if the sections contains grants to 'public', dont drop + if self.sect_hasvar(defrole): + return + + obj_str = obj_str.strip().replace('\n', '\n ') + print "REVOKE ALL ON %s\n FROM %s CASCADE;" % (obj_str, defrole) + + def gen_defs(self, obj_str, obj_type): + "Generate defaults grants for one section" + + if obj_type == "FUNCTION": + defgrants = "execute" + elif obj_type == "DATABASE": + defgrants = "connect, temp" + elif obj_type in ("LANGUAGE", "TYPE", "DOMAIN"): + defgrants = "usage" + else: + return + + defrole = 'public' + + obj_str = obj_str.strip().replace('\n', '\n ') + print "GRANT %s ON %s\n TO %s;" % (defgrants, obj_str, defrole) + + def gen_one_subj(self, subj, fqsubj, obj_str): + if not self.sect_hasvar(subj): + return + obj_str = obj_str.strip().replace('\n', '\n ') + perm = self.sect_var(subj).strip() + if perm: + print "GRANT %s ON %s\n TO %s;" % (perm, obj_str, fqsubj) + + # check for seq perms + if len(self.seq_list) > 0: + loperm = perm.lower() + if loperm.find("insert") >= 0 or loperm.find("all") >= 0: + self.seq_allowed.append(fqsubj) + + def gen_one_type(self, obj_str): + "Generate GRANT for one section / one object type in section" + + for u in self.user_list: + self.gen_one_subj(u, u, obj_str) + for g in self.group_list: + self.gen_one_subj(g, "group " + g, obj_str) + + # if there was any seq perms, generate grants + if len(self.seq_allowed) > 0: + seq_str = ", ".join(self.seq_list) + subj_str = ", ".join(self.seq_allowed) + if 
self.seq_usage: + cmd = "GRANT usage ON SEQUENCE %s\n TO %s;" + else: + cmd = "GRANT select, update ON %s\n TO %s;" + print cmd % (seq_str, subj_str) + + def sect_var(self, name): + return self.cf.get(self.sect, name).strip() + + def sect_list(self, name): + return self.cf.get_list(self.sect, name) + + def sect_hasvar(self, name): + return self.cf.has_option(self.sect, name) + +def main(): + revoke = 0 + tx = False + + try: + opts, args = getopt.getopt(sys.argv[1:], "vhrRdDot") + except getopt.error, det: + print "getopt error:", det + usage(1) + + for o, v in opts: + if o == "-h": + usage(0) + elif o == "-r": + revoke |= R_NEW + elif o == "-R": + revoke |= R_NEW | R_ONLY + elif o == "-d": + revoke |= R_DEFS + elif o == "-D": + revoke |= R_DEFS | R_ONLY + elif o == "-o": + revoke |= G_DEFS + elif o == "-t": + tx = True + elif o == "-v": + print "GrantFu version", __version__ + sys.exit(0) + + if len(args) != 1: + usage(1) + + if tx: + print "begin;\n" + + g = GrantFu(args[0], revoke) + g.process() + + if tx: + print "\ncommit;\n" + +if __name__ == '__main__': + main() + diff --git a/scripts/scriptmgr.py b/scripts/scriptmgr.py index 26317036..5cba34c0 100755 --- a/scripts/scriptmgr.py +++ b/scripts/scriptmgr.py @@ -53,12 +53,12 @@ import skytools command_usage = """ %prog [options] INI CMD [subcmd args] -commands: - start [-a | jobname ..] start a job - stop [-a | jobname ..] stop a job - restart [-a | jobname ..] restart job(s) - reload [-a | jobname ..] send reload signal - status +Commands: + start -a | -t=service | jobname [...] start job(s) + stop -a | -t=service | jobname [...] stop job(s) + restart -a | -t=service | jobname [...] restart job(s) + reload -a | -t=service | jobname [...] send reload signal + status [-a | -t=service | jobname ...] 
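The rewritten usage above reflects the new -t/--type switch: besides -a, jobs can now be picked by service type, and, as the command dispatch further down in this patch shows, status/info default to all jobs when none are named. Roughly, selection works like this (a standalone sketch, not the actual scriptmgr code):

    # -a takes every job, -t SVC takes all jobs of one service type,
    # explicit job names win over both.
    def select_jobs(job_list, all_flag, svc_type, names):
        if not names and all_flag:
            return [j['job_name'] for j in job_list]
        if not names and svc_type:
            return [j['job_name'] for j in job_list
                    if j['service'] == svc_type]
        return names

    jobs = [{'job_name': 'ticker', 'service': 'pgqadm'}]
    print select_jobs(jobs, False, 'pgqadm', [])   # -> ['ticker']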
""" def job_sort_cmp(j1, j2): @@ -78,6 +78,7 @@ class ScriptMgr(skytools.DBScript): def init_optparse(self, p = None): p = skytools.DBScript.init_optparse(self, p) p.add_option("-a", "--all", action="store_true", help="apply command to all jobs") + p.add_option("-t", "--type", action="store", metavar="SVC", help="apply command to all jobs of this service type") p.add_option("-w", "--wait", action="store_true", help="wait for job(s) after signaling") p.set_usage(command_usage.strip()) return p @@ -100,7 +101,7 @@ class ScriptMgr(skytools.DBScript): 'service': svc_name, 'script': cf.getfile('script', defscript), 'cwd': cf.getfile('cwd'), - 'disabled': cf.getboolean('disabled', 0), + 'disabled': disabled, 'args': cf.get('args', ''), } self.svc_list.append(svc) @@ -149,11 +150,16 @@ class ScriptMgr(skytools.DBScript): self.job_list.append(job) self.job_map[job['job_name']] = job - def cmd_status(self): - for job in self.job_list: + def cmd_status (self, jobs): + for jn in jobs: + try: + job = self.job_map[jn] + except KeyError: + self.log.error ("Unknown job: %s", jn) + continue os.chdir(job['cwd']) cf = skytools.Config(job['service'], job['config']) - pidfile = cf.getfile('pidfile', '') + pidfile = job['pidfile'] name = job['job_name'] svc = job['service'] if job['disabled']: @@ -166,8 +172,13 @@ class ScriptMgr(skytools.DBScript): else: print(" STOPPED [%s] %s" % (svc, name)) - def cmd_info(self): - for job in self.job_list: + def cmd_info (self, jobs): + for jn in jobs: + try: + job = self.job_map[jn] + except KeyError: + self.log.error ("Unknown job: %s", jn) + continue print(job) def cmd_start(self, job_name): @@ -253,24 +264,31 @@ class ScriptMgr(skytools.DBScript): self.job_list = [] self.job_map = {} self.load_jobs() + self.job_list.sort(job_sort_cmp) if len(self.args) < 2: print("need command") sys.exit(1) + cmd = self.args[1] jobs = self.args[2:] + + if cmd in ["status", "info"] and len(jobs) == 0 and not self.options.type: + self.options.all = True + if len(jobs) == 0 and self.options.all: for job in self.job_list: jobs.append(job['job_name']) + if len(jobs) == 0 and self.options.type: + for job in self.job_list: + if job['service'] == self.options.type: + jobs.append(job['job_name']) - self.job_list.sort(job_sort_cmp) - - cmd = self.args[1] if cmd == "status": - self.cmd_status() + self.cmd_status(jobs) return elif cmd == "info": - self.cmd_info() + self.cmd_info(jobs) return if len(jobs) == 0: diff --git a/scripts/simple_consumer.py b/scripts/simple_consumer.py new file mode 100755 index 00000000..df0db11c --- /dev/null +++ b/scripts/simple_consumer.py @@ -0,0 +1,69 @@ +#!/usr/bin/python + +"""Consumer that simply calls SQL query for each event. 
+ +Config:: + # source database + src_db = + + # destination database + dst_db = + + # query to call + dst_query = select * from somefunc(%%(pgq.ev_data)s); + + ## Deprecated, use table_filter ## + # filter for events (SQL fragment) + consumer_filter = ev_extra1 = 'public.mytable1' +""" + + +import sys + +import pkgloader +pkgloader.require('skytools', '3.0') + +import pgq +import skytools + +class SimpleConsumer(pgq.Consumer): + __doc__ = __doc__ + + def reload(self): + super(SimpleConsumer, self).reload() + self.dst_query = self.cf.get("dst_query") + if self.cf.get("consumer_filter", ""): + self.consumer_filter = self.cf.get("consumer_filter", "") + + def process_event(self, db, ev): + curs = self.get_database('dst_db', autocommit = 1).cursor() + + if ev.ev_type[:2] not in ('I:', 'U:', 'D:'): + return + + if ev.ev_data is None: + payload = {} + else: + payload = skytools.db_urldecode(ev.ev_data) + payload['pgq.tick_id'] = self.batch_info['cur_tick_id'] + payload['pgq.ev_id'] = ev.ev_id + payload['pgq.ev_time'] = ev.ev_time + payload['pgq.ev_type'] = ev.ev_type + payload['pgq.ev_data'] = ev.ev_data + payload['pgq.ev_extra1'] = ev.ev_extra1 + payload['pgq.ev_extra2'] = ev.ev_extra2 + payload['pgq.ev_extra3'] = ev.ev_extra3 + payload['pgq.ev_extra4'] = ev.ev_extra4 + + self.log.debug(self.dst_query, payload) + curs.execute(self.dst_query, payload) + if curs.statusmessage[:6] == 'SELECT': + res = curs.fetchall() + self.log.debug(res) + else: + self.log.debug(curs.statusmessage) + +if __name__ == '__main__': + script = SimpleConsumer("simple_consumer3", "src_db", sys.argv[1:]) + script.start() + diff --git a/scripts/simple_local_consumer.py b/scripts/simple_local_consumer.py new file mode 100755 index 00000000..6e3eb601 --- /dev/null +++ b/scripts/simple_local_consumer.py @@ -0,0 +1,66 @@ +#!/usr/bin/python + +"""Consumer that simply calls SQL query for each event. + +It tracks completed batches in local file. 
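Both simple_consumer and simple_local_consumer build the same substitution dict, so dst_query may reference any column of the urlencoded row as well as the pgq.* bookkeeping keys. A small sketch of how a placeholder resolves (illustrative; db_urldecode is the same skytools helper the scripts call):

    import skytools

    ev_data = 'id=1&txt=hello'                # urlencoded row from a PgQ trigger
    payload = skytools.db_urldecode(ev_data)  # {'id': '1', 'txt': 'hello'}
    payload['pgq.ev_data'] = ev_data          # pgq.* keys are added on top

    # with dst_query = select * from somefunc(%%(pgq.ev_data)s);
    # (%% is ConfigParser escaping), the cursor fills %(pgq.ev_data)s
    # from this dict: curs.execute(dst_query, payload)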
+ +Config:: + # source database + src_db = + + # destination database + dst_db = + + # query to call + dst_query = select * from somefunc(%%(pgq.ev_data)s); +""" + + +import sys + +import pkgloader +pkgloader.require('skytools', '3.0') + +import pgq +import skytools + +class SimpleLocalConsumer(pgq.LocalConsumer): + __doc__ = __doc__ + + def reload(self): + super(SimpleLocalConsumer, self).reload() + self.dst_query = self.cf.get("dst_query") + + def process_local_event(self, db, batch_id, ev): + curs = self.get_database('dst_db', autocommit = 1).cursor() + + if ev.ev_type[:2] not in ('I:', 'U:', 'D:'): + return + + if ev.ev_data is None: + payload = {} + else: + payload = skytools.db_urldecode(ev.ev_data) + + payload['pgq.tick_id'] = self.batch_info['cur_tick_id'] + payload['pgq.ev_id'] = ev.ev_id + payload['pgq.ev_time'] = ev.ev_time + payload['pgq.ev_type'] = ev.ev_type + payload['pgq.ev_data'] = ev.ev_data + payload['pgq.ev_extra1'] = ev.ev_extra1 + payload['pgq.ev_extra2'] = ev.ev_extra2 + payload['pgq.ev_extra3'] = ev.ev_extra3 + payload['pgq.ev_extra4'] = ev.ev_extra4 + + self.log.debug(self.dst_query, payload) + curs.execute(self.dst_query, payload) + if curs.statusmessage[:6] == 'SELECT': + res = curs.fetchall() + self.log.debug(res) + else: + self.log.debug(curs.statusmessage) + +if __name__ == '__main__': + script = SimpleLocalConsumer("simple_local_consumer3", "src_db", sys.argv[1:]) + script.start() + diff --git a/scripts/skytools_upgrade.py b/scripts/skytools_upgrade.py index 169a1913..f509b93c 100755 --- a/scripts/skytools_upgrade.py +++ b/scripts/skytools_upgrade.py @@ -43,13 +43,14 @@ def check_version(curs, schema, new_ver_str, recheck_func=None): funcname = "%s.version" % schema if not skytools.exists_function(curs, funcname, 0): if recheck_func is not None: - return recheck_func(curs) + return recheck_func(curs), 'NULL' else: - return 0 + return 0, 'NULL' q = "select %s()" % funcname curs.execute(q) old_ver_str = curs.fetchone()[0] - return is_version_ge(old_ver_str, new_ver_str) + ok = is_version_ge(old_ver_str, new_ver_str) + return ok, old_ver_str class DbUpgrade(skytools.DBScript): @@ -69,7 +70,8 @@ class DbUpgrade(skytools.DBScript): continue # new enough? 
- if check_version(curs, schema, ver, recheck_func): + ok, oldver = check_version(curs, schema, ver, recheck_func) + if ok: continue # too old schema, no way to upgrade @@ -78,8 +80,13 @@ class DbUpgrade(skytools.DBScript): ignore[schema] = 1 continue + if self.options.not_really: + self.log.info ("%s: Would upgrade '%s' version %s to %s", dbname, schema, oldver, ver) + continue + curs = db.cursor() curs.execute('begin') + self.log.info("%s: Upgrading '%s' version %s to %s", dbname, schema, oldver, ver) skytools.installer_apply_file(db, fn, self.log) curs.execute('commit') @@ -87,7 +94,7 @@ class DbUpgrade(skytools.DBScript): """Loop over databases.""" self.set_single_loop(1) - + self.load_cur_versions() # loop over all dbs @@ -149,11 +156,13 @@ class DbUpgrade(skytools.DBScript): return self.get_database('db', connstr = cstr, autocommit = 1) def init_optparse(self, parser=None): - """Setup commend-line flags.""" + """Setup command-line flags.""" p = skytools.DBScript.init_optparse(self, parser) p.set_usage(usage) g = optparse.OptionGroup(p, "options for skytools_upgrade") g.add_option("--all", action="store_true", help = 'upgrade all databases') + g.add_option("--not-really", action = "store_true", dest = "not_really", + default = False, help = "don't actually do anything") g.add_option("--user", help = 'username to use') g.add_option("--host", help = 'hostname to use') g.add_option("--port", help = 'port to use') @@ -168,4 +177,3 @@ class DbUpgrade(skytools.DBScript): if __name__ == '__main__': script = DbUpgrade('skytools_upgrade', sys.argv[1:]) script.start() - diff --git a/setup_skytools.py b/setup_skytools.py index 648b9aac..5440068d 100755 --- a/setup_skytools.py +++ b/setup_skytools.py @@ -37,6 +37,8 @@ sfx_scripts = [ 'scripts/scriptmgr.py', 'scripts/queue_splitter.py', 'scripts/queue_mover.py', + 'scripts/simple_consumer.py', + 'scripts/simple_local_consumer.py', 'scripts/skytools_upgrade.py', ] # those do not need suffix (no conflict with 2.1) @@ -8,7 +8,8 @@ include configure config.guess config.sub install-sh include setup_skytools.py setup_pkgloader.py source.cfg include python/pkgloader.py -recursive-include sql *.sql Makefile *.out *.in *.[ch] README* *.ini *.templ +include sql/common-pgxs.mk +recursive-include sql *.sql Makefile *.out *.in *.[ch] README* *.ini *.templ *.control prune sql/*/results prune sql/*/docs @@ -33,7 +34,7 @@ prune python/skytools/installer_config.py recursive-include lib *.[chg] *.m4 *.mk *.h.in *.sh *-sh Setup.mk README COPYRIGHT prune lib/mk/temos -prune lib/usual/config.h +exclude lib/usual/config.h prune lib/test prune lib/autogen.sh @@ -41,4 +42,6 @@ recursive-include upgrade *.sql Makefile recursive-include tests *.conf *.sh *.ini *.py Makefile data.sql install.sql *.sql *.conf prune fix*.sql prune tests/merge/conf +prune tests/handler/conf prune tests/londiste/conf + diff --git a/sql/common-pgxs.mk b/sql/common-pgxs.mk new file mode 100644 index 00000000..22534504 --- /dev/null +++ b/sql/common-pgxs.mk @@ -0,0 +1,73 @@ + +# PGXS does not support modules that are supposed +# to run on different Postgres versions very well. +# Here are some workarounds for them. + +# Variables that are used when extensions are available +Extension_data ?= +Extension_data_built ?= +Extension_regress ?= + +# Variables that are used when extensions are not available +Contrib_data ?= +Contrib_data_built ?= +Contrib_regress ?= + +# Should the Contrib* files installed (under ../contrib/) +# even when extensions are available? 
+Contrib_install_always ?= no + +# +# switch variables +# + +IfExt = $(if $(filter 8.% 9.0%,$(MAJORVERSION)8.3),$(2),$(1)) + +DATA = $(call IfExt,$(Extension_data),$(Contrib_data)) +DATA_built = $(call IfExt,$(Extension_data_built),$(Contrib_data_built)) +REGRESS = $(call IfExt,$(Extension_regress),$(Contrib_regress)) + +EXTRA_CLEAN += $(call IfExt,$(Contrib_data_built),$(Extension_data_built)) test.dump + +# have deterministic dbname for regtest database +override CONTRIB_TESTDB = regression +REGRESS_OPTS = --load-language=plpgsql --dbname=$(CONTRIB_TESTDB) + +# +# load PGXS +# + +PG_CONFIG ?= pg_config +PGXS = $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) + +# +# build rules, in case Contrib data must be always installed +# + +ifeq ($(call IfExt,$(Contrib_install_always),no),yes) + +all: $(Contrib_data) $(Contrib_data_built) +installdirs: installdirs-old-contrib +install: install-old-contrib + +installdirs-old-contrib: + $(MKDIR_P) '$(DESTDIR)$(datadir)/contrib' + +install-old-contrib: $(Contrib_data) $(Contrib_data_built) installdirs-old-contrib + $(INSTALL_DATA) $(addprefix $(srcdir)/, $(Contrib_data)) $(Contrib_data_built) '$(DESTDIR)$(datadir)/contrib/' + +endif + +# +# regtest shortcuts +# + +test: install + $(MAKE) installcheck || { filterdiff --format=unified regression.diffs | less; exit 1; } + pg_dump regression > test.dump + +ack: + cp results/*.out expected/ + +.PHONY: test ack installdirs-old-contrib install-old-contrib diff --git a/sql/dispatch/create_partition.sql b/sql/dispatch/create_partition.sql index 6925df17..2a328b5d 100644 --- a/sql/dispatch/create_partition.sql +++ b/sql/dispatch/create_partition.sql @@ -37,7 +37,9 @@ declare pos int4; fq_table text; fq_part text; + q_grantee text; g record; + r record; sql text; pgver integer; begin @@ -92,6 +94,7 @@ begin sql := sql || ') inherits (' || fq_table || ')'; execute sql; + -- extra check constraint if i_part_field != '' then part_start := date_trunc(i_part_period, i_part_time); part_end := part_start + ('1 ' || i_part_period)::interval; @@ -110,8 +113,12 @@ begin where table_schema = parent_schema and table_name = parent_name loop - sql := 'grant ' || g.privilege_type || ' on ' || fq_part - || ' to ' || quote_ident(g.grantee); + if g.grantee = 'PUBLIC' then + q_grantee = 'public'; + else + q_grantee := quote_ident(g.grantee); + end if; + sql := 'grant ' || g.privilege_type || ' on ' || fq_part || ' to ' || q_grantee; if g.is_grantable = 'YES' then sql := sql || ' with grant option'; end if; diff --git a/sql/dispatch/expected/test_create_part.out b/sql/dispatch/expected/test_create_part.out index 20a9ef21..bbbd37c5 100644 --- a/sql/dispatch/expected/test_create_part.out +++ b/sql/dispatch/expected/test_create_part.out @@ -10,6 +10,11 @@ create table events ( someval int4 check (someval > 0) ); create index ctime_idx on events (ctime); +create rule ignore_dups AS + on insert to events + where (exists (select 1 from events + where (events.id = new.id))) + do instead nothing; grant select,delete on events to ptest1; grant select,update,delete on events to ptest2 with grant option; select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01', 'month'); @@ -42,6 +47,12 @@ select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::reg 3 (1 row) +select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01'; + count +------- + 0 +(1 row) + -- \d events_2011_01 -- \dp events -- \dp events_2011_01 diff --git a/sql/dispatch/sql/test_create_part.sql 
b/sql/dispatch/sql/test_create_part.sql index 71a552a1..74695644 100644 --- a/sql/dispatch/sql/test_create_part.sql +++ b/sql/dispatch/sql/test_create_part.sql @@ -20,6 +20,14 @@ create table events ( ); create index ctime_idx on events (ctime); +create rule ignore_dups AS + on insert to events + where (exists (select 1 from events + where (events.id = new.id))) + do instead nothing; + + + grant select,delete on events to ptest1; grant select,update,delete on events to ptest2 with grant option; @@ -30,6 +38,7 @@ select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01': select count(*) from pg_indexes where schemaname='public' and tablename = 'events_2011_01'; select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::regclass; +select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01'; -- \d events_2011_01 -- \dp events diff --git a/sql/logtriga/textbuf.c b/sql/logtriga/textbuf.c index a2e6d6b9..49202247 100644 --- a/sql/logtriga/textbuf.c +++ b/sql/logtriga/textbuf.c @@ -1,5 +1,7 @@ #include <postgres.h> + +#include "utils/elog.h" #include "funcapi.h" #include "mb/pg_wchar.h" #include "parser/keywords.h" diff --git a/sql/londiste/Makefile b/sql/londiste/Makefile index c81efb34..31ff8b43 100644 --- a/sql/londiste/Makefile +++ b/sql/londiste/Makefile @@ -1,5 +1,20 @@ -DATA_built = londiste.sql londiste.upgrade.sql +EXTENSION = londiste + +Contrib_data_built = londiste.sql londiste.upgrade.sql \ + structure/oldgrants_londiste.sql \ + structure/newgrants_londiste.sql + +Extension_data_built = londiste--3.1.sql londiste--unpackaged--3.1.sql + +base_regress = londiste_provider londiste_subscriber \ + londiste_fkeys londiste_execute londiste_seqs londiste_merge \ + londiste_leaf londiste_create_part + +Contrib_regress = init_noext $(base_regress) +Extension_regress = init_ext $(base_regress) + +Contrib_install_always = yes SQLS = $(shell sed -e 's/^[^\\].*//' -e 's/\\i //' structure/install.sql) FUNCS = $(shell sed -e 's/^[^\\].*//' -e 's/\\i //' $(SQLS)) @@ -8,18 +23,15 @@ SRCS = $(SQLS) $(FUNCS) NDOC = NaturalDocs NDOCARGS = -r -o html docs/html -p docs -i docs/sql CATSQL = ../../scripts/catsql.py +GRANTFU = ../../scripts/grantfu.py -REGRESS = londiste_install londiste_provider londiste_subscriber \ - londiste_fkeys londiste_execute londiste_seqs londiste_merge \ - londiste_leaf -# londiste_denytrigger +include ../common-pgxs.mk -REGRESS_OPTS = --dbname=regression -override CONTRIB_TESTDB=regression +londiste--3.1.sql: londiste.sql structure/ext_postproc.sql + cat $^ > $@ -PG_CONFIG = pg_config -PGXS = $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) +londiste--unpackaged--3.1.sql: londiste.upgrade.sql structure/ext_unpackaged.sql structure/ext_postproc.sql + cat $^ > $@ londiste.sql: $(SRCS) $(CATSQL) $(SQLS) > $@ @@ -27,11 +39,14 @@ londiste.sql: $(SRCS) londiste.upgrade.sql: $(SRCS) $(CATSQL) structure/upgrade.sql > $@ -test: londiste.sql - $(MAKE) installcheck || { filterdiff --format=unified regression.diffs | less; exit 1; } +structure/newgrants_londiste.sql: structure/grants.ini + $(GRANTFU) -r -d -t $< > $@ -ack: - cp results/* expected/ +structure/oldgrants_londiste.sql: structure/grants.ini + echo "begin;" > $@ + $(GRANTFU) -R -o $< >> $@ + cat structure/grants.sql >> $@ + echo "commit;" >> $@ dox: cleandox mkdir -p docs/html diff --git a/sql/londiste/expected/init_ext.out b/sql/londiste/expected/init_ext.out new file mode 100644 index 00000000..db6ebeef --- /dev/null +++ b/sql/londiste/expected/init_ext.out @@ 
-0,0 +1,21 @@ +\set ECHO off + upgrade_schema +---------------- + 0 +(1 row) + +create extension londiste from 'unpackaged'; +select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; + dumpable +---------- + 4 +(1 row) + +drop extension londiste; +create extension londiste; +select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; + dumpable +---------- + 4 +(1 row) + diff --git a/sql/londiste/expected/init_noext.out b/sql/londiste/expected/init_noext.out new file mode 100644 index 00000000..3cc747e5 --- /dev/null +++ b/sql/londiste/expected/init_noext.out @@ -0,0 +1,16 @@ +\set ECHO off + upgrade_schema +---------------- + 0 +(1 row) + + upgrade_schema +---------------- + 0 +(1 row) + + upgrade_schema +---------------- + 0 +(1 row) + diff --git a/sql/londiste/expected/londiste_create_part.out b/sql/londiste/expected/londiste_create_part.out new file mode 100644 index 00000000..56001c0c --- /dev/null +++ b/sql/londiste/expected/londiste_create_part.out @@ -0,0 +1,102 @@ +\set ECHO none +drop role if exists londiste_test_part1; +drop role if exists londiste_test_part2; +create group londiste_test_part1; +create group londiste_test_part2; +create table events ( + id int4 primary key, + txt text not null, + ctime timestamptz not null default now(), + someval int4 check (someval > 0) +); +create index ctime_idx on events (ctime); +create rule ignore_dups AS + on insert to events + where (exists (select 1 from events + where (events.id = new.id))) + do instead nothing; +create or replace function "NullTrigger"() returns trigger as $$ +begin + return null; +end; $$ language plpgsql; +create trigger "Fooza" after delete on events for each row execute procedure "NullTrigger"(); +alter table events enable always trigger "Fooza"; +grant select,delete on events to londiste_test_part1; +grant select,update,delete on events to londiste_test_part2 with grant option; +grant select,insert on events to public; +select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01', 'month'); + create_partition +------------------ + 1 +(1 row) + +select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamptz, 'month'); + create_partition +------------------ + 0 +(1 row) + +select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamp, 'month'); + create_partition +------------------ + 0 +(1 row) + +select count(*) from pg_indexes where schemaname='public' and tablename = 'events_2011_01'; + count +------- + 2 +(1 row) + +select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::regclass; + count +------- + 3 +(1 row) + +select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01'; + count +------- + 1 +(1 row) + +select * from information_schema.triggers where event_object_schema = 'public' and event_object_table = 'events_2011_01'; + trigger_catalog | trigger_schema | trigger_name | event_manipulation | event_object_catalog | event_object_schema | event_object_table | action_order | action_condition | action_statement | action_orientation | action_timing | action_reference_old_table | action_reference_new_table | action_reference_old_row | action_reference_new_row | created 
+-----------------+----------------+--------------+--------------------+----------------------+---------------------+--------------------+--------------+------------------+-----------------------------------+--------------------+---------------+----------------------------+----------------------------+--------------------------+--------------------------+--------- + regression | public | Fooza | DELETE | regression | public | events_2011_01 | | | EXECUTE PROCEDURE "NullTrigger"() | ROW | AFTER | | | | | +(1 row) + +select tgenabled, pg_get_triggerdef(oid) from pg_trigger where tgrelid = 'events_2011_01'::regclass::oid; + tgenabled | pg_get_triggerdef +-----------+------------------------------------------------------------------------------------------------------ + A | CREATE TRIGGER "Fooza" AFTER DELETE ON events_2011_01 FOR EACH ROW EXECUTE PROCEDURE "NullTrigger"() +(1 row) + +-- test weird quoting +create table "Bad "" table '.' name!" ( + id int4 primary key, + txt text not null, + ctime timestamptz not null default now(), + someval int4 check (someval > 0) +); +create rule "Ignore Dups" AS + on insert to "Bad "" table '.' name!" + where (exists (select 1 from "Bad "" table '.' name!" + where ("Bad "" table '.' name!".id = new.id))) + do instead nothing; +alter table "Bad "" table '.' name!" ENABLE ALWAYS RULE "Ignore Dups"; +select londiste.create_partition('public.Bad " table ''.'' name!', 'public.Bad " table ''.'' part!', 'id', 'ctime', '2011-01-01', 'month'); + create_partition +------------------ + 1 +(1 row) + +select count(*) from pg_rules where schemaname = 'public' and tablename ilike 'bad%'; + count +------- + 2 +(1 row) + +-- \d events_2011_01 +-- \dp events +-- \dp events_2011_01 diff --git a/sql/londiste/expected/londiste_execute.out b/sql/londiste/expected/londiste_execute.out index c1e9745f..fae6cbd1 100644 --- a/sql/londiste/expected/londiste_execute.out +++ b/sql/londiste/expected/londiste_execute.out @@ -6,9 +6,9 @@ select * from londiste.execute_start('branch_set', 'DDL-A.sql', 'drop all', fals (1 row) select * from londiste.execute_start('branch_set', 'DDL-A.sql', 'drop all', false); - ret_code | ret_note -----------+------------------------------------ - 301 | EXECUTE(DDL-A.sql) already applied + ret_code | ret_note +----------+------------------------------------------------ + 201 | EXECUTE: "DDL-A.sql" already applied, skipping (1 row) select * from londiste.execute_finish('branch_set', 'DDL-A.sql'); @@ -48,9 +48,9 @@ select * from londiste.execute_start('aset', 'DDL-root.sql', 'drop all', true); (1 row) select * from londiste.execute_start('aset', 'DDL-root.sql', 'drop all', true); - ret_code | ret_note -----------+--------------------------------------- - 301 | EXECUTE(DDL-root.sql) already applied + ret_code | ret_note +----------+--------------------------------------------------- + 201 | EXECUTE: "DDL-root.sql" already applied, skipping (1 row) select * from londiste.execute_finish('aset', 'DDL-root.sql'); diff --git a/sql/londiste/expected/londiste_seqs.out b/sql/londiste/expected/londiste_seqs.out index 459f7d93..ed21db3f 100644 --- a/sql/londiste/expected/londiste_seqs.out +++ b/sql/londiste/expected/londiste_seqs.out @@ -120,3 +120,50 @@ select * from londiste.local_remove_seq('seqbranch', 'masterseq'); 404 | Sequence not found: public.masterseq (1 row) +-- seq auto-removal +create table seqtable ( + id1 serial primary key, + id2 bigserial not null +); +select * from londiste.local_add_table('seqroot', 'seqtable'); + ret_code | ret_note 
+----------+------------------------------ + 200 | Table added: public.seqtable +(1 row) + +select * from londiste.local_add_seq('seqroot', 'seqtable_id1_seq'); + ret_code | ret_note +----------+----------------------------------------- + 200 | Sequence added: public.seqtable_id1_seq +(1 row) + +select * from londiste.local_add_seq('seqroot', 'seqtable_id2_seq'); + ret_code | ret_note +----------+----------------------------------------- + 200 | Sequence added: public.seqtable_id2_seq +(1 row) + +select * from londiste.get_table_list('seqroot'); + table_name | local | merge_state | custom_snapshot | table_attrs | dropped_ddl | copy_role | copy_pos | dest_table +-----------------+-------+-------------+-----------------+-------------+-------------+-----------+----------+------------ + public.seqtable | t | ok | | | | | 0 | +(1 row) + +select * from londiste.get_seq_list('seqroot'); + seq_name | last_value | local +-------------------------+------------+------- + public.seqtable_id1_seq | 30001 | t + public.seqtable_id2_seq | 30001 | t +(2 rows) + +select * from londiste.local_remove_table('seqroot', 'seqtable'); + ret_code | ret_note +----------+-------------------------------- + 200 | Table removed: public.seqtable +(1 row) + +select * from londiste.get_seq_list('seqroot'); + seq_name | last_value | local +----------+------------+------- +(0 rows) + diff --git a/sql/londiste/functions/londiste.create_partition.sql b/sql/londiste/functions/londiste.create_partition.sql new file mode 100644 index 00000000..b13448b4 --- /dev/null +++ b/sql/londiste/functions/londiste.create_partition.sql @@ -0,0 +1,229 @@ + +create or replace function londiste.create_partition( + i_table text, + i_part text, + i_pkeys text, + i_part_field text, + i_part_time timestamptz, + i_part_period text +) returns int as $$ +------------------------------------------------------------------------ +-- Function: public.create_partition +-- +-- Creates inherited child table if it does not exist by copying parent table's structure. +-- Locks parent table to avoid parallel creation. +-- +-- Elements that are copied over by "LIKE x INCLUDING ALL": +-- * Defaults +-- * Constraints +-- * Indexes +-- * Storage options (9.0+) +-- * Comments (9.0+) +-- +-- Elements that are copied over manually because LIKE ALL does not support them: +-- * Grants +-- * Triggers +-- * Rules +-- +-- Parameters: +-- i_table - name of parent table +-- i_part - name of partition table to create +-- i_pkeys - primary key fields (comma separated, used to create constraint). 
+-- i_part_field - field used to partition table (when not partitioned by field, value is NULL) +-- i_part_time - partition time +-- i_part_period - period of partitioned data, current possible values are 'hour', 'day', 'month' and 'year' +-- +-- Example: +-- select londiste.create_partition('aggregate.user_call_monthly', 'aggregate.user_call_monthly_2010_01', 'key_user', 'period_start', '2010-01-10 11:00'::timestamptz, 'month'); +-- +------------------------------------------------------------------------ +declare + chk_start text; + chk_end text; + part_start timestamptz; + part_end timestamptz; + parent_schema text; + parent_name text; + parent_oid oid; + part_schema text; + part_name text; + pos int4; + fq_table text; + fq_part text; + q_grantee text; + g record; + r record; + tg record; + sql text; + pgver integer; + r_oldtbl text; + r_extra text; + r_sql text; +begin + if i_table is null or i_part is null then + raise exception 'need table and part'; + end if; + + -- load postgres version (XYYZZ). + show server_version_num into pgver; + + -- parent table schema and name + quoted name + pos := position('.' in i_table); + if pos > 0 then + parent_schema := substring(i_table for pos - 1); + parent_name := substring(i_table from pos + 1); + else + parent_schema := 'public'; + parent_name := i_table; + end if; + fq_table := quote_ident(parent_schema) || '.' || quote_ident(parent_name); + + -- part table schema and name + quoted name + pos := position('.' in i_part); + if pos > 0 then + part_schema := substring(i_part for pos - 1); + part_name := substring(i_part from pos + 1); + else + part_schema := 'public'; + part_name := i_part; + end if; + fq_part := quote_ident(part_schema) || '.' || quote_ident(part_name); + + -- allow only single creation at a time, without affecting DML operations + execute 'lock table ' || fq_table || ' in share update exclusive mode'; + parent_oid := fq_table::regclass::oid; + + -- check if part table exists + perform 1 from pg_class t, pg_namespace s + where t.relnamespace = s.oid + and s.nspname = part_schema + and t.relname = part_name; + if found then + return 0; + end if; + + -- need to use 'like' to get indexes + sql := 'create table ' || fq_part || ' (like ' || fq_table; + if pgver >= 90000 then + sql := sql || ' including all'; + else + sql := sql || ' including indexes including constraints including defaults'; + end if; + sql := sql || ') inherits (' || fq_table || ')'; + execute sql; + + -- extra check constraint + if i_part_field != '' then + part_start := date_trunc(i_part_period, i_part_time); + part_end := part_start + ('1 ' || i_part_period)::interval; + chk_start := quote_literal(to_char(part_start, 'YYYY-MM-DD HH24:MI:SS')); + chk_end := quote_literal(to_char(part_end, 'YYYY-MM-DD HH24:MI:SS')); + sql := 'alter table '|| fq_part || ' add check (' + || quote_ident(i_part_field) || ' >= ' || chk_start || ' and ' + || quote_ident(i_part_field) || ' < ' || chk_end || ')'; + execute sql; + end if; + + -- load grants from parent table + for g in + select grantor, grantee, privilege_type, is_grantable + from information_schema.table_privileges + where table_schema = parent_schema + and table_name = parent_name + loop + if g.grantee = 'PUBLIC' then + q_grantee = 'public'; + else + q_grantee := quote_ident(g.grantee); + end if; + sql := 'grant ' || g.privilege_type || ' on ' || fq_part || ' to ' || q_grantee; + if g.is_grantable = 'YES' then + sql := sql || ' with grant option'; + end if; + execute sql; + end loop; + + -- generate triggers info query + 
sql := 'SELECT tgname, tgenabled,' + || ' pg_catalog.pg_get_triggerdef(oid) as tgdef' + || ' FROM pg_catalog.pg_trigger ' + || ' WHERE tgrelid = ' || parent_oid::text + || ' AND '; + if pgver >= 90000 then + sql := sql || ' NOT tgisinternal'; + else + sql := sql || ' NOT tgisconstraint'; + end if; + + -- copy triggers + for tg in execute sql + loop + sql := regexp_replace(tg.tgdef, E' ON ([[:alnum:]_.]+|"([^"]|"")+")+ ', ' ON ' || fq_part || ' '); + if sql = tg.tgdef then + raise exception 'Failed to reconstruct the trigger: %', sql; + end if; + execute sql; + if tg.tgenabled = 'O' then + -- standard mode + r_extra := NULL; + elsif tg.tgenabled = 'D' then + r_extra := ' DISABLE TRIGGER '; + elsif tg.tgenabled = 'A' then + r_extra := ' ENABLE ALWAYS TRIGGER '; + elsif tg.tgenabled = 'R' then + r_extra := ' ENABLE REPLICA TRIGGER '; + else + raise exception 'Unknown trigger mode: %', tg.tgenabled; + end if; + if r_extra is not null then + sql := 'ALTER TABLE ' || fq_part || r_extra || quote_ident(tg.tgname); + execute sql; + end if; + end loop; + + -- copy rules + for r in + select rw.rulename, rw.ev_enabled, pg_get_ruledef(rw.oid) as definition + from pg_catalog.pg_rewrite rw + where rw.ev_class = parent_oid + and rw.rulename <> '_RETURN'::name + loop + -- try to skip rule name + r_extra := 'CREATE RULE ' || quote_ident(r.rulename) || ' AS'; + r_sql := substr(r.definition, 1, char_length(r_extra)); + if r_sql = r_extra then + r_sql := substr(r.definition, char_length(r_extra)); + else + raise exception 'failed to match rule name'; + end if; + + -- no clue what name was used in defn, so find it from sql + r_oldtbl := substring(r_sql from ' TO (([[:alnum:]_.]+|"([^"]+|"")+")+)[[:space:]]'); + if char_length(r_oldtbl) > 0 then + sql := replace(r.definition, r_oldtbl, fq_part); + else + raise exception 'failed to find original table name'; + end if; + execute sql; + + -- rule flags + r_extra := NULL; + if r.ev_enabled = 'R' then + r_extra = ' ENABLE REPLICA RULE '; + elsif r.ev_enabled = 'A' then + r_extra = ' ENABLE ALWAYS RULE '; + elsif r.ev_enabled = 'D' then + r_extra = ' DISABLE RULE '; + elsif r.ev_enabled <> 'O' then + raise exception 'unknown rule option: %', r.ev_enabled; + end if; + if r_extra is not null then + sql := 'ALTER TABLE ' || fq_part || r_extra + || quote_ident(r.rulename); + end if; + end loop; + + return 1; +end; +$$ language plpgsql; + diff --git a/sql/londiste/functions/londiste.execute_finish.sql b/sql/londiste/functions/londiste.execute_finish.sql index a7d510eb..fa472a0c 100644 --- a/sql/londiste/functions/londiste.execute_finish.sql +++ b/sql/londiste/functions/londiste.execute_finish.sql @@ -20,13 +20,14 @@ as $$ declare is_root boolean; sql text; + attrs text; begin is_root := pgq_node.is_root_node(i_queue_name); - select execute_sql into sql + select execute_sql, execute_attrs + into sql, attrs from londiste.applied_execute - where queue_name = i_queue_name - and execute_file = i_file_name; + where execute_file = i_file_name; if not found then select 404, 'execute_file called without execute_start' into ret_code, ret_note; @@ -34,7 +35,7 @@ begin end if; if is_root then - perform pgq.insert_event(i_queue_name, 'EXECUTE', sql, i_file_name, null, null, null); + perform pgq.insert_event(i_queue_name, 'EXECUTE', sql, i_file_name, attrs, null, null); end if; select 200, 'Execute finished: ' || i_file_name into ret_code, ret_note; diff --git a/sql/londiste/functions/londiste.execute_start.sql b/sql/londiste/functions/londiste.execute_start.sql index 9ce0071f..d50807ca 
100644
--- a/sql/londiste/functions/londiste.execute_start.sql
+++ b/sql/londiste/functions/londiste.execute_start.sql
@@ -3,11 +3,12 @@ create or replace function londiste.execute_start(
     in i_file_name text,
     in i_sql text,
     in i_expect_root boolean,
+    in i_attrs text,
     out ret_code int4,
     out ret_note text)
 as $$
 -- ----------------------------------------------------------------------
--- Function: londiste.execute_start(4)
+-- Function: londiste.execute_start(5)
 --
 -- Start execution of DDL.  Should be called at the
 -- start of the transaction that does the SQL execution.
@@ -21,10 +22,13 @@ as $$
 --      i_sql - Actual script (informative, not used here)
 --      i_expect_root - Is this on root?  Setup tool sets this to avoid
 --                      execution on branches.
+--      i_attrs - urlencoded dict of extra attributes.
+--                The value will be put into ev_extra2
+--                field of outgoing event.
 --
 -- Returns:
 --      200 - Proceed.
---      301 - Already applied
+--      201 - Already applied
 --      401 - Not root.
 --      404 - No such queue
 -- ----------------------------------------------------------------------
@@ -41,20 +45,57 @@ begin
     end if;
 
     perform 1 from londiste.applied_execute
-        where queue_name = i_queue_name
-            and execute_file = i_file_name;
+        where execute_file = i_file_name;
     if found then
-        select 301, 'EXECUTE(' || i_file_name || ') already applied'
+        select 201, 'EXECUTE: "' || i_file_name || '" already applied, skipping'
             into ret_code, ret_note;
         return;
     end if;
 
     -- this also locks against potential parallel execute
-    insert into londiste.applied_execute (queue_name, execute_file, execute_sql)
-        values (i_queue_name, i_file_name, i_sql);
+    insert into londiste.applied_execute (queue_name, execute_file, execute_sql, execute_attrs)
+        values (i_queue_name, i_file_name, i_sql, i_attrs);
 
     select 200, 'Executing: ' || i_file_name
         into ret_code, ret_note;
     return;
 end;
-$$ language plpgsql strict;
+$$ language plpgsql;
+
+create or replace function londiste.execute_start(
+    in i_queue_name text,
+    in i_file_name text,
+    in i_sql text,
+    in i_expect_root boolean,
+    out ret_code int4,
+    out ret_note text)
+as $$
+-- ----------------------------------------------------------------------
+-- Function: londiste.execute_start(4)
+--
+-- Start execution of DDL.  Should be called at the
+-- start of the transaction that does the SQL execution.
+--
+-- Called-by:
+--      Londiste setup tool on root, replay on branches/leafs.
+--
+-- Parameters:
+--      i_queue_name - cascaded queue name
+--      i_file_name - Unique ID for SQL
+--      i_sql - Actual script (informative, not used here)
+--      i_expect_root - Is this on root?  Setup tool sets this to avoid
+--                      execution on branches.
+--
+-- Returns:
+--      200 - Proceed.
+--      201 - Already applied
+--      401 - Not root.
+-- 404 - No such queue +-- ---------------------------------------------------------------------- +begin + select f.ret_code, f.ret_note + from londiste.execute_start(i_queue_name, i_file_name, i_sql, i_expect_root, null) f + into ret_code, ret_note; + return; +end; +$$ language plpgsql; diff --git a/sql/londiste/functions/londiste.find_table_oid.sql b/sql/londiste/functions/londiste.find_table_oid.sql index 2888575d..584442f7 100644 --- a/sql/londiste/functions/londiste.find_table_oid.sql +++ b/sql/londiste/functions/londiste.find_table_oid.sql @@ -1,4 +1,8 @@ +drop function if exists londiste.find_seq_oid(text); +drop function if exists londiste.find_table_oid(text); +drop function if exists londiste.find_rel_oid(text, text); + create or replace function londiste.find_rel_oid(i_fqname text, i_kind text) returns oid as $$ -- ---------------------------------------------------------------------- diff --git a/sql/londiste/functions/londiste.local_remove_table.sql b/sql/londiste/functions/londiste.local_remove_table.sql index ac86566a..71a0806a 100644 --- a/sql/londiste/functions/londiste.local_remove_table.sql +++ b/sql/londiste/functions/londiste.local_remove_table.sql @@ -18,9 +18,14 @@ as $$ -- ---------------------------------------------------------------------- declare fq_table_name text; + qtbl text; + seqname text; tbl record; + tbl_oid oid; begin fq_table_name := londiste.make_fqname(i_table_name); + qtbl := londiste.quote_fqname(fq_table_name); + tbl_oid := londiste.find_table_oid(i_table_name); select local into tbl from londiste.table_info @@ -44,6 +49,21 @@ begin dest_table = null where queue_name = i_queue_name and table_name = fq_table_name; + + -- drop dependent sequence + for seqname in + select n.nspname || '.' || s.relname + from pg_catalog.pg_class s, + pg_catalog.pg_namespace n, + pg_catalog.pg_attribute a + where a.attrelid = tbl_oid + and a.atthasdef + and a.atttypid::regtype::text in ('integer', 'bigint') + and s.oid = pg_get_serial_sequence(qtbl, a.attname)::regclass::oid + and n.oid = s.relnamespace + loop + perform londiste.local_remove_seq(i_queue_name, seqname); + end loop; else if not pgq_node.is_root_node(i_queue_name) then select 400, 'Table not registered locally: ' || fq_table_name into ret_code, ret_note; diff --git a/sql/londiste/functions/londiste.periodic_maintenance.sql b/sql/londiste/functions/londiste.periodic_maintenance.sql new file mode 100644 index 00000000..0b70d09e --- /dev/null +++ b/sql/londiste/functions/londiste.periodic_maintenance.sql @@ -0,0 +1,18 @@ + +create or replace function londiste.periodic_maintenance() +returns integer as $$ +-- ---------------------------------------------------------------------- +-- Function: londiste.periodic_maintenance(0) +-- +-- Clean random stuff. 
+-- ---------------------------------------------------------------------- +begin + + -- clean old EXECUTE entries + delete from londiste.applied_execute + where execute_time < now() - '3 months'::interval; + + return 0; +end; +$$ language plpgsql; -- need admin access + diff --git a/sql/londiste/functions/londiste.upgrade_schema.sql b/sql/londiste/functions/londiste.upgrade_schema.sql index b982a0cc..a0cf11e2 100644 --- a/sql/londiste/functions/londiste.upgrade_schema.sql +++ b/sql/londiste/functions/londiste.upgrade_schema.sql @@ -28,6 +28,53 @@ begin alter table londiste.table_info add column dest_table text; end if; + -- applied_execute.dest_table + perform 1 from information_schema.columns + where table_schema = 'londiste' + and table_name = 'applied_execute' + and column_name = 'execute_attrs'; + if not found then + alter table londiste.applied_execute add column execute_attrs text; + end if; + + -- applied_execute: drop queue_name from primary key + perform 1 from pg_catalog.pg_indexes + where schemaname = 'londiste' + and tablename = 'applied_execute' + and indexname = 'applied_execute_pkey' + and indexdef like '%queue_name%'; + if found then + alter table londiste.applied_execute + drop constraint applied_execute_pkey; + alter table londiste.applied_execute + add constraint applied_execute_pkey + primary key (execute_file); + end if; + + -- applied_execute: drop fkey to pgq_node + perform 1 from information_schema.table_constraints + where constraint_schema = 'londiste' + and table_schema = 'londiste' + and table_name = 'applied_execute' + and constraint_type = 'FOREIGN KEY' + and constraint_name = 'applied_execute_queue_name_fkey'; + if found then + alter table londiste.applied_execute + drop constraint applied_execute_queue_name_fkey; + end if; + + -- create roles + perform 1 from pg_catalog.pg_roles where rolname = 'londiste_writer'; + if not found then + create role londiste_writer in role pgq_admin; + cnt := cnt + 1; + end if; + perform 1 from pg_catalog.pg_roles where rolname = 'londiste_reader'; + if not found then + create role londiste_reader in role pgq_reader; + cnt := cnt + 1; + end if; + return cnt; end; $$ language plpgsql; diff --git a/sql/londiste/functions/londiste.version.sql b/sql/londiste/functions/londiste.version.sql index 6814ef7c..d9d12f39 100644 --- a/sql/londiste/functions/londiste.version.sql +++ b/sql/londiste/functions/londiste.version.sql @@ -1,8 +1,14 @@ create or replace function londiste.version() returns text as $$ +-- ---------------------------------------------------------------------- +-- Function: londiste.version(0) +-- +-- Returns version string for londiste. ATM it is based on SkyTools version +-- and only bumped when database code changes. 
diff --git a/sql/londiste/functions/londiste.version.sql b/sql/londiste/functions/londiste.version.sql
index 6814ef7c..d9d12f39 100644
--- a/sql/londiste/functions/londiste.version.sql
+++ b/sql/londiste/functions/londiste.version.sql
@@ -1,8 +1,14 @@
 create or replace function londiste.version()
 returns text as $$
+-- ----------------------------------------------------------------------
+-- Function: londiste.version(0)
+--
+-- Returns version string for londiste.  ATM it is based on SkyTools version
+-- and only bumped when database code changes.
+-- ----------------------------------------------------------------------
 begin
-    return '3.0.0.13';
+    return '3.1.0.0';
 end;
 $$ language plpgsql;
diff --git a/sql/londiste/londiste.control b/sql/londiste/londiste.control
new file mode 100644
index 00000000..96c0e96f
--- /dev/null
+++ b/sql/londiste/londiste.control
@@ -0,0 +1,8 @@
+# Londiste extensions
+comment = 'Londiste Replication'
+default_version = '3.1'
+relocatable = false
+superuser = true
+schema = 'pg_catalog'
+requires = 'pgq_node'
+
diff --git a/sql/londiste/sql/init_ext.sql b/sql/londiste/sql/init_ext.sql
new file mode 100644
index 00000000..fdae9503
--- /dev/null
+++ b/sql/londiste/sql/init_ext.sql
@@ -0,0 +1,22 @@
+\set ECHO off
+
+set log_error_verbosity = 'terse';
+set client_min_messages = 'fatal';
+create language plpgsql;
+set client_min_messages = 'warning';
+
+create extension pgq;
+create extension pgq_node;
+
+\i londiste.sql
+
+\set ECHO all
+
+create extension londiste from 'unpackaged';
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste';
+
+drop extension londiste;
+
+create extension londiste;
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste';
+
diff --git a/sql/londiste/sql/init_noext.sql b/sql/londiste/sql/init_noext.sql
new file mode 100644
index 00000000..c11550cf
--- /dev/null
+++ b/sql/londiste/sql/init_noext.sql
@@ -0,0 +1,15 @@
+\set ECHO off
+
+set log_error_verbosity = 'terse';
+set client_min_messages = 'fatal';
+create language plpgsql;
+set client_min_messages = 'warning';
+
+-- \i ../txid/txid.sql
+\i ../pgq/pgq.sql
+\i ../pgq_node/pgq_node.sql
+
+\i londiste.sql
+
+\set ECHO all
+
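The init_ext test exercises the upgrade path added in this release: an existing plain-schema install is adopted as an extension, and its config tables become dumpable again. The same flow on a live database would look roughly like this (a sketch, assuming londiste was previously installed from londiste.sql):

    create extension londiste from 'unpackaged';
    select extname, array_length(extconfig, 1) as dumpable_tables
      from pg_catalog.pg_extension where extname = 'londiste';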
diff --git a/sql/londiste/sql/londiste_create_part.sql b/sql/londiste/sql/londiste_create_part.sql
new file mode 100644
index 00000000..d607a3f7
--- /dev/null
+++ b/sql/londiste/sql/londiste_create_part.sql
@@ -0,0 +1,70 @@
+
+
+\set ECHO none
+set log_error_verbosity = 'terse';
+set client_min_messages = 'warning';
+\set ECHO all
+
+drop role if exists londiste_test_part1;
+drop role if exists londiste_test_part2;
+create group londiste_test_part1;
+create group londiste_test_part2;
+
+create table events (
+    id int4 primary key,
+    txt text not null,
+    ctime timestamptz not null default now(),
+    someval int4 check (someval > 0)
+);
+create index ctime_idx on events (ctime);
+
+create rule ignore_dups AS
+    on insert to events
+    where (exists (select 1 from events
+                   where (events.id = new.id)))
+    do instead nothing;
+
+create or replace function "NullTrigger"() returns trigger as $$
+begin
+    return null;
+end; $$ language plpgsql;
+
+create trigger "Fooza" after delete on events for each row execute procedure "NullTrigger"();
+alter table events enable always trigger "Fooza";
+
+grant select,delete on events to londiste_test_part1;
+grant select,update,delete on events to londiste_test_part2 with grant option;
+grant select,insert on events to public;
+
+select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01', 'month');
+select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamptz, 'month');
+select londiste.create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamp, 'month');
+
+select count(*) from pg_indexes where schemaname='public' and tablename = 'events_2011_01';
+select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::regclass;
+select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01';
+select * from information_schema.triggers where event_object_schema = 'public' and event_object_table = 'events_2011_01';
+select tgenabled, pg_get_triggerdef(oid) from pg_trigger where tgrelid = 'events_2011_01'::regclass::oid;
+
+-- test weird quoting
+
+create table "Bad "" table '.' name!" (
+    id int4 primary key,
+    txt text not null,
+    ctime timestamptz not null default now(),
+    someval int4 check (someval > 0)
+);
+create rule "Ignore Dups" AS
+    on insert to "Bad "" table '.' name!"
+    where (exists (select 1 from "Bad "" table '.' name!"
+                   where ("Bad "" table '.' name!".id = new.id)))
+    do instead nothing;
+alter table "Bad "" table '.' name!" ENABLE ALWAYS RULE "Ignore Dups";
+select londiste.create_partition('public.Bad " table ''.'' name!', 'public.Bad " table ''.'' part!', 'id', 'ctime', '2011-01-01', 'month');
+select count(*) from pg_rules where schemaname = 'public' and tablename ilike 'bad%';
+
+-- \d events_2011_01
+-- \dp events
+-- \dp events_2011_01
+
diff --git a/sql/londiste/sql/londiste_merge.sql b/sql/londiste/sql/londiste_merge.sql
index 9df6b888..fd037f13 100644
--- a/sql/londiste/sql/londiste_merge.sql
+++ b/sql/londiste/sql/londiste_merge.sql
@@ -124,7 +124,7 @@ create function testmatrix(
 returns setof record as $$
 begin
     for p1s, p2s, p3s in
-        select p1.state, p2.state, p3.state
+        select p1.state::name, p2.state::name, p3.state::name
            from states p1, states p2, states p3
            where position('!' in p1.state) + position('!' in p2.state) + position('!' in p3.state) < 2
            order by 1,2,3
diff --git a/sql/londiste/sql/londiste_seqs.sql b/sql/londiste/sql/londiste_seqs.sql
index 27f3cd50..8487a2d0 100644
--- a/sql/londiste/sql/londiste_seqs.sql
+++ b/sql/londiste/sql/londiste_seqs.sql
@@ -36,4 +36,19 @@ select * from londiste.get_seq_list('seqbranch');
 select * from londiste.local_remove_seq('seqbranch', 'masterseq');
 select * from londiste.local_remove_seq('seqbranch', 'masterseq');
 
+-- seq auto-removal
+create table seqtable (
+    id1 serial primary key,
+    id2 bigserial not null
+);
+select * from londiste.local_add_table('seqroot', 'seqtable');
+select * from londiste.local_add_seq('seqroot', 'seqtable_id1_seq');
+select * from londiste.local_add_seq('seqroot', 'seqtable_id2_seq');
+
+select * from londiste.get_table_list('seqroot');
+select * from londiste.get_seq_list('seqroot');
+
+select * from londiste.local_remove_table('seqroot', 'seqtable');
+
+select * from londiste.get_seq_list('seqroot');
diff --git a/sql/londiste/structure/ext_postproc.sql b/sql/londiste/structure/ext_postproc.sql
new file mode 100644
index 00000000..34bd45cd
--- /dev/null
+++ b/sql/londiste/structure/ext_postproc.sql
@@ -0,0 +1,9 @@
+
+-- tag data objects as dumpable
+
+SELECT pg_catalog.pg_extension_config_dump('londiste.table_info', '');
+SELECT pg_catalog.pg_extension_config_dump('londiste.seq_info', '');
+SELECT pg_catalog.pg_extension_config_dump('londiste.applied_execute', '');
+SELECT pg_catalog.pg_extension_config_dump('londiste.pending_fkeys', '');
+
+
diff --git a/sql/londiste/structure/ext_unpackaged.sql b/sql/londiste/structure/ext_unpackaged.sql
new file mode 100644
index 00000000..71714e89
--- /dev/null
+++ b/sql/londiste/structure/ext_unpackaged.sql
@@ -0,0 +1,7 @@
+ALTER EXTENSION londiste ADD SCHEMA londiste;
+
+ALTER EXTENSION londiste ADD TABLE londiste.table_info;
+ALTER EXTENSION londiste ADD TABLE londiste.seq_info;
+ALTER EXTENSION londiste ADD TABLE londiste.applied_execute;
+ALTER EXTENSION londiste ADD TABLE londiste.pending_fkeys;
+
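pg_extension_config_dump() is what makes extension-owned tables visible to pg_dump again; the second argument is an optional WHERE filter selecting which rows to dump, and the empty string used above means "dump everything". Illustrative only, with a hypothetical config table (note the call is only valid from within an extension's install/update script):

    -- dump only user-added rows of a hypothetical extension table:
    SELECT pg_catalog.pg_extension_config_dump('myext.config_tbl', 'WHERE NOT built_in');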
diff --git a/sql/londiste/structure/functions.sql b/sql/londiste/structure/functions.sql
index 2f1fa1cc..4c8322a5 100644
--- a/sql/londiste/structure/functions.sql
+++ b/sql/londiste/structure/functions.sql
@@ -34,6 +34,7 @@ select londiste.upgrade_schema();
 \i functions/londiste.local_set_table_state.sql
 \i functions/londiste.local_set_table_attrs.sql
 \i functions/londiste.local_set_table_struct.sql
+\i functions/londiste.periodic_maintenance.sql
 
 -- Group: Utility functions
 \i functions/londiste.find_column_types.sql
@@ -47,3 +48,6 @@ select londiste.upgrade_schema();
 \i functions/londiste.is_replica_func.sql
 \i functions/londiste.version.sql
 
+-- Group: Utility functions for handlers
+\i functions/londiste.create_partition.sql
+
diff --git a/sql/londiste/structure/grants.ini b/sql/londiste/structure/grants.ini
new file mode 100644
index 00000000..0c704014
--- /dev/null
+++ b/sql/londiste/structure/grants.ini
@@ -0,0 +1,89 @@
+
+[GrantFu]
+# roles that we maintain in this file
+roles = londiste_local, londiste_remote, public
+
+
+[1.tables]
+on.tables = londiste.table_info, londiste.seq_info, londiste.pending_fkeys, londiste.applied_execute
+
+londiste_local = select, insert, update, delete
+londiste_remote = select
+
+# backwards compat, should be dropped?
+public = select
+
+
+[2.public.fns]
+on.functions = %(londiste_public_fns)s
+public = execute
+
+
+[3.remote.node]
+on.functions = %(londiste_remote_fns)s
+londiste_remote = execute
+londiste_local = execute
+
+
+[3.local.node]
+on.functions = %(londiste_local_fns)s, %(londiste_internal_fns)s
+londiste_local = execute
+
+
+# define various groups of functions
+[DEFAULT]
+
+# can be executed by everybody, read-only, not secdef
+londiste_public_fns =
+    londiste.find_column_types(text),
+    londiste.find_table_fkeys(text),
+    londiste.find_rel_oid(text, text),
+    londiste.find_table_oid(text),
+    londiste.find_seq_oid(text),
+    londiste.is_replica_func(oid),
+    londiste.quote_fqname(text),
+    londiste.make_fqname(text),
+    londiste.split_fqname(text),
+    londiste.version()
+
+# remote node uses those on provider, read local tables
+londiste_remote_fns =
+    londiste.get_seq_list(text),
+    londiste.get_table_list(text),
+    londiste._coordinate_copy(text, text)
+
+# used by owner only
+londiste_internal_fns =
+    londiste.periodic_maintenance(),
+    londiste.upgrade_schema()
+
+# used by local worker, admin
+londiste_local_fns =
+    londiste.local_show_missing(text),
+    londiste.local_add_seq(text, text),
+    londiste.local_add_table(text, text, text[], text, text),
+    londiste.local_add_table(text, text, text[], text),
+    londiste.local_add_table(text, text, text[]),
+    londiste.local_add_table(text, text),
+    londiste.local_remove_seq(text, text),
+    londiste.local_remove_table(text, text),
+    londiste.global_add_table(text, text),
+    londiste.global_remove_table(text, text),
+    londiste.global_update_seq(text, text, int8),
+    londiste.global_remove_seq(text, text),
+    londiste.get_table_pending_fkeys(text),
+    londiste.get_valid_pending_fkeys(text),
+    londiste.drop_table_fkey(text, text),
+    londiste.restore_table_fkey(text, text),
+    londiste.execute_start(text, text, text, boolean),
+    londiste.execute_finish(text, text),
+    londiste.root_check_seqs(text, int8),
+    londiste.root_check_seqs(text),
+    londiste.root_notify_change(text, text, text),
+    londiste.local_set_table_state(text, text, text, text),
+    londiste.local_set_table_attrs(text, text, text),
+    londiste.local_set_table_struct(text, text, text),
+    londiste.drop_table_triggers(text, text),
+    londiste.table_info_trigger(),
+    londiste.create_partition(text,text,text,text,timestamptz,text)
+
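grants.ini files are processed by scripts/grantfu.py (wired into the Makefiles below) to generate the newgrants_*.sql/oldgrants_*.sql scripts. The generated SQL is not part of this diff; the assumed shape for one function of the [3.local.node] section would be along these lines:

    GRANT EXECUTE ON FUNCTION londiste.local_add_table(text, text) TO londiste_local;
    REVOKE ALL ON FUNCTION londiste.local_add_table(text, text) FROM public;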
diff --git a/sql/londiste/structure/install.sql b/sql/londiste/structure/install.sql
index 00bf5638..fde88003 100644
--- a/sql/londiste/structure/install.sql
+++ b/sql/londiste/structure/install.sql
@@ -1,4 +1,4 @@
 \i structure/tables.sql
-\i structure/grants.sql
 \i structure/functions.sql
 \i structure/triggers.sql
+\i structure/grants.sql
diff --git a/sql/londiste/structure/tables.sql b/sql/londiste/structure/tables.sql
index 7d082dbb..5f5429b6 100644
--- a/sql/londiste/structure/tables.sql
+++ b/sql/londiste/structure/tables.sql
@@ -146,10 +146,8 @@ create table londiste.applied_execute (
     execute_file text not null,
     execute_time timestamptz not null default now(),
     execute_sql text not null,
-    primary key (queue_name, execute_file),
-    foreign key (queue_name)
-        references pgq_node.node_info (queue_name)
-        on delete cascade
+    execute_attrs text,
+    primary key (execute_file)
 );
diff --git a/sql/pgq/Makefile b/sql/pgq/Makefile
index 87992bae..1a21df42 100644
--- a/sql/pgq/Makefile
+++ b/sql/pgq/Makefile
@@ -1,7 +1,20 @@
+EXTENSION = pgq
+
 DOCS = README.pgq
-DATA_built = pgq.sql pgq.upgrade.sql
-DATA = structure/uninstall_pgq.sql
+
+Extension_data_built = pgq--3.1.sql pgq--unpackaged--3.1.sql
+
+Contrib_data_built = pgq.sql pgq.upgrade.sql structure/oldgrants_pgq.sql structure/newgrants_pgq.sql
+Contrib_data = structure/uninstall_pgq.sql
+
+Contrib_regress = pgq_init_noext pgq_core pgq_perms logutriga sqltriga $(trunc_test)
+Extension_regress = pgq_init_ext pgq_core pgq_perms logutriga sqltriga $(trunc_test)
+
+Contrib_install_always = yes
+
+# MAJORVERSION was defined in 8.4
+trunc_test = $(if $(MAJORVERSION),trunctrg)
 
 # scripts that load other sql files
 LDRS = structure/func_internal.sql structure/func_public.sql structure/triggers.sql
@@ -9,16 +22,12 @@ FUNCS = $(shell sed -e 's/^[^\\].*//' -e 's/\\i //' $(LDRS))
 SRCS = structure/tables.sql structure/grants.sql structure/install.sql \
        structure/uninstall_pgq.sql $(FUNCS)
 
-REGRESS = pgq_init pgq_core logutriga sqltriga trunctrg
-REGRESS_OPTS = --load-language=plpgsql
-
-PG_CONFIG = pg_config
-PGXS = $(shell $(PG_CONFIG) --pgxs)
-include $(PGXS)
+include ../common-pgxs.mk
 
 NDOC = NaturalDocs
 NDOCARGS = -r -o html docs/html -p docs -i docs/sql
 CATSQL = ../../scripts/catsql.py
+GRANTFU = ../../scripts/grantfu.py
 
 SUBDIRS = lowlevel triggers
@@ -42,6 +51,12 @@ triggers/pgq_triggers.sql: sub-all
 # combined SQL files
 #
 
+pgq--3.1.sql: pgq.sql structure/ext_postproc.sql
+	cat $^ > $@
+
+pgq--unpackaged--3.1.sql: pgq.upgrade.sql structure/ext_unpackaged.sql structure/ext_postproc.sql
+	cat $^ > $@
+
 pgq.sql: $(SRCS)
 	$(CATSQL) structure/install.sql > $@
 
@@ -49,6 +64,15 @@ pgq.sql: $(SRCS)
 pgq.upgrade.sql: $(SRCS)
 	$(CATSQL) structure/upgrade.sql > $@
 
+structure/newgrants_pgq.sql: structure/grants.ini
+	$(GRANTFU) -t -r -d $< > $@
+
+structure/oldgrants_pgq.sql: structure/grants.ini structure/grants.sql
+	echo "begin;" > $@
+	$(GRANTFU) -R -o $< >> $@
+	cat structure/grants.sql >> $@
+	echo "commit;" >> $@
+
 #
 # docs
 #
@@ -66,16 +90,6 @@ cleandox:
 
 clean: cleandox
 
-upload: dox
-	rsync -az --delete docs/html/* data1:public_html/pgq-new/
-
-#
-# regtest shortcuts
-#
-
-test: pgq.sql
-	$(MAKE) install installcheck || { filterdiff --format=unified regression.diffs | less; exit 1; }
-
-ack:
-	cp results/*.out expected/
+test: $(Contrib_data_built)
+
+.PHONY: cleandox dox
diff --git a/sql/pgq/expected/logutriga.out b/sql/pgq/expected/logutriga.out
index d6a5aaf4..21bc1bce 100644
--- a/sql/pgq/expected/logutriga.out
+++ b/sql/pgq/expected/logutriga.out
@@ -1,3 +1,4 @@
+\set VERBOSITY 'terse'
 select 1 from (select set_config(name, 'escape', false) as ignore
                from pg_settings where name = 'bytea_output') x
 where x.ignore = 'foo';
@@ -5,11 +6,10 @@ select 1 from (select set_config(name, 'escape', false) as ignore
 ----------
 (0 rows)
 
-drop function pgq.insert_event(text, text, text, text, text, text, text);
-create or replace function pgq.insert_event(que text, ev_type text, ev_data text, x1 text, x2 text, x3 text, x4 text)
+create or replace function pgq.insert_event(queue_name text, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text)
 returns bigint as $$
 begin
-    raise notice 'insert_event(%, %, %, %)', que, ev_type, ev_data, x1;
+    raise notice 'insert_event(%, %, %, %)', queue_name, ev_type, ev_data, ev_extra1;
     return 1;
 end;
 $$ language plpgsql;
@@ -24,10 +24,8 @@ create trigger utest AFTER insert or update or delete ON udata
 for each row execute procedure pgq.logutriga('udata_que');
 insert into udata (txt) values ('text1');
 NOTICE:  insert_event(udata_que, I:id, id=1&txt=text1&bin, public.udata)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 insert into udata (bin) values (E'bi\tn\\000bin');
 NOTICE:  insert_event(udata_que, I:id, id=2&txt&bin=bi%5c011n%5c000bin, public.udata)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test ignore
 drop trigger utest on udata;
 truncate udata;
@@ -35,24 +33,19 @@ create trigger utest after insert or update or delete on udata
 for each row execute procedure pgq.logutriga('udata_que', 'ignore=bin');
 insert into udata values (1, 'txt', 'bin');
 NOTICE:  insert_event(udata_que, I:id, id=1&txt=txt, public.udata)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update udata set txt = 'txt';
 NOTICE:  insert_event(udata_que, U:id, id=1&txt=txt, public.udata)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update udata set txt = 'txt2', bin = 'bin2';
 NOTICE:  insert_event(udata_que, U:id, id=1&txt=txt2, public.udata)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update udata set bin = 'bin3';
 delete from udata;
 NOTICE:  insert_event(udata_que, D:id, id=1&txt=txt2, public.udata)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test missing pkey
 create table nopkey2 (dat text);
 create trigger nopkey_triga2 after insert or update or delete on nopkey2
 for each row execute procedure pgq.logutriga('que3');
 insert into nopkey2 values ('foo');
 NOTICE:  insert_event(que3, I:, dat=foo, public.nopkey2)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update nopkey2 set dat = 'bat';
 ERROR:  Update/Delete on table without pkey
 delete from nopkey2;
@@ -64,7 +57,6 @@ create trigger ucustom_triga after insert or update or delete on ucustom_pkey
 for each row execute procedure pgq.logutriga('que3');
 insert into ucustom_pkey values ('foo', '2');
 NOTICE:  insert_event(que3, I:, dat1=foo&dat2=2&dat3, public.ucustom_pkey)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update ucustom_pkey set dat3 = 'bat';
 ERROR:  Update/Delete on table without pkey
 delete from ucustom_pkey;
@@ -85,13 +77,10 @@ create trigger customf2_triga after insert or update or delete on custom_fields2
 for each row execute procedure pgq.logutriga('que3');
 insert into custom_fields2 values ('foo', '2');
 NOTICE:  insert_event(que3, my_type, dat1=foo&dat2=2&dat3, e1)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update custom_fields2 set dat3 = 'bat';
 NOTICE:  insert_event(que3, my_type, dat1=foo&dat2=2&dat3=bat, e1)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from custom_fields2;
 NOTICE:  insert_event(que3, my_type, dat1=foo&dat2=2&dat3=bat, e1)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test custom expression
 create table custom_expr2 (
     dat1 text not null primary key,
@@ -103,13 +92,10 @@ create trigger customex2_triga after insert or update or delete on custom_expr2
 for each row execute procedure pgq.logutriga('que3', 'ev_extra1=''test='' || dat1', 'ev_type=dat3');
 insert into custom_expr2 values ('foo', '2');
 NOTICE:  insert_event(que3, <NULL>, dat1=foo&dat2=2&dat3, test=foo)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update custom_expr2 set dat3 = 'bat';
 NOTICE:  insert_event(que3, bat, dat1=foo&dat2=2&dat3=bat, test=foo)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from custom_expr2;
 NOTICE:  insert_event(que3, bat, dat1=foo&dat2=2&dat3=bat, test=foo)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test when=
 create table when_test (
     dat1 text not null primary key,
@@ -121,7 +107,6 @@ create trigger when_triga after insert or update or delete on when_test
 for each row execute procedure pgq.logutriga('que3', 'when=dat1=''foo''');
 insert into when_test values ('foo', '2');
 NOTICE:  insert_event(que3, I:dat1, dat1=foo&dat2=2&dat3, public.when_test)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 insert into when_test values ('bar', '2');
 select * from when_test;
  dat1 | dat2 | dat3 
@@ -132,10 +117,8 @@ select * from when_test;
 
 update when_test set dat3 = 'bat';
 NOTICE:  insert_event(que3, U:dat1, dat1=foo&dat2=2&dat3=bat, public.when_test)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from when_test;
 NOTICE:  insert_event(que3, D:dat1, dat1=foo&dat2=2&dat3=bat, public.when_test)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 drop trigger when_triga on when_test;
 create trigger when_triga after insert or update or delete on when_test
 for each row execute procedure pgq.logutriga('que3', 'when=current_user=''random''');
@@ -159,6 +142,5 @@ ERROR:  Table 'public.deny_test' to queue 'noqueue': change not allowed (I)
 -- test pk update
 insert into udata (id, txt) values (1, 'txt');
 NOTICE:  insert_event(udata_que, I:id, id=1&txt=txt, public.udata)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update udata set id = 2;
 ERROR:  primary key update not allowed
diff --git a/sql/pgq/expected/pgq_core.out b/sql/pgq/expected/pgq_core.out
index a0c760fa..265ccc41 100644
--- a/sql/pgq/expected/pgq_core.out
+++ b/sql/pgq/expected/pgq_core.out
@@ -1,3 +1,4 @@
+\set VERBOSITY 'terse'
 select * from pgq.maint_tables_to_vacuum();
  maint_tables_to_vacuum 
 ------------------------
 (0 rows)
@@ -340,8 +341,6 @@ select pgq.insert_event('myqueue', 'test', 'event');
 update pgq.queue set queue_disable_insert = true where queue_name = 'myqueue';
 select pgq.insert_event('myqueue', 'test', 'event');
 ERROR:  Insert into queue disallowed
-CONTEXT:  PL/pgSQL function "insert_event" line 19 at RETURN
-PL/pgSQL function "insert_event" line 15 at RETURN
 update pgq.queue set queue_disable_insert = false where queue_name = 'myqueue';
 select pgq.insert_event('myqueue', 'test', 'event');
  insert_event 
@@ -366,15 +365,11 @@ select pgq.insert_event('myqueue', 'test', 'event2');
 select pgq.insert_event('myqueue', 'test', 'event3');
 ERROR:  Queue 'myqueue' allows max 2 events from one TX
-CONTEXT:  PL/pgSQL function "insert_event" line 19 at RETURN
-PL/pgSQL function "insert_event" line 15 at RETURN
 end;
 update pgq.queue set queue_per_tx_limit = 0 where queue_name = 'myqueue';
 begin;
 select pgq.insert_event('myqueue', 'test', 'event1');
 ERROR:  Queue 'myqueue' allows max 0 events from one TX
-CONTEXT:  PL/pgSQL function "insert_event" line 19 at RETURN
-PL/pgSQL function "insert_event" line 15 at RETURN
 select pgq.insert_event('myqueue', 'test', 'event2');
 ERROR:  current transaction is aborted, commands ignored until end of transaction block
 select pgq.insert_event('myqueue', 'test', 'event3');
diff --git a/sql/pgq/expected/pgq_init_ext.out b/sql/pgq/expected/pgq_init_ext.out
new file mode 100644
index 00000000..df0cc184
--- /dev/null
+++ b/sql/pgq/expected/pgq_init_ext.out
@@ -0,0 +1,49 @@
+-- create noext schema
+\set ECHO none
+ upgrade_schema 
+----------------
+              0
+(1 row)
+
+ create_queue 
+--------------
+            1
+(1 row)
+
+-- convert to extension
+create extension pgq from 'unpackaged';
+select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq';
+ array_length 
+--------------
+            6
+(1 row)
+
+select pgq.create_queue('testqueue2');
+ create_queue 
+--------------
+            1
+(1 row)
+
+--drop extension pgq; -- will fail
+select pgq.drop_queue('testqueue2');
+ drop_queue 
+------------
+          1
+(1 row)
+
+select pgq.drop_queue('testqueue1');
+ drop_queue 
+------------
+          1
+(1 row)
+
+-- drop schema failure
+drop extension pgq;
+-- create clean schema
+create extension pgq;
+select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq';
+ array_length 
+--------------
+            6
+(1 row)
+
diff --git a/sql/pgq/expected/pgq_init.out b/sql/pgq/expected/pgq_init_noext.out
index 6460c411..6460c411 100644
--- a/sql/pgq/expected/pgq_init.out
+++ b/sql/pgq/expected/pgq_init_noext.out
diff --git a/sql/pgq/expected/pgq_perms.out b/sql/pgq/expected/pgq_perms.out
new file mode 100644
index 00000000..83b6ac4d
--- /dev/null
+++ b/sql/pgq/expected/pgq_perms.out
@@ -0,0 +1,42 @@
+\set ECHO off
+drop role if exists pgq_test_producer;
+drop role if exists pgq_test_consumer;
+drop role if exists pgq_test_admin;
+create role pgq_test_consumer with login in role pgq_reader;
+create role pgq_test_producer with login in role pgq_writer;
+create role pgq_test_admin with login in role pgq_admin;
+\c - pgq_test_admin
+select * from pgq.create_queue('pqueue'); -- ok
+ create_queue 
+--------------
+            1
+(1 row)
+
+\c - pgq_test_producer
+select * from pgq.create_queue('pqueue'); -- fail
+ERROR:  permission denied for function create_queue
+select * from pgq.insert_event('pqueue', 'test', 'data'); -- ok
+ insert_event 
+--------------
+            1
+(1 row)
+
+select * from pgq.register_consumer('pqueue', 'prod'); -- fail
+ERROR:  permission denied for function register_consumer
+\c - pgq_test_consumer
+select * from pgq.create_queue('pqueue'); -- fail
+ERROR:  permission denied for function create_queue
+select * from pgq.insert_event('pqueue', 'test', 'data'); -- fail
+ERROR:  permission denied for function insert_event
+select * from pgq.register_consumer('pqueue', 'cons'); -- ok
+ register_consumer 
+-------------------
+                 1
+(1 row)
+
+select * from pgq.next_batch('pqueue', 'cons'); -- ok
+ next_batch 
+------------
+           
+(1 row)
+
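The pgq_perms test pins down the new role split: pgq_writer members may produce events but not administer or consume, pgq_reader members may consume but not produce, and only pgq_admin members may create or drop queues. A condensed sketch of the intended outcome (illustrative; it mirrors the expected output above):

    -- as a pgq_writer member:
    select pgq.insert_event('pqueue', 'test', 'data');   -- allowed
    select pgq.register_consumer('pqueue', 'c1');        -- permission denied
    -- as a pgq_reader member the two outcomes are reversed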
diff --git a/sql/pgq/expected/sqltriga.out b/sql/pgq/expected/sqltriga.out
index b1ccafaf..139de081 100644
--- a/sql/pgq/expected/sqltriga.out
+++ b/sql/pgq/expected/sqltriga.out
@@ -1,3 +1,4 @@
+\set VERBOSITY 'terse'
 -- start testing
 create table rtest (
     id integer primary key,
@@ -9,40 +10,30 @@ for each row execute procedure pgq.sqltriga('que');
 -- simple test
 insert into rtest values (1, 'value1');
 NOTICE:  insert_event(que, I, (id,dat) values ('1','value1'), public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update rtest set dat = 'value2';
 NOTICE:  insert_event(que, U, dat='value2' where id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from rtest;
 NOTICE:  insert_event(que, D, id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test new fields
 alter table rtest add column dat2 text;
 insert into rtest values (1, 'value1');
 NOTICE:  insert_event(que, I, (id,dat,dat2) values ('1','value1',null), public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update rtest set dat = 'value2';
 NOTICE:  insert_event(que, U, dat='value2' where id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from rtest;
 NOTICE:  insert_event(que, D, id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test field ignore
 drop trigger rtest_triga on rtest;
 create trigger rtest_triga after insert or update or delete on rtest
 for each row execute procedure pgq.sqltriga('que2', 'ignore=dat2');
 insert into rtest values (1, '666', 'newdat');
 NOTICE:  insert_event(que2, I, (id,dat) values ('1','666'), public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update rtest set dat = 5, dat2 = 'newdat2';
 NOTICE:  insert_event(que2, U, dat='5' where id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update rtest set dat = 6;
 NOTICE:  insert_event(que2, U, dat='6' where id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from rtest;
 NOTICE:  insert_event(que2, D, id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test hashed pkey
 -- drop trigger rtest_triga on rtest;
 -- create trigger rtest_triga after insert or update or delete on rtest
@@ -57,32 +48,24 @@ create trigger rtest_triga after insert or update or delete on rtest
 for each row execute procedure pgq.sqltriga('que3');
 insert into rtest values (1, 0, 'non-null');
 NOTICE:  insert_event(que3, I, (id,dat,dat2) values ('1','0','non-null'), public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 insert into rtest values (2, 0, NULL);
 NOTICE:  insert_event(que3, I, (id,dat,dat2) values ('2','0',null), public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update rtest set dat2 = 'non-null2' where id=1;
 NOTICE:  insert_event(que3, U, dat2='non-null2' where id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update rtest set dat2 = NULL where id=1;
 NOTICE:  insert_event(que3, U, dat2=NULL where id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update rtest set dat2 = 'new-nonnull' where id=2;
 NOTICE:  insert_event(que3, U, dat2='new-nonnull' where id='2', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from rtest where id=1;
 NOTICE:  insert_event(que3, D, id='1', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from rtest where id=2;
 NOTICE:  insert_event(que3, D, id='2', public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test missing pkey
 create table nopkey (dat text);
 create trigger nopkey_triga after insert or update or delete on nopkey
 for each row execute procedure pgq.sqltriga('que3');
 insert into nopkey values ('foo');
 NOTICE:  insert_event(que3, I, (dat) values ('foo'), public.nopkey)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update nopkey set dat = 'bat';
 ERROR:  Update/Delete on table without pkey
 delete from nopkey;
@@ -93,13 +76,10 @@ create trigger custom_triga after insert or update or delete on custom_pkey
 for each row execute procedure pgq.sqltriga('que3', 'pkey=dat1,dat2');
 insert into custom_pkey values ('foo', '2');
 NOTICE:  insert_event(que3, I, (dat1,dat2,dat3) values ('foo','2',null), public.custom_pkey)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update custom_pkey set dat3 = 'bat';
 NOTICE:  insert_event(que3, U, dat3='bat' where dat1='foo' and dat2='2', public.custom_pkey)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from custom_pkey;
 NOTICE:  insert_event(que3, D, dat1='foo' and dat2='2', public.custom_pkey)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test custom fields
 create table custom_fields (
     dat1 text not null primary key,
@@ -116,13 +96,10 @@ create trigger customf_triga after insert or update or delete on custom_fields
 for each row execute procedure pgq.sqltriga('que3');
 insert into custom_fields values ('foo', '2');
 NOTICE:  insert_event(que3, my_type, (dat1,dat2,dat3) values ('foo','2',null), e1)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update custom_fields set dat3 = 'bat';
 NOTICE:  insert_event(que3, my_type, dat3='bat' where dat1='foo', e1)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from custom_fields;
 NOTICE:  insert_event(que3, my_type, dat1='foo', e1)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test custom expression
 create table custom_expr (
     dat1 text not null primary key,
@@ -134,16 +111,12 @@ create trigger customex_triga after insert or update or delete on custom_expr
 for each row execute procedure pgq.sqltriga('que3', 'ev_extra1=''test='' || dat1', 'ev_type=dat3');
 insert into custom_expr values ('foo', '2');
 NOTICE:  insert_event(que3, <NULL>, (dat1,dat2,dat3) values ('foo','2',null), test=foo)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update custom_expr set dat3 = 'bat';
 NOTICE:  insert_event(que3, bat, dat3='bat' where dat1='foo', test=foo)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 delete from custom_expr;
 NOTICE:  insert_event(que3, bat, dat1='foo', test=foo)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test pk update
 insert into rtest values (1, 'value1');
 NOTICE:  insert_event(que3, I, (id,dat,dat2) values ('1','value1',null), public.rtest)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 update rtest set id = 2;
 ERROR:  primary key update not allowed
diff --git a/sql/pgq/expected/trunctrg.out b/sql/pgq/expected/trunctrg.out
index 8563565d..2aba1302 100644
--- a/sql/pgq/expected/trunctrg.out
+++ b/sql/pgq/expected/trunctrg.out
@@ -1,3 +1,4 @@
+\set VERBOSITY 'terse'
 -- test sqltriga truncate
 create table trunctrg1 (
     dat1 text not null primary key,
@@ -9,7 +10,6 @@ create trigger trunc1_trig after truncate on trunctrg1
 for each statement execute procedure pgq.sqltriga('que3');
 truncate trunctrg1;
 NOTICE:  insert_event(que3, R, , public.trunctrg1)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test logutriga truncate
 create table trunctrg2 (
     dat1 text not null primary key,
@@ -21,7 +21,6 @@ create trigger trunc2_trig after truncate on trunctrg2
 for each statement execute procedure pgq.logutriga('que3');
 truncate trunctrg2;
 NOTICE:  insert_event(que3, R, , public.trunctrg2)
-CONTEXT:  SQL statement "select pgq.insert_event($1, $2, $3, $4, $5, $6, $7)"
 -- test deny
 create trigger deny_triga2 after truncate on trunctrg2
 for each statement execute procedure pgq.logutriga('noqueue', 'deny');
diff --git a/sql/pgq/functions/pgq.grant_perms.sql b/sql/pgq/functions/pgq.grant_perms.sql
index afabfa5c..db8c6edf 100644
--- a/sql/pgq/functions/pgq.grant_perms.sql
+++ b/sql/pgq/functions/pgq.grant_perms.sql
@@ -14,8 +14,12 @@ returns integer as $$
 declare
     q record;
     i integer;
+    pos integer;
     tbl_perms text;
     seq_perms text;
+    dst_schema text;
+    dst_table text;
+    part_table text;
 begin
     select * from pgq.queue into q
         where queue_name = x_queue_name;
@@ -23,36 +27,72 @@ begin
         raise exception 'Queue not found';
     end if;
 
-    if true then
-        -- safe, all access must go via functions
-        seq_perms := 'select';
-        tbl_perms := 'select';
+    -- split data table name to components
+    pos := position('.' in q.queue_data_pfx);
+    if pos > 0 then
+        dst_schema := substring(q.queue_data_pfx for pos - 1);
+        dst_table := substring(q.queue_data_pfx from pos + 1);
     else
-        -- allow ordinery users to directly insert
-        -- to event tables.  dangerous.
-        seq_perms := 'select, update';
-        tbl_perms := 'select, insert';
+        dst_schema := 'public';
+        dst_table := q.queue_data_pfx;
     end if;
 
     -- tick seq, normal users don't need to modify it
-    execute 'grant ' || seq_perms
-        || ' on ' || q.queue_tick_seq || ' to public';
+    execute 'grant select on ' || q.queue_tick_seq || ' to public';
 
     -- event seq
-    execute 'grant ' || seq_perms
-        || ' on ' || q.queue_event_seq || ' to public';
+    execute 'grant select on ' || q.queue_event_seq || ' to public';
 
-    -- parent table for events
-    execute 'grant select on ' || q.queue_data_pfx || ' to public';
+    -- set grants on parent table
+    perform pgq._grant_perms_from('pgq', 'event_template', dst_schema, dst_table);
 
-    -- real event tables
+    -- set grants on real event tables
     for i in 0 .. q.queue_ntables - 1 loop
-        execute 'grant ' || tbl_perms
-            || ' on ' || q.queue_data_pfx || '_' || i::text
-            || ' to public';
+        part_table := dst_table || '_' || i::text;
+        perform pgq._grant_perms_from('pgq', 'event_template', dst_schema, part_table);
     end loop;
 
     return 1;
end;
 $$ language plpgsql security definer;
+
+create or replace function pgq._grant_perms_from(src_schema text, src_table text, dst_schema text, dst_table text)
+returns integer as $$
+-- ----------------------------------------------------------------------
+-- Function: pgq.grant_perms_from(1)
+--
+--      Copy grants from one table to another.
+--      Workaround for missing GRANTS option for CREATE TABLE LIKE.
+-- ----------------------------------------------------------------------
+declare
+    fq_table text;
+    sql text;
+    g record;
+    q_grantee text;
+begin
+    fq_table := quote_ident(dst_schema) || '.' || quote_ident(dst_table);
+
+    for g in
+        select grantor, grantee, privilege_type, is_grantable
+            from information_schema.table_privileges
+            where table_schema = src_schema
+                and table_name = src_table
+    loop
+        if g.grantee = 'PUBLIC' then
+            q_grantee = 'public';
+        else
+            q_grantee = quote_ident(g.grantee);
+        end if;
+        sql := 'grant ' || g.privilege_type || ' on ' || fq_table
+            || ' to ' || q_grantee;
+        if g.is_grantable = 'YES' then
+            sql := sql || ' with grant option';
+        end if;
+        execute sql;
+    end loop;
+
+    return 1;
+end;
+$$ language plpgsql;
+
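pgq._grant_perms_from() reads the existing ACL through information_schema.table_privileges and replays it as GRANT statements, so freshly created event partitions inherit whatever was granted on pgq.event_template. The same inspection can be done by hand; a read-only sketch that prints the statements instead of executing them ('new_part' is a placeholder):

    select 'grant ' || privilege_type || ' on pgq.new_part to ' || grantee as grant_sql
      from information_schema.table_privileges
     where table_schema = 'pgq' and table_name = 'event_template';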
diff --git a/sql/pgq/functions/pgq.insert_event.sql b/sql/pgq/functions/pgq.insert_event.sql
index 2a58fb15..e2bfe780 100644
--- a/sql/pgq/functions/pgq.insert_event.sql
+++ b/sql/pgq/functions/pgq.insert_event.sql
@@ -18,7 +18,7 @@ returns bigint as $$
 begin
     return pgq.insert_event(queue_name, ev_type, ev_data, null, null, null, null);
 end;
-$$ language plpgsql security definer;
+$$ language plpgsql;
diff --git a/sql/pgq/functions/pgq.maint_operations.sql b/sql/pgq/functions/pgq.maint_operations.sql
index 31c44ba3..5ddb26fa 100644
--- a/sql/pgq/functions/pgq.maint_operations.sql
+++ b/sql/pgq/functions/pgq.maint_operations.sql
@@ -113,6 +113,16 @@ begin
         end loop;
     end if;
 
+    perform 1 from pg_proc p, pg_namespace n
+        where p.pronamespace = n.oid
+            and n.nspname = 'londiste'
+            and p.proname = 'periodic_maintenance';
+    if found then
+        func_name := 'londiste.periodic_maintenance';
+        func_arg := NULL;
+        return next;
+    end if;
+
     return;
 end;
 $$ language plpgsql;
diff --git a/sql/pgq/functions/pgq.maint_rotate_tables.sql b/sql/pgq/functions/pgq.maint_rotate_tables.sql
index d972a04e..d3cee739 100644
--- a/sql/pgq/functions/pgq.maint_rotate_tables.sql
+++ b/sql/pgq/functions/pgq.maint_rotate_tables.sql
@@ -30,6 +30,12 @@ begin
         return 0;
     end if;
 
+    -- if DB is in invalid state, stop
+    if txid_current() < cf.queue_switch_step1 then
+        raise exception 'queue % maint failure: step1=%, current=%',
+            i_queue_name, cf.queue_switch_step1, txid_current();
+    end if;
+
     -- find lowest tick for that queue
     select min(sub_last_tick) into lowest_tick_id
         from pgq.subscription
diff --git a/sql/pgq/functions/pgq.upgrade_schema.sql b/sql/pgq/functions/pgq.upgrade_schema.sql
index 69d1bb17..8ddc8b32 100644
--- a/sql/pgq/functions/pgq.upgrade_schema.sql
+++ b/sql/pgq/functions/pgq.upgrade_schema.sql
@@ -19,6 +19,23 @@ begin
         cnt := cnt + 1;
     end if;
 
+    -- create roles
+    perform 1 from pg_catalog.pg_roles where rolname = 'pgq_reader';
+    if not found then
+        create role pgq_reader;
+        cnt := cnt + 1;
+    end if;
+    perform 1 from pg_catalog.pg_roles where rolname = 'pgq_writer';
+    if not found then
+        create role pgq_writer;
+        cnt := cnt + 1;
+    end if;
+    perform 1 from pg_catalog.pg_roles where rolname = 'pgq_admin';
+    if not found then
+        create role pgq_admin in role pgq_reader, pgq_writer;
+        cnt := cnt + 1;
+    end if;
+
     return cnt;
 end;
 $$ language plpgsql;
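The new txid sanity check aborts maintenance when txid_current() is behind the switch point recorded at the previous rotation, which normally indicates a restore from an old dump or a txid epoch problem. Since the switch txid is kept in pgq.queue, the condition can be inspected by hand (a sketch, assuming the queue_switch_step1 column used above):

    select queue_name, queue_switch_step1, txid_current() as current_txid,
           txid_current() >= queue_switch_step1 as sane
      from pgq.queue;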
diff --git a/sql/pgq/functions/pgq.version.sql b/sql/pgq/functions/pgq.version.sql
index 2f5e5764..d8baec48 100644
--- a/sql/pgq/functions/pgq.version.sql
+++ b/sql/pgq/functions/pgq.version.sql
@@ -3,11 +3,11 @@ returns text as $$
 -- ----------------------------------------------------------------------
 -- Function: pgq.version(0)
 --
--- Returns verison string for pgq.  ATM its SkyTools version
--- that is only bumped when PGQ database code changes.
+-- Returns version string for pgq.  ATM it is based on SkyTools version
+-- and only bumped when database code changes.
 -- ----------------------------------------------------------------------
 begin
-    return '3.0.0.14';
+    return '3.1.0.0';
 end;
 $$ language plpgsql;
diff --git a/sql/pgq/lowlevel/insert_event.c b/sql/pgq/lowlevel/insert_event.c
index 490dfe08..d1d3c681 100644
--- a/sql/pgq/lowlevel/insert_event.c
+++ b/sql/pgq/lowlevel/insert_event.c
@@ -20,6 +20,7 @@
 #include "funcapi.h"
 
 #include "catalog/pg_type.h"
+#include "commands/trigger.h"
 #include "executor/spi.h"
 #include "lib/stringinfo.h"
 #include "utils/builtins.h"
@@ -107,7 +108,6 @@ struct QueueState {
 /*
  * Cached plans.
  */
-static void *queue_check_plan;
 static void *queue_plan;
 static HTAB *insert_cache;
@@ -307,8 +307,20 @@ Datum pgq_insert_event_raw(PG_FUNCTION_ARGS)
 
 	load_queue_info(qname, &state);
 
+	/*
+	 * Check if queue has disable_insert flag set.
+	 */
+#if defined(PG_VERSION_NUM) && PG_VERSION_NUM >= 80300
+	/* 8.3+: allow insert_event() even if connection is in 'replica' role */
+	if (state.disabled) {
+		if (SessionReplicationRole != SESSION_REPLICATION_ROLE_REPLICA)
+			elog(ERROR, "Insert into queue disallowed");
+	}
+#else
+	/* pre-8.3 */
 	if (state.disabled)
 		elog(ERROR, "Insert into queue disallowed");
+#endif
 
 	if (PG_ARGISNULL(1))
 		ev_id = state.next_event_id;
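The insert_event.c change keys the disable_insert check off session_replication_role: a session in 'replica' mode (as a londiste replay process runs) may still insert into a queue whose queue_disable_insert flag is set, while ordinary sessions get the error. Behavioral sketch, assuming a queue named 'myqueue' with the flag already set:

    set session_replication_role = 'replica';
    select pgq.insert_event('myqueue', 'test', 'payload');  -- accepted for the replay session
    reset session_replication_role;
    select pgq.insert_event('myqueue', 'test', 'payload');  -- ERROR: Insert into queue disallowed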
diff --git a/sql/pgq/pgq.control b/sql/pgq/pgq.control
new file mode 100644
index 00000000..f02c27d5
--- /dev/null
+++ b/sql/pgq/pgq.control
@@ -0,0 +1,7 @@
+# pgq extension
+comment = 'Generic queue for PostgreSQL'
+default_version = '3.1'
+relocatable = false
+superuser = true
+schema = 'pg_catalog'
+
diff --git a/sql/pgq/sql/logutriga.sql b/sql/pgq/sql/logutriga.sql
index cd5ce151..e97b2dce 100644
--- a/sql/pgq/sql/logutriga.sql
+++ b/sql/pgq/sql/logutriga.sql
@@ -1,13 +1,13 @@
+\set VERBOSITY 'terse'
 
 select 1 from (select set_config(name, 'escape', false) as ignore
                from pg_settings where name = 'bytea_output') x
 where x.ignore = 'foo';
 
-drop function pgq.insert_event(text, text, text, text, text, text, text);
-create or replace function pgq.insert_event(que text, ev_type text, ev_data text, x1 text, x2 text, x3 text, x4 text)
+create or replace function pgq.insert_event(queue_name text, ev_type text, ev_data text, ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text)
 returns bigint as $$
 begin
-    raise notice 'insert_event(%, %, %, %)', que, ev_type, ev_data, x1;
+    raise notice 'insert_event(%, %, %, %)', queue_name, ev_type, ev_data, ev_extra1;
     return 1;
 end;
 $$ language plpgsql;
diff --git a/sql/pgq/sql/pgq_core.sql b/sql/pgq/sql/pgq_core.sql
index 66cdb56e..b312f121 100644
--- a/sql/pgq/sql/pgq_core.sql
+++ b/sql/pgq/sql/pgq_core.sql
@@ -1,3 +1,4 @@
+\set VERBOSITY 'terse'
 select * from pgq.maint_tables_to_vacuum();
 select * from pgq.maint_retry_events();
 
diff --git a/sql/pgq/sql/pgq_init_ext.sql b/sql/pgq/sql/pgq_init_ext.sql
new file mode 100644
index 00000000..3ed13776
--- /dev/null
+++ b/sql/pgq/sql/pgq_init_ext.sql
@@ -0,0 +1,24 @@
+
+-- create noext schema
+\set ECHO none
+\set VERBOSITY 'terse'
+\i structure/install.sql
+select pgq.create_queue('testqueue1');
+\set ECHO all
+
+-- convert to extension
+create extension pgq from 'unpackaged';
+select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq';
+
+select pgq.create_queue('testqueue2');
+--drop extension pgq; -- will fail
+select pgq.drop_queue('testqueue2');
+select pgq.drop_queue('testqueue1');
+
+-- drop schema failure
+drop extension pgq;
+
+-- create clean schema
+create extension pgq;
+
+select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq';
+
diff --git a/sql/pgq/sql/pgq_init.sql b/sql/pgq/sql/pgq_init_noext.sql
index 18a6344b..d33cd9c7 100644
--- a/sql/pgq/sql/pgq_init.sql
+++ b/sql/pgq/sql/pgq_init_noext.sql
@@ -1,5 +1,7 @@
 \set ECHO none
 
+\set VERBOSITY 'terse'
+
 -- \i ../txid/txid.sql
 -- \i pgq.sql
 \i structure/install.sql
diff --git a/sql/pgq/sql/pgq_perms.sql b/sql/pgq/sql/pgq_perms.sql
new file mode 100644
index 00000000..c1962a1f
--- /dev/null
+++ b/sql/pgq/sql/pgq_perms.sql
@@ -0,0 +1,39 @@
+\set ECHO off
+\set VERBOSITY 'terse'
+set client_min_messages = 'warning';
+
+-- drop public perms
+\i structure/newgrants_pgq.sql
+
+-- select proname, proacl from pg_proc p, pg_namespace n where n.nspname = 'pgq' and p.pronamespace = n.oid;
+
+\set ECHO all
+
+drop role if exists pgq_test_producer;
+drop role if exists pgq_test_consumer;
+drop role if exists pgq_test_admin;
+
+create role pgq_test_consumer with login in role pgq_reader;
+create role pgq_test_producer with login in role pgq_writer;
+create role pgq_test_admin with login in role pgq_admin;
+
+
+\c - pgq_test_admin
+
+select * from pgq.create_queue('pqueue'); -- ok
+
+\c - pgq_test_producer
+
+select * from pgq.create_queue('pqueue'); -- fail
+
+select * from pgq.insert_event('pqueue', 'test', 'data'); -- ok
+
+select * from pgq.register_consumer('pqueue', 'prod'); -- fail
+
+\c - pgq_test_consumer
+
+select * from pgq.create_queue('pqueue'); -- fail
+select * from pgq.insert_event('pqueue', 'test', 'data'); -- fail
+select * from pgq.register_consumer('pqueue', 'cons'); -- ok
+select * from pgq.next_batch('pqueue', 'cons'); -- ok
+
diff --git a/sql/pgq/sql/sqltriga.sql b/sql/pgq/sql/sqltriga.sql
index 2d0d0192..f858f911 100644
--- a/sql/pgq/sql/sqltriga.sql
+++ b/sql/pgq/sql/sqltriga.sql
@@ -1,3 +1,4 @@
+\set VERBOSITY 'terse'
 -- start testing
 create table rtest (
     id integer primary key,
diff --git a/sql/pgq/sql/trunctrg.sql b/sql/pgq/sql/trunctrg.sql
index 53ef8d70..f999dedc 100644
--- a/sql/pgq/sql/trunctrg.sql
+++ b/sql/pgq/sql/trunctrg.sql
@@ -1,3 +1,4 @@
+\set VERBOSITY 'terse'
 -- test sqltriga truncate
 create table trunctrg1 (
     dat1 text not null primary key,
diff --git a/sql/pgq/structure/ext_postproc.sql b/sql/pgq/structure/ext_postproc.sql
new file mode 100644
index 00000000..5c945e87
--- /dev/null
+++ b/sql/pgq/structure/ext_postproc.sql
@@ -0,0 +1,13 @@
+
+-- tag data objects as dumpable
+
+SELECT pg_catalog.pg_extension_config_dump('pgq.queue', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq.consumer', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq.tick', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq.subscription', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq.event_template', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq.retry_queue', '');
+
+---- pg_dump is broken and cannot handle dumpable sequences
+-- SELECT pg_catalog.pg_extension_config_dump('pgq.batch_id_seq', '');
+
diff --git a/sql/pgq/structure/ext_unpackaged.sql b/sql/pgq/structure/ext_unpackaged.sql
new file mode 100644
index 00000000..06971142
--- /dev/null
+++ b/sql/pgq/structure/ext_unpackaged.sql
@@ -0,0 +1,12 @@
+
+ALTER EXTENSION pgq ADD SCHEMA pgq;
+
+ALTER EXTENSION pgq ADD TABLE pgq.queue;
+ALTER EXTENSION pgq ADD TABLE pgq.consumer;
+ALTER EXTENSION pgq ADD TABLE pgq.tick;
+ALTER EXTENSION pgq ADD TABLE pgq.subscription;
+ALTER EXTENSION pgq ADD TABLE pgq.event_template;
+ALTER EXTENSION pgq ADD TABLE pgq.retry_queue;
+
+ALTER EXTENSION pgq ADD SEQUENCE pgq.batch_id_seq;
+
diff --git a/sql/pgq/structure/grants.ini b/sql/pgq/structure/grants.ini
new file mode 100644
index 00000000..82cb8554
--- /dev/null
+++ b/sql/pgq/structure/grants.ini
@@ -0,0 +1,100 @@
+
+[GrantFu]
+roles = pgq_reader, pgq_writer, pgq_admin, public
+
+[1.public]
+on.functions = %(pgq_generic_fns)s
+public = execute
+
+[2.consumer]
+on.functions = %(pgq_read_fns)s
+pgq_reader = execute
+
+[3.producer]
+on.functions = %(pgq_write_fns)s
+pgq_writer = execute
+
+[4.admin]
+on.functions = %(pgq_system_fns)s
+pgq_admin = execute
+
+[5.meta.tables]
+on.tables =
+    pgq.consumer,
+    pgq.queue,
+    pgq.tick,
+    pgq.subscription
+pgq_reader = select
+public = select
+
+[5.event.tables]
+on.tables = pgq.event_template, pgq.retry_queue
+pgq_reader = select
+
+# drop public access to events
+public =
+
+
+#
+# define various groups of functions
+#
+
+[DEFAULT]
+
+pgq_generic_fns =
+    pgq.seq_getval(text),
+    pgq.get_queue_info(),
+    pgq.get_queue_info(text),
+    pgq.get_consumer_info(),
+    pgq.get_consumer_info(text),
+    pgq.get_consumer_info(text, text),
+    pgq.version()
+
+pgq_read_fns =
+    pgq.batch_event_sql(bigint),
+    pgq.batch_event_tables(bigint),
+    pgq.find_tick_helper(int4, int8, timestamptz, int8, int8, interval),
+    pgq.register_consumer(text, text),
+    pgq.register_consumer_at(text, text, bigint),
+    pgq.unregister_consumer(text, text),
+    pgq.next_batch_info(text, text),
+    pgq.next_batch(text, text),
+    pgq.next_batch_custom(text, text, interval, int4, interval),
+    pgq.get_batch_events(bigint),
+    pgq.get_batch_info(bigint),
+    pgq.get_batch_cursor(bigint, text, int4, text),
+    pgq.get_batch_cursor(bigint, text, int4),
+    pgq.event_retry(bigint, bigint, timestamptz),
+    pgq.event_retry(bigint, bigint, integer),
+    pgq.batch_retry(bigint, integer),
+    pgq.finish_batch(bigint)
+
+pgq_write_fns =
+    pgq.insert_event(text, text, text),
+    pgq.insert_event(text, text, text, text, text, text, text),
+    pgq.current_event_table(text),
+    pgq.sqltriga(),
+    pgq.logutriga()
+
+pgq_system_fns =
+    pgq.ticker(text, bigint, timestamptz, bigint),
+    pgq.ticker(text),
+    pgq.ticker(),
+    pgq.maint_retry_events(),
+    pgq.maint_rotate_tables_step1(text),
+    pgq.maint_rotate_tables_step2(),
+    pgq.maint_tables_to_vacuum(),
+    pgq.maint_operations(),
+    pgq.upgrade_schema(),
+    pgq.grant_perms(text),
+    pgq._grant_perms_from(text,text,text,text),
+    pgq.tune_storage(text),
+    pgq.force_tick(text),
+    pgq.seq_setval(text, int8),
+    pgq.create_queue(text),
+    pgq.drop_queue(text, bool),
+    pgq.drop_queue(text),
+    pgq.set_queue_config(text, text, text),
+    pgq.insert_event_raw(text, bigint, timestamptz, integer, integer, text, text, text, text, text, text),
+    pgq.event_retry_raw(text, text, timestamptz, bigint, timestamptz, integer, text, text, text, text, text, text)
+
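The bare "public =" line under [5.event.tables] is taken here to mean that public's default access to the event tables gets revoked rather than granted; the assumed generated SQL would be along these lines:

    REVOKE ALL ON pgq.event_template FROM public;
    REVOKE ALL ON pgq.retry_queue FROM public;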
diff --git a/sql/pgq/structure/grants.sql b/sql/pgq/structure/grants.sql
index d5f8ef1f..acbd484c 100644
--- a/sql/pgq/structure/grants.sql
+++ b/sql/pgq/structure/grants.sql
@@ -1,5 +1,8 @@
+
 grant usage on schema pgq to public;
+
+-- old default grants
 grant select on table pgq.consumer to public;
 grant select on table pgq.queue to public;
 grant select on table pgq.tick to public;
@@ -7,3 +10,4 @@ grant select on table pgq.queue to public;
 grant select on table pgq.subscription to public;
 grant select on table pgq.event_template to public;
 grant select on table pgq.retry_queue to public;
+
diff --git a/sql/pgq/structure/install.sql b/sql/pgq/structure/install.sql
index 511aaba7..747801ef 100644
--- a/sql/pgq/structure/install.sql
+++ b/sql/pgq/structure/install.sql
@@ -1,7 +1,7 @@
 \i structure/tables.sql
-\i structure/grants.sql
 \i structure/func_internal.sql
 \i structure/func_public.sql
 \i structure/triggers.sql
+\i structure/grants.sql
diff --git a/sql/pgq/structure/upgrade.sql b/sql/pgq/structure/upgrade.sql
index 1b898d11..94ae6c75 100644
--- a/sql/pgq/structure/upgrade.sql
+++ b/sql/pgq/structure/upgrade.sql
@@ -1,2 +1,3 @@
 \i structure/func_internal.sql
 \i structure/func_public.sql
+\i structure/triggers.sql
diff --git a/sql/pgq_coop/Makefile b/sql/pgq_coop/Makefile
index 31c5d329..6d88236d 100644
--- a/sql/pgq_coop/Makefile
+++ b/sql/pgq_coop/Makefile
@@ -1,32 +1,54 @@
-DATA_built = pgq_coop.sql pgq_coop.upgrade.sql
+EXTENSION = pgq_coop
 
-SQL_FULL = structure/schema.sql structure/functions.sql structure/grants.sql
+Extension_data_built = pgq_coop--3.1.sql pgq_coop--unpackaged--3.1.sql
+Contrib_data_built = pgq_coop.sql pgq_coop.upgrade.sql \
+	structure/newgrants_pgq_coop.sql \
+	structure/oldgrants_pgq_coop.sql
+
+Contrib_regress = pgq_coop_init_noext pgq_coop_test
+Extension_regress = pgq_coop_init_ext pgq_coop_test
+Contrib_install_always = yes
+
+SQL_FULL = structure/schema.sql structure/functions.sql structure/grants.sql
 FUNCS = $(shell sed -n -e '/^\\/{s/\\i //;p}' structure/functions.sql)
 SRCS = $(SQL_FULL) $(FUNCS)
 
-REGRESS = pgq_coop_test
-REGRESS_OPTS = --load-language=plpgsql
-
-PG_CONFIG = pg_config
-PGXS = $(shell $(PG_CONFIG) --pgxs)
-include $(PGXS)
+include ../common-pgxs.mk
 
 NDOC = NaturalDocs
 NDOCARGS = -r -o html docs/html -p docs -i docs/sql
 CATSQL = ../../scripts/catsql.py
+GRANTFU = ../../scripts/grantfu.py
+
 #
 # combined SQL files
 #
 
+pgq_coop--3.1.sql: pgq_coop.sql
+	cat $< > $@
+
+#pgq_coop--unpackaged--3.1.sql: structure/ext_unpackaged.sql pgq_coop.upgrade.sql
+pgq_coop--unpackaged--3.1.sql: pgq_coop.upgrade.sql structure/ext_unpackaged.sql
+	cat $< > $@
+
 pgq_coop.sql: $(SRCS)
 	$(CATSQL) structure/install.sql > $@
 
 pgq_coop.upgrade.sql: $(SRCS)
 	$(CATSQL) structure/upgrade.sql > $@
 
+structure/newgrants_pgq_coop.sql: structure/grants.ini
+	$(GRANTFU) -t -r -d $< > $@
+
+structure/oldgrants_pgq_coop.sql: structure/grants.ini structure/grants.sql
+	echo "begin;" > $@
+	$(GRANTFU) -R -o $< >> $@
+	cat structure/grants.sql >> $@
+	echo "commit;" >> $@
+
 #
 # docs
 #
@@ -41,16 +63,3 @@ cleandox:
 
 clean: cleandox
 
-upload: dox
-	rsync -az --delete docs/html/* data1:public_html/pgq-set/
-
-#
-# regtest shortcuts
-#
-
-test: pgq_coop.sql
-	$(MAKE) installcheck || { less regression.diffs; exit 1; }
-
-ack:
-	cp results/*.out expected/
-
diff --git a/sql/pgq_coop/expected/pgq_coop_init_ext.out b/sql/pgq_coop/expected/pgq_coop_init_ext.out
new file mode 100644
index 00000000..c933def6
--- /dev/null
+++ b/sql/pgq_coop/expected/pgq_coop_init_ext.out
@@ -0,0 +1,5 @@
+create extension pgq;
+\set ECHO none
+create extension pgq_coop from 'unpackaged';
+drop extension pgq_coop;
+create extension pgq_coop;
diff --git a/sql/pgq_coop/expected/pgq_coop_init_noext.out b/sql/pgq_coop/expected/pgq_coop_init_noext.out
new file mode 100644
index 00000000..6460c411
--- /dev/null
+++ b/sql/pgq_coop/expected/pgq_coop_init_noext.out
@@ -0,0 +1,6 @@
+\set ECHO none
+ upgrade_schema 
+----------------
+              0
+(1 row)
+
diff --git a/sql/pgq_coop/expected/pgq_coop_test.out b/sql/pgq_coop/expected/pgq_coop_test.out
index f821d766..11e8424e 100644
--- a/sql/pgq_coop/expected/pgq_coop_test.out
+++ b/sql/pgq_coop/expected/pgq_coop_test.out
@@ -1,9 +1,3 @@
-\set ECHO none
- upgrade_schema 
-----------------
-              0
-(1 row)
-
 select pgq.create_queue('testqueue');
  create_queue 
 --------------
diff --git a/sql/pgq_coop/functions/pgq_coop.version.sql b/sql/pgq_coop/functions/pgq_coop.version.sql
index 3ab14d05..aab49258 100644
--- a/sql/pgq_coop/functions/pgq_coop.version.sql
+++ b/sql/pgq_coop/functions/pgq_coop.version.sql
@@ -4,11 +4,11 @@ returns text as $$
 -- ----------------------------------------------------------------------
 -- Function: pgq_coop.version(0)
 --
--- Returns version string for pgq_coop.  ATM its SkyTools version
--- with suffix that is only bumped when pgq_coop database code changes.
+-- Returns version string for pgq_coop.  ATM it is based on SkyTools version
+-- and only bumped when database code changes.
 -- ----------------------------------------------------------------------
 begin
-    return '3.0.0.4';
+    return '3.1.0.0';
 end;
 $$ language plpgsql;
diff --git a/sql/pgq_coop/pgq_coop.control b/sql/pgq_coop/pgq_coop.control
new file mode 100644
index 00000000..804e1f51
--- /dev/null
+++ b/sql/pgq_coop/pgq_coop.control
@@ -0,0 +1,7 @@
+# pgq_coop
+comment = 'Cooperative queue consuming for PgQ'
+default_version = '3.1'
+relocatable = false
+superuser = true
+schema = 'pg_catalog'
+requires = 'pgq'
diff --git a/sql/pgq_coop/sql/pgq_coop_init_ext.sql b/sql/pgq_coop/sql/pgq_coop_init_ext.sql
new file mode 100644
index 00000000..88f031d5
--- /dev/null
+++ b/sql/pgq_coop/sql/pgq_coop_init_ext.sql
@@ -0,0 +1,12 @@
+
+create extension pgq;
+
+\set ECHO none
+\i structure/install.sql
+\set ECHO all
+
+create extension pgq_coop from 'unpackaged';
+drop extension pgq_coop;
+
+create extension pgq_coop;
+
diff --git a/sql/pgq_coop/sql/pgq_coop_init_noext.sql b/sql/pgq_coop/sql/pgq_coop_init_noext.sql
new file mode 100644
index 00000000..e1e6e56e
--- /dev/null
+++ b/sql/pgq_coop/sql/pgq_coop_init_noext.sql
@@ -0,0 +1,7 @@
+
+\set ECHO none
+\i ../pgq/pgq.sql
+\i structure/schema.sql
+\i structure/functions.sql
+\set ECHO all
+
diff --git a/sql/pgq_coop/sql/pgq_coop_test.sql b/sql/pgq_coop/sql/pgq_coop_test.sql
index 93f583f1..0801a9d9 100644
--- a/sql/pgq_coop/sql/pgq_coop_test.sql
+++ b/sql/pgq_coop/sql/pgq_coop_test.sql
@@ -1,10 +1,4 @@
-\set ECHO none
-\i ../pgq/pgq.sql
-\i structure/schema.sql
-\i structure/functions.sql
-\set ECHO all
-
 select pgq.create_queue('testqueue');
 
 update pgq.queue set queue_ticker_max_count = 1
 where queue_name = 'testqueue';
diff --git a/sql/pgq_coop/structure/ext_unpackaged.sql b/sql/pgq_coop/structure/ext_unpackaged.sql
new file mode 100644
index 00000000..2755d8a2
--- /dev/null
+++ b/sql/pgq_coop/structure/ext_unpackaged.sql
@@ -0,0 +1,3 @@
+
+ALTER EXTENSION pgq_coop ADD SCHEMA pgq_coop;
+
diff --git a/sql/pgq_coop/structure/grants.ini b/sql/pgq_coop/structure/grants.ini
new file mode 100644
index 00000000..a1e98ea9
--- /dev/null
+++ b/sql/pgq_coop/structure/grants.ini
@@ -0,0 +1,21 @@
+[GrantFu]
+roles = pgq_reader, pgq_writer, pgq_admin, public
+
+[1.consumer]
+on.functions = %(pgq_coop_fns)s
+pgq_reader = execute
+
+[2.public]
+on.functions = pgq_coop.version()
+public = execute
+
+[DEFAULT]
+pgq_coop_fns =
+    pgq_coop.register_subconsumer(text, text, text),
+    pgq_coop.unregister_subconsumer(text, text, text, integer),
+    pgq_coop.next_batch(text, text, text),
+    pgq_coop.next_batch(text, text, text, interval),
+    pgq_coop.next_batch_custom(text, text, text, interval, int4, interval),
+    pgq_coop.next_batch_custom(text, text, text, interval, int4, interval, interval),
+    pgq_coop.finish_batch(bigint)
+
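pgq_coop splits one consumer position among several cooperating subconsumers, which is why all of its entry points take a (queue, consumer, subconsumer) triple. A minimal usage sketch built from the functions listed above (queue and worker names hypothetical):

    select pgq_coop.register_subconsumer('testqueue', 'maincons', 'worker1');
    select pgq_coop.next_batch('testqueue', 'maincons', 'worker1');  -- NULL when no batch is ready
    -- ... process the batch's events ...
    -- then: select pgq_coop.finish_batch(<batch_id>);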
diff --git a/sql/pgq_coop/structure/grants.sql b/sql/pgq_coop/structure/grants.sql
index b3f384cd..2ed2bd20 100644
--- a/sql/pgq_coop/structure/grants.sql
+++ b/sql/pgq_coop/structure/grants.sql
@@ -1,3 +1,3 @@
 
-grant usage on schema pgq_coop to public;
+GRANT usage ON SCHEMA pgq_coop TO public;
 
diff --git a/sql/pgq_ext/Makefile b/sql/pgq_ext/Makefile
index 50381f6f..92ddbfbc 100644
--- a/sql/pgq_ext/Makefile
+++ b/sql/pgq_ext/Makefile
@@ -1,19 +1,32 @@
+EXTENSION = pgq_ext
+
+Contrib_data_built = pgq_ext.sql pgq_ext.upgrade.sql \
+	structure/oldgrants_pgq_ext.sql \
+	structure/newgrants_pgq_ext.sql
+Contrib_regress = init_noext test_pgq_ext test_upgrade
+
+Extension_regress = init_ext test_pgq_ext
+Extension_data_built = pgq_ext--3.1.sql pgq_ext--unpackaged--3.1.sql
+
+Contrib_install_always = yes
+
 DOCS = README.pgq_ext
-DATA_built = pgq_ext.sql pgq_ext.upgrade.sql
 
 SRCS = $(wildcard functions/*.sql structure/*.sql)
 
-REGRESS = test_pgq_ext test_upgrade
-REGRESS_OPTS = --load-language=plpgsql
-
+GRANTFU = ../../scripts/grantfu.py
 CATSQL = ../../scripts/catsql.py
 NDOC = NaturalDocs
 NDOCARGS = -r -o html docs/html -p docs -i docs/sql
 
-PG_CONFIG = pg_config
-PGXS = $(shell $(PG_CONFIG) --pgxs)
-include $(PGXS)
+include ../common-pgxs.mk
+
+pgq_ext--3.1.sql: pgq_ext.sql structure/ext_postproc.sql
+	cat $^ > $@
+
+pgq_ext--unpackaged--3.1.sql: pgq_ext.upgrade.sql structure/ext_unpackaged.sql structure/ext_postproc.sql
+	cat $^ > $@
 
 pgq_ext.sql: $(SRCS)
 	$(CATSQL) structure/install.sql > $@
@@ -21,11 +34,14 @@ pgq_ext.sql: $(SRCS)
 pgq_ext.upgrade.sql: $(SRCS)
 	$(CATSQL) structure/upgrade.sql > $@
 
-test: pgq_ext.sql
-	make installcheck || { less regression.diffs ; exit 1; }
+structure/newgrants_pgq_ext.sql: structure/grants.ini
+	$(GRANTFU) -t -r -d $< > $@
 
-ack:
-	cp results/* expected/
+structure/oldgrants_pgq_ext.sql: structure/grants.ini structure/grants.sql
+	echo "begin;" > $@
+	$(GRANTFU) -R -o $< >> $@
+	cat structure/grants.sql >> $@
+	echo "commit;" >> $@
 
 cleandox:
 	rm -rf docs/html docs/Data docs/sql
diff --git a/sql/pgq_ext/expected/init_ext.out b/sql/pgq_ext/expected/init_ext.out
new file mode 100644
index 00000000..921f10b0
--- /dev/null
+++ b/sql/pgq_ext/expected/init_ext.out
@@ -0,0 +1,21 @@
+\set ECHO off
+ upgrade_schema 
+----------------
+              4
+(1 row)
+
+create extension pgq_ext from 'unpackaged';
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext';
+ dumpable 
+----------
+        4
+(1 row)
+
+drop extension pgq_ext;
+create extension pgq_ext;
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext';
+ dumpable 
+----------
+        4
+(1 row)
+
diff --git a/sql/pgq_ext/expected/init_noext.out b/sql/pgq_ext/expected/init_noext.out
new file mode 100644
index 00000000..48a00a35
--- /dev/null
+++ b/sql/pgq_ext/expected/init_noext.out
@@ -0,0 +1,6 @@
+\set ECHO off
+ upgrade_schema 
+----------------
+              4
+(1 row)
+
diff --git a/sql/pgq_ext/expected/test_pgq_ext.out b/sql/pgq_ext/expected/test_pgq_ext.out
index ad1ea437..668b8c87 100644
--- a/sql/pgq_ext/expected/test_pgq_ext.out
+++ b/sql/pgq_ext/expected/test_pgq_ext.out
@@ -1,9 +1,3 @@
-\set ECHO off
- upgrade_schema 
-----------------
-              0
-(1 row)
-
 --
 -- test batch tracking
 --
diff --git a/sql/pgq_ext/functions/pgq_ext.version.sql b/sql/pgq_ext/functions/pgq_ext.version.sql
index 092dee59..d3da90a3 100644
--- a/sql/pgq_ext/functions/pgq_ext.version.sql
+++ b/sql/pgq_ext/functions/pgq_ext.version.sql
@@ -4,11 +4,11 @@ returns text as $$
 -- ----------------------------------------------------------------------
 -- Function: pgq_ext.version(0)
 --
--- Returns version string for pgq_ext. ATM its SkyTools version
--- with suffix that is only bumped when pgq_ext database code changes.
+-- Returns version string for pgq_ext. ATM it is based on SkyTools version
+-- and only bumped when database code changes.
 -- ----------------------------------------------------------------------
 begin
-    return '3.0.0.3';
+    return '3.1.0.0';
 end;
 $$ language plpgsql;
diff --git a/sql/pgq_ext/pgq_ext.control b/sql/pgq_ext/pgq_ext.control
new file mode 100644
index 00000000..9a224f2b
--- /dev/null
+++ b/sql/pgq_ext/pgq_ext.control
@@ -0,0 +1,7 @@
+# pgq_ext
+comment = 'Target-side batch tracking infrastructure'
+default_version = '3.1'
+relocatable = false
+superuser = true
+schema = 'pg_catalog'
+
diff --git a/sql/pgq_ext/sql/init_ext.sql b/sql/pgq_ext/sql/init_ext.sql
new file mode 100644
index 00000000..bea94d26
--- /dev/null
+++ b/sql/pgq_ext/sql/init_ext.sql
@@ -0,0 +1,11 @@
+
+\set ECHO off
+\i structure/install.sql
+\set ECHO all
+create extension pgq_ext from 'unpackaged';
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext';
+drop extension pgq_ext;
+
+create extension pgq_ext;
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext';
+
diff --git a/sql/pgq_ext/sql/init_noext.sql b/sql/pgq_ext/sql/init_noext.sql
new file mode 100644
index 00000000..23803bd3
--- /dev/null
+++ b/sql/pgq_ext/sql/init_noext.sql
@@ -0,0 +1,3 @@
+\set ECHO off
+\i structure/install.sql
+
diff --git a/sql/pgq_ext/sql/test_pgq_ext.sql b/sql/pgq_ext/sql/test_pgq_ext.sql
index 9ceee599..2d94e041 100644
--- a/sql/pgq_ext/sql/test_pgq_ext.sql
+++ b/sql/pgq_ext/sql/test_pgq_ext.sql
@@ -1,7 +1,3 @@
-\set ECHO off
-\i structure/install.sql
-\set ECHO all
-
 --
 -- test batch tracking
 --
diff --git a/sql/pgq_ext/structure/ext_postproc.sql b/sql/pgq_ext/structure/ext_postproc.sql
new file mode 100644
index 00000000..ce254032
--- /dev/null
+++ b/sql/pgq_ext/structure/ext_postproc.sql
@@ -0,0 +1,8 @@
+
+-- tag data objects as dumpable
+
+SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_tick', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_batch', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_event', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq_ext.partial_batch', '');
+
diff --git a/sql/pgq_ext/structure/ext_unpackaged.sql b/sql/pgq_ext/structure/ext_unpackaged.sql
new file mode 100644
index 00000000..51fbeb42
--- /dev/null
+++ b/sql/pgq_ext/structure/ext_unpackaged.sql
@@ -0,0 +1,8 @@
+
+ALTER EXTENSION pgq_ext ADD SCHEMA pgq_ext;
+
+ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_tick;
+ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_batch;
+ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_event;
+ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.partial_batch;
+
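
Note: pg_extension_config_dump() is the stock PostgreSQL API that makes an
extension's configuration tables visible to pg_dump; the empty second
argument means "dump all rows". A filter clause may be passed instead, e.g.
(hypothetical condition, for illustration only):

    SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_tick', 'WHERE true');
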
diff --git a/sql/pgq_ext/structure/grants.ini b/sql/pgq_ext/structure/grants.ini
new file mode 100644
index 00000000..e9c6927b
--- /dev/null
+++ b/sql/pgq_ext/structure/grants.ini
@@ -0,0 +1,28 @@
+[GrantFu]
+roles = pgq_writer, public
+
+[1.public]
+on.functions = pgq_ext.version()
+public = execute
+
+[2.pgq_ext]
+on.functions = %(pgq_ext_fns)s
+pgq_writer = execute
+
+
+[DEFAULT]
+pgq_ext_fns =
+    pgq_ext.upgrade_schema(),
+    pgq_ext.is_batch_done(text, text, bigint),
+    pgq_ext.is_batch_done(text, bigint),
+    pgq_ext.set_batch_done(text, text, bigint),
+    pgq_ext.set_batch_done(text, bigint),
+    pgq_ext.is_event_done(text, text, bigint, bigint),
+    pgq_ext.is_event_done(text, bigint, bigint),
+    pgq_ext.set_event_done(text, text, bigint, bigint),
+    pgq_ext.set_event_done(text, bigint, bigint),
+    pgq_ext.get_last_tick(text, text),
+    pgq_ext.get_last_tick(text),
+    pgq_ext.set_last_tick(text, text, bigint),
+    pgq_ext.set_last_tick(text, bigint)
+
diff --git a/sql/pgq_ext/structure/grants.sql b/sql/pgq_ext/structure/grants.sql
new file mode 100644
index 00000000..0b3db788
--- /dev/null
+++ b/sql/pgq_ext/structure/grants.sql
@@ -0,0 +1,3 @@
+
+grant usage on schema pgq_ext to public;
+
diff --git a/sql/pgq_ext/structure/install.sql b/sql/pgq_ext/structure/install.sql
index 973e35b8..4b9be2c5 100644
--- a/sql/pgq_ext/structure/install.sql
+++ b/sql/pgq_ext/structure/install.sql
@@ -1,3 +1,4 @@
 \i structure/tables.sql
 \i structure/upgrade.sql
+\i structure/grants.sql
 
diff --git a/sql/pgq_ext/structure/tables.sql b/sql/pgq_ext/structure/tables.sql
index c40368eb..78a1b234 100644
--- a/sql/pgq_ext/structure/tables.sql
+++ b/sql/pgq_ext/structure/tables.sql
@@ -52,7 +52,6 @@ set client_min_messages = 'warning';
 set default_with_oids = 'off';
 
 create schema pgq_ext;
-grant usage on schema pgq_ext to public;
 
 --
diff --git a/sql/pgq_node/Makefile b/sql/pgq_node/Makefile
index c3932ca9..a42a7fad 100644
--- a/sql/pgq_node/Makefile
+++ b/sql/pgq_node/Makefile
@@ -1,31 +1,54 @@
-DATA_built = pgq_node.sql pgq_node.upgrade.sql
+EXTENSION = pgq_node
+
+Extension_data_built = pgq_node--3.1.sql pgq_node--unpackaged--3.1.sql
+Contrib_data_built = pgq_node.sql pgq_node.upgrade.sql \
+	structure/newgrants_pgq_node.sql \
+	structure/oldgrants_pgq_node.sql
+
+Extension_regress = pgq_node_init_ext pgq_node_test
+Contrib_regress = pgq_node_init_noext pgq_node_test
+
+Contrib_install_always = yes
 
 LDRS = structure/functions.sql
 FUNCS = $(shell sed -e 's/^[^\\].*//' -e 's/\\i //' $(LDRS))
-SRCS = structure/tables.sql structure/functions.sql $(FUNCS)
-
-REGRESS = pgq_node_test
-REGRESS_OPTS = --load-language=plpgsql
+SRCS = structure/tables.sql structure/functions.sql structure/grants.sql \
+	$(FUNCS)
 
-PG_CONFIG = pg_config
-PGXS = $(shell $(PG_CONFIG) --pgxs)
-include $(PGXS)
+include ../common-pgxs.mk
 
 NDOC = NaturalDocs
 NDOCARGS = -r -o html docs/html -p docs -i docs/sql
 CATSQL = ../../scripts/catsql.py
+GRANTFU = ../../scripts/grantfu.py
 
 #
 # combined SQL files
 #
 
+pgq_node--3.1.sql: pgq_node.sql structure/ext_postproc.sql
+	cat $^ > $@
+
+pgq_node--unpackaged--3.1.sql: pgq_node.upgrade.sql structure/ext_unpackaged.sql structure/ext_postproc.sql
+	cat $^ > $@
+
 pgq_node.sql: $(SRCS)
 	$(CATSQL) structure/install.sql > $@
 
 pgq_node.upgrade.sql: $(SRCS)
 	$(CATSQL) structure/upgrade.sql > $@
 
+structure/newgrants_pgq_node.sql: structure/grants.ini
+	$(GRANTFU) -r -d -t $< > $@
+
+structure/oldgrants_pgq_node.sql: structure/grants.ini structure/grants.sql
+	echo "begin;" > $@
+	$(GRANTFU) -R -o $< >> $@
+	cat structure/grants.sql >> $@
+	echo "commit;" >> $@
+
+
 #
 # docs
 #
@@ -41,16 +64,3 @@ cleandox:
 
 clean: cleandox
 
-upload: dox
-	rsync -az --delete docs/html/* data1:public_html/pgq-set/
-
-#
-# regtest shortcuts
-#
-
-test: pgq_node.sql
-	$(MAKE) installcheck || { filterdiff --format=unified regression.diffs | less; exit 1; }
-
-ack:
-	cp results/*.out expected/
-
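
Note: with requires = 'pgq' in the control file and the pgq_node--3.1.sql
script built above, installation on 9.1+ goes through the normal extension
machinery. A minimal usage sketch (9.1 has no CREATE EXTENSION ... CASCADE,
so the dependency is created explicitly):

    CREATE EXTENSION pgq;       -- dependency must exist first
    CREATE EXTENSION pgq_node;  -- runs pgq_node--3.1.sql
    SELECT pgq_node.version();  -- should report 3.1.0.0
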
diff --git a/sql/pgq_node/expected/pgq_node_init_ext.out b/sql/pgq_node/expected/pgq_node_init_ext.out
new file mode 100644
index 00000000..778c35d2
--- /dev/null
+++ b/sql/pgq_node/expected/pgq_node_init_ext.out
@@ -0,0 +1,22 @@
+create extension pgq;
+\set ECHO none
+ upgrade_schema
+----------------
+              0
+(1 row)
+
+create extension pgq_node from unpackaged;
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node';
+ dumpable
+----------
+        4
+(1 row)
+
+drop extension pgq_node;
+create extension pgq_node;
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node';
+ dumpable
+----------
+        4
+(1 row)
+
diff --git a/sql/pgq_node/expected/pgq_node_init_noext.out b/sql/pgq_node/expected/pgq_node_init_noext.out
new file mode 100644
index 00000000..65817072
--- /dev/null
+++ b/sql/pgq_node/expected/pgq_node_init_noext.out
@@ -0,0 +1,11 @@
+\set ECHO none
+ upgrade_schema
+----------------
+              0
+(1 row)
+
+ upgrade_schema
+----------------
+              0
+(1 row)
+
diff --git a/sql/pgq_node/expected/pgq_node_test.out b/sql/pgq_node/expected/pgq_node_test.out
index 1bd4ce55..a19549db 100644
--- a/sql/pgq_node/expected/pgq_node_test.out
+++ b/sql/pgq_node/expected/pgq_node_test.out
@@ -1,18 +1,3 @@
-\set ECHO none
- upgrade_schema
-----------------
-              0
-(1 row)
-
- sub_consumer | sub_id | co_name
---------------+--------+---------
-(0 rows)
-
- upgrade_schema
-----------------
-              0
-(1 row)
-
 select * from pgq_node.register_location('aqueue', 'node1', 'dbname=node1', false);
  ret_code |      ret_note       
 ----------+---------------------
@@ -431,57 +416,57 @@ select * from pgq_node.get_worker_state('mqueue');
 (1 row)
 
 select * from pgq_node.drop_node('asd', 'asd');
- ret_code |      ret_note      
-----------+--------------------
-      304 | No such queue: asd
+ ret_code |     ret_note      
+----------+-------------------
+      200 | Node dropped: asd
 (1 row)
 
 select * from pgq_node.drop_node('mqueue', 'node3');
- ret_code |   ret_note   
-----------+--------------
-      200 | Node dropped
+ ret_code |      ret_note       
+----------+---------------------
+      200 | Node dropped: node3
 (1 row)
 
 select * from pgq_node.drop_node('mqueue', 'node2');
- ret_code |   ret_note   
-----------+--------------
-      200 | Node dropped
+ ret_code |      ret_note       
+----------+---------------------
+      200 | Node dropped: node2
 (1 row)
 
 select * from pgq_node.drop_node('mqueue', 'node1');
- ret_code |       ret_note        
-----------+-----------------------
-      304 | No such queue: mqueue
+ ret_code |      ret_note       
+----------+---------------------
+      200 | Node dropped: node1
 (1 row)
 
 select * from pgq_node.drop_node('aqueue', 'node5');
- ret_code |   ret_note   
-----------+--------------
-      200 | Node dropped
+ ret_code |      ret_note       
+----------+---------------------
+      200 | Node dropped: node5
 (1 row)
 
 select * from pgq_node.drop_node('aqueue', 'node4');
- ret_code |   ret_note   
-----------+--------------
-      200 | Node dropped
+ ret_code |      ret_note       
+----------+---------------------
+      200 | Node dropped: node4
 (1 row)
 
 select * from pgq_node.drop_node('aqueue', 'node1');
- ret_code |   ret_note   
-----------+--------------
-      200 | Node dropped
+ ret_code |      ret_note       
+----------+---------------------
+      200 | Node dropped: node1
 (1 row)
 
 select * from pgq_node.drop_node('aqueue', 'node2');
- ret_code |       ret_note        
-----------+-----------------------
-      304 | No such queue: aqueue
+ ret_code |      ret_note       
+----------+---------------------
+      200 | Node dropped: node2
 (1 row)
 
 select * from pgq_node.drop_node('aqueue', 'node3');
- ret_code |       ret_note        
-----------+-----------------------
-      304 | No such queue: aqueue
+ ret_code |      ret_note       
+----------+---------------------
+      200 | Node dropped: node3
 (1 row)
 
 \q
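
Note: the expected-output change shows the new drop_node contract: it now
names the dropped node in ret_note and proceeds with cleanup even when the
local state for the queue is already gone, where it previously failed with
304. Usage stays the same:

    SELECT * FROM pgq_node.drop_node('aqueue', 'node2');
    --  ret_code |      ret_note
    -- ----------+---------------------
    --       200 | Node dropped: node2
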
diff --git a/sql/pgq_node/functions/pgq_node.create_node.sql b/sql/pgq_node/functions/pgq_node.create_node.sql
index eaa19f24..0a852235 100644
--- a/sql/pgq_node/functions/pgq_node.create_node.sql
+++ b/sql/pgq_node/functions/pgq_node.create_node.sql
@@ -70,7 +70,10 @@ begin
         return;
     end if;
     perform pgq.create_queue(i_queue_name);
-    update pgq.queue set queue_external_ticker = true where queue_name = i_queue_name;
+    update pgq.queue
+       set queue_external_ticker = true,
+           queue_disable_insert = true
+     where queue_name = i_queue_name;
     if i_global_watermark > 1 then
         perform pgq.ticker(i_queue_name, i_global_watermark, now(), 1);
     end if;
diff --git a/sql/pgq_node/functions/pgq_node.drop_node.sql b/sql/pgq_node/functions/pgq_node.drop_node.sql
index 0dcfed1d..15ae7c87 100644
--- a/sql/pgq_node/functions/pgq_node.drop_node.sql
+++ b/sql/pgq_node/functions/pgq_node.drop_node.sql
@@ -42,8 +42,10 @@ begin
       where n.queue_name = i_queue_name;
     if not found then
-        select 304, 'No such queue: ' || i_queue_name into ret_code, ret_note;
-        return;
+        -- proceed with cleaning anyway, as there are scenarios
+        -- where some data is left around
+        _is_prov := false;
+        _is_local := true;
     end if;
 
     -- drop local state
@@ -76,7 +78,8 @@ begin
         from pgq_node.unregister_location(i_queue_name, i_node_name) f
         into ret_code, ret_note;
 
-    select 200, 'Node dropped' into ret_code, ret_note;
+    select 200, 'Node dropped: ' || i_node_name
+      into ret_code, ret_note;
 
     return;
 end;
 $$ language plpgsql security definer;
diff --git a/sql/pgq_node/functions/pgq_node.unregister_location.sql b/sql/pgq_node/functions/pgq_node.unregister_location.sql
index 62dc1651..9a189a3e 100644
--- a/sql/pgq_node/functions/pgq_node.unregister_location.sql
+++ b/sql/pgq_node/functions/pgq_node.unregister_location.sql
@@ -48,6 +48,15 @@ begin
         end if;
     end if;
 
+    --
+    -- There may be obsolete subscriptions around,
+    -- drop them silently.
+    --
+    perform pgq_node.unregister_subscriber(i_queue_name, i_node_name);
+
+    --
+    -- Actual removal
+    --
     delete from pgq_node.node_location
      where queue_name = i_queue_name
        and node_name = i_node_name;
diff --git a/sql/pgq_node/functions/pgq_node.upgrade_schema.sql b/sql/pgq_node/functions/pgq_node.upgrade_schema.sql
index d9c1b6f4..1678559f 100644
--- a/sql/pgq_node/functions/pgq_node.upgrade_schema.sql
+++ b/sql/pgq_node/functions/pgq_node.upgrade_schema.sql
@@ -19,4 +19,3 @@ begin
 end;
 $$ language plpgsql;
 
-
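
Note: a side effect of the create_node change above (inferred from the pgq
queue flags, not spelled out in this diff): with queue_disable_insert set,
plain pgq.insert_event() on a non-root node is expected to be refused, so
events reach such queues only through the cascade worker. Roughly:

    -- on a branch/leaf created by pgq_node.create_node():
    SELECT pgq.insert_event('aqueue', 'mytype', 'payload');
    -- expected to fail with an insert-disallowed error
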
diff --git a/sql/pgq_node/functions/pgq_node.version.sql b/sql/pgq_node/functions/pgq_node.version.sql
index d2c1ad0a..3985250c 100644
--- a/sql/pgq_node/functions/pgq_node.version.sql
+++ b/sql/pgq_node/functions/pgq_node.version.sql
@@ -1,8 +1,14 @@
 create or replace function pgq_node.version()
 returns text as $$
+-- ----------------------------------------------------------------------
+-- Function: pgq_node.version(0)
+--
+-- Returns version string for pgq_node. ATM it is based on SkyTools version
+-- and only bumped when database code changes.
+-- ----------------------------------------------------------------------
 begin
-    return '3.0.0.14';
+    return '3.1.0.0';
 end;
 $$ language plpgsql;
diff --git a/sql/pgq_node/pgq_node.control b/sql/pgq_node/pgq_node.control
new file mode 100644
index 00000000..a214cee6
--- /dev/null
+++ b/sql/pgq_node/pgq_node.control
@@ -0,0 +1,7 @@
+# pgq_node
+comment = 'Cascaded queue infrastructure'
+default_version = '3.1'
+relocatable = false
+superuser = true
+schema = 'pg_catalog'
+requires = 'pgq'
diff --git a/sql/pgq_node/sql/pgq_node_init_ext.sql b/sql/pgq_node/sql/pgq_node_init_ext.sql
new file mode 100644
index 00000000..9bc55793
--- /dev/null
+++ b/sql/pgq_node/sql/pgq_node_init_ext.sql
@@ -0,0 +1,13 @@
+
+create extension pgq;
+
+\set ECHO none
+\i structure/install.sql
+\set ECHO all
+create extension pgq_node from unpackaged;
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node';
+drop extension pgq_node;
+
+create extension pgq_node;
+select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node';
+
diff --git a/sql/pgq_node/sql/pgq_node_init_noext.sql b/sql/pgq_node/sql/pgq_node_init_noext.sql
new file mode 100644
index 00000000..12c0109e
--- /dev/null
+++ b/sql/pgq_node/sql/pgq_node_init_noext.sql
@@ -0,0 +1,6 @@
+
+\set ECHO none
+\i ../pgq/pgq.sql
+\i structure/tables.sql
+\i structure/functions.sql
+
diff --git a/sql/pgq_node/sql/pgq_node_test.sql b/sql/pgq_node/sql/pgq_node_test.sql
index 1b2e1b92..2a312a26 100644
--- a/sql/pgq_node/sql/pgq_node_test.sql
+++ b/sql/pgq_node/sql/pgq_node_test.sql
@@ -1,10 +1,4 @@
-\set ECHO none
-\i ../pgq/pgq.sql
-\i structure/tables.sql
-\i structure/functions.sql
-\set ECHO all
-
 select * from pgq_node.register_location('aqueue', 'node1', 'dbname=node1', false);
 select * from pgq_node.register_location('aqueue', 'node2', 'dbname=node2', false);
 select * from pgq_node.register_location('aqueue', 'node3', 'dbname=node3', false);
diff --git a/sql/pgq_node/structure/ext_postproc.sql b/sql/pgq_node/structure/ext_postproc.sql
new file mode 100644
index 00000000..2e3b2e39
--- /dev/null
+++ b/sql/pgq_node/structure/ext_postproc.sql
@@ -0,0 +1,9 @@
+
+-- tag data objects as dumpable
+
+SELECT pg_catalog.pg_extension_config_dump('pgq_node.node_location', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq_node.node_info', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq_node.local_state', '');
+SELECT pg_catalog.pg_extension_config_dump('pgq_node.subscriber_info', '');
+
+
diff --git a/sql/pgq_node/structure/ext_unpackaged.sql b/sql/pgq_node/structure/ext_unpackaged.sql
new file mode 100644
index 00000000..524e75ad
--- /dev/null
+++ b/sql/pgq_node/structure/ext_unpackaged.sql
@@ -0,0 +1,8 @@
+
+ALTER EXTENSION pgq_node ADD SCHEMA pgq_node;
+
+ALTER EXTENSION pgq_node ADD TABLE pgq_node.node_location;
+ALTER EXTENSION pgq_node ADD TABLE pgq_node.node_info;
+ALTER EXTENSION pgq_node ADD TABLE pgq_node.local_state;
+ALTER EXTENSION pgq_node ADD TABLE pgq_node.subscriber_info;
+
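
Note: after the FROM-unpackaged path runs the ALTER EXTENSION ... ADD
statements above, object membership is recorded in pg_depend. A
standard-catalog query to verify the schema got attached (debugging aid,
not part of the diff):

    SELECT e.extname, n.nspname AS member_schema
      FROM pg_catalog.pg_depend d
      JOIN pg_catalog.pg_extension e ON e.oid = d.refobjid
      JOIN pg_catalog.pg_namespace n ON n.oid = d.objid
     WHERE d.classid = 'pg_namespace'::regclass
       AND d.refclassid = 'pg_extension'::regclass
       AND d.deptype = 'e'
       AND e.extname = 'pgq_node';
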
diff --git a/sql/pgq_node/structure/grants.ini b/sql/pgq_node/structure/grants.ini
new file mode 100644
index 00000000..d1cc4558
--- /dev/null
+++ b/sql/pgq_node/structure/grants.ini
@@ -0,0 +1,73 @@
+[GrantFu]
+roles = pgq_writer, pgq_admin, pgq_reader, public
+
+[1.public.fns]
+on.functions = %(pgq_node_public_fns)s
+public = execute
+
+# cascaded consumer, target side
+[2.consumer.fns]
+on.functions = %(pgq_node_consumer_fns)s
+pgq_writer = execute
+pgq_admin = execute
+
+# cascaded worker, target side
+[3.worker.fns]
+on.functions = %(pgq_node_worker_fns)s
+pgq_admin = execute
+
+# cascaded consumer/worker, source side
+[4.remote.fns]
+on.functions = %(pgq_node_remote_fns)s
+pgq_reader = execute
+pgq_writer = execute
+pgq_admin = execute
+
+# called by ticker, upgrade script
+[4.admin.fns]
+on.functions = %(pgq_node_admin_fns)s
+pgq_admin = execute
+
+# define various groups of functions
+[DEFAULT]
+
+pgq_node_remote_fns =
+    pgq_node.get_queue_locations(text),
+    pgq_node.get_node_info(text),
+    pgq_node.get_subscriber_info(text),
+    pgq_node.register_subscriber(text, text, text, int8),
+    pgq_node.unregister_subscriber(text, text),
+    pgq_node.set_subscriber_watermark(text, text, bigint)
+
+pgq_node_public_fns =
+    pgq_node.is_root_node(text),
+    pgq_node.is_leaf_node(text),
+    pgq_node.version()
+
+pgq_node_admin_fns =
+    pgq_node.upgrade_schema(),
+    pgq_node.maint_watermark(text)
+
+pgq_node_consumer_fns =
+    pgq_node.get_consumer_info(text),
+    pgq_node.get_consumer_state(text, text),
+    pgq_node.register_consumer(text, text, text, int8),
+    pgq_node.unregister_consumer(text, text),
+    pgq_node.change_consumer_provider(text, text, text),
+    pgq_node.set_consumer_uptodate(text, text, boolean),
+    pgq_node.set_consumer_paused(text, text, boolean),
+    pgq_node.set_consumer_completed(text, text, int8),
+    pgq_node.set_consumer_error(text, text, text)
+
+pgq_node_worker_fns =
+    pgq_node.register_location(text, text, text, boolean),
+    pgq_node.unregister_location(text, text),
+    pgq_node.create_node(text, text, text, text, text, bigint, text),
+    pgq_node.drop_node(text, text),
+    pgq_node.demote_root(text, int4, text),
+    pgq_node.promote_branch(text),
+    pgq_node.set_node_attrs(text, text),
+    pgq_node.get_worker_state(text),
+    pgq_node.set_global_watermark(text, bigint),
+    pgq_node.set_partition_watermark(text, text, bigint)
+
diff --git a/sql/pgq_node/structure/grants.sql b/sql/pgq_node/structure/grants.sql
new file mode 100644
index 00000000..1efff292
--- /dev/null
+++ b/sql/pgq_node/structure/grants.sql
@@ -0,0 +1,3 @@
+
+grant usage on schema pgq_node to public;
+
diff --git a/sql/pgq_node/structure/install.sql b/sql/pgq_node/structure/install.sql
index a6d95cad..9a2e23e9 100644
--- a/sql/pgq_node/structure/install.sql
+++ b/sql/pgq_node/structure/install.sql
@@ -1,2 +1,5 @@
+
 \i structure/tables.sql
 \i structure/functions.sql
+\i structure/grants.sql
+
diff --git a/sql/pgq_node/structure/tables.sql b/sql/pgq_node/structure/tables.sql
index bd8d4e1d..464a4454 100644
--- a/sql/pgq_node/structure/tables.sql
+++ b/sql/pgq_node/structure/tables.sql
@@ -16,7 +16,6 @@
 -- ----------------------------------------------------------------------
 
 create schema pgq_node;
-grant usage on schema pgq_node to public;
 
 -- ----------------------------------------------------------------------
 -- Table: pgq_node.location
diff --git a/tests/localconsumer/init.sh b/tests/localconsumer/init.sh
new file mode 100755
index 00000000..0b884158
--- /dev/null
+++ b/tests/localconsumer/init.sh
@@ -0,0 +1,9 @@
+#! /bin/sh
+
+. ../env.sh
+
+mkdir -p log pid
+
+dropdb qdb
+createdb qdb
+
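
Note: the role split in grants.ini mirrors the actual call sites: pgq_reader
and pgq_writer for consumer-facing functions, pgq_admin for worker-only and
maintenance entry points. The generated newgrants_pgq_node.sql should
therefore amount to statements of this shape (sketch, not verbatim grantfu
output):

    GRANT EXECUTE ON FUNCTION pgq_node.is_root_node(text) TO public;
    GRANT EXECUTE ON FUNCTION pgq_node.get_consumer_state(text, text) TO pgq_writer, pgq_admin;
    GRANT EXECUTE ON FUNCTION pgq_node.promote_branch(text) TO pgq_admin;
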
diff --git a/tests/localconsumer/regen.sh b/tests/localconsumer/regen.sh
new file mode 100755
index 00000000..f0e6cf87
--- /dev/null
+++ b/tests/localconsumer/regen.sh
@@ -0,0 +1,47 @@
+#! /bin/sh
+
+. ../testlib.sh
+
+for db in qdb; do
+  cleardb $db
+done
+
+rm -f log/*.log
+mkdir -p state
+rm -f state/*
+
+set -e
+
+title LocalConsumer test
+
+title2 Initialization
+
+msg Install PgQ
+
+run_qadmin qdb "install pgq;"
+run_qadmin qdb "create queue test_queue;"
+
+msg Run ticker
+
+cat_file conf/pgqd.ini <<EOF
+[pgqd]
+database_list = qdb
+logfile = log/pgqd.log
+pidfile = pid/pgqd.pid
+EOF
+
+run pgqd -d conf/pgqd.ini
+
+msg Run consumer
+
+cat_file conf/testconsumer_qdb.ini <<EOF
+[testconsumer]
+queue_name = test_queue
+db = dbname=qdb
+logfile = log/%(job_name)s.log
+pidfile = pid/%(job_name)s.pid
+local_tracking_file = state/%(job_name)s.tick
+EOF
+
+run ./testconsumer.py -v conf/testconsumer_qdb.ini
+
diff --git a/tests/localconsumer/testconsumer.py b/tests/localconsumer/testconsumer.py
new file mode 100755
index 00000000..bf4e8366
--- /dev/null
+++ b/tests/localconsumer/testconsumer.py
@@ -0,0 +1,12 @@
+#! /usr/bin/env python
+
+import sys, time, skytools, pgq
+
+class TestLocalConsumer(pgq.LocalConsumer):
+    def process_local_event(self, src_db, batch_id, ev):
+        self.log.info("event: type=%s data=%s", ev.type, ev.data)
+
+if __name__ == '__main__':
+    script = TestLocalConsumer('testconsumer', 'db', sys.argv[1:])
+    script.start()
+
diff --git a/tests/londiste/regen.sh b/tests/londiste/regen.sh
index 46741f7e..6e60f1dc 100755
--- a/tests/londiste/regen.sh
+++ b/tests/londiste/regen.sh
@@ -103,7 +103,7 @@ run londiste3 $v conf/londiste_db1.ini add-seq mytable_id_seq
 msg "Register table on other node with creation"
 for db in db2 db3 db4 db5; do
   run psql -d $db -c "create sequence mytable_id_seq"
-  run londiste3 $v conf/londiste_db1.ini add-seq mytable_id_seq
+  run londiste3 $v conf/londiste_$db.ini add-seq mytable_id_seq
   run londiste3 $v conf/londiste_$db.ini add-table mytable --create-full
 done
 
@@ -115,21 +115,24 @@ for db in db2 db3 db4 db5; do
 done
 
 msg "Wait until tables are in sync on db5"
-cnt=0
-while test $cnt -ne 2; do
-  sleep 5
-  cnt=`psql -A -t -d db5 -c "select count(*) from londiste.table_info where merge_state = 'ok'"`
-  echo "  cnt=$cnt"
-done
+
+run londiste3 conf/londiste_db5.ini wait-sync
 
 msg "Unregister table2 from root"
 run londiste3 $v conf/londiste_db1.ini remove-table mytable2
+
 msg "Wait until unregister reaches db5"
-while test $cnt -ne 1; do
-  sleep 5
-  cnt=`psql -A -t -d db5 -c "select count(*) from londiste.table_info where merge_state = 'ok'"`
-  echo "  cnt=$cnt"
-done
+run londiste3 conf/londiste_db5.ini wait-root
+
+
+run londiste3 conf/londiste_db5.ini status
+
+msg "Test skipped copy"
+run londiste3 $v conf/londiste_db1.ini add-table mytable2
+run londiste3 $v conf/londiste_db5.ini wait-root
+run londiste3 $v conf/londiste_db5.ini add-table mytable2 --find-copy-node
+run londiste3 $v conf/londiste_db5.ini wait-sync
 
 ##
 ## basic setup done
@@ -156,16 +159,8 @@ run londiste3 $v conf/londiste_db2.ini worker -d
 run londiste3 $v conf/londiste_db2.ini change-provider --provider=node3 --dead=node1
 
 msg "Wait until catchup"
-top=$(psql -A -t -d db3 -c "select max(tick_id) from pgq.queue join pgq.tick on (tick_queue = queue_id) where queue_name = 'replika'")
-echo "  top=$top"
-while test $cnt -ne 2; do
-  cur=$(psql -A -t -d db2 -c "select max(tick_id) from pgq.queue join pgq.tick on (tick_queue = queue_id) where queue_name = 'replika'")
-  echo "  cur=$cur"
-  if test "$cur" = "$top"; then
-    break
-  fi
-  sleep 5
-done
+run londiste3 $v conf/londiste_db2.ini wait-provider
+
 
 msg "Promoting db2 to root"
 run londiste3 $v conf/londiste_db2.ini takeover node1 --dead-root
 run londiste3 $v conf/londiste_db2.ini tag-dead node1
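
Note: the wait-sync / wait-root / wait-provider commands replace the
hand-rolled polling loops deleted above. The state those loops watched can
still be inspected manually with the very query the removed code ran on
the target database:

    SELECT count(*) FROM londiste.table_info WHERE merge_state = 'ok';
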
diff --git a/tests/merge/addcol-data2.sql b/tests/merge/addcol-data2.sql
new file mode 100644
index 00000000..fe5de5a7
--- /dev/null
+++ b/tests/merge/addcol-data2.sql
@@ -0,0 +1,5 @@
+
+--*-- Local-Table: mydata
+
+alter table @mydata@ add column data2 text;
+
diff --git a/tests/merge/regen.sh b/tests/merge/regen.sh
index fe31be1a..8a3c3ffc 100755
--- a/tests/merge/regen.sh
+++ b/tests/merge/regen.sh
@@ -188,3 +188,17 @@ run_sql full2 "select * from londiste.get_table_list('replika_part2')"
 
 ../zcheck.sh
 
+msg "Test EXECUTE through cascade"
+
+for db in part1 part2 part3 part4; do
+  run londiste3 $v conf/londiste_$db.ini execute addcol-data2.sql
+done
+msg "Sleep a bit"
+run sleep 10
+
+psql -d part1 -c '\d mydata'
+psql -d full1 -c '\d mydata'
+psql -d part1 -c '\d mydata'
+
+../zcheck.sh
+
diff --git a/tests/simplecons/init.sh b/tests/simplecons/init.sh
new file mode 100755
index 00000000..0b884158
--- /dev/null
+++ b/tests/simplecons/init.sh
@@ -0,0 +1,9 @@
+#! /bin/sh
+
+. ../env.sh
+
+mkdir -p log pid
+
+dropdb qdb
+createdb qdb
+
diff --git a/tests/simplecons/regen.sh b/tests/simplecons/regen.sh
new file mode 100755
index 00000000..f1fbf293
--- /dev/null
+++ b/tests/simplecons/regen.sh
@@ -0,0 +1,69 @@
+#! /bin/sh
+
+. ../testlib.sh
+
+for db in qdb; do
+  cleardb $db
+done
+
+rm -f log/*.log pid/*.pid
+mkdir -p state
+rm -f state/*
+
+set -e
+
+title SimpleConsumer test
+
+title2 Initialization
+
+msg Install PgQ
+
+run_qadmin qdb "install pgq;"
+run psql -d qdb -f schema.sql
+
+msg Run ticker
+
+cat_file conf/pgqd.ini <<EOF
+[pgqd]
+database_list = qdb
+logfile = log/pgqd.log
+pidfile = pid/pgqd.pid
+EOF
+
+run pgqd -d conf/pgqd.ini
+
+msg Run consumer
+
+cat_file conf/simple1_qdb.ini <<EOF
+[simple_consumer3]
+queue_name = testqueue
+src_db = dbname=qdb
+dst_db = dbname=qdb
+dst_query = insert into logtable (script, event_id, data) values ('simplecons', %%(pgq.ev_id)s, %%(data)s);
+table_filter = qtable
+logfile = log/%(job_name)s.log
+pidfile = pid/%(job_name)s.pid
+local_tracking_file = state/%(job_name)s.tick
+EOF
+
+cat_file conf/simple2_qdb.ini <<EOF
+[simple_local_consumer3]
+queue_name = testqueue
+src_db = dbname=qdb
+dst_db = dbname=qdb
+dst_query = insert into logtable (script, event_id, data) values ('simplelocalcons', %%(pgq.ev_id)s, %%(data)s);
+table_filter = qtable
+logfile = log/%(job_name)s.log
+pidfile = pid/%(job_name)s.pid
+local_tracking_file = state/%(job_name)s.tick
+EOF
+
+
+run simple_consumer3 -v conf/simple1_qdb.ini --register
+run simple_consumer3 -v -d conf/simple1_qdb.ini
+run simple_local_consumer3 -v -d conf/simple2_qdb.ini
+
+run_sql qdb "insert into qtable values ('data1')"
+
+run sleep 10
+run cat log/*
diff --git a/tests/simplecons/schema.sql b/tests/simplecons/schema.sql
new file mode 100644
index 00000000..94b8931a
--- /dev/null
+++ b/tests/simplecons/schema.sql
@@ -0,0 +1,15 @@
+
+create table qtable (
+    data text
+);
+
+create trigger qtrigger before insert on qtable
+for each row execute procedure pgq.logutriga('testqueue');
+
+create table logtable (
+    event_id bigint,
+    script text,
+    data text
+);
+
+select pgq.create_queue('testqueue');
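
Note: tying the simplecons pieces together: pgq.logutriga() turns each
insert on qtable into a 'testqueue' event carrying the source table name in
ev_extra1 (which is what table_filter matches) and the row data in ev_data;
each consumer then runs its dst_query against dst_db. Assuming that payload
layout, the end-to-end effect of the test insert is roughly:

    INSERT INTO qtable VALUES ('data1');
    -- consumer side, per event, effectively:
    --   insert into logtable (script, event_id, data)
    --   values ('simplecons', <ev_id>, 'data1');
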
diff --git a/tests/testlib.sh b/tests/testlib.sh
index 68f140c2..82f6e8be 100755
--- a/tests/testlib.sh
+++ b/tests/testlib.sh
@@ -13,7 +13,7 @@ v=-q
 #set -o pipefail
 
 cleardb() {
-  echo "Clearing database $1"
+  echo "// Clearing database $1"
   psql -q -d $1 -c "
       set client_min_messages=warning;
       drop schema if exists londiste cascade;
@@ -32,7 +32,7 @@ cleardb() {
 
 clearlogs() {
   code_off
-  echo "clean logs"
+  echo "// clean logs"
   rm -f log/*.log log/*.log.[0-9]
 }
 
@@ -55,6 +55,20 @@ title() {
   echo ""
 }
 
+title2() {
+  code_off
+  echo ""
+  echo "==" "$@" "=="
+  echo ""
+}
+
+title3() {
+  code_off
+  echo ""
+  echo "===" "$@" "==="
+  echo ""
+}
+
 run() {
   code_on
   echo "$ $*"
@@ -67,6 +81,12 @@ run_sql() {
   psql -d "$1" -c "$2" 2>&1
 }
 
+run_qadmin() {
+  code_on
+  echo "$ qadmin -d \"$1\" -c \"$2\""
+  qadmin -d "$1" -c "$2" 2>&1
+}
+
 msg() {
   code_off
   echo ""
@@ -74,3 +94,21 @@ msg() {
   echo ""
 }
 
+cat_file() {
+  code_off
+  mkdir -p `dirname $1`
+  echo ".File: $1"
+  case "$1" in
+    *Makefile) echo "[source,makefile]" ;;
+    #*.[ch]) echo "[source,c]" ;;
+    #*.ac) echo "[source,autoconf]" ;;
+    #*.sh) echo "[source,shell]" ;;
+    #*.sql) echo "[source,sql]" ;;
+    *.*) printf "[source,%s]\n" `echo $1 | sed 's/.*\.//'` ;;
+  esac
+  echo "-----------------------------------"
+  sed 's/^ //' > $1
+  cat $1
+  echo "-----------------------------------"
+}
+