author    | martinko | 2013-03-06 16:47:36 +0000
committer | martinko | 2013-03-06 16:47:36 +0000
commit    | 5a53f0c62c7ef557280ed781e9d90182a064d426
tree      | cf61ffcc75ecf4f4f50bbe32221e8b17a659cbcc
parent    | 611de298f91776b4d6cdf439395d5bd59d2e3f7c
python: text fixes
-rwxr-xr-x | python/londiste.py              | 30
-rw-r--r-- | python/londiste/playback.py     | 16
-rw-r--r-- | python/londiste/setup.py        | 36
-rw-r--r-- | python/londiste/table_copy.py   |  2
-rw-r--r-- | python/londiste/util.py         |  3
-rw-r--r-- | python/pgq/cascade/admin.py     | 25
-rw-r--r-- | python/pgq/cascade/consumer.py  |  3
7 files changed, 55 insertions, 60 deletions
diff --git a/python/londiste.py b/python/londiste.py
index 272b824c..6582abea 100755
--- a/python/londiste.py
+++ b/python/londiste.py
@@ -92,17 +92,17 @@ class Londiste(skytools.DBScript):
         g = optparse.OptionGroup(p, "options for cascading")
         g.add_option("--provider",
                 help = "init: upstream node temp connect string")
-        g.add_option("--target",
+        g.add_option("--target", metavar = "NODE",
                 help = "switchover: target node")
-        g.add_option("--merge",
+        g.add_option("--merge", metavar = "QUEUE",
                 help = "create-leaf: combined queue name")
-        g.add_option("--dead", action = 'append',
+        g.add_option("--dead", metavar = "NODE", action = 'append',
                 help = "cascade: assume node is dead")
         g.add_option("--dead-root", action = 'store_true',
                 help = "takeover: old node was root")
         g.add_option("--dead-branch", action = 'store_true',
                 help = "takeover: old node was branch")
-        g.add_option("--sync-watermark",
+        g.add_option("--sync-watermark", metavar = "NODES",
                 help = "create-branch: list of node names to sync wm with")
         p.add_option_group(g)
@@ -110,15 +110,15 @@ class Londiste(skytools.DBScript):
         g.add_option("--rewind", action = "store_true",
                 help = "change queue position according to destination")
         g.add_option("--reset", action = "store_true",
-                help = "reset queue pos on destination side")
+                help = "reset queue position on destination side")
         p.add_option_group(g)

         g = optparse.OptionGroup(p, "options for add")
         g.add_option("--all", action="store_true",
-                help = "add: include add possible tables")
+                help = "add: include all possible tables")
         g.add_option("--wait-sync", action="store_true",
                 help = "add: wait until all tables are in sync"),
-        g.add_option("--dest-table",
+        g.add_option("--dest-table", metavar = "NAME",
                 help = "add: redirect changes to different table")
         g.add_option("--expect-sync", action="store_true", dest="expect_sync",
                 help = "add: no copy needed", default=False)
@@ -129,24 +129,24 @@ class Londiste(skytools.DBScript):
         g.add_option("--create-full", action="store_true",
                 help = "add: create table/seq if not exist, with full schema")
         g.add_option("--trigger-flags",
-                help="add: Set trigger flags (BAIUDLQ)")
+                help="add: set trigger flags (BAIUDLQ)")
         g.add_option("--trigger-arg", action="append",
-                help="add: Custom trigger arg (can be specified multiply times)")
+                help="add: custom trigger arg (can be specified multiple times)")
         g.add_option("--no-triggers", action="store_true",
-                help="add: Dont put triggers on table (makes sense on leaf)")
+                help="add: do not put triggers on table (makes sense on leaf)")
         g.add_option("--handler", action="store",
-                help="add: Custom handler for table")
+                help="add: custom handler for table")
         g.add_option("--handler-arg", action="append",
-                help="add: Argument to custom handler")
+                help="add: argument to custom handler")
         g.add_option("--find-copy-node", dest="find_copy_node", action="store_true",
                 help = "add: walk upstream to find node to copy from")
-        g.add_option("--copy-node", dest="copy_node",
+        g.add_option("--copy-node", metavar = "NODE", dest="copy_node",
                 help = "add: use NODE as source for initial COPY")
         g.add_option("--merge-all", action="store_true",
                 help="merge tables from all source queues", default=False)
         g.add_option("--no-merge", action="store_true",
                 help="don't merge tables from source queues", default=False)
-        g.add_option("--max-parallel-copy", type = "int",
+        g.add_option("--max-parallel-copy", metavar = "NUM", type = "int",
                 help="max number of parallel copy processes")
         p.add_option_group(g)
@@ -155,7 +155,7 @@ class Londiste(skytools.DBScript):
                 help = "add: ignore table differences, repair: ignore lag")
         g.add_option("--apply", action = "store_true",
                 help="repair: apply fixes automatically")
-        p.add_option("--count-only", action="store_true",
+        g.add_option("--count-only", action="store_true",
                 help="compare: just count rows, do not compare data")
         p.add_option_group(g)
diff --git a/python/londiste/playback.py b/python/londiste/playback.py
index 4fa87014..3deea68a 100644
--- a/python/londiste/playback.py
+++ b/python/londiste/playback.py
@@ -24,7 +24,7 @@ TABLE_OK = 5 # setup
 SYNC_OK   = 0  # continue with batch
 SYNC_LOOP = 1  # sleep, try again
-SYNC_EXIT = 2  # nothing to do, exit skript
+SYNC_EXIT = 2  # nothing to do, exit script

 MAX_PARALLEL_COPY = 8  # default number of allowed max parallel copy processes
@@ -54,7 +54,6 @@ class Counter(object):
             elif t.state == TABLE_OK:
                 self.ok += 1

-
     def get_copy_count(self):
         return self.copy + self.catching_up + self.wanna_sync + self.do_sync
@@ -81,7 +80,7 @@ class TableState(object):
         self.changed = 0
         # position in parallel copy work order
         self.copy_pos = 0
-        # max number of parallel copy processesses allowed
+        # max number of parallel copy processes allowed
         self.max_parallel_copy = MAX_PARALLEL_COPY

     def forget(self):
@@ -234,7 +233,7 @@ class TableState(object):
     def gc_snapshot(self, copy_thread, prev_tick, cur_tick, no_lag):
         """Remove attached snapshot if possible.

-        If the event processing is in current moment. the snapshot
+        If the event processing is in current moment, the snapshot
         is not needed beyond next batch.

         The logic is needed for mostly unchanging tables,
@@ -495,7 +494,7 @@ class Replicator(CascadedWorker):
                     self.log.info("Table %s not OK on provider, waiting", t.name)
                     continue

-                # dont allow more copies than configured
+                # don't allow more copies than configured
                 if npossible == 0:
                     break
                 npossible -= 1
@@ -507,7 +506,7 @@ class Replicator(CascadedWorker):
             # failure inbetween
             self.change_table_state(dst_db, t, TABLE_IN_COPY)

-            # the copy _may_ happen immidiately
+            # the copy _may_ happen immediately
             self.launch_copy(t)

             # there cannot be interesting events in current batch
@@ -516,7 +515,6 @@
         return ret

-
     def sync_from_copy_thread(self, cnt, src_db, dst_db):
         "Copy thread sync logic."
@@ -873,7 +871,7 @@ class Replicator(CascadedWorker):
             cmd.append('-v')

         # let existing copy finish and clean its pidfile,
-        # otherwise new copy will exit immidiately.
+        # otherwise new copy will exit immediately.
         # FIXME: should not happen on per-table pidfile ???
         copy_pidfile = "%s.copy.%s" % (self.pidfile, tbl_stat.name)
         while skytools.signal_pidfile(copy_pidfile, 0):
@@ -906,7 +904,7 @@ class Replicator(CascadedWorker):
             dst_curs.execute("set client_encoding = %s", [src_enc])

     def copy_snapshot_cleanup(self, dst_db):
-        """Remove unnecassary snapshot info from tables."""
+        """Remove unnecessary snapshot info from tables."""
         no_lag = not self.work_state
         changes = False
         for t in self.table_list:
diff --git a/python/londiste/setup.py b/python/londiste/setup.py
index 8a64cc8d..55ded040 100644
--- a/python/londiste/setup.py
+++ b/python/londiste/setup.py
@@ -43,41 +43,41 @@ class LondisteSetup(CascadeAdmin):
         p = CascadeAdmin.init_optparse(self, parser)
         p.add_option("--expect-sync", action="store_true", dest="expect_sync",
-                help = "no copy needed", default=False)
+                help = "no copy needed", default=False)
         p.add_option("--skip-truncate", action="store_true", dest="skip_truncate",
-                help = "dont delete old data", default=False)
+                help = "do not delete old data", default=False)
         p.add_option("--find-copy-node", action="store_true", dest="find_copy_node",
                 help = "add: find table source for copy by walking upwards")
-        p.add_option("--copy-node", dest="copy_node",
+        p.add_option("--copy-node", metavar = "NODE", dest="copy_node",
                 help = "add: use NODE as source for initial copy")
         p.add_option("--force", action="store_true",
-                help="force", default=False)
+                help="force", default=False)
         p.add_option("--all", action="store_true",
-                help="include all tables", default=False)
+                help="include all tables", default=False)
         p.add_option("--wait-sync", action="store_true",
                 help = "add: wait until all tables are in sync"),
         p.add_option("--create", action="store_true",
-                help="create, minimal", default=False)
+                help="create, minimal", default=False)
         p.add_option("--create-full", action="store_true",
-                help="create, full")
+                help="create, full")
         p.add_option("--trigger-flags",
-                help="Set trigger flags (BAIUDLQ)")
+                help="set trigger flags (BAIUDLQ)")
         p.add_option("--trigger-arg", action="append",
-                help="Custom trigger arg")
+                help="custom trigger arg")
         p.add_option("--no-triggers", action="store_true",
-                help="Custom trigger arg")
+                help="no triggers on table")
         p.add_option("--handler", action="store",
-                help="add: Custom handler for table")
+                help="add: custom handler for table")
         p.add_option("--handler-arg", action="append",
-                help="add: Argument to custom handler")
+                help="add: argument to custom handler")
         p.add_option("--merge-all", action="store_true",
-                help="merge tables from all source queues", default=False)
+                help="merge tables from all source queues", default=False)
         p.add_option("--no-merge", action="store_true",
-                help="don't merge tables from source queues", default=False)
-        p.add_option("--max-parallel-copy", type = "int",
-                help="max number of parallel copy processes")
-        p.add_option("--dest-table",
-                help="add: name for actual table")
+                help="do not merge tables from source queues", default=False)
+        p.add_option("--max-parallel-copy", metavar = "NUM", type = "int",
+                help="max number of parallel copy processes")
+        p.add_option("--dest-table", metavar = "NAME",
+                help="add: name for actual table")
         return p

     def extra_init(self, node_type, node_db, provider_db):
diff --git a/python/londiste/table_copy.py b/python/londiste/table_copy.py
index 8e026235..92ccd7f3 100644
--- a/python/londiste/table_copy.py
+++ b/python/londiste/table_copy.py
@@ -214,7 +214,7 @@ class CopyTable(Replicator):
         if tbl_stat.copy_role == 'wait-replay':
             return

-        # if copy done, request immidiate tick from pgqadm,
+        # if copy done, request immediate tick from pgqd,
         # to make state juggling faster. on mostly idle db-s
         # each step may take tickers idle_timeout secs, which is pain.
         q = "select pgq.force_tick(%s)"
diff --git a/python/londiste/util.py b/python/londiste/util.py
index 334a0103..c01ebdcf 100644
--- a/python/londiste/util.py
+++ b/python/londiste/util.py
@@ -35,7 +35,7 @@ def find_copy_source(script, queue_name, copy_table_name, node_name, node_locati
     src_curs.execute(q, [queue_name])
     info = src_curs.fetchone()
     if info['ret_code'] >= 400:
-        raise skytools.UsageError("Node does not exists")
+        raise skytools.UsageError("Node does not exist")

     script.log.info("Checking if %s can be used for copy", info['node_name'])
@@ -69,4 +69,3 @@ def find_copy_source(script, queue_name, copy_table_name, node_name, node_locati
         node_name = info['provider_node']
         node_location = info['provider_location']
         worker_name = info['worker_name']
-
diff --git a/python/pgq/cascade/admin.py b/python/pgq/cascade/admin.py
index dd9215d8..61f1f448 100644
--- a/python/pgq/cascade/admin.py
+++ b/python/pgq/cascade/admin.py
@@ -32,26 +32,26 @@ Node Initialization:
   create-root NAME [PUBLIC_CONNSTR]
   create-branch NAME [PUBLIC_CONNSTR] --provider=<public_connstr>
   create-leaf NAME [PUBLIC_CONNSTR] --provider=<public_connstr>
-    Initializes node.
+    All of the above initialize a node

 Node Administration:
-  pause                 Pause node worker.
-  resume                Resume node worker.
-  wait-root             Wait until node has catched up to root
-  wait-provider         Wait until node has catched up to provider
+  pause                 Pause node worker
+  resume                Resume node worker
+  wait-root             Wait until node has caught up with root
+  wait-provider         Wait until node has caught up with provider
   status                Show cascade state
-  node-status           Show status of a local node
+  node-status           Show status of local node
   members               Show members in set

 Cascade layout change:
   change-provider --provider NEW_NODE
     Change where worker reads from

-  takeover FROMNODE [--all] [--dead]
-    Take other node position.
+  takeover FROM_NODE [--all] [--dead]
+    Take other node position

   drop-node NAME
-    Remove node from cascade.
+    Remove node from cascade

   tag-dead NODE ..
     Tag node as dead
@@ -64,7 +64,7 @@ standalone_usage = """
setadm extra switches:

  pause/resume/change-provider:
-    --node=NODENAME | --consumer=CONSUMER_NAME
+    --node=NODE_NAME | --consumer=CONSUMER_NAME

  create-root/create-branch/create-leaf:
    --worker=WORKER_NAME
@@ -595,7 +595,6 @@ class CascadeAdmin(skytools.AdminScript):
         provider_node = node.provider_node
         subscriber_list = self.get_node_subscriber_list(old_name)

-
         # create copy of member info / subscriber+queue info
         step1 = 'select * from pgq_node.rename_node_step1(%s, %s, %s)'
         # rename node itself, drop copies
@@ -1220,7 +1219,7 @@ class CascadeAdmin(skytools.AdminScript):
         #
         # This is done snapshots, to make sure we delete only events
         # that were dumped out previously. This uses the long-tx
-        # resustant logic described in pgq.batch_event_sql().
+        # resistant logic described in pgq.batch_event_sql().
         #

         # find snapshots
@@ -1297,7 +1296,7 @@ class CascadeAdmin(skytools.AdminScript):
             else:
                 sep = ','

-            # create orinary dict to avoid problems with row class and datetime
+            # create ordinary dict to avoid problems with row class and datetime
             d = { 'ev_id': ev.ev_id,
                   'ev_type': ev.ev_type,
diff --git a/python/pgq/cascade/consumer.py b/python/pgq/cascade/consumer.py
index 200de338..cc1bd7a9 100644
--- a/python/pgq/cascade/consumer.py
+++ b/python/pgq/cascade/consumer.py
@@ -41,7 +41,7 @@ class CascadedConsumer(BaseConsumer):
         p.add_option("--rewind", action = "store_true",
                 help = "change queue position according to destination")
         p.add_option("--reset", action = "store_true",
-                help = "reset queue pos on destination side")
+                help = "reset queue position on destination side")
         return p

     def startup(self):
@@ -291,4 +291,3 @@ class CascadedConsumer(BaseConsumer):
             self.log.warning("Failure to call pgq_node.set_consumer_error()")
             self.reset()
         BaseConsumer.exception_hook(self, det, emsg)
-
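
Most of the non-typo changes in this commit add `metavar` to optparse options so that `--help` shows a meaningful placeholder (NODE, QUEUE, NUM, NAME) instead of the capitalized `dest` name. A minimal standalone sketch of that effect, not taken from the patched files, using only stdlib optparse:

```python
# Minimal sketch (not part of the commit): effect of metavar on optparse help.
import optparse

p = optparse.OptionParser()
g = optparse.OptionGroup(p, "options for cascading")
# Without metavar, optparse renders the dest name in caps: --target=TARGET
# With metavar, it renders the chosen placeholder:         --target=NODE
g.add_option("--target", metavar="NODE",
             help="switchover: target node")
g.add_option("--max-parallel-copy", metavar="NUM", type="int",
             help="max number of parallel copy processes")
p.add_option_group(g)
p.print_help()
```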