| author | Marko Kreen | 2010-11-26 11:32:34 +0000 |
|---|---|---|
| committer | Marko Kreen | 2010-11-26 11:32:34 +0000 |
| commit | b418010cd9b24f32dab91a452a539d0e6d247516 | |
| tree | 4e576a02e7d28fb4e7c63dc5d0de75a39c91016a | |
| parent | 871bde659bcfead0fdf158eb5c4eb7ec4e21ad83 | |
remove duplicate batch_info setting
-rw-r--r-- | python/londiste/playback.py | 8
-rw-r--r-- | python/pgq/cascade/consumer.py | 7
-rw-r--r-- | python/pgq/cascade/worker.py | 4

3 files changed, 8 insertions, 11 deletions
```diff
diff --git a/python/londiste/playback.py b/python/londiste/playback.py
index d8f06da0..ed892c36 100644
--- a/python/londiste/playback.py
+++ b/python/londiste/playback.py
@@ -316,8 +316,8 @@ class Replicator(CascadedWorker):
 
         self.sync_database_encodings(src_db, dst_db)
 
-        self.cur_tick = self._batch_info['tick_id']
-        self.prev_tick = self._batch_info['prev_tick_id']
+        self.cur_tick = self.batch_info['tick_id']
+        self.prev_tick = self.batch_info['prev_tick_id']
 
         dst_curs = dst_db.cursor()
         self.load_table_state(dst_curs)
@@ -344,7 +344,7 @@ class Replicator(CascadedWorker):
         self.flush_sql(dst_curs)
 
         for p in self.used_plugins.values():
-            p.finish_batch(self._batch_info)
+            p.finish_batch(self.batch_info)
         self.used_plugins = {}
 
         # finalize table changes
@@ -527,7 +527,7 @@ class Replicator(CascadedWorker):
             except KeyError:
                 p = t.get_plugin()
                 self.used_plugins[ev.extra1] = p
-                p.prepare_batch(self._batch_info, dst_curs)
+                p.prepare_batch(self.batch_info, dst_curs)
 
             p.process_event(ev, self.apply_sql, dst_curs)
 
diff --git a/python/pgq/cascade/consumer.py b/python/pgq/cascade/consumer.py
index 10866620..533f8655 100644
--- a/python/pgq/cascade/consumer.py
+++ b/python/pgq/cascade/consumer.py
@@ -18,7 +18,6 @@ class CascadedConsumer(Consumer):
     Loads provider from target node, accepts pause/resume commands.
     """
 
-    _batch_info = None
     _consumer_state = None
 
     def __init__(self, service_name, db_name, args):
@@ -156,15 +155,13 @@ class CascadedConsumer(Consumer):
         dst_db.commit()
 
     def process_batch(self, src_db, batch_id, event_list):
-        self._batch_info = self.get_batch_info(batch_id)
-
         state = self._consumer_state
 
-        if self.is_batch_done(state, self._batch_info):
+        if self.is_batch_done(state, self.batch_info):
             return
 
         dst_db = self.get_database(self.target_db)
-        tick_id = self._batch_info['tick_id']
+        tick_id = self.batch_info['tick_id']
         self.process_remote_batch(src_db, tick_id, event_list, dst_db)
 
         # this also commits
diff --git a/python/pgq/cascade/worker.py b/python/pgq/cascade/worker.py
index b21c8c25..af4f3e21 100644
--- a/python/pgq/cascade/worker.py
+++ b/python/pgq/cascade/worker.py
@@ -239,8 +239,8 @@ class CascadedWorker(CascadedConsumer):
             dst_curs.execute(q, [st.target_queue, str(tick_id), self.pgq_queue_name])
         if st.create_tick:
             # create actual tick
-            tick_id = self._batch_info['tick_id']
-            tick_time = self._batch_info['batch_end']
+            tick_id = self.batch_info['tick_id']
+            tick_time = self.batch_info['batch_end']
             q = "select pgq.ticker(%s, %s, %s, %s)"
             dst_curs.execute(q, [self.pgq_queue_name, tick_id, tick_time, self.cur_max_id])
 
```
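For context: the removed `self._batch_info` duplicated metadata that the base consumer apparently already loads before dispatching a batch, so subclasses can read `self.batch_info` directly instead of calling `get_batch_info()` a second time. The following is a minimal, hypothetical sketch of that pattern; the `Consumer` class and its `work_on_batch()` driver below are simplified stand-ins for illustration, not pgq's actual API.

```python
# Sketch (not pgq's real implementation): the base consumer fetches the
# batch metadata once and publishes it as self.batch_info, so subclasses
# no longer keep a private _batch_info copy.

class Consumer:
    """Simplified stand-in for the pgq base consumer."""
    batch_info = None

    def get_batch_info(self, batch_id):
        # Placeholder: the real consumer reads this from the queue database.
        return {'tick_id': batch_id, 'prev_tick_id': batch_id - 1}

    def work_on_batch(self, batch_id, event_list):
        # Hypothetical driver: batch_info is filled in once, up front,
        # before process_batch() is invoked.
        self.batch_info = self.get_batch_info(batch_id)
        self.process_batch(None, batch_id, event_list)


class CascadedConsumer(Consumer):
    def process_batch(self, src_db, batch_id, event_list):
        # After this commit: no duplicate assignment, just read what the
        # base class already loaded.
        tick_id = self.batch_info['tick_id']
        print("processing batch up to tick", tick_id)


if __name__ == '__main__':
    CascadedConsumer().work_on_batch(42, [])
```

The net effect is a single authoritative copy of the batch metadata per batch, which is what lets playback.py and worker.py switch from `self._batch_info` to `self.batch_info`.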