1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
|
"""Cascaded worker.
CascadedConsumer that also maintains node.
"""
import time
from pgq.cascade.consumer import CascadedConsumer
from pgq.producer import bulk_insert_events
__all__ = ['CascadedWorker']
class WorkerState:
    """Decide which actions the worker must perform for the current node.

    The flags are derived purely from the node-state dict returned by
    ``pgq_node.get_node_info()``:
    node_type / node_name / provider_node,
    global_watermark / local_watermark,
    combined_queue / combined_type.
    """

    # defaults; __init__ switches them on per node role
    process_batch = 0       # handled in CascadedConsumer
    copy_events = 0         # forward events into target queue
    global_wm_event = 0     # root publishes global watermark
    local_wm_publish = 1    # report local watermark upstream
    process_events = 0      # apply events on target
    send_tick_event = 0     # emit pgq.tick-id into combined queue
    wait_behind = 0         # leaf of combined-branch must lag behind target
    process_tick_event = 0  # consume pgq.tick-id events
    target_queue = ''       # queue events are copied into
    keep_event_ids = 0      # preserve original ev_id on copy
    create_tick = 0         # create tick on target after batch
    filtered_copy = 0       # skip internal pgq.* events while copying

    def __init__(self, queue_name, nst):
        self.node_type = nst['node_type']
        self.node_name = nst['node_name']
        self.local_watermark = nst['local_watermark']

        node_type = nst['node_type']
        combined_type = nst['combined_type']

        if node_type == 'root':
            self.global_wm_event = 1
            self.local_wm_publish = 0
        elif node_type == 'branch':
            self.target_queue = queue_name
            self.process_batch = 1
            self.process_events = 1
            self.copy_events = 1
            self.process_tick_event = 1
            self.keep_event_ids = 1
            self.create_tick = 1
        elif node_type == 'leaf':
            if not combined_type:
                # plain leaf: just apply events
                self.process_batch = 1
                self.process_events = 1
            else:
                # leaf feeding a combined queue
                self.target_queue = nst['combined_queue']
                if combined_type == 'root':
                    self.process_batch = 1
                    self.process_events = 1
                    self.copy_events = 1
                    self.filtered_copy = 1
                    self.send_tick_event = 1
                elif combined_type == 'branch':
                    self.process_batch = 1
                    self.wait_behind = 1
                else:
                    raise Exception('invalid state 1')
        else:
            raise Exception('invalid state 2')

        # combined_type is only meaningful on leaf nodes
        if combined_type and node_type != 'leaf':
            raise Exception('invalid state 3')
class CascadedWorker(CascadedConsumer):
    """CascadedWorker base class.

    A CascadedConsumer that also maintains the cascade node itself:
    copies events downstream, forwards/creates ticks and publishes
    watermarks, depending on the node's role (see WorkerState).

    Config fragment::

        ## Parameters for pgq.CascadedWorker ##

        # how often the root node should push wm downstream (seconds)
        #global_wm_publish_period = 300

        # how often the nodes should report their wm upstream (seconds)
        #local_wm_publish_period = 300
    """

    # last publish timestamp / publish interval for the watermarks
    global_wm_publish_time = 0
    global_wm_publish_period = 5 * 60
    local_wm_publish_time = 0
    local_wm_publish_period = 5 * 60

    # flush copy buffer when it reaches this many events
    max_evbuf = 500
    cur_event_seq = 0
    cur_max_id = 0
    seq_buffer = 10000

    # non-main workers skip cascade maintenance (wm publish, event copy)
    main_worker = True

    _worker_state = None
    ev_buf = []

    def __init__(self, service_name, db_name, args):
        """Initialize new consumer.

        @param service_name: service_name for DBScript
        @param db_name: target database name for get_database()
        @param args: cmdline args for DBScript
        """
        CascadedConsumer.__init__(self, service_name, db_name, args)
        # per-instance copy buffer; the class attribute above would be
        # shared between instances if appended to before first reassign
        self.ev_buf = []

    def reload(self):
        """Reload config, picking up the watermark publish periods."""
        CascadedConsumer.reload(self)

        self.global_wm_publish_period = self.cf.getfloat('global_wm_publish_period', CascadedWorker.global_wm_publish_period)
        self.local_wm_publish_period = self.cf.getfloat('local_wm_publish_period', CascadedWorker.local_wm_publish_period)

    def process_remote_batch(self, src_db, tick_id, event_list, dst_db):
        """Worker-specific event processing.

        Copies/applies events according to the worker state and tracks
        the highest event id seen for later tick creation.
        """
        self.ev_buf = []

        max_id = 0
        st = self._worker_state

        if st.wait_behind:
            self.wait_for_tick(dst_db, tick_id)

        src_curs = src_db.cursor()
        dst_curs = dst_db.cursor()
        for ev in event_list:
            if st.copy_events:
                self.copy_event(dst_curs, ev, st.filtered_copy)
            if ev.ev_type[:4] == "pgq.":
                # process cascade events even on waiting leaf node
                self.process_remote_event(src_curs, dst_curs, ev)
            else:
                if st.process_events:
                    self.process_remote_event(src_curs, dst_curs, ev)
            if ev.ev_id > max_id:
                max_id = ev.ev_id
        if st.local_wm_publish:
            self.publish_local_wm(src_db)
        if max_id > self.cur_max_id:
            self.cur_max_id = max_id

    def wait_for_tick(self, dst_db, tick_id):
        """On combined-branch, the leaf needs to wait for the tick
        to appear from combined-root before processing its batch.
        """
        while True:
            cst = self._consumer_state
            if cst['completed_tick'] >= tick_id:
                return
            time.sleep(10 * self.loop_delay)
            self._consumer_state = self.refresh_state(dst_db)

    def is_batch_done(self, state, batch_info):
        """Check batch completion; on combined-branch the target can
        legitimately be several batches ahead of this worker.
        """
        wst = self._worker_state
        # on combined-branch the target can get several batches ahead
        if wst.wait_behind:
            cur_tick = batch_info['tick_id']
            dst_tick = state['completed_tick']
            if cur_tick < dst_tick:
                return True
        return CascadedConsumer.is_batch_done(self, state, batch_info)

    def publish_local_wm(self, src_db):
        """Send local watermark to provider.

        Rate-limited by local_wm_publish_period; no-op for non-main workers.
        """
        if not self.main_worker:
            return
        t = time.time()
        if t - self.local_wm_publish_time < self.local_wm_publish_period:
            return

        st = self._worker_state
        self.log.debug("Publishing local watermark: %d", st.local_watermark)
        src_curs = src_db.cursor()
        q = "select * from pgq_node.set_subscriber_watermark(%s, %s, %s)"
        src_curs.execute(q, [self.pgq_queue_name, st.node_name, st.local_watermark])
        self.local_wm_publish_time = t

    def process_remote_event(self, src_curs, dst_curs, ev):
        """Handle cascading events (ev_type 'pgq.*').

        Non-cascade events are passed to CascadedConsumer, which will
        error out on them; unknown cascade event types raise.
        """
        if ev.retry:
            raise Exception('CascadedWorker must not get retry events')

        # non cascade events send to CascadedConsumer to error out
        if ev.ev_type[:4] != 'pgq.':
            CascadedConsumer.process_remote_event(self, src_curs, dst_curs, ev)
            return

        # ignore cascade events if not main worker
        if not self.main_worker:
            return

        # check if for right queue (tick-id events carry the source queue)
        t = ev.ev_type
        if ev.ev_extra1 != self.pgq_queue_name and t != "pgq.tick-id":
            raise Exception("bad event in queue: " + str(ev))

        self.log.info("got cascade event: %s", t)
        if t == "pgq.location-info":
            node = ev.ev_data
            loc = ev.ev_extra2
            dead = ev.ev_extra3
            q = "select * from pgq_node.register_location(%s, %s, %s, %s)"
            dst_curs.execute(q, [self.pgq_queue_name, node, loc, dead])
        elif t == "pgq.global-watermark":
            tick_id = int(ev.ev_data)
            q = "select * from pgq_node.set_global_watermark(%s, %s)"
            dst_curs.execute(q, [self.pgq_queue_name, tick_id])
        elif t == "pgq.tick-id":
            tick_id = int(ev.ev_data)
            if ev.ev_extra1 == self.pgq_queue_name:
                raise Exception('tick-id event for own queue?')
            st = self._worker_state
            if st.process_tick_event:
                q = "select * from pgq_node.set_partition_watermark(%s, %s, %s)"
                dst_curs.execute(q, [self.pgq_queue_name, ev.ev_extra1, tick_id])
        else:
            raise Exception("unknown cascade event: %s" % t)

    def finish_remote_batch(self, src_db, dst_db, tick_id):
        """Worker-specific cleanup on target node.

        Flushes the copy buffer and, depending on node role, emits a
        tick-id event into the combined queue or creates the actual tick.
        """
        if self.main_worker:
            st = self._worker_state
            dst_curs = dst_db.cursor()

            self.flush_events(dst_curs)

            # send tick event into queue
            if st.send_tick_event:
                q = "select pgq.insert_event(%s, 'pgq.tick-id', %s, %s, null, null, null)"
                dst_curs.execute(q, [st.target_queue, str(tick_id), self.pgq_queue_name])
            if st.create_tick:
                # create actual tick, carrying over the source tick id/time
                tick_id = self.batch_info['tick_id']
                tick_time = self.batch_info['batch_end']
                q = "select pgq.ticker(%s, %s, %s, %s)"
                dst_curs.execute(q, [self.pgq_queue_name, tick_id, tick_time, self.cur_max_id])

        CascadedConsumer.finish_remote_batch(self, src_db, dst_db, tick_id)

    def copy_event(self, dst_curs, ev, filtered_copy):
        """Add event to copy buffer, flushing when the buffer is full.

        With filtered_copy, internal cascade events ('pgq.*') are dropped.
        """
        if not self.main_worker:
            return
        if filtered_copy:
            # BUG FIX: was ev.type — not an event field; the rest of this
            # module matches cascade events on ev_type
            if ev.ev_type[:4] == "pgq.":
                return
        if len(self.ev_buf) >= self.max_evbuf:
            self.flush_events(dst_curs)
        self.ev_buf.append(ev)

    def flush_events(self, dst_curs):
        """Send copy buffer to target queue.

        On branch nodes the original event ids are preserved.
        """
        if len(self.ev_buf) == 0:
            return
        flds = ['ev_time', 'ev_type', 'ev_data', 'ev_extra1',
                'ev_extra2', 'ev_extra3', 'ev_extra4']
        st = self._worker_state
        if st.keep_event_ids:
            flds.append('ev_id')
        bulk_insert_events(dst_curs, self.ev_buf, flds, st.target_queue)
        self.ev_buf = []

    def refresh_state(self, dst_db, full_logic = True):
        """Load also node state from target node.

        Rebuilds the cached WorkerState from pgq_node.get_node_info().
        """
        res = CascadedConsumer.refresh_state(self, dst_db, full_logic)
        q = "select * from pgq_node.get_node_info(%s)"
        st = self.exec_cmd(dst_db, q, [ self.pgq_queue_name ])
        self._worker_state = WorkerState(self.pgq_queue_name, st[0])
        return res

    def process_root_node(self, dst_db):
        """On root node send global watermark downstream.

        Rate-limited by global_wm_publish_period.
        """
        CascadedConsumer.process_root_node(self, dst_db)

        t = time.time()
        if t - self.global_wm_publish_time < self.global_wm_publish_period:
            return

        self.log.debug("Publishing global watermark")
        dst_curs = dst_db.cursor()
        q = "select * from pgq_node.set_global_watermark(%s, NULL)"
        dst_curs.execute(q, [self.pgq_queue_name])
        dst_db.commit()
        self.global_wm_publish_time = t
|