author     Mark Wong    2017-12-13 19:54:43 +0000
committer  Mark Wong    2018-02-05 21:06:28 +0000
commit     e4821c776ac0179af6d54ad35c143c0fd4fff281 (patch)
tree       c98b2d829dae12f830aafc7267699cd3b5602d5f
parent     015daacf6a8b53a682b989888ae7b75ddc82ccd8 (diff)
Use collectd for system and postgres statistics
-rw-r--r--  client/collectors/collectd.conf.in   412
-rw-r--r--  client/collectors/collectd.py         74
-rw-r--r--  client/collectors/linux.py            58
-rw-r--r--  client/collectors/postgres.py        178
-rwxr-xr-x  client/perffarm-client.py              6
5 files changed, 515 insertions, 213 deletions
diff --git a/client/collectors/collectd.conf.in b/client/collectors/collectd.conf.in
new file mode 100644
index 0000000..20291d2
--- /dev/null
+++ b/client/collectors/collectd.conf.in
@@ -0,0 +1,412 @@
+##############################################################################
+# Global #
+#----------------------------------------------------------------------------#
+# Global settings for the daemon. #
+##############################################################################
+
+#Hostname "localhost"
+#FQDNLookup true
+#BaseDir "/var/lib/collectd"
+#PIDFile "/run/collectd/collectd.pid"
+#PluginDir "/usr/lib64/collectd"
+#TypesDB "/usr/share/collectd/types.db"
+
+#----------------------------------------------------------------------------#
+# Interval at which to query values. This may be overwritten on a per-plugin #
+# base by using the 'Interval' option of the LoadPlugin block: #
+# <LoadPlugin foo> #
+# Interval 60 #
+# </LoadPlugin> #
+#----------------------------------------------------------------------------#
+Interval 60
+
+##############################################################################
+# Logging #
+#----------------------------------------------------------------------------#
+# Plugins which provide logging functions should be loaded first, so log #
+# messages generated when loading or configuring other plugins can be #
+# accessed. #
+##############################################################################
+
+LoadPlugin logfile
+
+<Plugin logfile>
+ LogLevel info
+ File STDOUT
+ Timestamp true
+ PrintSeverity false
+</Plugin>
+
+##############################################################################
+# LoadPlugin section #
+#----------------------------------------------------------------------------#
+# Lines beginning with a single `#' belong to plugins which have been built #
+# but are disabled by default. #
+# #
+# Lines beginning with `##' belong to plugins which have not been built due #
+# to missing dependencies or because they have been deactivated explicitly. #
+##############################################################################
+
+%(modules)s
+
+##############################################################################
+# Plugin configuration #
+#----------------------------------------------------------------------------#
+# In this section configuration stubs for each plugin are provided. A desc- #
+# ription of those options is available in the collectd.conf(5) manual page. #
+##############################################################################
+
+<Plugin aggregation>
+ <Aggregation>
+ Plugin "cpu"
+ Type "percent"
+
+ GroupBy "Host"
+ GroupBy "TypeInstance"
+
+ CalculateSum false
+ CalculateAverage true
+ </Aggregation>
+</Plugin>
+
+<Plugin cpu>
+ ReportByCpu true
+ ReportByState true
+ ValuesPercentage true
+</Plugin>
+
+<Plugin csv>
+ DataDir "%(datadir)s"
+ StoreRates false
+</Plugin>
+
+<Plugin postgresql>
+ <Query db_stats>
+        Statement "\
+SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit, \
+       tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, \
+       conflicts, temp_files, temp_bytes, deadlocks, blk_read_time, \
+       blk_write_time \
+FROM pg_stat_database \
+WHERE datname = $1"
+ Param database
+ <Result>
+ Type "gauge"
+ InstancePrefix "database-numbackends"
+ ValuesFrom "numbackends"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-xact_rollback"
+ ValuesFrom "xact_rollback"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-xact_commit"
+ ValuesFrom "xact_commit"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-blks_read"
+ ValuesFrom "blks_read"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-blks_hit"
+ ValuesFrom "blks_hit"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-tup_returned"
+ ValuesFrom "tup_returned"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-tup_fetched"
+ ValuesFrom "tup_fetched"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-tup_inserted"
+ ValuesFrom "tup_inserted"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-tup_updated"
+ ValuesFrom "tup_updated"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-tup_deleted"
+ ValuesFrom "tup_deleted"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-conflicts"
+ ValuesFrom "conflicts"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-temp_files"
+ ValuesFrom "temp_files"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-temp_bytes"
+ ValuesFrom "temp_bytes"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-deadlocks"
+ ValuesFrom "deadlocks"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-blk_read_time"
+ ValuesFrom "blk_read_time"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "database-blk_write_time"
+ ValuesFrom "blk_write_time"
+ </Result>
+ </Query>
+ <Query table_stats>
+ Statement "\
+SELECT a.schemaname AS schema, a.relname AS tablename, \
+ seq_scan, seq_tup_read, COALESCE(idx_scan, 0) AS idx_scan, \
+ COALESCE(idx_tup_fetch, 0) AS idx_tup_fetch, n_tup_ins, n_tup_upd, \
+ n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup, heap_blks_read, \
+ heap_blks_hit, COALESCE(idx_blks_read, 0) AS idx_blks_read, \
+ COALESCE(idx_blks_hit, 0) AS idx_blks_hit, \
+ COALESCE(toast_blks_read, 0) AS toast_blks_read, \
+ COALESCE(toast_blks_hit, 0) AS toast_blks_hit, \
+ COALESCE(tidx_blks_read, 0) AS tidx_blks_read, \
+ COALESCE(tidx_blks_hit, 0) AS tidx_blks_hit \
+FROM pg_statio_all_tables a, pg_stat_all_tables b \
+WHERE a.relid = b.relid;"
+ <Result>
+ Type "counter"
+ InstancePrefix "table-seq_scan"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "seq_scan"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-seq_tup_read"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "seq_tup_read"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-idx_scan"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "idx_scan"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-idx_tup_fetch"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "idx_tup_fetch"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-n_tup_ins"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "n_tup_ins"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-n_tup_upd"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "n_tup_upd"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-n_tup_del"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "n_tup_del"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-n_tup_hot_upd"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "n_tup_hot_upd"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-n_live_tup"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "n_live_tup"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-n_dead_tup"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "n_dead_tup"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-heap_blks_read"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "heap_blks_read"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-heap_blks_hit"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "heap_blks_hit"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-idx_blks_read"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "idx_blks_read"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-idx_blks_hit"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "idx_blks_hit"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-toast_blks_read"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "toast_blks_read"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-toast_blks_hit"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "toast_blks_hit"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-tidx_blks_read"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "tidx_blks_read"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "table-tidx_blks_hit"
+ InstancesFrom "schema" "tablename"
+ ValuesFrom "tidx_blks_hit"
+ </Result>
+ </Query>
+ <Query index_stats>
+ Statement "\
+SELECT a.schemaname AS schema, a.relname AS tablename, \
+ a.indexrelname AS indexname, idx_scan, idx_tup_read, idx_tup_fetch, \
+ idx_blks_read, idx_blks_hit \
+FROM pg_stat_all_indexes a, pg_statio_all_indexes b \
+WHERE a.indexrelid = b.indexrelid;"
+ <Result>
+ Type "counter"
+ InstancePrefix "index-idx_scan"
+ InstancesFrom "schema" "tablename" "indexname"
+ ValuesFrom "idx_scan"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "index-idx_tup_read"
+ InstancesFrom "schema" "tablename" "indexname"
+ ValuesFrom "idx_tup_read"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "index-idx_tup_fetch"
+ InstancesFrom "schema" "tablename" "indexname"
+ ValuesFrom "idx_tup_fetch"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "index-idx_blks_read"
+ InstancesFrom "schema" "tablename" "indexname"
+ ValuesFrom "idx_blks_read"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "index-idx_blks_hit"
+ InstancesFrom "schema" "tablename" "indexname"
+ ValuesFrom "idx_blks_hit"
+ </Result>
+ </Query>
+    <Query bgwriter_stats>
+ Statement "\
+SELECT checkpoints_timed, checkpoints_req, checkpoint_write_time, \
+ checkpoint_sync_time, buffers_checkpoint, buffers_clean, \
+ maxwritten_clean, buffers_backend, buffers_backend_fsync,\
+ buffers_alloc \
+FROM pg_stat_bgwriter;"
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-checkpoints_timed"
+ ValuesFrom "checkpoints_timed"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-checkpoints_req"
+ ValuesFrom "checkpoints_req"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-checkpoint_write_time"
+ ValuesFrom "checkpoint_write_time"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-checkpoint_sync_time"
+ ValuesFrom "checkpoint_sync_time"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-buffers_checkpoint"
+ ValuesFrom "buffers_checkpoint"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-buffers_clean"
+ ValuesFrom "buffers_clean"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-maxwritten_clean"
+ ValuesFrom "maxwritten_clean"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-buffers_backend"
+ ValuesFrom "buffers_backend"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-buffers_backend_fsync"
+ ValuesFrom "buffers_backend_fsync"
+ </Result>
+ <Result>
+ Type "counter"
+ InstancePrefix "bgwriter-buffers_alloc"
+ ValuesFrom "buffers_alloc"
+ </Result>
+ </Query>
+ <Database %(database)s>
+ Host "localhost"
+ User "postgres"
+ Query db_stats
+ Query table_stats
+        Query index_stats
+        Query bgwriter_stats
+ </Database>
+</Plugin>
+
+<Plugin swap>
+ ReportByDevice false
+ ReportBytes true
+ ValuesAbsolute true
+ ValuesPercentage false
+</Plugin>
+
+<Plugin vmem>
+ Verbose false
+</Plugin>
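
The %(modules)s, %(datadir)s and %(database)s tokens above are Python string-interpolation placeholders; CollectdCollector (added below) fills them in and writes the result to /tmp/.collectd.conf. A minimal sketch of the config check that the TODO in collectd.py alludes to, assuming collectd is on the PATH and that its -t (test configuration only) and -C (config file) flags behave as documented:

    import subprocess

    # Hypothetical pre-flight check, not part of this patch: 'collectd -t'
    # parses the given config file and exits without starting the daemon.
    rc = subprocess.call(['collectd', '-t', '-C', '/tmp/.collectd.conf'])
    if rc != 0:
        raise RuntimeError('collectd rejected the generated configuration')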
diff --git a/client/collectors/collectd.py b/client/collectors/collectd.py
new file mode 100644
index 0000000..57a2ce0
--- /dev/null
+++ b/client/collectors/collectd.py
@@ -0,0 +1,74 @@
+import os
+
+from utils.logging import log
+from utils.misc import run_cmd
+
+COLLECTD_CONFIG = '/tmp/.collectd.conf'
+COLLECTD_PIDFILE = '/tmp/.collectd.pid'
+
+
+class CollectdCollector(object):
+ """
+ Collect basic system and database statistics using collectd.
+ """
+
+ def __init__(self, outdir, dbname, bin_path):
+ self._bin_path = bin_path
+
+ # Hard code all possible places a packager might install collectd.
+ self._env = os.environ
+ self._env['PATH'] = ':'.join(['/usr/sbin/', '/sbin/', self._env['PATH']])
+
+        # Assume the collectd.conf.in template is in the same directory as
+        # this file.
+ cwd = os.path.dirname(os.path.realpath(__file__))
+
+ modules = (
+ 'LoadPlugin aggregation\n'
+ 'LoadPlugin contextswitch\n'
+ 'LoadPlugin cpu\n'
+ 'LoadPlugin csv\n'
+ 'LoadPlugin disk\n'
+ 'LoadPlugin interface\n'
+ 'LoadPlugin memory\n'
+ 'LoadPlugin postgresql\n'
+ 'LoadPlugin processes\n'
+ 'LoadPlugin swap\n'
+ )
+
+ system = os.popen("uname").readlines()[0].split()[0]
+
+ if system == 'Linux':
+ modules += (
+ 'LoadPlugin ipc\n'
+ 'LoadPlugin vmem\n'
+ )
+
+ outdir = '%s/stats' % outdir
+ config_template = open('%s/collectd.conf.in' % cwd, 'r')
+ config = open(COLLECTD_CONFIG, 'w')
+ config.write(config_template.read() % {'database': dbname,
+ 'datadir': outdir,
+ 'modules': modules})
+ config.close()
+ config_template.close()
+
+        # TODO: Use collectd to test the config and exit appropriately.
+
+ def start(self):
+ log("starting collectd")
+ cmd = 'collectd -C %s -P %s' % (COLLECTD_CONFIG, COLLECTD_PIDFILE)
+ run_cmd(cmd.split(' '), env=self._env)
+
+ def stop(self):
+ log("stopping collectd")
+ pidfile = open(COLLECTD_PIDFILE, 'r')
+ pid = pidfile.read().strip()
+ run_cmd(['kill', pid])
+
+ def result(self):
+ return {}
+
+
+def run_collector(in_queue, out_queue, dbname, bin_path, outdir, interval=1.0):
+ pass
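
With the csv plugin pointed at %(datadir)s (the stats/ subdirectory of the run's output directory), collectd writes one file per value list and day, typically <datadir>/<hostname>/<plugin>[-<instance>]/<type>[-<instance>]-YYYY-MM-DD, each starting with a header line such as "epoch,value". A minimal sketch, under those layout assumptions, of loading everything back for post-processing:

    import csv
    import glob
    import os

    def load_collectd_csv(datadir):
        # Hypothetical helper, not part of this patch: map each per-day CSV
        # file written by collectd's csv plugin to its parsed rows.
        data = {}
        for path in glob.glob(os.path.join(datadir, '*', '*', '*')):
            with open(path) as f:
                data[os.path.relpath(path, datadir)] = list(csv.DictReader(f))
        return data

    # e.g. load_collectd_csv('/path/to/outdir/stats')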
diff --git a/client/collectors/linux.py b/client/collectors/linux.py
index 3a8b231..3461aec 100644
--- a/client/collectors/linux.py
+++ b/client/collectors/linux.py
@@ -6,80 +6,30 @@ from utils.misc import run_cmd
class LinuxCollector(object):
- 'collects various Linux-specific statistics (cpuinfo, mounts, sar)'
+ 'Collect various Linux-specific statistics (cpuinfo, mounts)'
- def __init__(self, outdir, sar_path='/var/log/sa'):
+ def __init__(self, outdir):
self._outdir = outdir
- self._sar = sar_path
-
- self._start_ts = None
- self._end_ts = None
# Hard code all possible places a packager might install sysctl.
self._env = os.environ
self._env['PATH'] = ':'.join(['/usr/sbin/', '/sbin/', self._env['PATH']])
def start(self):
- self._start_ts = datetime.now()
+ pass
def stop(self):
- self._end_ts = datetime.now()
+ pass
def result(self):
'build the results'
r = {'sysctl': self._collect_sysctl()}
- # ignore sar if we've not found it
- self._collect_sar_stats()
-
r.update(self._collect_system_info())
return r
- def _collect_sar_stats(self):
- 'extracts all data available in sar, filters by timestamp range'
-
- log("collecting sar stats")
-
- d = self._start_ts.date()
- while d <= self._end_ts.date():
-
- # FIXME maybe skip if the file does not exist
- filename = '%(path)s/sa%(day)s' % {'path': self._sar,
- 'day': d.strftime('%d')}
-
- # if the sar file does not exist, skip it
- if os.path.isfile(filename):
-
- log("extracting sar data from '%s'" % (filename,))
-
- # need to use the right combination of start/end timestamps
- s = self._start_ts.strftime('%H:%M:%S')
- e = self._end_ts.strftime('%H:%M:%S')
-
- if d == self._start_ts.date() and d == self._end_ts.date():
- r = run_cmd(['sar', '-A', '-p', '-s', s, '-e', e, '-f',
- filename])
- elif d == self._start_ts.date():
- r = run_cmd(['sar', '-A', '-p', '-s', s, '-f', filename])
- elif d == self._end_ts.date():
- r = run_cmd(['sar', '-A', '-p', '-e', e, '-f', filename])
- else:
- r = run_cmd(['sar', '-A', '-p', '-f', filename])
-
- with open(''.join([self._outdir, '/sar-', d.strftime('%d'),
- '.txt']),
- 'w') as f:
- f.write(r[1])
- f.close()
- else:
-
- log("file '%s' does not exist, skipping" % (filename,))
-
- # proceed to the next day
- d += timedelta(days=1)
-
def _collect_sysctl(self):
'collect kernel configuration'
diff --git a/client/collectors/postgres.py b/client/collectors/postgres.py
index 86cb786..fb6a32d 100644
--- a/client/collectors/postgres.py
+++ b/client/collectors/postgres.py
@@ -22,170 +22,32 @@ class PostgresCollector(object):
self._bin_path = bin_path
def start(self):
- self._in_queue = Queue()
- self._out_queue = Queue()
- self._worker = Process(target=run_collector,
- args=(self._in_queue, self._out_queue,
- self._dbname, self._bin_path,
- self._outdir))
- self._worker.start()
-
- def stop(self):
-
- # signal the worker process to stop by writing a value into the queue
- self._in_queue.put(True)
-
- log("stopping the PostgreSQL statistics collector")
-
- # Wait for collector to place result into the output queue. This needs
- # to happen before calling join() otherwise it causes a deadlock.
- log("waiting for collector result in a queue")
- self._result = self._out_queue.get()
-
- # And wait for the worker to terminate. This should be pretty fast as
- # the collector places result into the queue right before terminating.
- log("waiting for collector process to terminate")
- self._worker.join()
-
- self._worker = None
- self._in_queue = None
- self._out_queue = None
-
- def result(self):
- return self._result
-
-
-def run_collector(in_queue, out_queue, dbname, bin_path, outdir, interval=1.0):
- """
- collector code for a separate process, communicating through a pair of
- queues
- """
-
- bgwriter_log = None
- tables_log = None
- indexes_log = None
- database_log = None
-
- # get current timestamp
- ts = time.time()
-
- while True:
-
- # wait until the next tick
- ts += interval
-
- # if we're behind, skip forward
- if ts < time.time():
- continue
-
- # sleep (but only for the remaining time, to prevent drift)
- time.sleep(ts - time.time())
-
- # if we've received message in the input queue (not empty), terminate
- if not in_queue.empty():
- log("PostgreSQL collector received request to terminate")
- break
-
- # open connection to the benchmark database (if can't open, continue)
- # notice this is intentionally after the wait, so we'll wait before
- # next connection attempt
+ log("saving postgres settings")
try:
- conn = psycopg2.connect('host=localhost dbname=%s' % (dbname,))
+ conn = psycopg2.connect('host=localhost dbname=%s' % self._dbname)
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
- except Exception as ex:
- continue
-
- # background writer stats
- cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * '
- 'FROM pg_stat_bgwriter')
-
- # on the first iteration, construct the CSV files
- if bgwriter_log is None:
+ cur.execute(
+ 'SELECT name, setting, source '
+ 'FROM pg_settings ORDER BY lower(name)'
+ )
fields = [desc[0] for desc in cur.description]
- filename = ''.join([outdir, '/bgwriter.csv'])
- bgwriter_log = csv.DictWriter(open(filename, 'w'), fields,
+ filename = ''.join([self._outdir, '/settings.csv'])
+            settings_file = open(filename, 'w')
+            settings_log = csv.DictWriter(settings_file, fields,
lineterminator='\n')
- bgwriter_log.writeheader()
-
- bgwriter_log.writerows(cur.fetchall())
-
- # TODO we can assume statistics for most objects (tables, indexes)
- # won't change every second, so we can optimize the amount of data by
- # detecting changes and only keeping the two rows next to it
-
- # table statistics
- cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * '
- 'FROM pg_stat_all_tables JOIN pg_statio_all_tables '
- 'USING (relid, schemaname, relname)')
-
- # on the first iteration, construct the CSV files
- if tables_log is None:
- fields = [desc[0] for desc in cur.description]
- filename = ''.join([outdir, '/tables.csv'])
- tables_log = csv.DictWriter(open(filename, 'w'), fields,
- lineterminator='\n')
- tables_log.writeheader()
-
- tables_log.writerows(cur.fetchall())
-
- # index statistics
- cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * '
- 'FROM pg_stat_all_indexes JOIN pg_statio_all_indexes '
- 'USING (relid, indexrelid, schemaname, relname, '
- 'indexrelname)')
-
- # on the first iteration, construct the CSV files
- if indexes_log is None:
- fields = [desc[0] for desc in cur.description]
- filename = ''.join([outdir, '/indexes.csv'])
- indexes_log = csv.DictWriter(open(filename, 'w'), fields,
- lineterminator='\n')
- indexes_log.writeheader()
-
- indexes_log.writerows(cur.fetchall())
-
- # database statistics
- cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * '
- 'FROM pg_stat_database')
-
- # on the first iteration, construct the CSV files
- if database_log is None:
- fields = [desc[0] for desc in cur.description]
- filename = ''.join([outdir, '/database.csv'])
- database_log = csv.DictWriter(open(filename, 'w'), fields,
- lineterminator='\n')
- database_log.writeheader()
-
- database_log.writerows(cur.fetchall())
-
- conn.close()
+ settings_log.writeheader()
+ settings_log.writerows(cur.fetchall())
+            settings_file.close()
+ conn.close()
+ except Exception as ex:
+ pass
- try:
- conn = psycopg2.connect('host=localhost dbname=%s' % (dbname,))
- cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
- cur.execute('SELECT name, setting, source '
- 'FROM pg_settings ORDER BY lower(name)')
- fields = [desc[0] for desc in cur.description]
- filename = ''.join([outdir, '/settings.csv'])
- settings_log = csv.DictWriter(open(filename, 'w'), fields,
- lineterminator='\n')
- settings_log.writeheader()
- settings_log.writerows(cur.fetchall())
- conn.close()
- except Exception as ex:
+ def stop(self):
pass
- # close the CSV writers
- bgwriter_log = None
- tables_log = None
- indexes_log = None
- database_log = None
-
- result = {}
-
- r = run_cmd([bin_path + '/pg_config'])
- result['config'] = r[1]
+ def result(self):
+ return {}
- out_queue.put(result)
- log("PostgreSQL collector put results into output queue and terminates")
+def run_collector(in_queue, out_queue, dbname, bin_path, outdir, interval=1.0):
+ pass
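
With the per-interval pg_stat_* sampling moved into the collectd postgresql plugin configured above, PostgresCollector is reduced to a one-shot dump of pg_settings at start(). For comparison only, a resource-safe sketch of that same dump using context managers instead of explicit close() calls (an illustration, not what the patch itself does):

    import csv
    from contextlib import closing

    import psycopg2
    import psycopg2.extras

    def dump_settings(dbname, outdir):
        # Sketch of the query used in PostgresCollector.start() above, with
        # the connection, cursor and output file closed by context managers.
        with closing(psycopg2.connect('host=localhost dbname=%s' % dbname)) as conn:
            with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
                cur.execute('SELECT name, setting, source '
                            'FROM pg_settings ORDER BY lower(name)')
                fields = [desc[0] for desc in cur.description]
                with open('%s/settings.csv' % outdir, 'w') as f:
                    writer = csv.DictWriter(f, fields, lineterminator='\n')
                    writer.writeheader()
                    writer.writerows(cur.fetchall())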
diff --git a/client/perffarm-client.py b/client/perffarm-client.py
index 035bf7d..0b0c0d3 100755
--- a/client/perffarm-client.py
+++ b/client/perffarm-client.py
@@ -7,6 +7,7 @@ import os
from benchmarks.pgbench import PgBench
from benchmarks.runner import BenchmarkRunner
+from collectors.collectd import CollectdCollector
from collectors.linux import LinuxCollector
from collectors.postgres import PostgresCollector
from collectors.collector import MultiCollector
@@ -44,7 +45,10 @@ if __name__ == '__main__':
system = os.popen("uname").readlines()[0].split()[0]
if system == 'Linux':
- collectors.register('system', LinuxCollector(OUTPUT_DIR))
+ collectors.register('linux', LinuxCollector(OUTPUT_DIR))
+
+ collectors.register('collectd',
+ CollectdCollector(OUTPUT_DIR, DATABASE_NAME, ''))
pg_collector = PostgresCollector(OUTPUT_DIR, dbname=DATABASE_NAME,
bin_path=('%s/bin' % (BUILD_PATH)))
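
The client now registers the new 'collectd' collector alongside the existing ones. MultiCollector itself lives in collectors/collector.py and is untouched by this patch; presumably it fans start(), stop() and result() out to every registered collector. A minimal sketch of that pattern, with illustrative names only:

    class MultiCollectorSketch(object):
        'Illustrative fan-out pattern; the real class is in collectors/collector.py.'

        def __init__(self):
            self._collectors = {}

        def register(self, name, collector):
            self._collectors[name] = collector

        def start(self):
            for collector in self._collectors.values():
                collector.start()

        def stop(self):
            for collector in self._collectors.values():
                collector.stop()

        def result(self):
            # merge per-collector results under their registered names
            return {name: c.result() for name, c in self._collectors.items()}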