author     Tomas Vondra    2016-10-13 13:39:10 +0000
committer  Tomas Vondra    2017-02-27 00:32:51 +0000
commit     d2e86de59149c0078a8d1709c837f13ce5313775
tree       3f129e55653e13a9eac080aee18676c597ef367e
parent     fe20d05fb812a96de9933c254ecaefb25352254c
also add CSV output to pgbench
-rw-r--r--   client/benchmarks/pgbench.py   11
-rw-r--r--   client/settings.py              3

2 files changed, 9 insertions, 5 deletions
diff --git a/client/benchmarks/pgbench.py b/client/benchmarks/pgbench.py
index 2ec38d5..d92b2c7 100644
--- a/client/benchmarks/pgbench.py
+++ b/client/benchmarks/pgbench.py
@@ -185,7 +185,7 @@ class PgBench(object):
         return issues
 
-    def _run(self, duration, nclients=1, njobs=1, read_only=False, aggregate=True):
+    def _run(self, duration, nclients=1, njobs=1, read_only=False, aggregate=True, csv_queue):
         'run pgbench on the database (either a warmup or actual benchmark run)'
 
         args = ['pgbench', '-c', str(nclients), '-j', str(njobs), '-T', str(duration)]
@@ -216,10 +216,13 @@ class PgBench(object):
         r.update({'start' : start, 'end' : end})
 
+        if csv_queue:
+            csv_queue.put([start, end, r['scale'], nclients, njobs, mode, duration, latency, tps])
+
         return r
 
-    def run_tests(self):
+    def run_tests(self, csv_queue):
         'execute the whole benchmark, including initialization, warmup and benchmark runs'
 
         # derive configuration for the CPU count / RAM size
@@ -240,12 +243,12 @@ class PgBench(object):
         for clients in config['clients']:
 
             # read-only
-            r = self._run(self._duration, clients, clients, True)
+            r = self._run(self._duration, clients, clients, True, True, csv_queue)
             r.update({'run' : run})
             results.append(r)
 
             # read-write
-            r = self._run(self._duration, clients, clients, False)
+            r = self._run(self._duration, clients, clients, False, True, csv_queue)
             r.update({'run' : run})
             results.append(r)

diff --git a/client/settings.py b/client/settings.py
index 4895f74..9ce8522 100644
--- a/client/settings.py
+++ b/client/settings.py
@@ -31,7 +31,8 @@ OUTPUT_DIR = '/home/user/tmp/perf-output'
 #
 PGBENCH_CONFIG = {
     'runs' : 3,
-    'duration' : 60   # duration of per-client-count benchmark
+    'duration' : 60,  # duration of per-client-count benchmark
+    'csv' : False
 }
 
 # ignore missing file with local config
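The patch only covers the producer side: each benchmark run pushes a row of `[start, end, scale, clients, jobs, mode, duration, latency, tps]` onto `csv_queue`, and `settings.py` gains a `csv` flag. The commit itself does not show where the flag is read or how the queue is drained, so the following is only a minimal sketch of a possible consumer; the helper name `write_pgbench_csv`, the use of `queue.Queue`, and the output filename are assumptions for illustration, not part of the commit.

```python
# Hypothetical consumer for the rows that PgBench._run() puts on csv_queue.
# Only the column order mirrors the csv_queue.put() call in the patch;
# the queue type, helper name and file name are assumed.
import csv
import queue

CSV_HEADER = ['start', 'end', 'scale', 'clients', 'jobs', 'mode',
              'duration', 'latency', 'tps']


def write_pgbench_csv(csv_queue, path='pgbench-results.csv'):
    'drain the queue and write one CSV row per benchmark run'
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(CSV_HEADER)
        while not csv_queue.empty():
            writer.writerow(csv_queue.get())


# Possible wiring when PGBENCH_CONFIG['csv'] is True (pgbench stands for an
# already-configured PgBench instance; this is a guess, not shown in the diff):
#
#     csv_queue = queue.Queue() if PGBENCH_CONFIG['csv'] else None
#     results = pgbench.run_tests(csv_queue)
#     if csv_queue is not None:
#         write_pgbench_csv(csv_queue)

if __name__ == '__main__':
    # stand-alone demo with a single fabricated row (values are made up)
    q = queue.Queue()
    q.put([0, 60, 100, 16, 16, 'read-write', 60, 1.25, 12800.0])
    write_pgbench_csv(q)
```

Since `run_tests()` passes the queue straight through to `_run()`, any object with a `put()` method would work here; gating on the new `csv` setting would have to happen in whatever code constructs the queue and calls `run_tests()`.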