author     Mark Wong    2017-07-20 16:38:15 +0000
committer  Mark Wong    2017-08-04 04:51:01 +0000
commit     60284eb4b0755c2a5b6e6559774690dddd369ee3 (patch)
tree       ff17121af18a83e95c66c6efb7cb3a414e8440f2
parent     d2e86de59149c0078a8d1709c837f13ce5313775 (diff)
pep8 coding style
-rw-r--r--  client/benchmarks/pgbench.py    | 478
-rw-r--r--  client/benchmarks/runner.py     | 200
-rw-r--r--  client/collectors/collector.py  |  36
-rw-r--r--  client/collectors/linux.py      | 134
-rw-r--r--  client/collectors/postgres.py   | 241
-rwxr-xr-x  client/perffarm-client.py       |  60
-rw-r--r--  client/settings.py              |  37
-rw-r--r--  client/utils/cluster.py         | 126
-rw-r--r--  client/utils/git.py             | 106
-rw-r--r--  client/utils/locking.py         |  24
-rw-r--r--  client/utils/logging.py         |  13
-rw-r--r--  client/utils/misc.py            |  78
-rw-r--r--  web/pgperffarm/auth.py          | 287
-rw-r--r--  web/pgperffarm/settings.py      |  14
-rw-r--r--  web/pgperffarm/urls.py          |   7
-rw-r--r--  web/pgperffarm/views.py         |  19
16 files changed, 951 insertions, 909 deletions
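Editor's note: this commit only reindents and rewraps the client and web code to PEP 8; it does not change behavior. As a minimal sketch (not part of the commit), conformance of the reformatted trees could be re-checked with the pycodestyle tool (formerly named pep8); the 'client' and 'web/pgperffarm' paths are taken from the diffstat above and the 79-column limit is the PEP 8 default, both assumptions for illustration.

    # sketch: verify PEP 8 conformance of the reformatted directories
    import pycodestyle

    # hypothetical check; paths mirror the diffstat of this commit
    checker = pycodestyle.StyleGuide(max_line_length=79)
    report = checker.check_files(['client', 'web/pgperffarm'])

    if report.total_errors:
        print('PEP 8 violations found: %d' % report.total_errors)
    else:
        print('all files are PEP 8 clean')

Equivalently, running flake8 or pycodestyle from the repository root against the same paths should report no style errors after this commit. The diff itself is reproduced verbatim below.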
diff --git a/client/benchmarks/pgbench.py b/client/benchmarks/pgbench.py
index d92b2c7..2ddcc1b 100644
--- a/client/benchmarks/pgbench.py
+++ b/client/benchmarks/pgbench.py
@@ -10,251 +10,267 @@ from utils.misc import available_ram, run_cmd
class PgBench(object):
- 'a simple wrapper around pgbench, running TPC-B-like workload by default'
+ 'a simple wrapper around pgbench, running TPC-B-like workload by default'
+
+ # TODO allow running custom scripts, not just the default
+ # read-write/read-only tests
+ # TODO allow running 'prepared' mode
+
+ def __init__(self, bin_path, dbname, runs=3, duration=60):
+ '''
+ bin_path - path to PostgreSQL binaries (dropdb, createdb, psql
+ commands)
+ dbname - name of the database to use
+ runs - number of runs (for each client count)
+ duration - duration of each execution
+ '''
+
+ self._bin = bin_path
+ self._dbname = dbname
+ self._results = {}
+ self._duration = duration
+ self._runs = runs
+
+ @staticmethod
+ def _configure(cpu_count, ram_mbs):
+ 'derive the configurations to benchmark from CPU count and RAM size'
+
+ config = []
+
+ # TODO allow overriding this from a global config
+
+ # scales: 10 (small), 50% of RAM, 200% of RAM
+ # for s in [10, ram_mbs/15/2, ram_mbs*2/15]:
+ for s in [10]:
+ config.append({'scale': int(math.ceil(s / 10) * 10),
+ 'clients': [1, cpu_count, 2 * cpu_count]})
+
+ return config
+
+ def _init(self, scale):
+ """
+ recreate the database (drop + create) and populate it with given scale
+ """
+
+ # initialize results for this dataset scale
+ self._results[scale] = {'init': None, 'warmup': None, 'runs': []}
+
+ log("recreating '%s' database" % (self._dbname,))
+ run_cmd(['dropdb', '--if-exists', self._dbname],
+ env={'PATH': self._bin})
+ run_cmd(['createdb', self._dbname], env={'PATH': self._bin})
+
+ log("initializing pgbench '%s' with scale %s" % (self._dbname, scale))
+ r = run_cmd(['pgbench', '-i', '-s', str(scale), self._dbname],
+ env={'PATH': self._bin})
+
+ # remember the init duration
+ self._results[scale]['init'] = r[2]
+
+ @staticmethod
+ def _parse_results(data):
+ 'extract results (including parameters) from the pgbench output'
+
+ scale = -1
+ r = re.search('scaling factor: ([0-9]+)', data)
+ if r:
+ scale = r.group(1)
+
+ mode = -1
+ r = re.search('query mode: (.+)', data)
+ if r:
+ mode = r.group(1)
+
+ clients = -1
+ r = re.search('number of clients: ([0-9]+)', data)
+ if r:
+ clients = r.group(1)
+
+ threads = -1
+ r = re.search('number of threads: ([0-9]+)', data)
+ if r:
+ threads = r.group(1)
+
+ duration = -1
+ r = re.search('duration: ([0-9]+) s', data)
+ if r:
+ duration = r.group(1)
+
+ latency = -1
+ r = re.search('latency average: ([0-9\.]+) ms', data)
+ if r:
+ latency = r.group(1)
+
+ tps = -1
+ r = re.search('tps = ([0-9]+\.[0-9]+) \(excluding connections '
+ 'establishing\)', data)
+ if r:
+ tps = r.group(1)
+
+ return {'scale': scale,
+ 'mode': mode,
+ 'clients': clients,
+ 'threads': threads,
+ 'duration': duration,
+ 'latency': latency,
+ 'tps': tps}
+
+ @staticmethod
+ def _merge_logs():
+ 'merge log files produced by pgbench threads (aggregated per second)'
+
+ r = {}
+
+ # find pgbench transaction logs in current directory
+ logs = [v for v in os.listdir(os.getcwd())
+ if re.match('pgbench_log.[0-9]+(\.[0-9]+)?', v)]
+
+ # parse each transaction log, and merge it into the existing results
+ for l in logs:
+ worker_log = open(l, 'r')
+ for row in worker_log:
+ values = row.split(' ')
+
+ timestamp = values[0]
+ tps = int(values[1])
+ lat_sum = long(values[2])
+ lat_sum2 = long(values[3])
+ lat_min = int(values[4])
+ lat_max = int(values[5])
+
+ # if first record for the timestamp, store it, otherwise merge
+ if timestamp not in r:
+ r[timestamp] = {'tps': tps,
+ 'lat_sum': lat_sum, 'lat_sum2': lat_sum2,
+ 'lat_min': lat_min, 'lat_max': lat_max}
+ else:
+ r[timestamp]['tps'] += int(tps)
+ r[timestamp]['lat_sum'] += long(lat_sum)
+ r[timestamp]['lat_sum2'] += long(lat_sum2)
+ r[timestamp]['lat_min'] = min(r[timestamp]['lat_min'],
+ int(lat_min))
+ r[timestamp]['lat_max'] = max(r[timestamp]['lat_max'],
+ int(lat_max))
+
+ os.remove(l)
+
+ # now produce a simple text log sorted by the timestamp
+ o = []
+ for t in sorted(r.keys()):
+ o.append('%s %d %d %d %d %d' % (t, r[t]['tps'], r[t]['lat_sum'],
+ r[t]['lat_sum2'], r[t]['lat_min'],
+ r[t]['lat_max']))
+
+ return '\n'.join(o)
+
+ def check_config(self):
+ 'check pgbench configuration (existence of binaries etc.)'
+
+ issues = []
+
+ if not os.path.isdir(self._bin):
+ issues.append("bin_dir='%s' does not exist" % (self._bin,))
+ elif not os.path.exists('%s/pgbench' % (self._bin,)):
+ issues.append("pgbench not found in bin_dir='%s'" % (self._bin,))
+ elif not os.path.exists('%s/createdb' % (self._bin,)):
+ issues.append("createdb not found in bin_dir='%s'" % (self._bin,))
+ elif not os.path.exists('%s/dropdb' % (self._bin,)):
+ issues.append("dropdb not found in bin_dir='%s'" % (self._bin,))
+ elif not os.path.exists('%s/psql' % (self._bin,)):
+ issues.append("psql not found in bin_dir='%s'" % (self._bin,))
+
+ if type(self._duration) is not int:
+ issues.append("duration (%s) needs to be an integer" %
+ self._duration)
+ elif not self._duration >= 1:
+ issues.append("duration (%s) needs to be >= 1" % (self._duration,))
+
+ if type(self._runs) is not int:
+ issues.append("runs (%s) needs to be an integer" % self._duration)
+ elif not self._runs >= 1:
+ issues.append("runs (%s) needs to be >= 1" % (self._runs,))
+
+ return issues
+
+ def _run(self, duration, nclients=1, njobs=1, read_only=False,
+ aggregate=True, csv_queue):
+ 'run pgbench on the database (either a warmup or actual benchmark run)'
+
+ args = ['pgbench', '-c', str(nclients), '-j', str(njobs), '-T',
+ str(duration)]
+
+ # aggregate on per second resolution
+ if aggregate:
+ args.extend(['-l', '--aggregate-interval', '1'])
+
+ if read_only:
+ args.extend(['-S'])
+
+ args.extend([self._dbname])
+
+ # do an explicit checkpoint before each run
+ run_cmd(['psql', self._dbname, '-c', 'checkpoint'],
+ env={'PATH': self._bin})
- # TODO allow running custom scripts, not just the default read-write/read-only tests
- # TODO allow running 'prepared' mode
+ log("pgbench: clients=%d, jobs=%d, aggregate=%s, read-only=%s, "
+ "duration=%d" % (nclients, njobs, aggregate, read_only, duration))
- def __init__(self, bin_path, dbname, runs = 3, duration = 60):
- '''
- bin_path - path to PostgreSQL binaries (dropdb, createdb, psql commands)
- dbname - name of the database to use
- runs - number of runs (for each client count)
- duration - duration of each execution
- '''
+ start = time.time()
+ r = run_cmd(args, env={'PATH': self._bin})
+ end = time.time()
- self._bin = bin_path
- self._dbname = dbname
- self._results = {}
- self._duration = duration
- self._runs = runs
+ r = PgBench._parse_results(r[1])
+ r.update({'read-only': read_only})
+
+ if aggregate:
+ r.update({'transaction-log': PgBench._merge_logs()})
+ r.update({'start': start, 'end': end})
+
+ if csv_queue:
+ csv_queue.put([start, end, r['scale'], nclients, njobs, mode,
+ duration, latency, tps])
- @staticmethod
- def _configure(cpu_count, ram_mbs):
- 'derive the configurations to benchmark from CPU count and RAM size'
+ return r
- config = []
+ def run_tests(self, csv_queue):
+ """
+ execute the whole benchmark, including initialization, warmup and
+ benchmark runs
+ """
- # TODO allow overriding this from a global config
+ # derive configuration for the CPU count / RAM size
+ configs = PgBench._configure(cpu_count(), available_ram())
- # scales: 10 (small), 50% of RAM, 200% of RAM
- #for s in [10, ram_mbs/15/2, ram_mbs*2/15]:
- for s in [10]:
- config.append({'scale' : int(math.ceil(s/10)*10),
- 'clients' : [1, cpu_count, 2*cpu_count]})
-
- return config
-
-
- def _init(self, scale):
- 'recreate the database (drop + create) and populate it with given scale'
-
- # initialize results for this dataset scale
- self._results[scale] = {'init' : None, 'warmup' : None, 'runs' : []}
+ for config in configs:
- log("recreating '%s' database" % (self._dbname,))
- run_cmd(['dropdb', '--if-exists', self._dbname], env={'PATH' : self._bin})
- run_cmd(['createdb', self._dbname], env={'PATH' : self._bin})
+ # init for the dataset scale and warmup
+ self._init(config['scale'])
- log("initializing pgbench '%s' with scale %s" % (self._dbname, scale))
- r = run_cmd(['pgbench', '-i', '-s', str(scale), self._dbname], env={'PATH' : self._bin})
+ warmup = self._run(self._duration, cpu_count(), cpu_count())
+ results = []
- # remember the init duration
- self._results[scale]['init'] = r[2]
+ for run in range(self._runs):
+ log("pgbench : run=%d" % (run,))
- @staticmethod
- def _parse_results(data):
- 'extract results (including parameters) from the pgbench output'
+ for clients in config['clients']:
- scale = -1
- r = re.search('scaling factor: ([0-9]+)', data)
- if r:
- scale = r.group(1)
+ # read-only
+ r = self._run(self._duration, clients, clients, True, True,
+ csv_queue)
+ r.update({'run': run})
+ results.append(r)
- mode = -1
- r = re.search('query mode: (.+)', data)
- if r:
- mode = r.group(1)
-
- clients = -1
- r = re.search('number of clients: ([0-9]+)', data)
- if r:
- clients = r.group(1)
-
- threads = -1
- r = re.search('number of threads: ([0-9]+)', data)
- if r:
- threads = r.group(1)
+ # read-write
+ r = self._run(self._duration, clients, clients, False,
+ True, csv_queue)
+ r.update({'run': run})
+ results.append(r)
- duration = -1
- r = re.search('duration: ([0-9]+) s', data)
- if r:
- duration = r.group(1)
+ self._results[config['scale']] = {
+ 'warmup': warmup,
+ 'runs': results
+ }
- latency = -1
- r = re.search('latency average: ([0-9\.]+) ms', data)
- if r:
- latency = r.group(1)
-
- tps = -1
- r = re.search('tps = ([0-9]+\.[0-9]+) \(excluding connections establishing\)', data)
- if r:
- tps = r.group(1)
-
- return {'scale' : scale,
- 'mode' : mode,
- 'clients' : clients,
- 'threads' : threads,
- 'duration' : duration,
- 'latency' : latency,
- 'tps' : tps}
-
-
- @staticmethod
- def _merge_logs():
- 'merge log files produced by pgbench threads (aggregated per second)'
-
- r = {}
-
- # find pgbench transaction logs in current directory
- logs = [v for v in os.listdir(os.getcwd()) if re.match('pgbench_log.[0-9]+(\.[0-9]+)?', v)]
-
- # parse each transaction log, and merge it into the existing results
- for l in logs:
- worker_log = open(l, 'r')
- for row in worker_log:
- values = row.split(' ')
-
- timestamp = values[0]
- tps = int(values[1])
- lat_sum = long(values[2])
- lat_sum2 = long(values[3])
- lat_min = int(values[4])
- lat_max = int(values[5])
-
- # if first record for the timestamp, store it, otherwise merge
- if timestamp not in r:
- r[timestamp] = {'tps' : tps,
- 'lat_sum' : lat_sum, 'lat_sum2' : lat_sum2,
- 'lat_min' : lat_min, 'lat_max' : lat_max}
- else:
- r[timestamp]['tps'] += int(tps)
- r[timestamp]['lat_sum'] += long(lat_sum)
- r[timestamp]['lat_sum2'] += long(lat_sum2)
- r[timestamp]['lat_min'] = min(r[timestamp]['lat_min'], int(lat_min))
- r[timestamp]['lat_max'] = max(r[timestamp]['lat_max'], int(lat_max))
-
- os.remove(l)
-
- # now produce a simple text log sorted by the timestamp
- o = []
- for t in sorted(r.keys()):
- o.append('%s %d %d %d %d %d' % (t, r[t]['tps'], r[t]['lat_sum'], r[t]['lat_sum2'], r[t]['lat_min'], r[t]['lat_max']))
-
- return '\n'.join(o)
-
-
- def check_config(self):
- 'check pgbench configuration (existence of binaries etc.)'
-
- issues = []
-
- if not os.path.isdir(self._bin):
- issues.append("bin_dir='%s' does not exist" % (self._bin,))
- elif not os.path.exists('%s/pgbench' % (self._bin,)):
- issues.append("pgbench not found in bin_dir='%s'" % (self._bin,))
- elif not os.path.exists('%s/createdb' % (self._bin,)):
- issues.append("createdb not found in bin_dir='%s'" % (self._bin,))
- elif not os.path.exists('%s/dropdb' % (self._bin,)):
- issues.append("dropdb not found in bin_dir='%s'" % (self._bin,))
- elif not os.path.exists('%s/psql' % (self._bin,)):
- issues.append("psql not found in bin_dir='%s'" % (self._bin,))
-
- if type(self._duration) is not int:
- issues.append("duration (%s) needs to be an integer" % (self._duration,))
- elif not self._duration >= 1:
- issues.append("duration (%s) needs to be >= 1" % (self._duration,))
-
- if type(self._runs) is not int:
- issues.append("runs (%s) needs to be an integer" % (self._duration,))
- elif not self._runs >= 1:
- issues.append("runs (%s) needs to be >= 1" % (self._runs,))
-
- return issues
-
-
- def _run(self, duration, nclients=1, njobs=1, read_only=False, aggregate=True, csv_queue):
- 'run pgbench on the database (either a warmup or actual benchmark run)'
-
- args = ['pgbench', '-c', str(nclients), '-j', str(njobs), '-T', str(duration)]
-
- # aggregate on per second resolution
- if aggregate:
- args.extend(['-l', '--aggregate-interval', '1'])
-
- if read_only:
- args.extend(['-S'])
-
- args.extend([self._dbname])
-
- # do an explicit checkpoint before each run
- run_cmd(['psql', self._dbname, '-c', 'checkpoint'], env={'PATH' : self._bin})
-
- log("pgbench : clients=%d, jobs=%d, aggregate=%s, read-only=%s, duration=%d" % (nclients, njobs, aggregate, read_only, duration))
-
- start = time.time()
- r = run_cmd(args, env={'PATH' : self._bin})
- end = time.time()
-
- r = PgBench._parse_results(r[1])
- r.update({'read-only' : read_only})
-
- if aggregate:
- r.update({'transaction-log' : PgBench._merge_logs()})
-
- r.update({'start' : start, 'end' : end})
-
- if csv_queue:
- csv_queue.put([start, end, r['scale'], nclients, njobs, mode, duration, latency, tps])
-
- return r
-
-
- def run_tests(self, csv_queue):
- 'execute the whole benchmark, including initialization, warmup and benchmark runs'
-
- # derive configuration for the CPU count / RAM size
- configs = PgBench._configure(cpu_count(), available_ram())
-
- for config in configs:
-
- # init for the dataset scale and warmup
- self._init(config['scale'])
-
- warmup = self._run(self._duration, cpu_count(), cpu_count())
- results = []
-
- for run in range(self._runs):
-
- log("pgbench : run=%d" % (run,))
-
- for clients in config['clients']:
-
- # read-only
- r = self._run(self._duration, clients, clients, True, True, csv_queue)
- r.update({'run' : run})
- results.append(r)
-
- # read-write
- r = self._run(self._duration, clients, clients, False, True, csv_queue)
- r.update({'run' : run})
- results.append(r)
-
- self._results[config['scale']] = {
- 'warmup' : warmup,
- 'runs' : results
- }
-
- return self._results
+ return self._results
diff --git a/client/benchmarks/runner.py b/client/benchmarks/runner.py
index 1a420fe..6651bf9 100644
--- a/client/benchmarks/runner.py
+++ b/client/benchmarks/runner.py
@@ -4,147 +4,147 @@ import os
from utils.logging import log
from multiprocessing import Process, Queue
-class BenchmarkRunner(object):
- 'manages runs of all the benchmarks, including cluster restarts etc.'
-
- def __init__(self, out_dir, cluster, collector):
- ''
-
- self._output = out_dir # where to store output files
- self._benchmarks = {} # bench name => class implementing the benchmark
- self._configs = {} # config name => (bench name, config)
- self._cluster = cluster
- self._collector = collector
-
-
- def register_benchmark(self, benchmark_name, benchmark_class):
- ''
-
- # FIXME check if a mapping for the same name already exists
- self._benchmarks.update({benchmark_name : benchmark_class})
+class BenchmarkRunner(object):
+ 'manages runs of all the benchmarks, including cluster restarts etc.'
- def register_config(self, config_name, benchmark_name, postgres_config, **kwargs):
- ''
+ def __init__(self, out_dir, cluster, collector):
+ ''
- # FIXME check if a mapping for the same name already exists
- # FIXME check that the benchmark mapping already exists
- self._configs.update({config_name : {'benchmark' : benchmark_name, 'config' : kwargs, 'postgres' : postgres_config}})
+ self._output = out_dir # where to store output files
+ self._benchmarks = {} # bench name => class implementing the benchmark
+ self._configs = {} # config name => (bench name, config)
+ self._cluster = cluster
+ self._collector = collector
+ def register_benchmark(self, benchmark_name, benchmark_class):
+ ''
- def _check_config(self, config_name):
- ''
+ # FIXME check if a mapping for the same name already exists
+ self._benchmarks.update({benchmark_name: benchmark_class})
- log("checking benchmark configuration '%s'" % (config_name,))
+ def register_config(self, config_name, benchmark_name, postgres_config,
+ **kwargs):
+ ''
- # construct the benchmark class for the given config name
- config = self._configs[config_name]
- bench = self._benchmarks[config['benchmark']]
+ # FIXME check if a mapping for the same name already exists
+ # FIXME check that the benchmark mapping already exists
+ self._configs.update({config_name: {'benchmark': benchmark_name,
+ 'config': kwargs,
+ 'postgres': postgres_config}})
- # expand the attribute names
- bench = bench(**config['config'])
+ def _check_config(self, config_name):
+ ''
- # run the tests
- return bench.check_config()
+ log("checking benchmark configuration '%s'" % (config_name,))
+ # construct the benchmark class for the given config name
+ config = self._configs[config_name]
+ bench = self._benchmarks[config['benchmark']]
- def check(self):
- 'check configurations for all benchmarks'
+ # expand the attribute names
+ bench = bench(**config['config'])
- issues = {}
+ # run the tests
+ return bench.check_config()
- if os.path.exists(self._output):
- issues['global'] = ["output directory '%s' already exists" % (self._output,)]
+ def check(self):
+ 'check configurations for all benchmarks'
- for config_name in self._configs:
- t = self._check_config(config_name)
- if t:
- issues[config_name] = t
+ issues = {}
- return issues
+ if os.path.exists(self._output):
+ issues['global'] = ["output directory '%s' already exists" %
+ (self._output,)]
+ for config_name in self._configs:
+ t = self._check_config(config_name)
+ if t:
+ issues[config_name] = t
- def _run_config(self, config_name):
- ''
+ return issues
- log("running benchmark configuration '%s'" % (config_name,))
+ def _run_config(self, config_name):
+ ''
- # construct the benchmark class for the given config name
- config = self._configs[config_name]
- bench = self._benchmarks[config['benchmark']]
+ log("running benchmark configuration '%s'" % (config_name,))
- # expand the attribute names
- bench = bench(**config['config'])
+ # construct the benchmark class for the given config name
+ config = self._configs[config_name]
+ bench = self._benchmarks[config['benchmark']]
- self._cluster.start(config = config['postgres'])
+ # expand the attribute names
+ bench = bench(**config['config'])
- # start collector(s) of additional info
- self._collector.start()
+ self._cluster.start(config=config['postgres'])
- # if requested output to CSV, create a queue and collector process
- csv_queue = None
- csv_collector = None
- if config['benchmark']['csv']:
- csv_queue = Queue()
- csv_collector = Process(target=csv_collect_results, args=(config_name, csv_queue))
- csv_collector.start()
+ # start collector(s) of additional info
+ self._collector.start()
- # run the tests
- r = bench.run_tests(csv_queue)
+ # if requested output to CSV, create a queue and collector process
+ csv_queue = None
+ csv_collector = None
+ if config['benchmark']['csv']:
+ csv_queue = Queue()
+ csv_collector = Process(target=csv_collect_results,
+ args=(config_name, csv_queue))
+ csv_collector.start()
- # notify the result collector to end and wait for it to terminate
- if csv_queue:
- csv_queue.put("STOP")
- csv_collector.join()
+ # run the tests
+ r = bench.run_tests(csv_queue)
- # stop the cluster and collector
- log("terminating collectors")
- self._collector.stop()
- self._cluster.stop()
+ # notify the result collector to end and wait for it to terminate
+ if csv_queue:
+ csv_queue.put("STOP")
+ csv_collector.join()
- # merge data from the collectors into the JSON document with results
- r.update(self._collector.result())
+ # stop the cluster and collector
+ log("terminating collectors")
+ self._collector.stop()
+ self._cluster.stop()
- # read the postgres log
- with open('pg.log', 'r') as f:
- r['postgres-log'] = f.read()
+ # merge data from the collectors into the JSON document with results
+ r.update(self._collector.result())
- r['meta'] = {'benchmark' : config['benchmark'],
- 'name' : config_name}
+ # read the postgres log
+ with open('pg.log', 'r') as f:
+ r['postgres-log'] = f.read()
- os.remove('pg.log')
+ r['meta'] = {'benchmark': config['benchmark'],
+ 'name': config_name}
- with open('%s/%s.json' % (self._output, config_name), 'w') as f:
- f.write(json.dumps(r, indent=4))
+ os.remove('pg.log')
+ with open('%s/%s.json' % (self._output, config_name), 'w') as f:
+ f.write(json.dumps(r, indent=4))
- def run(self):
- 'run all the configured benchmarks'
+ def run(self):
+ 'run all the configured benchmarks'
- os.mkdir(self._output)
+ os.mkdir(self._output)
- for config_name in self._configs:
- self._run_config(config_name)
+ for config_name in self._configs:
+ self._run_config(config_name)
def csv_collect_results(bench_name, queue):
- 'collect results into a CSV files (through a queue)'
+ 'collect results into a CSV files (through a queue)'
- with open("%s.csv" % (bench_name,), 'w') as results_file:
+ with open("%s.csv" % (bench_name,), 'w') as results_file:
- # collect data from the queue - once we get a plain string (instead of
- # a list), it's a sign to terminate the collector
- while True:
+ # collect data from the queue - once we get a plain string (instead of
+ # a list), it's a sign to terminate the collector
+ while True:
- v = queue.get()
+ v = queue.get()
- # if we got a string, it means 'terminate'
- if isinstance(v, str):
- log("terminating CSV result collector")
- return
+ # if we got a string, it means 'terminate'
+ if isinstance(v, str):
+ log("terminating CSV result collector")
+ return
- v = [str(x) for x in v]
+ v = [str(x) for x in v]
- # otherwise we expect the value to be a list, and we just print it
- results_file.write(bench_name + "\t" + "\t".join(v) + "\n")
- results_file.flush()
+ # otherwise we expect the value to be a list, and we just print it
+ results_file.write(bench_name + "\t" + "\t".join(v) + "\n")
+ results_file.flush()
diff --git a/client/collectors/collector.py b/client/collectors/collector.py
index d73ef5b..743e11b 100644
--- a/client/collectors/collector.py
+++ b/client/collectors/collector.py
@@ -1,28 +1,24 @@
class MultiCollector(object):
- 'a collector combining multiple other collectors'
+ 'a collector combining multiple other collectors'
- def __init__(self):
- self._collectors = {}
+ def __init__(self):
+ self._collectors = {}
+ def register(self, name, collector):
+ self._collectors[name] = collector
- def register(self, name, collector):
- self._collectors[name] = collector
+ def start(self):
+ for name in self._collectors:
+ self._collectors[name].start()
+ def stop(self):
+ for name in self._collectors:
+ self._collectors[name].stop()
- def start(self):
- for name in self._collectors:
- self._collectors[name].start()
+ def result(self):
+ r = {}
+ for name in self._collectors:
+ r.update({name: self._collectors[name].result()})
-
- def stop(self):
- for name in self._collectors:
- self._collectors[name].stop()
-
-
- def result(self):
- r = {}
- for name in self._collectors:
- r.update({name : self._collectors[name].result()})
-
- return r
+ return r
diff --git a/client/collectors/linux.py b/client/collectors/linux.py
index 605e2c3..8bacacf 100644
--- a/client/collectors/linux.py
+++ b/client/collectors/linux.py
@@ -6,103 +6,99 @@ from utils.misc import run_cmd
class LinuxCollector(object):
- 'collects various Linux-specific statistics (cpuinfo, mounts, sar)'
+ 'collects various Linux-specific statistics (cpuinfo, mounts, sar)'
- def __init__(self, sar_path = '/var/log/sa'):
- self._start_ts = None
- self._end_ts = None
- self._sar = sar_path
+ def __init__(self, sar_path='/var/log/sa'):
+ self._start_ts = None
+ self._end_ts = None
+ self._sar = sar_path
+ def start(self):
+ self._start_ts = datetime.now()
- def start(self):
- self._start_ts = datetime.now()
+ def stop(self):
+ self._end_ts = datetime.now()
+ def result(self):
+ 'build the results'
- def stop(self):
- self._end_ts = datetime.now()
+ r = {'sysctl': self._collect_sysctl()}
+ # ignore sar if we've not found it
+ sar = self._collect_sar_stats()
+ if sar:
+ r['sar'] = sar
- def result(self):
- 'build the results'
+ r.update(self._collect_system_info())
- r = {'sysctl' : self._collect_sysctl()}
+ return r
- # ignore sar if we've not found it
- sar = self._collect_sar_stats()
- if sar:
- r['sar'] = sar
+ def _collect_sar_stats(self):
+ 'extracts all data available in sar, filters by timestamp range'
- r.update(self._collect_system_info())
+ sar = {}
+ log("collecting sar stats")
- return r
+ d = self._start_ts.date()
+ while d <= self._end_ts.date():
+ # FIXME maybe skip if the file does not exist
+ filename = '%(path)s/sa%(day)s' % {'path': self._sar,
+ 'day': d.strftime('%d')}
- def _collect_sar_stats(self):
- 'extracts all data available in sar, filters by timestamp range'
+ # if the sar file does not exist, skip it
+ if os.path.isfile(filename):
- sar = {}
- log("collecting sar stats")
+ log("extracting sar data from '%s'" % (filename,))
- d = self._start_ts.date()
- while d <= self._end_ts.date():
+ # need to use the right combination of start/end timestamps
+ s = self._start_ts.strftime('%H:%M:%S')
+ e = self._end_ts.strftime('%H:%M:%S')
- # FIXME maybe skip if the file does not exist
- filename = '%(path)s/sa%(day)s' % {'path' : self._sar, 'day' : d.strftime('%d')}
+ if d == self._start_ts.date() and d == self._end_ts.date():
+ r = run_cmd(['sar', '-A', '-p', '-s', s, '-e', e, '-f',
+ filename])
+ elif d == self._start_ts.date():
+ r = run_cmd(['sar', '-A', '-p', '-s', s, '-f', filename])
+ elif d == self._end_ts.date():
+ r = run_cmd(['sar', '-A', '-p', '-e', e, '-f', filename])
+ else:
+ r = run_cmd(['sar', '-A', '-p', '-f', filename])
- # if the sar file does not exist, skip it
- if os.path.isfile(filename):
+ sar[str(d)] = r[1]
- log("extracting sar data from '%s'" % (filename,))
+ else:
- # need to use the right combination of start/end timestamps
- s = self._start_ts.strftime('%H:%M:%S')
- e = self._end_ts.strftime('%H:%M:%S')
+ log("file '%s' does not exist, skipping" % (filename,))
- if d == self._start_ts.date() and d == self._end_ts.date():
- r = run_cmd(['sar', '-A', '-p', '-s', s, '-e', e, '-f', filename])
- elif d == self._start_ts.date():
- r = run_cmd(['sar', '-A', '-p', '-s', s, '-f', filename])
- elif d == self._end_ts.date():
- r = run_cmd(['sar', '-A', '-p', '-e', e, '-f', filename])
- else:
- r = run_cmd(['sar', '-A', '-p', '-f', filename])
+ # proceed to the next day
+ d += timedelta(days=1)
- sar[str(d)] = r[1]
+ if not sar:
+ return None
- else:
+ return sar
- log("file '%s' does not exist, skipping" % (filename,))
+ def _collect_sysctl(self):
+ 'collect kernel configuration'
- # proceed to the next day
- d += timedelta(days=1)
+ log("collecting sysctl")
+ r = run_cmd(['/usr/sbin/sysctl', '-a'])
- if not sar:
- return None
+ return r[1]
- return sar
+ def _collect_system_info(self):
+ 'collect cpuinfo, meminfo, mounts'
+ system = {}
- def _collect_sysctl(self):
- 'collect kernel configuration'
+ with open('/proc/cpuinfo', 'r') as f:
+ system['cpuinfo'] = f.read()
- log("collecting sysctl")
- r = run_cmd(['/usr/sbin/sysctl', '-a'])
+ with open('/proc/meminfo', 'r') as f:
+ system['meminfo'] = f.read()
- return r[1]
+ with open('/proc/mounts', 'r') as f:
+ system['mounts'] = f.read()
-
- def _collect_system_info(self):
- 'collect cpuinfo, meminfo, mounts'
-
- system = {}
-
- with open('/proc/cpuinfo', 'r') as f:
- system['cpuinfo'] = f.read()
-
- with open('/proc/meminfo', 'r') as f:
- system['meminfo'] = f.read()
-
- with open('/proc/mounts', 'r') as f:
- system['mounts'] = f.read()
-
- return system
+ return system
diff --git a/client/collectors/postgres.py b/client/collectors/postgres.py
index 7096c27..3d4fc37 100644
--- a/client/collectors/postgres.py
+++ b/client/collectors/postgres.py
@@ -8,161 +8,174 @@ import time
from multiprocessing import Process, Queue
from utils.logging import log
-class PostgresCollector(object):
- 'collects basic PostgreSQL-level statistics (bgwriter, databases, tables, indexes)'
-
- def __init__(self, dbname):
- self._dbname = dbname
-
- def start(self):
- self._in_queue = Queue()
- self._out_queue = Queue()
- self._worker = Process(target=run_collector, args=(self._in_queue, self._out_queue, self._dbname))
- self._worker.start()
+class PostgresCollector(object):
+ """
+ collects basic PostgreSQL-level statistics (bgwriter, databases, tables,
+ indexes)
+ """
+ def __init__(self, dbname):
+ self._dbname = dbname
- def stop(self):
+ def start(self):
+ self._in_queue = Queue()
+ self._out_queue = Queue()
+ self._worker = Process(target=run_collector,
+ args=(self._in_queue, self._out_queue,
+ self._dbname))
+ self._worker.start()
- # signal the worker process to stop by writing a value into the queue
- self._in_queue.put(True)
+ def stop(self):
- log("stopping the PostgreSQL statistics collector")
+ # signal the worker process to stop by writing a value into the queue
+ self._in_queue.put(True)
- # Wait for collector to place result into the output queue. This needs
- # to happen before calling join() otherwise it causes a deadlock.
- log("waiting for collector result in a queue")
- self._result = self._out_queue.get()
+ log("stopping the PostgreSQL statistics collector")
- # And wait for the worker to terminate. This should be pretty fast as
- # the collector places result into the queue right before terminating.
- log("waiting for collector process to terminate")
- self._worker.join()
+ # Wait for collector to place result into the output queue. This needs
+ # to happen before calling join() otherwise it causes a deadlock.
+ log("waiting for collector result in a queue")
+ self._result = self._out_queue.get()
- self._worker = None
- self._in_queue = None
- self._out_queue = None
+ # And wait for the worker to terminate. This should be pretty fast as
+ # the collector places result into the queue right before terminating.
+ log("waiting for collector process to terminate")
+ self._worker.join()
+ self._worker = None
+ self._in_queue = None
+ self._out_queue = None
- def result(self):
- return self._result
+ def result(self):
+ return self._result
def run_collector(in_queue, out_queue, dbname, interval=1.0):
- 'collector code for a separate process, communicating through a pair of queues'
-
- bgwriter_log = None
- tables_log = None
- indexes_log = None
- database_log = None
+ """
+ collector code for a separate process, communicating through a pair of
+ queues
+ """
+
+ bgwriter_log = None
+ tables_log = None
+ indexes_log = None
+ database_log = None
- # get current timestamp
- ts = time.time()
+ # get current timestamp
+ ts = time.time()
- while True:
+ while True:
- # wait until the next tick
- ts += interval
+ # wait until the next tick
+ ts += interval
- # if we're behind, skip forward
- if ts < time.time():
- continue
+ # if we're behind, skip forward
+ if ts < time.time():
+ continue
- # sleep (but only for the remaining time, to prevent drift)
- time.sleep(ts - time.time())
+ # sleep (but only for the remaining time, to prevent drift)
+ time.sleep(ts - time.time())
- # if we've received message in the input queue (not empty), terminate
- if not in_queue.empty():
- log("PostgreSQL collector received request to terminate")
- break
+ # if we've received message in the input queue (not empty), terminate
+ if not in_queue.empty():
+ log("PostgreSQL collector received request to terminate")
+ break
- # open connection to the benchmark database (if can't open, continue)
- # notice this is intentionally after the wait, so we'll wait before
- # next connection attempt
- try:
- conn = psycopg2.connect('host=localhost dbname=%s' % (dbname,))
- cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
- except Exception as ex:
- continue
+ # open connection to the benchmark database (if can't open, continue)
+ # notice this is intentionally after the wait, so we'll wait before
+ # next connection attempt
+ try:
+ conn = psycopg2.connect('host=localhost dbname=%s' % (dbname,))
+ cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
+ except Exception as ex:
+ continue
- # background writer stats
- cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * FROM pg_stat_bgwriter')
+ # background writer stats
+ cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * '
+ 'FROM pg_stat_bgwriter')
- # on the first iteration, construct the CSV files
- if not bgwriter_log:
- fields = [desc[0] for desc in cur.description]
- bgwriter_log = csv.DictWriter(open('bgwriter.csv', 'w'), fields)
- bgwriter_log.writeheader()
+ # on the first iteration, construct the CSV files
+ if not bgwriter_log:
+ fields = [desc[0] for desc in cur.description]
+ bgwriter_log = csv.DictWriter(open('bgwriter.csv', 'w'), fields)
+ bgwriter_log.writeheader()
- bgwriter_log.writerows(cur.fetchall())
+ bgwriter_log.writerows(cur.fetchall())
- # TODO we can assume statistics for most objects (tables, indexes) won't
- # change every second, so we can optimize the amount of data by detecting
- # changes and only keeping the two rows next to it
+ # TODO we can assume statistics for most objects (tables, indexes)
+ # won't change every second, so we can optimize the amount of data by
+ # detecting changes and only keeping the two rows next to it
- # table statistics
- cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * FROM pg_stat_all_tables JOIN pg_statio_all_tables USING (relid, schemaname, relname)')
+ # table statistics
+ cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * '
+ 'FROM pg_stat_all_tables JOIN pg_statio_all_tables '
+ 'USING (relid, schemaname, relname)')
- # on the first iteration, construct the CSV files
- if not tables_log:
- fields = [desc[0] for desc in cur.description]
- tables_log = csv.DictWriter(open('tables.csv', 'w'), fields)
- tables_log.writeheader()
+ # on the first iteration, construct the CSV files
+ if not tables_log:
+ fields = [desc[0] for desc in cur.description]
+ tables_log = csv.DictWriter(open('tables.csv', 'w'), fields)
+ tables_log.writeheader()
- tables_log.writerows(cur.fetchall())
+ tables_log.writerows(cur.fetchall())
- # index statistics
- cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * FROM pg_stat_all_indexes JOIN pg_statio_all_indexes USING (relid, indexrelid, schemaname, relname, indexrelname)')
+ # index statistics
+ cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * '
+ 'FROM pg_stat_all_indexes JOIN pg_statio_all_indexes '
+ 'USING (relid, indexrelid, schemaname, relname, '
+ 'indexrelname)')
- # on the first iteration, construct the CSV files
- if not indexes_log:
- fields = [desc[0] for desc in cur.description]
- indexes_log = csv.DictWriter(open('indexes.csv', 'w'), fields)
- indexes_log.writeheader()
+ # on the first iteration, construct the CSV files
+ if not indexes_log:
+ fields = [desc[0] for desc in cur.description]
+ indexes_log = csv.DictWriter(open('indexes.csv', 'w'), fields)
+ indexes_log.writeheader()
- indexes_log.writerows(cur.fetchall())
+ indexes_log.writerows(cur.fetchall())
- # database statistics
- cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * FROM pg_stat_database')
+ # database statistics
+ cur.execute('SELECT EXTRACT(EPOCH FROM now()) AS ts, * '
+ 'FROM pg_stat_database')
- # on the first iteration, construct the CSV files
- if not database_log:
- fields = [desc[0] for desc in cur.description]
- database_log = csv.DictWriter(open('database.csv', 'w'), fields)
- database_log.writeheader()
+ # on the first iteration, construct the CSV files
+ if not database_log:
+ fields = [desc[0] for desc in cur.description]
+ database_log = csv.DictWriter(open('database.csv', 'w'), fields)
+ database_log.writeheader()
- database_log.writerows(cur.fetchall())
+ database_log.writerows(cur.fetchall())
- conn.close()
+ conn.close()
- log("PostgreSQL collector generates CSV results")
+ log("PostgreSQL collector generates CSV results")
- # close the CSV writers
- bgwriter_log = None
- tables_log = None
- indexes_log = None
- database_log = None
+ # close the CSV writers
+ bgwriter_log = None
+ tables_log = None
+ indexes_log = None
+ database_log = None
- result = {}
+ result = {}
- with open('bgwriter.csv', 'r') as f:
- result.update({'bgwriter' : f.read()})
+ with open('bgwriter.csv', 'r') as f:
+ result.update({'bgwriter': f.read()})
- with open('tables.csv', 'r') as f:
- result.update({'tables' : f.read()})
+ with open('tables.csv', 'r') as f:
+ result.update({'tables': f.read()})
- with open('indexes.csv', 'r') as f:
- result.update({'indexes' : f.read()})
+ with open('indexes.csv', 'r') as f:
+ result.update({'indexes': f.read()})
- with open('database.csv', 'r') as f:
- result.update({'database' : f.read()})
+ with open('database.csv', 'r') as f:
+ result.update({'database': f.read()})
- # remove the files
- os.remove('bgwriter.csv')
- os.remove('tables.csv')
- os.remove('indexes.csv')
- os.remove('database.csv')
+ # remove the files
+ os.remove('bgwriter.csv')
+ os.remove('tables.csv')
+ os.remove('indexes.csv')
+ os.remove('database.csv')
- out_queue.put(result)
+ out_queue.put(result)
- log("PostgreSQL collector put results into output queue and terminates")
+ log("PostgreSQL collector put results into output queue and terminates")
diff --git a/client/perffarm-client.py b/client/perffarm-client.py
index 709ac5a..263df16 100755
--- a/client/perffarm-client.py
+++ b/client/perffarm-client.py
@@ -20,46 +20,50 @@ from settings import *
if __name__ == '__main__':
- with FileLock('.lock') as lock:
+ with FileLock('.lock') as lock:
- # clone repository and build the sources
+ # clone repository and build the sources
- repository = GitRepository(url = GIT_URL, path = REPOSITORY_PATH)
+ repository = GitRepository(url=GIT_URL, path=REPOSITORY_PATH)
- repository.clone_or_update()
- repository.build_and_install(path = BUILD_PATH)
+ repository.clone_or_update()
+ repository.build_and_install(path=BUILD_PATH)
- # build and start a postgres cluster
+ # build and start a postgres cluster
- cluster = PgCluster(bin_path = BIN_PATH, data_path = DATADIR_PATH)
+ cluster = PgCluster(bin_path=BIN_PATH, data_path=DATADIR_PATH)
- # create collectors
+ # create collectors
- collectors = MultiCollector()
+ collectors = MultiCollector()
- collectors.register('system', LinuxCollector())
- collectors.register('postgres', PostgresCollector(dbname=DATABASE_NAME))
+ collectors.register('system', LinuxCollector())
+ collectors.register('postgres',
+ PostgresCollector(dbname=DATABASE_NAME))
- runner = BenchmarkRunner(OUTPUT_DIR, cluster, collectors)
+ runner = BenchmarkRunner(OUTPUT_DIR, cluster, collectors)
- # register the three tests we currently have
+ # register the three tests we currently have
- runner.register_benchmark('pgbench', PgBench)
+ runner.register_benchmark('pgbench', PgBench)
- # register one config for each benchmark (should be moved to a config file)
+ # register one config for each benchmark (should be moved to a config
+ # file)
- runner.register_config('pgbench-basic', 'pgbench', dbname = DATABASE_NAME,
- bin_path = ('%s/bin' % (BUILD_PATH,)),
- postgres_config = POSTGRES_CONFIG,
- **PGBENCH_CONFIG)
+ runner.register_config('pgbench-basic',
+ 'pgbench',
+ dbname=DATABASE_NAME,
+ bin_path=('%s/bin' % (BUILD_PATH,)),
+ postgres_config=POSTGRES_CONFIG,
+ **PGBENCH_CONFIG)
- # check configuration and report all issues
- issues = runner.check()
+ # check configuration and report all issues
+ issues = runner.check()
- if issues:
- # print the issues
- for k in issues:
- for v in issues[k]:
- print k, ':', v
- else:
- runner.run()
+ if issues:
+ # print the issues
+ for k in issues:
+ for v in issues[k]:
+ print k, ':', v
+ else:
+ runner.run()
diff --git a/client/settings.py b/client/settings.py
index 9ce8522..a3d7239 100644
--- a/client/settings.py
+++ b/client/settings.py
@@ -8,17 +8,17 @@ BUILD_PATH = '/home/user/tmp/bin-postgres'
BIN_PATH = os.path.join(BUILD_PATH, 'bin')
DATADIR_PATH = '/home/user/tmp/data-postgres'
-POSTGRES_CONFIG = {'shared_buffers' : '1GB',
- 'work_mem' : '64MB',
- 'maintenance_work_mem' : '128MB',
- 'min_wal_size' : '2GB',
- 'max_wal_size' : '4GB',
- 'log_line_prefix' : '%n %t ',
- 'log_checkpoints' : 'on',
- 'log_autovacuum_min_duration' : '0',
- 'log_temp_files' : '32',
- 'checkpoint_timeout' : '15min',
- 'checkpoint_completion_target' : '0.9'}
+POSTGRES_CONFIG = {'shared_buffers': '1GB',
+ 'work_mem': '64MB',
+ 'maintenance_work_mem': '128MB',
+ 'min_wal_size': '2GB',
+ 'max_wal_size': '4GB',
+ 'log_line_prefix': '%n %t ',
+ 'log_checkpoints': 'on',
+ 'log_autovacuum_min_duration': '0',
+ 'log_temp_files': '32',
+ 'checkpoint_timeout': '15min',
+ 'checkpoint_completion_target': '0.9'}
DATABASE_NAME = 'perf'
@@ -30,14 +30,15 @@ OUTPUT_DIR = '/home/user/tmp/perf-output'
# duration - duration (in seconds) of a single benchmark (per client count)
#
PGBENCH_CONFIG = {
- 'runs' : 3,
- 'duration' : 60, # duration of per-client-count benchmark
- 'csv' : False
+ 'runs': 3,
+ 'duration': 60, # duration of per-client-count benchmark
+ 'csv': False
}
# ignore missing file with local config
try:
- from settings_local import *
-except:
- print >> sys.stderr, "ERROR: local configuration (settings_local.py) not found"
- sys.exit(1)
+ from settings_local import *
+except Exception as e:
+ print >> sys.stderr, "ERROR: local configuration (settings_local.py) " \
+ "not found"
+ sys.exit(1)
diff --git a/client/utils/cluster.py b/client/utils/cluster.py
index 6e48970..b81eaef 100644
--- a/client/utils/cluster.py
+++ b/client/utils/cluster.py
@@ -9,64 +9,68 @@ from utils.logging import log
class PgCluster(object):
- 'basic manipulation of postgres cluster (init, start, stop, destroy)'
-
- def __init__(self, bin_path, data_path):
- self._bin = bin_path
- self._data = data_path
-
-
- def _initdb(self):
- 'initialize the data directory'
-
- with TemporaryFile() as strout:
- log("initializing cluster into '%s'" % (self._data,))
- call(['pg_ctl', '-D', self._data, 'init'], env={'PATH' : self._bin}, stdout=strout, stderr=STDOUT)
-
-
- def _configure(self, config):
- 'update configuration of a cluster (using postgresql.auto.conf)'
-
- log("configuring cluster in '%s'" % (self._data,))
- with open('%s/postgresql.auto.conf' % (self._data,), 'a+') as f:
- for k in config:
- f.write("%(name)s = '%(value)s'\n" % {'name' : k, 'value' : config[k]})
-
-
- def _destroy(self):
- 'forced cleanup of possibly existing cluster processes and data directory'
-
- with TemporaryFile() as strout:
- log("killing all existing postgres processes")
- call(['killall', 'postgres'], stdout=strout, stderr=STDOUT)
-
- # remove the data directory
- if os.path.exists(self._data):
- shutil.rmtree(self._data)
-
-
- def start(self, config, destroy=True):
- 'init, configure and start the cluster'
-
- # cleanup any previous cluster running, remove data dir if it exists
- if destroy:
- self._destroy()
-
- self._initdb()
- self._configure(config)
-
- with TemporaryFile() as strout:
- log("starting cluster in '%s' using '%s' binaries" % (self._data, self._bin))
- call(['pg_ctl', '-D', self._data, '-l', 'pg.log', '-w', 'start'], env={'PATH' : self._bin}, stdout=strout, stderr=STDOUT)
-
-
- def stop(self, destroy=True):
- 'stop the cluster'
-
- with TemporaryFile() as strout:
- log("stopping cluster in '%s' using '%s' binaries" % (self._data, self._bin))
- call(['pg_ctl', '-D', self._data, '-w', '-t', '60', 'stop'], env={'PATH' : self._bin}, stdout=strout, stderr=STDOUT)
-
- # kill any remaining processes, remove the data dir
- if destroy:
- self._destroy()
+ 'basic manipulation of postgres cluster (init, start, stop, destroy)'
+
+ def __init__(self, bin_path, data_path):
+ self._bin = bin_path
+ self._data = data_path
+
+ def _initdb(self):
+ 'initialize the data directory'
+
+ with TemporaryFile() as strout:
+ log("initializing cluster into '%s'" % (self._data,))
+ call(['pg_ctl', '-D', self._data, 'init'], env={'PATH': self._bin},
+ stdout=strout, stderr=STDOUT)
+
+ def _configure(self, config):
+ 'update configuration of a cluster (using postgresql.auto.conf)'
+
+ log("configuring cluster in '%s'" % (self._data,))
+ with open('%s/postgresql.auto.conf' % (self._data,), 'a+') as f:
+ for k in config:
+ f.write("%(name)s = '%(value)s'\n" %
+ {'name': k, 'value': config[k]})
+
+ def _destroy(self):
+ """
+ forced cleanup of possibly existing cluster processes and data
+ directory
+ """
+
+ with TemporaryFile() as strout:
+ log("killing all existing postgres processes")
+ call(['killall', 'postgres'], stdout=strout, stderr=STDOUT)
+
+ # remove the data directory
+ if os.path.exists(self._data):
+ shutil.rmtree(self._data)
+
+ def start(self, config, destroy=True):
+ 'init, configure and start the cluster'
+
+ # cleanup any previous cluster running, remove data dir if it exists
+ if destroy:
+ self._destroy()
+
+ self._initdb()
+ self._configure(config)
+
+ with TemporaryFile() as strout:
+ log("starting cluster in '%s' using '%s' binaries" %
+ (self._data, self._bin))
+ call(['pg_ctl', '-D', self._data, '-l', 'pg.log', '-w', 'start'],
+ env={'PATH': self._bin}, stdout=strout, stderr=STDOUT)
+
+ def stop(self, destroy=True):
+ 'stop the cluster'
+
+ with TemporaryFile() as strout:
+ log("stopping cluster in '%s' using '%s' binaries" %
+ (self._data, self._bin))
+ call(['pg_ctl', '-D', self._data, '-w', '-t', '60', 'stop'],
+ env={'PATH': self._bin}, stdout=strout, stderr=STDOUT)
+
+ # kill any remaining processes, remove the data dir
+ if destroy:
+ self._destroy()
diff --git a/client/utils/git.py b/client/utils/git.py
index 11b00c1..587e5a4 100644
--- a/client/utils/git.py
+++ b/client/utils/git.py
@@ -8,75 +8,75 @@ from utils.logging import log
class GitRepository(object):
- 'a simple management of a git repository / source building'
+ 'a simple management of a git repository / source building'
- def __init__(self, url, path):
- 'url - repository URL, path - local directory for the clone'
+ def __init__(self, url, path):
+ 'url - repository URL, path - local directory for the clone'
- self._url = url
- self._path = path
+ self._url = url
+ self._path = path
+ def _exists(self):
+ 'check that a local repository clone exists'
- def _exists(self):
- 'check that a local repository clone exists'
+ # TODO verify that the repository uses the proper upstream url
+ return os.path.exists(self._path)
- # TODO verify that the repository uses the proper upstream url
- return os.path.exists(self._path)
+ def _clone(self):
+ ''
+ log("cloning repository '%s' to '%s'" % (self._url, self._path))
+ with TemporaryFile() as strout:
+ call(['git', 'clone', self._url, self._path], stdout=strout,
+ stderr=STDOUT)
- def _clone(self):
- ''
- log("cloning repository '%s' to '%s'" % (self._url, self._path))
+ def _update(self):
+ 'update an existing repository clone'
- with TemporaryFile() as strout:
- call(['git', 'clone', self._url, self._path], stdout=strout, stderr=STDOUT)
+ log("updating repository '%s' from '%s'" % (self._path, self._url))
+ # simply call git-pull and redirect stdout/stderr
+ # FIXME should verify that the repository uses the proper upstream url
+ with TemporaryFile() as strout:
+ call(['git', 'pull'], cwd=self._path, stdout=strout, stderr=STDOUT)
- def _update(self):
- 'update an existing repository clone'
+ def current_commit(self):
+ 'returns current commit hash'
- log("updating repository '%s' from '%s'" % (self._path, self._url))
+ with TemporaryFile() as strout:
+ call(['git', 'rev-parse', 'HEAD'], cwd=self._path, stdout=strout,
+ stderr=STDOUT)
+ strout.seek(0)
+ return strout.read().strip()
- # simply call git-pull and redirect stdout/stderr
- # FIXME should verify that the repository uses the proper upstream url
- with TemporaryFile() as strout:
- call(['git', 'pull'], cwd=self._path, stdout=strout, stderr=STDOUT)
+ def clone_or_update(self):
+ 'refreshes the repository (either clone from scratch or refresh)'
+ if self._exists():
+ self._update()
+ else:
+ self._clone()
- def current_commit(self):
- 'returns current commit hash'
+ log("current commit '%s'" % (self.current_commit(),))
- with TemporaryFile() as strout:
- call(['git', 'rev-parse', 'HEAD'], cwd=self._path, stdout=strout, stderr=STDOUT)
- strout.seek(0)
- return strout.read().strip()
+ def build_and_install(self, path, remove=True):
+ 'builds and installs the sources'
+ # TODO collect output of configure and make commands
+ if os.path.exists(path):
+ shutil.rmtree(path)
- def clone_or_update(self):
- 'refreshes the repository (either clone from scratch or refresh)'
+ with TemporaryFile() as strout:
+ log("configuring sources in '%s' with prefix '%s'" %
+ (self._path, path))
+ call(['./configure', '--prefix', path], cwd=self._path,
+ stdout=strout, stderr=STDOUT)
- if self._exists():
- self._update()
- else:
- self._clone()
+ with TemporaryFile() as strout:
+ log("building sources and installing into '%s'" % (path,))
- log("current commit '%s'" % (self.current_commit(),))
-
-
- def build_and_install(self, path, remove=True):
- 'builds and installs the sources'
-
- # TODO collect output of configure and make commands
- if os.path.exists(path):
- shutil.rmtree(path)
-
- with TemporaryFile() as strout:
- log("configuring sources in '%s' with prefix '%s'" % (self._path, path))
- call(['./configure', '--prefix', path], cwd=self._path, stdout=strout, stderr=STDOUT)
-
- with TemporaryFile() as strout:
- log("building sources and installing into '%s'" % (path,))
-
- # cleanup and build using multiple cpus
- call(['make', '-s', 'clean'], cwd=self._path, stdout=strout, stderr=STDOUT)
- call(['make', '-s', '-j', str(cpu_count()), 'install'], cwd=self._path, stdout=strout, stderr=STDOUT)
+ # cleanup and build using multiple cpus
+ call(['make', '-s', 'clean'], cwd=self._path, stdout=strout,
+ stderr=STDOUT)
+ call(['make', '-s', '-j', str(cpu_count()), 'install'],
+ cwd=self._path, stdout=strout, stderr=STDOUT)
diff --git a/client/utils/locking.py b/client/utils/locking.py
index dfc8f63..d3cbd64 100644
--- a/client/utils/locking.py
+++ b/client/utils/locking.py
@@ -3,19 +3,19 @@ import os
class FileLock():
- 'a simple wrapper around file lock'
+ 'a simple wrapper around file lock'
- def __init__(self, filename):
- self._file = open(filename, 'w')
+ def __init__(self, filename):
+ self._file = open(filename, 'w')
- def __enter__(self):
- 'locks the file and writes the PID of the current process into it'
- fcntl.flock(self._file, fcntl.LOCK_EX)
- self._file.write(str(os.getpid()))
- self._file.flush()
+ def __enter__(self):
+ 'locks the file and writes the PID of the current process into it'
+ fcntl.flock(self._file, fcntl.LOCK_EX)
+ self._file.write(str(os.getpid()))
+ self._file.flush()
- return self._file
+ return self._file
- def __exit__(self, type, value, traceback):
- 'unlock the file'
- fcntl.flock(self._file, fcntl.LOCK_UN)
+ def __exit__(self, type, value, traceback):
+ 'unlock the file'
+ fcntl.flock(self._file, fcntl.LOCK_UN)
diff --git a/client/utils/logging.py b/client/utils/logging.py
index 964480f..1e55fac 100644
--- a/client/utils/logging.py
+++ b/client/utils/logging.py
@@ -1,12 +1,13 @@
import sys
import time
+
def log(message):
- ''
+ ''
- print '%(epoch)s %(date)s %(message)s' % {
- 'epoch' : time.time(),
- 'date' : time.strftime('%Y-%m-%d %H:%M:%S'),
- 'message' : message}
+ print '%(epoch)s %(date)s %(message)s' % {
+ 'epoch': time.time(),
+ 'date': time.strftime('%Y-%m-%d %H:%M:%S'),
+ 'message': message}
- sys.stdout.flush()
+ sys.stdout.flush()
diff --git a/client/utils/misc.py b/client/utils/misc.py
index 6f73998..fa4e54c 100644
--- a/client/utils/misc.py
+++ b/client/utils/misc.py
@@ -8,62 +8,62 @@ from tempfile import TemporaryFile
def available_ram():
- 'determine amount of RAM in the system (in megabytes)'
+ 'determine amount of RAM in the system (in megabytes)'
- return int(os.popen("free -m").readlines()[1].split()[1])
+ return int(os.popen("free -m").readlines()[1].split()[1])
def run_cmd(args, env=None, cwd=None):
- 'run command (a subprocess.call wrapper)'
+ 'run command (a subprocess.call wrapper)'
- with TemporaryFile() as strout:
+ with TemporaryFile() as strout:
- start = time.time()
- retcode = call(args, env=env, cwd=cwd, stdout=strout, stderr=STDOUT)
+ start = time.time()
+ retcode = call(args, env=env, cwd=cwd, stdout=strout, stderr=STDOUT)
- strout.seek(0)
- return (retcode, strout.read(), (time.time() - start))
+ strout.seek(0)
+ return (retcode, strout.read(), (time.time() - start))
-def connect(dbname, conn, cursor, nretries = 60, delay = 1.0):
- '''Try opening a connection and a cursor. If it does not succeed (e.g.
- when the database is performing recovery after a crash, retry multiple
- times (as specified by nretries and delay in seconds).
- '''
+def connect(dbname, conn, cursor, nretries=60, delay=1.0):
+ '''Try opening a connection and a cursor. If it does not succeed (e.g.
+ when the database is performing recovery after a crash, retry multiple
+ times (as specified by nretries and delay in seconds).
+ '''
- # if we already have connection and a cursor, return it
- if conn and cursor:
- return (conn, cursor)
+ # if we already have connection and a cursor, return it
+ if conn and cursor:
+ return (conn, cursor)
- # we'll try repeatedly, with delays between the attempts
- i = 0
- while i < nretries:
+ # we'll try repeatedly, with delays between the attempts
+ i = 0
+ while i < nretries:
- i += 1
+ i += 1
- try:
- conn = psycopg2.connect('host=localhost dbname=%s' % (dbname,))
- # TODO do we actually need autocommit?
- conn.autocommit = True
- cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
+ try:
+ conn = psycopg2.connect('host=localhost dbname=%s' % (dbname,))
+ # TODO do we actually need autocommit?
+ conn.autocommit = True
+ cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
- return (conn, cursor)
- except:
- # connection failure - sleep for a while, then try again
- time.sleep(delay)
+ return (conn, cursor)
+ except Exception as e:
+ # connection failure - sleep for a while, then try again
+ time.sleep(delay)
- return (None, None)
+ return (None, None)
def disconnect(conn, cursor):
- '''Make sure we're disconnected (but prevent exceptions)'''
+ '''Make sure we're disconnected (but prevent exceptions)'''
- try:
- cursor.close()
- except:
- pass
+ try:
+ cursor.close()
+ except Exception as e:
+ pass
- try:
- conn.close()
- except:
- pass
+ try:
+ conn.close()
+ except Exception as e:
+ pass
diff --git a/web/pgperffarm/auth.py b/web/pgperffarm/auth.py
index 5ce4593..b1d74b7 100644
--- a/web/pgperffarm/auth.py
+++ b/web/pgperffarm/auth.py
@@ -35,11 +35,12 @@ from Crypto.Hash import SHA
from Crypto import Random
import time
+
class AuthBackend(ModelBackend):
- # We declare a fake backend that always fails direct authentication -
- # since we should never be using direct authentication in the first place!
- def authenticate(self, username=None, password=None):
- raise Exception("Direct authentication not supported")
+ # We declare a fake backend that always fails direct authentication -
+ # since we should never be using direct authentication in the first place!
+ def authenticate(self, username=None, password=None):
+ raise Exception("Direct authentication not supported")
####
@@ -48,85 +49,91 @@ class AuthBackend(ModelBackend):
# Handle login requests by sending them off to the main site
def login(request):
- if request.GET.has_key('next'):
- # Put together an url-encoded dict of parameters we're getting back,
- # including a small nonce at the beginning to make sure it doesn't
- # encrypt the same way every time.
- s = "t=%s&%s" % (int(time.time()), urllib.urlencode({'r': request.GET['next']}))
- # Now encrypt it
- r = Random.new()
- iv = r.read(16)
- encryptor = AES.new(SHA.new(settings.SECRET_KEY).digest()[:16], AES.MODE_CBC, iv)
- cipher = encryptor.encrypt(s + ' ' * (16-(len(s) % 16))) # pad to 16 bytes
-
- return HttpResponseRedirect("%s?d=%s$%s" % (
- settings.PGAUTH_REDIRECT,
- base64.b64encode(iv, "-_"),
- base64.b64encode(cipher, "-_"),
- ))
- else:
- return HttpResponseRedirect(settings.PGAUTH_REDIRECT)
+ if 'next' in request.GET:
+ # Put together a URL-encoded dict of parameters we're getting back,
+ # including a small nonce at the beginning to make sure it doesn't
+ # encrypt the same way every time.
+ s = "t=%s&%s" % (int(time.time()),
+ urllib.urlencode({'r': request.GET['next']}))
+ # Now encrypt it
+ r = Random.new()
+ iv = r.read(16)
+ encryptor = AES.new(SHA.new(settings.SECRET_KEY).digest()[:16],
+ AES.MODE_CBC, iv)
+ # pad to 16 bytes
+ cipher = encryptor.encrypt(s + ' ' * (16 - (len(s) % 16)))
+
+ return HttpResponseRedirect("%s?d=%s$%s" % (
+ settings.PGAUTH_REDIRECT,
+ base64.b64encode(iv, "-_"),
+ base64.b64encode(cipher, "-_"),
+ ))
+ else:
+ return HttpResponseRedirect(settings.PGAUTH_REDIRECT)
+
# Handle logout requests by logging out of this site and then
# redirecting to log out from the main site as well.
def logout(request):
- if request.user.is_authenticated():
- django_logout(request)
- return HttpResponseRedirect("%slogout/" % settings.PGAUTH_REDIRECT)
+ if request.user.is_authenticated():
+ django_logout(request)
+ return HttpResponseRedirect("%slogout/" % settings.PGAUTH_REDIRECT)
+
# Receive an authentication response from the main website and try
# to log the user in.
def auth_receive(request):
- if request.GET.has_key('s') and request.GET['s'] == "logout":
- # This was a logout request
- return HttpResponseRedirect('/')
-
- if not request.GET.has_key('i'):
- return HttpResponse("Missing IV in url!", status=400)
- if not request.GET.has_key('d'):
- return HttpResponse("Missing data in url!", status=400)
-
- # Set up an AES object and decrypt the data we received
- decryptor = AES.new(base64.b64decode(settings.PGAUTH_KEY),
- AES.MODE_CBC,
- base64.b64decode(str(request.GET['i']), "-_"))
- s = decryptor.decrypt(base64.b64decode(str(request.GET['d']), "-_")).rstrip(' ')
-
- # Now un-urlencode it
- try:
- data = urlparse.parse_qs(s, strict_parsing=True)
- except ValueError:
- return HttpResponse("Invalid encrypted data received.", status=400)
-
- # Check the timestamp in the authentication
- if (int(data['t'][0]) < time.time() - 10):
- return HttpResponse("Authentication token too old.", status=400)
-
- # Update the user record (if any)
- try:
- user = User.objects.get(username=data['u'][0])
- # User found, let's see if any important fields have changed
- changed = False
- if user.first_name != data['f'][0]:
- user.first_name = data['f'][0]
- changed = True
- if user.last_name != data['l'][0]:
- user.last_name = data['l'][0]
- changed = True
- if user.email != data['e'][0]:
- user.email = data['e'][0]
- changed= True
- if changed:
- user.save()
- except User.DoesNotExist:
- # User not found, create it!
-
- # NOTE! We have some legacy users where there is a user in
- # the database with a different userid. Instead of trying to
- # somehow fix that live, give a proper error message and
- # have somebody look at it manually.
- if User.objects.filter(email=data['e'][0]).exists():
- return HttpResponse("""A user with email %s already exists, but with
+ if 's' in request.GET and request.GET['s'] == "logout":
+ # This was a logout request
+ return HttpResponseRedirect('/')
+
+ if 'i' not in request.GET:
+ return HttpResponse("Missing IV in url!", status=400)
+ if 'd' not in request.GET:
+ return HttpResponse("Missing data in url!", status=400)
+
+ # Set up an AES object and decrypt the data we received
+ decryptor = AES.new(base64.b64decode(settings.PGAUTH_KEY),
+ AES.MODE_CBC,
+ base64.b64decode(str(request.GET['i']), "-_"))
+ s = decryptor.decrypt(base64.b64decode(str(request.GET['d']),
+ "-_")).rstrip(' ')
+
+ # Now un-urlencode it
+ try:
+ data = urlparse.parse_qs(s, strict_parsing=True)
+ except ValueError:
+ return HttpResponse("Invalid encrypted data received.", status=400)
+
+ # Check the timestamp in the authentication
+ if (int(data['t'][0]) < time.time() - 10):
+ return HttpResponse("Authentication token too old.", status=400)
+
+ # Update the user record (if any)
+ try:
+ user = User.objects.get(username=data['u'][0])
+ # User found, let's see if any important fields have changed
+ changed = False
+ if user.first_name != data['f'][0]:
+ user.first_name = data['f'][0]
+ changed = True
+ if user.last_name != data['l'][0]:
+ user.last_name = data['l'][0]
+ changed = True
+ if user.email != data['e'][0]:
+ user.email = data['e'][0]
+ changed = True
+ if changed:
+ user.save()
+ except User.DoesNotExist:
+ # User not found, create it!
+
+ # NOTE! We have some legacy users where there is a user in
+ # the database with a different userid. Instead of trying to
+ # somehow fix that live, give a proper error message and
+ # have somebody look at it manually.
+ if User.objects.filter(email=data['e'][0]).exists():
+ return HttpResponse("""A user with email %s already exists, but with
a different username than %s.
This is almost certainly caused by some legacy data in our database.
@@ -137,69 +144,69 @@ for you.
We apologize for the inconvenience.
""" % (data['e'][0], data['u'][0]), content_type='text/plain')
- user = User(username=data['u'][0],
- first_name=data['f'][0],
- last_name=data['l'][0],
- email=data['e'][0],
- password='setbypluginnotasha1',
- )
- user.save()
-
- # Ok, we have a proper user record. Now tell django that
- # we're authenticated so it persists it in the session. Before
- # we do that, we have to annotate it with the backend information.
- user.backend = "%s.%s" % (AuthBackend.__module__, AuthBackend.__name__)
- django_login(request, user)
-
- # Finally, check of we have a data package that tells us where to
- # redirect the user.
- if data.has_key('d'):
- (ivs, datas) = data['d'][0].split('$')
- decryptor = AES.new(SHA.new(settings.SECRET_KEY).digest()[:16],
- AES.MODE_CBC,
- base64.b64decode(ivs, "-_"))
- s = decryptor.decrypt(base64.b64decode(datas, "-_")).rstrip(' ')
- try:
- rdata = urlparse.parse_qs(s, strict_parsing=True)
- except ValueError:
- return HttpResponse("Invalid encrypted data received.", status=400)
- if rdata.has_key('r'):
- # Redirect address
- return HttpResponseRedirect(rdata['r'][0])
- # No redirect specified, see if we have it in our settings
- if hasattr(settings, 'PGAUTH_REDIRECT_SUCCESS'):
- return HttpResponseRedirect(settings.PGAUTH_REDIRECT_SUCCESS)
- return HttpResponse("Authentication successful, but don't know where to redirect!", status=500)
-
-
-# Perform a search in the central system. Note that the results are returned as an
-# array of dicts, and *not* as User objects. To be able to for example reference the
-# user through a ForeignKey, a User object must be materialized locally. We don't do
-# that here, as this search might potentially return a lot of unrelated users since
-# it's a wildcard match.
-# Unlike the authentication, searching does not involve the browser - we just make
-# a direct http call.
+ user = User(username=data['u'][0],
+ first_name=data['f'][0],
+ last_name=data['l'][0],
+ email=data['e'][0],
+ password='setbypluginnotasha1',
+ )
+ user.save()
+
+ # Ok, we have a proper user record. Now tell django that
+ # we're authenticated so it persists it in the session. Before
+ # we do that, we have to annotate it with the backend information.
+ user.backend = "%s.%s" % (AuthBackend.__module__, AuthBackend.__name__)
+ django_login(request, user)
+
+ # Finally, check if we have a data package that tells us where to
+ # redirect the user.
+ if 'd' in data:
+ (ivs, datas) = data['d'][0].split('$')
+ decryptor = AES.new(SHA.new(settings.SECRET_KEY).digest()[:16],
+ AES.MODE_CBC,
+ base64.b64decode(ivs, "-_"))
+ s = decryptor.decrypt(base64.b64decode(datas, "-_")).rstrip(' ')
+ try:
+ rdata = urlparse.parse_qs(s, strict_parsing=True)
+ except ValueError:
+ return HttpResponse("Invalid encrypted data received.", status=400)
+ if 'r' in rdata:
+ # Redirect address
+ return HttpResponseRedirect(rdata['r'][0])
+ # No redirect specified, see if we have it in our settings
+ if hasattr(settings, 'PGAUTH_REDIRECT_SUCCESS'):
+ return HttpResponseRedirect(settings.PGAUTH_REDIRECT_SUCCESS)
+ return HttpResponse("Authentication successful, but don't know where to "
+ "redirect!", status=500)
+
+
+# Perform a search in the central system. Note that the results are returned as
+# an array of dicts, and *not* as User objects. To be able to, for example,
+# reference the user through a ForeignKey, a User object must be materialized
+# locally. We don't do that here, as this search might potentially return a lot
+# of unrelated users since it's a wildcard match. Unlike the authentication,
+# searching does not involve the browser - we just make a direct HTTP call.
def user_search(searchterm=None, userid=None):
- # If upsteam isn't responding quickly, it's not going to respond at all, and
- # 10 seconds is already quite long.
- socket.setdefaulttimeout(10)
- if userid:
- q = {'u': userid}
- else:
- q = {'s': searchterm}
-
- u = urllib.urlopen('%ssearch/?%s' % (
- settings.PGAUTH_REDIRECT,
- urllib.urlencode(q),
- ))
- (ivs, datas) = u.read().split('&')
- u.close()
-
- # Decryption time
- decryptor = AES.new(base64.b64decode(settings.PGAUTH_KEY),
- AES.MODE_CBC,
- base64.b64decode(ivs, "-_"))
- s = decryptor.decrypt(base64.b64decode(datas, "-_")).rstrip(' ')
- j = json.loads(s)
-
- return j
+ # If upstream isn't responding quickly, it's not going to respond at all,
+ # and 10 seconds is already quite long.
+ socket.setdefaulttimeout(10)
+ if userid:
+ q = {'u': userid}
+ else:
+ q = {'s': searchterm}
+
+ u = urllib.urlopen('%ssearch/?%s' % (
+ settings.PGAUTH_REDIRECT,
+ urllib.urlencode(q),
+ ))
+ (ivs, datas) = u.read().split('&')
+ u.close()
+
+ # Decryption time
+ decryptor = AES.new(base64.b64decode(settings.PGAUTH_KEY),
+ AES.MODE_CBC,
+ base64.b64decode(ivs, "-_"))
+ s = decryptor.decrypt(base64.b64decode(datas, "-_")).rstrip(' ')
+ j = json.loads(s)
+
+ return j
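
The login/auth_receive pair above boils down to one small scheme: AES-CBC with a random IV, space padding to a 16-byte boundary, and URL-safe base64 (the '-_' alphabet). A standalone sketch of the redirect-parcel round trip that login builds and auth_receive unpacks, assuming Python 2 and PyCrypto; the secret and payload below are placeholders:

    import base64

    from Crypto import Random
    from Crypto.Cipher import AES
    from Crypto.Hash import SHA

    secret = 'not-a-real-secret-key'                    # placeholder
    plaintext = 't=1500000000&r=%2Fsome%2Fnext%2Fpage'  # timestamp + redirect

    # encrypt: 16-byte key from SHA1(secret), random IV, pad with spaces
    key = SHA.new(secret).digest()[:16]
    iv = Random.new().read(16)
    padded = plaintext + ' ' * (16 - (len(plaintext) % 16))
    cipher = AES.new(key, AES.MODE_CBC, iv).encrypt(padded)
    token = '%s$%s' % (base64.b64encode(iv, '-_'),
                       base64.b64encode(cipher, '-_'))

    # decrypt: split IV from data, undo the URL-safe base64, strip the padding
    ivs, datas = token.split('$')
    decryptor = AES.new(key, AES.MODE_CBC, base64.b64decode(ivs, '-_'))
    print(decryptor.decrypt(base64.b64decode(datas, '-_')).rstrip(' '))
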
diff --git a/web/pgperffarm/settings.py b/web/pgperffarm/settings.py
index 29d74dc..38fdf8a 100644
--- a/web/pgperffarm/settings.py
+++ b/web/pgperffarm/settings.py
@@ -1,3 +1,9 @@
+# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
+import os
+
+# Load local settings overrides
+from settings_local import *
+
"""
Django settings for pgperffarm project.
@@ -10,9 +16,6 @@ For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
-# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
-import os
-
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -56,7 +59,7 @@ ROOT_URLCONF = 'pgperffarm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
- 'DIRS': ['templates/',],
+ 'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
@@ -109,6 +112,3 @@ STATICFILES_DIRS = [
AUTHENTICATION_BACKENDS = (
'pgperffarm.auth.AuthBackend',
)
-
-# Load local settings overrides
-from settings_local import *
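
The import moved to the top of settings.py pulls site-specific values from settings_local.py, which this patch only imports. A hypothetical sketch of such a file; the setting names are the ones referenced in auth.py above, and every value is a placeholder:

    # settings_local.py (hypothetical) - all values below are placeholders
    SECRET_KEY = 'replace-with-a-long-random-string'

    # shared key and redirect target for the community auth integration
    PGAUTH_KEY = 'base64-encoded-shared-key-goes-here'
    PGAUTH_REDIRECT = 'https://auth.example.org/account/auth/'
    PGAUTH_REDIRECT_SUCCESS = '/'
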
diff --git a/web/pgperffarm/urls.py b/web/pgperffarm/urls.py
index ea9962b..3b439eb 100644
--- a/web/pgperffarm/urls.py
+++ b/web/pgperffarm/urls.py
@@ -26,13 +26,14 @@ urlpatterns = [
url(r'^(?:account/)?login/?$', 'pgperffarm.auth.login'),
url(r'^(?:account/)?logout/?$', 'pgperffarm.auth.logout'),
url(r'^auth_receive/$', 'pgperffarm.auth.auth_receive'),
-
+
# Admin site
url(r'^admin/', include(admin.site.urls)),
-
+
# This should not happen in production - serve with lightty!
url(r'^static/(.*)$', 'django.views.static.serve', {
'document_root': '/static',
}),
- url(r'^favicon\.ico$', RedirectView.as_view(url='/static/favicon.ico', permanent=True))
+ url(r'^favicon\.ico$', RedirectView.as_view(url='/static/favicon.ico',
+ permanent=True))
]
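
The dotted-string view references kept by this hunk ('pgperffarm.auth.login' and friends) were deprecated in Django 1.8 and removed in 1.10. A sketch of the same patterns written with imported callables instead, with the module layout assumed from the file paths in this diff:

    from django.conf.urls import include, url
    from django.contrib import admin
    from django.views.generic import RedirectView
    from django.views.static import serve

    from pgperffarm import auth

    urlpatterns = [
        url(r'^(?:account/)?login/?$', auth.login),
        url(r'^(?:account/)?logout/?$', auth.logout),
        url(r'^auth_receive/$', auth.auth_receive),
        url(r'^admin/', include(admin.site.urls)),
        # development only - serve statics from the real web server in prod
        url(r'^static/(.*)$', serve, {'document_root': '/static'}),
        url(r'^favicon\.ico$', RedirectView.as_view(url='/static/favicon.ico',
                                                    permanent=True)),
    ]
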
diff --git a/web/pgperffarm/views.py b/web/pgperffarm/views.py
index 7fdcf34..91e1808 100644
--- a/web/pgperffarm/views.py
+++ b/web/pgperffarm/views.py
@@ -5,15 +5,18 @@ from django.template import RequestContext
import datetime
+
# Handle the static pages
def index(request):
- return render_to_response('index.html',
- context_instance=RequestContext(request))
-
+ return render_to_response('index.html',
+ context_instance=RequestContext(request))
+
+
def licence(request):
- return render_to_response('licence.html',
- context_instance=RequestContext(request))
-
+ return render_to_response('licence.html',
+ context_instance=RequestContext(request))
+
+
def ppolicy(request):
- return render_to_response('ppolicy.html',
- context_instance=RequestContext(request))
\ No newline at end of file
+ return render_to_response('ppolicy.html',
+ context_instance=RequestContext(request))
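
Not part of this patch, but for comparison: django.shortcuts.render() bundles the render_to_response + RequestContext combination these views spell out by hand, so an equivalent and shorter form would be:

    from django.shortcuts import render


    def index(request):
        return render(request, 'index.html')


    def licence(request):
        return render(request, 'licence.html')


    def ppolicy(request):
        return render(request, 'ppolicy.html')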