author     Mark Wong    2017-07-18 20:55:51 +0000
committer  Mark Wong    2017-08-04 04:51:25 +0000
commit     429a5995d592d6b3fd26354ea8e31cfac387af3f (patch)
tree       0aec1bff7502f6f75b37b96664cabd02356fffeb
parent     984e2c99d25d0d56e443867e9707d393adacf14f (diff)
Pythonic way to check for None
Also check to make sure files exist before trying to collect them.
-rw-r--r--  client/collectors/postgres.py  30
1 file changed, 10 insertions, 20 deletions
diff --git a/client/collectors/postgres.py b/client/collectors/postgres.py
index 3d4fc37..319fe40 100644
--- a/client/collectors/postgres.py
+++ b/client/collectors/postgres.py
@@ -96,7 +96,7 @@ def run_collector(in_queue, out_queue, dbname, interval=1.0):
                     'FROM pg_stat_bgwriter')
 
         # on the first iteration, construct the CSV files
-        if not bgwriter_log:
+        if bgwriter_log is None:
             fields = [desc[0] for desc in cur.description]
             bgwriter_log = csv.DictWriter(open('bgwriter.csv', 'w'), fields)
             bgwriter_log.writeheader()
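
The switch to an identity check is more than style: `not x` also fires for empty or zero-valued objects, while `x is None` fires only for the sentinel these log variables are initialized to before the first iteration. A minimal sketch of the difference (the EmptyReport class is an invented stand-in, not part of the collector):

    writer = None  # first iteration: the DictWriter has not been built yet

    class EmptyReport:
        """Invented example of an object that exists but is falsy."""
        def __len__(self):
            return 0

    report = EmptyReport()

    print(not writer)      # True  -- None is falsy
    print(not report)      # True  -- surprise: a real object, but falsy
    print(writer is None)  # True  -- only the None sentinel matches
    print(report is None)  # False -- identity ignores truthiness

A DictWriter happens to be truthy, so the old code worked, but the identity check states the "not yet constructed" intent directly. The remaining three hunks make the identical change for the other collectors.
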
@@ -113,7 +113,7 @@ def run_collector(in_queue, out_queue, dbname, interval=1.0):
                     'USING (relid, schemaname, relname)')
 
         # on the first iteration, construct the CSV files
-        if not tables_log:
+        if tables_log is None:
             fields = [desc[0] for desc in cur.description]
             tables_log = csv.DictWriter(open('tables.csv', 'w'), fields)
             tables_log.writeheader()
@@ -127,7 +127,7 @@ def run_collector(in_queue, out_queue, dbname, interval=1.0):
                     'indexrelname)')
 
         # on the first iteration, construct the CSV files
-        if not indexes_log:
+        if indexes_log is None:
             fields = [desc[0] for desc in cur.description]
             indexes_log = csv.DictWriter(open('indexes.csv', 'w'), fields)
             indexes_log.writeheader()
@@ -139,7 +139,7 @@ def run_collector(in_queue, out_queue, dbname, interval=1.0):
                     'FROM pg_stat_database')
 
         # on the first iteration, construct the CSV files
-        if not database_log:
+        if database_log is None:
             fields = [desc[0] for desc in cur.description]
             database_log = csv.DictWriter(open('database.csv', 'w'), fields)
             database_log.writeheader()
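
All four hunks apply the same first-iteration pattern: derive the CSV header from `cur.description` (whose first element per column is its name, per the Python DB-API), then build a `csv.DictWriter` and write the header once. A self-contained sketch of that pattern, using sqlite3 only so it runs without a PostgreSQL server; the column names are illustrative:

    import csv
    import sqlite3

    cur = sqlite3.connect(':memory:').cursor()
    cur.execute('SELECT 0 AS checkpoints_timed, 0 AS buffers_clean')

    stats_log = None  # mirrors bgwriter_log et al. before the first pass

    # on the first iteration, construct the CSV file
    if stats_log is None:
        fields = [desc[0] for desc in cur.description]  # DB-API column names
        stats_log = csv.DictWriter(open('stats.csv', 'w', newline=''), fields)
        stats_log.writeheader()

    for row in cur.fetchall():
        stats_log.writerow(dict(zip(fields, row)))
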
@@ -158,23 +158,13 @@ def run_collector(in_queue, out_queue, dbname, interval=1.0):
         result = {}
 
-        with open('bgwriter.csv', 'r') as f:
-            result.update({'bgwriter': f.read()})
-
-        with open('tables.csv', 'r') as f:
-            result.update({'tables': f.read()})
-
-        with open('indexes.csv', 'r') as f:
-            result.update({'indexes': f.read()})
-
-        with open('database.csv', 'r') as f:
-            result.update({'database': f.read()})
-
-        # remove the files
-        os.remove('bgwriter.csv')
-        os.remove('tables.csv')
-        os.remove('indexes.csv')
-        os.remove('database.csv')
+        for file in ['bgwriter', 'tables', 'indexes', 'database']:
+            if os.path.isfile(''.join([file, '.csv'])):
+                with open(''.join([file, '.csv']), 'r') as f:
+                    result.update({file : f.read()})
+
+                # remove the files
+                os.remove(''.join([file, '.csv']))
 
         out_queue.put(result)
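
The final hunk folds four copy-pasted read-and-remove blocks into one loop, and the new `os.path.isfile` guard turns a missing file from a FileNotFoundError into a silent skip, which is the "check to make sure files exist" half of the commit message. A standalone sketch of the same pattern (the helper name is invented for illustration; `''.join([name, '.csv'])` is kept from the commit, though plain concatenation or an f-string would do the same):

    import os

    def collect_csv_files(names):
        """Read each <name>.csv into a dict, deleting each file afterwards.

        Files that were never created (e.g. a query that never ran) are
        skipped instead of raising FileNotFoundError.
        """
        result = {}
        for name in names:
            path = ''.join([name, '.csv'])  # same construction as the commit
            if os.path.isfile(path):
                with open(path, 'r') as f:
                    result[name] = f.read()
                # remove the file once its contents are captured
                os.remove(path)
        return result

    # usage mirroring the collector's file set:
    print(collect_csv_files(['bgwriter', 'tables', 'indexes', 'database']))

Note that `os.remove` sits inside the isfile guard but outside the `with` block, so the file is closed before it is deleted and is only deleted when it was actually read.
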