Commit fad356a

Added io stats to the backend
1 parent 859d202 commit fad356a

File tree

1 file changed: +66 -4 lines

flask-backend/app.py

Lines changed: 66 additions & 4 deletions
@@ -487,8 +487,8 @@ def get_btree_bloat_csv():
 @app.route('/table_info/csv', methods=['GET'])
 def get_table_info_csv():
     """
-    Get comprehensive table information including bloat metrics and detailed size information as a CSV table.
-    Combines pg_table_bloat and table_size_detailed metrics for complete table analysis.
+    Get comprehensive table information including bloat metrics, detailed size information, and I/O statistics as a CSV table.
+    Combines pg_table_bloat, table_size_detailed, and pg_statio_all_tables metrics for complete table analysis.
     """
     try:
         # Get query parameters
@@ -514,7 +514,7 @@ def get_table_info_csv():
         filter_str = '{' + ','.join(filters) + '}' if filters else ''
 
         # Metrics to fetch with last_over_time to get only the most recent value
-        # Include both bloat metrics and detailed size metrics
+        # Include bloat metrics, detailed size metrics, and I/O metrics
         metric_queries = [
             # Bloat metrics
             f'last_over_time(pgwatch_pg_table_bloat_real_size_mib{filter_str}[1d])',
@@ -535,6 +535,15 @@ def get_table_info_csv():
             f'last_over_time(pgwatch_table_size_detailed_toast_indexes_size_b{filter_str}[1d])',
             f'last_over_time(pgwatch_table_size_detailed_total_relation_size_b{filter_str}[1d])',
             f'last_over_time(pgwatch_table_size_detailed_total_toast_size_b{filter_str}[1d])',
+            # I/O metrics
+            f'last_over_time(pgwatch_pg_statio_all_tables_heap_blks_read{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_statio_all_tables_heap_blks_hit{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_statio_all_tables_idx_blks_read{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_statio_all_tables_idx_blks_hit{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_statio_all_tables_toast_blks_read{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_statio_all_tables_toast_blks_hit{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_statio_all_tables_tidx_blks_read{filter_str}[1d])',
+            f'last_over_time(pgwatch_pg_statio_all_tables_tidx_blks_hit{filter_str}[1d])',
         ]
 
         prom = get_prometheus_client()
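
The added queries reuse the existing last_over_time(...[1d]) pattern: each expression returns the most recent sample a series produced within the last day. As a rough illustration only (not part of this commit), such an expression can be evaluated directly against Prometheus's standard HTTP query API; the Prometheus address and the label filter below are assumptions, and the backend itself goes through its own get_prometheus_client() helper rather than raw HTTP calls.

# Sketch only: evaluate one of the new I/O expressions against the standard
# Prometheus HTTP API (GET /api/v1/query). URL and label filter are assumed.
import requests

PROM_URL = "http://localhost:9090"   # assumed Prometheus address
filter_str = '{dbname="mydb"}'       # hypothetical label filter

query = f"last_over_time(pgwatch_pg_statio_all_tables_heap_blks_hit{filter_str}[1d])"
resp = requests.get(f"{PROM_URL}/api/v1/query", params={"query": query}, timeout=10)
resp.raise_for_status()

# Each entry pairs the series labels with its latest sample: [timestamp, "value"].
for item in resp.json()["data"]["result"]:
    print(item["metric"], float(item["value"][1]))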
@@ -595,10 +604,58 @@ def get_table_info_csv():
                         metric_results[key]['total_relation_size_mib'] = value / (1024 * 1024)
                     elif 'total_toast_size_b' in query:
                         metric_results[key]['total_toast_size_mib'] = value / (1024 * 1024)
+
+                    # I/O metrics (check tidx_* before idx_*: 'idx_blks_read' is a substring of 'tidx_blks_read')
+                    elif 'heap_blks_read' in query:
+                        metric_results[key]['heap_blks_read'] = int(value)
+                    elif 'heap_blks_hit' in query:
+                        metric_results[key]['heap_blks_hit'] = int(value)
+                    elif 'tidx_blks_read' in query:
+                        metric_results[key]['tidx_blks_read'] = int(value)
+                    elif 'tidx_blks_hit' in query:
+                        metric_results[key]['tidx_blks_hit'] = int(value)
+                    elif 'idx_blks_read' in query:
+                        metric_results[key]['idx_blks_read'] = int(value)
+                    elif 'idx_blks_hit' in query:
+                        metric_results[key]['idx_blks_hit'] = int(value)
+                    elif 'toast_blks_read' in query:
+                        metric_results[key]['toast_blks_read'] = int(value)
+                    elif 'toast_blks_hit' in query:
+                        metric_results[key]['toast_blks_hit'] = int(value)
             except Exception as e:
                 logger.warning(f"Failed to query: {query}, error: {e}")
                 continue
 
+        # Calculate I/O hit ratios
+        for key, row in metric_results.items():
+            # Heap hit ratio
+            heap_total = row.get('heap_blks_read', 0) + row.get('heap_blks_hit', 0)
+            if heap_total > 0:
+                row['heap_hit_ratio'] = round(row.get('heap_blks_hit', 0) / heap_total * 100, 2)
+            else:
+                row['heap_hit_ratio'] = 0.0
+
+            # Index hit ratio
+            idx_total = row.get('idx_blks_read', 0) + row.get('idx_blks_hit', 0)
+            if idx_total > 0:
+                row['idx_hit_ratio'] = round(row.get('idx_blks_hit', 0) / idx_total * 100, 2)
+            else:
+                row['idx_hit_ratio'] = 0.0
+
+            # TOAST hit ratio
+            toast_total = row.get('toast_blks_read', 0) + row.get('toast_blks_hit', 0)
+            if toast_total > 0:
+                row['toast_hit_ratio'] = round(row.get('toast_blks_hit', 0) / toast_total * 100, 2)
+            else:
+                row['toast_hit_ratio'] = 0.0
+
+            # TOAST index hit ratio
+            tidx_total = row.get('tidx_blks_read', 0) + row.get('tidx_blks_hit', 0)
+            if tidx_total > 0:
+                row['tidx_hit_ratio'] = round(row.get('tidx_blks_hit', 0) / tidx_total * 100, 2)
+            else:
+                row['tidx_hit_ratio'] = 0.0
+
         # Prepare CSV output
         output = io.StringIO()
         fieldnames = [
@@ -610,7 +667,12 @@ def get_table_info_csv():
             'table_main_size_mib', 'table_fsm_size_mib', 'table_vm_size_mib',
             'table_indexes_size_mib', 'toast_main_size_mib', 'toast_fsm_size_mib',
             'toast_vm_size_mib', 'toast_indexes_size_mib', 'total_relation_size_mib',
-            'total_toast_size_mib'
+            'total_toast_size_mib',
+            # I/O metrics
+            'heap_blks_read', 'heap_blks_hit', 'heap_hit_ratio',
+            'idx_blks_read', 'idx_blks_hit', 'idx_hit_ratio',
+            'toast_blks_read', 'toast_blks_hit', 'toast_hit_ratio',
+            'tidx_blks_read', 'tidx_blks_hit', 'tidx_hit_ratio'
         ]
         writer = csv.DictWriter(output, fieldnames=fieldnames)
        writer.writeheader()
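
For reference, a minimal consumer-side sketch of the extended endpoint follows; it is not part of the commit. The backend base URL is an assumption, the filter query parameters accepted by the route are not visible in this diff and are therefore omitted, and hit_ratio simply mirrors the formula added above: hits / (hits + reads) * 100, rounded to two decimals, with 0.0 when a table saw no block accesses. Because pg_statio_all_tables counters are cumulative since the last statistics reset, the ratios describe lifetime cache behaviour rather than the past day alone.

# Sketch only: fetch the extended CSV and recompute one of the new hit ratios.
# BASE_URL is assumed; filter parameters (not shown in this diff) are omitted.
import csv
import io
import requests

BASE_URL = "http://localhost:5000"  # assumed Flask backend address


def hit_ratio(hit: int, read: int) -> float:
    """Same formula as the backend: hits / (hits + reads) * 100, 2 decimals."""
    total = hit + read
    return round(hit / total * 100, 2) if total > 0 else 0.0


resp = requests.get(f"{BASE_URL}/table_info/csv", timeout=30)
resp.raise_for_status()

for row in csv.DictReader(io.StringIO(resp.text)):
    recomputed = hit_ratio(int(row.get("heap_blks_hit") or 0),
                           int(row.get("heap_blks_read") or 0))
    # Should agree with the heap_hit_ratio column written by the backend.
    print(recomputed, row.get("heap_hit_ratio"))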
