Commit
Updated output of several use cases
perdelt committed Jul 12, 2024
1 parent 574085b commit c2e0da4
Showing 7 changed files with 700 additions and 13 deletions.

bexhoma/configurations.py (4 changes: 4 additions & 0 deletions)
@@ -741,6 +741,10 @@ def start_monitoring(self, app='', component='monitoring', experiment='', config
         #if not os.path.isfile(self.yamlfolder+self.deployment):
         name = self.create_monitoring(app, component, experiment, configuration)
         name_sut = self.create_monitoring(app, 'sut', experiment, configuration)
+        if self.experiment.cluster.monitor_cluster_active:
+            print("{:30s}: wants to monitor all components in cluster".format(configuration))
+            if not self.experiment.cluster.monitor_cluster_exists:
+                print("{:30s}: cannot rely on preinstalled monitoring".format(configuration))
         print("{:30s}: start monitoring with prometheus pod".format(configuration))
         deployment_experiment = self.experiment.path+'/{name}.yml'.format(name=name)
         with open(self.experiment.cluster.yamlfolder+deployment) as stream:
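Note: the four added lines only decide which informational messages are printed; the unchanged context lines suggest the method still creates the prometheus deployment afterwards either way. A minimal standalone sketch of the same message flow (the helper name and the example configuration name are hypothetical; the two flags correspond to monitor_cluster_active and monitor_cluster_exists from the diff):

def report_monitoring_mode(configuration, monitor_cluster_active, monitor_cluster_exists):
    # Mirrors the messages added above, outside of the bexhoma classes.
    if monitor_cluster_active:
        print("{:30s}: wants to monitor all components in cluster".format(configuration))
        if not monitor_cluster_exists:
            print("{:30s}: cannot rely on preinstalled monitoring".format(configuration))
    print("{:30s}: start monitoring with prometheus pod".format(configuration))

report_monitoring_mode("PostgreSQL-1-1-1024", True, False)  # hypothetical configuration name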

bexhoma/evaluators.py (5 changes: 3 additions & 2 deletions)
@@ -432,11 +432,12 @@ def transform_monitoring_results(self, component="loading"):
         monitor.metrics.saveMetricsDataframe(self.path+"/"+filename, df_all)
     def get_monitoring_metric(self, metric, component="loading"):
         """
-        Returns list of names of metrics using during monitoring.
+        Returns DataFrame containing metrics measured from a specific component.

-        :return: List of monitoring metrics
+        :return: DataFrame of monitoring metrics
         """
         filename = '/query_{component}_metric_{metric}.csv'.format(component=component, metric=metric)
+        #print("Looking for {}".format(filename))
         if os.path.isfile(self.path+"/"+filename):
             df = pd.read_csv(self.path+"/"+filename).T
             #print(df)
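The reworked docstring now matches what the method does: it loads one CSV per component and metric and returns it as a transposed DataFrame. A hedged usage sketch (standalone function; the result path and metric name are hypothetical, the filename pattern is taken from the diff):

import os
import pandas as pd

def load_monitoring_metric(path, metric, component="loading"):
    # Standalone sketch mirroring get_monitoring_metric above: read the per-component
    # CSV produced during monitoring and return it transposed as a DataFrame.
    filename = 'query_{component}_metric_{metric}.csv'.format(component=component, metric=metric)
    filepath = os.path.join(path, filename)
    if os.path.isfile(filepath):
        return pd.read_csv(filepath).T
    return None  # assumption: behaviour when the file is missing is not shown in the diff

# Hypothetical result folder and metric name:
df = load_monitoring_metric('/results/1720800000', metric='total_cpu_memory', component='loading')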

bexhoma/experiments.py (45 changes: 42 additions & 3 deletions)
@@ -1239,17 +1239,38 @@ def show_summary(self):
         print(info)
         evaluate = inspector.inspector(resultfolder)
         evaluate.load_experiment(code=code, silent=True)
+        query_properties = evaluate.get_experiment_query_properties()
+        #print(query_properties)
+        def map_index_to_queryname(numQuery):
+            if numQuery[1:] in query_properties and 'config' in query_properties[numQuery[1:]] and 'title' in query_properties[numQuery[1:]]['config']:
+                return query_properties[numQuery[1:]]['config']['title']
+            else:
+                return numQuery
         #####################
         print("\n### Errors (failed queries)")
-        print(evaluate.get_total_errors().T)
+        df = evaluate.get_total_errors().T
+        num_errors = df.sum().sum()
+        if num_errors > 0:
+            df.index = df.index.map(map_index_to_queryname)
+            print(df)
+        else:
+            print("No errors")
         #####################
         print("\n### Warnings (result mismatch)")
-        print(evaluate.get_total_warnings().T)
+        df = evaluate.get_total_warnings().T
+        num_warnings = df.sum().sum()
+        if num_warnings > 0:
+            df.index = df.index.map(map_index_to_queryname)
+            print(df)
+        else:
+            print("No warnings")
         #####################
         print("\n### Latency of Timer Execution [ms]")
         df = evaluate.get_aggregated_query_statistics(type='latency', name='execution', query_aggregate='Mean')
         if not df is None:
-            print(df.sort_index().T.round(2))
+            df = df.sort_index().T.round(2)
+            df.index = df.index.map(map_index_to_queryname)
+            print(df)
         #####################
         print("\n### Loading [s]")
         times = {}
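The new map_index_to_queryname helper lets show_summary label the error, warning and latency tables with query titles instead of raw indexes like 'Q1'. A self-contained sketch of the mapping (the query_properties structure follows the lookups in the diff; the title and DataFrame content are made up):

import pandas as pd

# Assumed structure, matching the lookups in the helper above; the title is hypothetical.
query_properties = {'1': {'config': {'title': 'Pricing Summary Report'}}}

def map_index_to_queryname(numQuery):
    # 'Q1' -> strip the leading letter and look up the configured title, if any.
    if numQuery[1:] in query_properties and 'config' in query_properties[numQuery[1:]] and 'title' in query_properties[numQuery[1:]]['config']:
        return query_properties[numQuery[1:]]['config']['title']
    else:
        return numQuery

df = pd.DataFrame({'latency_ms': [1234.56, 567.89]}, index=['Q1', 'Q2'])
df.index = df.index.map(map_index_to_queryname)
print(df)  # 'Q1' is replaced by its title, 'Q2' (no title configured) stays as-is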

@@ -1563,6 +1584,12 @@ def evaluate_results(self, pod_dashboard=''):
         cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct stream -e {}'.format(self.code)
         stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
         self.cluster.logger.debug(stdout)
+        cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct loader -e {}'.format(self.code)
+        stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
+        self.cluster.logger.debug(stdout)
+        cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct benchmarker -e {}'.format(self.code)
+        stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
+        self.cluster.logger.debug(stdout)
         # copy logs and yamls to result folder
         #print("Copy configuration and logs", end="", flush=True)
         #directory = os.fsencode(self.path)
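The added calls extend the existing stream evaluation to the loader and benchmarker containers; the same three-command block recurs in two further evaluate_results variants below. If the list of container types keeps growing, a loop would avoid the repetition; a sketch only, not the committed code (helper name and signature are hypothetical, the command string and execute_command_in_pod call are as in the diff):

def transform_benchmarking_metrics(cluster, pod_dashboard, code):
    # Hypothetical helper mirroring the three repeated blocks above: run
    # metrics.evaluation.py once per container type inside the dashboard pod.
    for container_type in ['stream', 'loader', 'benchmarker']:
        command = 'python metrics.evaluation.py -r /results/ -db -ct {ct} -e {code}'.format(ct=container_type, code=code)
        stdin, stdout, stderr = cluster.execute_command_in_pod(command=command, pod=pod_dashboard, container="dashboard")
        cluster.logger.debug(stdout)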

@@ -1923,6 +1950,12 @@ def evaluate_results(self, pod_dashboard=''):
         cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct stream -e {}'.format(self.code)
         stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
         self.cluster.logger.debug(stdout)
+        cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct loader -e {}'.format(self.code)
+        stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
+        self.cluster.logger.debug(stdout)
+        cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct benchmarker -e {}'.format(self.code)
+        stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
+        self.cluster.logger.debug(stdout)
         cmd = {}
         #stdout = self.experiment.cluster.kubectl('cp --container dashboard '+self.path+'/connections.config '+pod_dashboard+':/results/'+str(self.code)+'/connections.config')
         #self.logger.debug('copy config connections.config: {}'.format(stdout))

@@ -2135,6 +2168,12 @@ def evaluate_results(self, pod_dashboard=''):
         cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct stream -e {}'.format(self.code)
         stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
         self.cluster.logger.debug(stdout)
+        cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct loader -e {}'.format(self.code)
+        stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
+        self.cluster.logger.debug(stdout)
+        cmd['transform_benchmarking_metrics'] = 'python metrics.evaluation.py -r /results/ -db -ct benchmarker -e {}'.format(self.code)
+        stdin, stdout, stderr = self.cluster.execute_command_in_pod(command=cmd['transform_benchmarking_metrics'], pod=pod_dashboard, container="dashboard")
+        self.cluster.logger.debug(stdout)
         cmd = {}
         cmd['download_results'] = 'cp {from_file} {to} -c dashboard'.format(from_file=pod_dashboard+':/results/'+str(self.code)+'/', to=self.path+"/")
         self.cluster.kubectl(cmd['download_results'])
(diff for the remaining 4 changed files not loaded)
