Commit 9dd7d2ed authored by Zixian Cai

Support comparing local results

parent 394d09e1
@@ -24,13 +24,12 @@ logger = logging.getLogger(__name__)
 @click.command()
 @click.argument('file', type=click.Path(exists=True))
-@click.argument('keywords', nargs=-1)
 @click.option('--comp_remote', is_flag=True, default=False)
-def local(file, keywords, comp_remote):
+def local(file, comp_remote):
     logger.info("Constructing a LocalRevision")
     revision = LocalRevision(file)
     logger.info("Running tasks specified in file")
-    revision.run_taskset(*keywords)
+    revision.run_tasksets()
     logger.info("Generating report, compare remote?: {}".format(comp_remote))
     report = revision.generate_report()
     report_pipelines = {
...
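
With the `keywords` argument gone, a `local` run now always executes every task set in the file. A minimal sketch of exercising the new interface in-process via click's test runner; 'tasks.yml' is a placeholder path, and `local` is assumed importable from this CLI module:

    from click.testing import CliRunner

    runner = CliRunner()
    # only the task file and the optional --comp_remote flag remain
    result = runner.invoke(local, ['tasks.yml', '--comp_remote'])
    print(result.output)
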
@@ -15,7 +15,6 @@
 import logging
-from mubench.models.result import Result
 logger = logging.getLogger(__name__)
@@ -28,30 +27,14 @@ class Pipeline:
 class LogOutputPipeline(Pipeline):
     def process(self, report):
         logger.info("Report for Revision: {}".format(report.revision))
-        for ts, data in report.results.items():
-            print('-' * 8 + ' Task set %s ' % ts.name + '-' * 8)
-            if ts.callback['name'] == 'clock':
-                proc_fnc = process_clock_callback_output
-            else:
-                raise NotImplementedError(
-                    'output processing function not defined for %(name)s callback' % ts.callback)
-            for task, output_d in data.items():
-                cb_dpts = list(map(proc_fnc, output_d))
-                proc_dpts = list(map(process_subproc_output, output_d))
-                print('%s:' % task)
-                print('%(name)s callback result:' % ts.callback, end=' ')
-                res = Result(cb_dpts)
-                print('%.9fs (± %.9fs)' % (res.mean, res.std))
-                print('subprocess stat:', end=' ')
-                res = Result(proc_dpts)
-                print('%.9fs (± %.9fs)' % (res.mean, res.std))
-                print()
+        for ts in report.tasksets:
+            logger.info("TaskSet {}:".format(ts.name))
+            for task_name, result in ts.results.items():
+                logger.info("\n".join([
+                    "Task {}:".format(task_name),
+                    "Measurement inside programs:",
+                    "{}".format(result.callback),
+                    "",
+                    "Measurement including startup time:",
+                    "{}".format(result.t_proc)
+                ]))
         return report
-
-def process_subproc_output(output_d):
-    return output_d['t_proc']
-
-def process_clock_callback_output(output_d):
-    return float(output_d['stdout'].split()[-1])  # take only the last line
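
Since `Result.__str__` (below) joins the accumulated `description` lines, each task now logs a block roughly like the following; numbers are invented for illustration:

    Task cpython:
    Measurement inside programs:
    Mean: 0.512345678
    Median: 0.511000000
    Std: 0.001234567

    Measurement including startup time:
    Mean: 0.734567890
    Median: 0.733000000
    Std: 0.002345678
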
@@ -19,7 +19,20 @@ class Report:
     Collection of results corresponding to a Revision
     """

-    def __init__(self, results, **kwargs):
-        self.results = results
+    def __init__(self, tasksets=None, **kwargs):
+        if tasksets:
+            self.tasksets = tasksets
+        else:
+            self.tasksets = []
         for k in kwargs:
             setattr(self, k, kwargs[k])
+        self.compare()
+
+    def compare(self):
+        for ts in self.tasksets:
+            for comp in ts.comparison:
+                op1 = comp.op1
+                op2 = comp.op2
+                ts.results[op1].callback.compare(ts.results[op2].callback)
+                ts.results[op1].t_proc.compare(ts.results[op2].t_proc)
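
Comparison pairs come from each task set's `comparison` list; the diff only shows that an entry carries `op1` and `op2`, both of which must be keys of `ts.results`. A shape sketch with invented task names:

    from types import SimpleNamespace

    comp = SimpleNamespace(op1='pypy', op2='cpython')
    # Report.compare() then runs, for each such entry:
    #   ts.results['pypy'].callback.compare(ts.results['cpython'].callback)
    #   ts.results['pypy'].t_proc.compare(ts.results['cpython'].t_proc)
    # i.e. op1 is measured against op2 as the baseline.
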
@@ -23,20 +23,41 @@ class Result:
     It can be specified by a combination of Revision and Task.
     """

-    def __init__(self, data_points):
+    def __init__(self, data_points, name):
+        self.name = name
         self.raw = data_points
         self.mean = mean(data_points)
         self.median = median(data_points)
         self.std = std(data_points)
+        self.description = [
+            "Mean: {0:.9f}".format(self.mean),
+            "Median: {0:.9f}".format(self.median),
+            "Std: {0:.9f}".format(self.std)
+        ]

     def compare(self, prev_res):
         prev_mean = prev_res.mean
         decreased = significant_decrease(prev_mean, self.raw)
         increased = significant_increase(prev_mean, self.raw)
         changed = significant_change(prev_mean, self.raw)
-        return decreased, changed, increased
+        if self.mean < prev_mean:
+            ratio = "{0:.2f} times faster".format(prev_mean / self.mean)
+        else:
+            ratio = "{0:.2f} times slower".format(self.mean / prev_mean)
+        self.description.append(
+            "Compared with {}, {:.4%}, {}".format(
+                prev_res.name,
+                self.mean / prev_mean,
+                ratio
+            ))
+        if decreased:
+            self.description.append("Significantly decreased")
+        if changed:
+            self.description.append("Significantly changed")
+        if increased:
+            self.description.append("Significantly increased")
+        if not any([decreased, changed, increased]):
+            self.description.append("Not enough evidence to determine change")

     def __str__(self):
-        return "Mean: {}\nMedian: {}\nStd: {}".format(self.mean,
-                                                      self.median,
-                                                      self.std)
+        return "\n".join(self.description)
@@ -63,27 +63,16 @@ class LocalRevision(Revision):
         super().__init__(**kwargs)
         self.file = file
         self.tasksets = TaskSet.from_file(self.file)
-        self.results = {}

-    def run_taskset(self, *keywords):
+    def run_tasksets(self):
         """
         Run tasks and store the result as attribute
         """
-        """
-        self.results is in the form:
-        {TaskSet: {
-            taskname: [{
-                'stdout': str,
-                'stderr': str,
-                't_proc': float}]
-            }
-        }
-        """
         for ts in self.tasksets:
-            self.results[ts] = ts.run(*keywords)
+            ts.run()

     def generate_report(self):
-        return Report(revision=self, results=self.results)
+        return Report(tasksets=self.tasksets, revision=self)

     def __str__(self):
         return "LocalRevision({})".format(self.file)
@@ -17,8 +17,9 @@ import logging
 from mubench import SUITE_DIR
 from mubench.lang import get_lang
 from mubench.util import expandenv
 from mubench.util import dictify
+from mubench.models.result import Result
+from types import SimpleNamespace
 logger = logging.getLogger(__name__)
@@ -36,7 +37,8 @@ class Task:
         self.env.update(conf.get('environ', {}))
         self.env['MUBENCH_TASK_NAME'] = name
         self.output_dir = taskset.output_dir
+        self.data_callback = []
+        self.data_t_proc = []
         # benchmark
         self.benchmark = taskset.benchmark
@@ -78,8 +80,22 @@ class Task:
         return res

     def add_datapoint(self, stdout, stderr, t_proc):
-        pass
+        self.data_callback.append(float(stdout.split()[-1]))
+        self.data_t_proc.append(float(t_proc))
+
+    def aggregate_datapoint(self):
+        self.result_callback = Result(self.data_callback,
+                                      "{}:{} callback".format(self.taskset.name,
+                                                              self.name)
+                                      )
+        self.result_t_proc = Result(self.data_t_proc,
+                                    "{}:{} t_proc".format(self.taskset.name,
+                                                          self.name)
+                                    )
+
+    def get_result(self):
+        return SimpleNamespace(callback=self.result_callback,
+                               t_proc=self.result_t_proc)

     def __str__(self):
         return self.name
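
The per-task life cycle this enables, assuming a constructed `Task` instance `task` and invented values; the last whitespace-separated token of stdout is taken as the in-program measurement:

    task.add_datapoint(stdout='elapsed: 0.512\n', stderr='', t_proc=0.734)
    task.add_datapoint(stdout='elapsed: 0.498\n', stderr='', t_proc=0.721)
    task.aggregate_datapoint()   # wraps both series in named Result objects
    res = task.get_result()
    print(res.callback)          # stats for the in-program clock
    print(res.t_proc)            # stats including interpreter startup
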
@@ -51,11 +51,11 @@ class TaskSet:
         self.comparison = []

         # environ
-        self.env = os.environ.copy()  # base on os.environ
-        self.env.update(getattr(settings, 'ENVIRON', {}))  # local settings
-        self.env['MUBENCH_TASKSET_NAME'] = name  # taskset name
-        ts_env = kwds['env']  # taskset definitions
-        for v in ts_env:  # first expand the environs (based on what's been defined so far)
+        self.env = os.environ.copy()  # base on os.environ
+        self.env.update(getattr(settings, 'ENVIRON', {}))  # local settings
+        self.env['MUBENCH_TASKSET_NAME'] = name  # taskset name
+        ts_env = kwds['env']  # taskset definitions
+        for v in ts_env:  # first expand the environs (based on what's been defined so far)
             ts_env[v] = expandenv(ts_env[v], self.env)
         self.env.update(ts_env)
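
The layering order matters here: values in a task-set's `env` block are expanded against everything already merged (process environment, local settings, `MUBENCH_TASKSET_NAME`) before being merged back in. A sketch with an invented variable:

    # with self.env already containing MUBENCH_TASKSET_NAME='hello',
    # a task-set definition such as
    #     ts_env = {'RESULT_DIR': '$HOME/results/$MUBENCH_TASKSET_NAME'}
    # is expanded by expandenv() against self.env first, so RESULT_DIR
    # becomes e.g. '/home/user/results/hello' before the final update().
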
@@ -135,7 +135,11 @@ class TaskSet:
         with self.resfile.open('w') as fp:
             json.dump(record, fp, indent=2, separators=(', ', ': '))
-        self.set_result(data)
+        for task in tasks:
+            task.aggregate_datapoint()
+        self.results = {task.name: task.get_result() for task in tasks}
         return tasks

     @staticmethod
     def from_yaml(yaml_s, run_dir):
...
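
After `run()`, a task set therefore exposes its results keyed by task name, which is the shape `Report.compare()` and `LogOutputPipeline` rely on (names invented):

    # ts.results == {
    #     'cpython': SimpleNamespace(callback=Result(...), t_proc=Result(...)),
    #     'pypy':    SimpleNamespace(callback=Result(...), t_proc=Result(...)),
    # }
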