Commit ce635e3f authored by Zixian Cai's avatar Zixian Cai

Fix pipelines for new tasksets attributes

parent d114b16b
......@@ -41,5 +41,4 @@ def local(file, pipeline, skip_compile, comp_remote):
logger.info("Running tasks specified in file")
revision.run_tasksets(skip_compile)
logger.info("Generating report, compare remote?: {}".format(comp_remote))
report = revision.generate_report()
go_through_pipelines(report, pipeline_names=pipeline.split(","))
go_through_pipelines(revision.tasksets, pipeline_names=pipeline.split(","))
......@@ -15,6 +15,7 @@
import logging
import sys
import crayons
......@@ -27,22 +28,21 @@ class Pipeline:
class LogOutputPipeline(Pipeline):
def process(self, tasksets):
    """Log a human-readable summary of every taskset's results.

    :param tasksets: iterable of taskset objects; each exposes ``results``
        (mapping task name -> dict<metric name, Trails>) and ``comparison``
        (list of comparison specs with ``op1``/``op2`` task names).
    :return: the tasksets, unchanged, so pipelines can be chained.
    """
    for ts in tasksets:
        description = ["", ""]
        for task_name, result in ts.results.items():
            # task_name: str, e.g. "c_O3"
            # result: dict<str, Trails>
            description.append("\n".join(str(x) for x in result.values()))
            description.append("")
        for comp in ts.comparison:
            r1 = ts.results[comp.op1]  # dict<str, Trails>
            r2 = ts.results[comp.op2]  # dict<str, Trails>
            # Compare the two operands metric by metric.
            for metric in r1.keys():
                description.append(self.compare(r1[metric], r2[metric]))
        logger.info("\n".join(description))
    return tasksets
@staticmethod
def compare(r1, r2):
......@@ -73,86 +73,73 @@ class LogOutputPipeline(Pipeline):
class BarplotPipeline(Pipeline):
    """Draw one bar chart per (taskset, metric): mean heights with std
    error bars, one bar per task, sorted by ascending mean."""

    def process(self, tasksets):
        """Plot each taskset's metrics and show the figures.

        :param tasksets: iterable of taskset objects whose ``results`` map
            task name -> dict<metric name, Trails>
        :return: the tasksets, unchanged, so pipelines can be chained.
        """
        def autolabel(rects):
            # Annotate each bar with its height, just above the bar top.
            # ``ax`` is the closed-over axes of the current figure.
            for rect in rects:
                height = rect.get_height()
                xposition = rect.get_x() + rect.get_width() / 2
                yposition = 1.05 * height
                ax.text(xposition, yposition,
                        "{:.3f}".format(height),
                        ha='center', va='bottom')

        # Imported lazily so loading this module does not require a GUI.
        import numpy as np
        import matplotlib
        matplotlib.use('TkAgg')
        import matplotlib.pyplot as plt

        for ts in tasksets:
            tasks = list(ts.results.items())  # [(task name, dict<str, Trails>)]
            if not tasks:
                logger.error("No task")
                sys.exit(1)
            # Assumes every task reports the same metrics; take them from
            # the first task.
            keys = tasks[0][1].keys()
            for key in keys:
                # One figure per metric, bars sorted by ascending mean.
                items = sorted([v[key] for k, v in ts.results.items()],
                               key=lambda x: x.mean)
                n = len(items)
                means = [x.mean for x in items]
                stds = [x.std for x in items]
                labels = [x.name for x in items]
                ind = np.arange(n)
                width = 0.35
                fig, ax = plt.subplots()
                rects = ax.bar(ind, means, width,
                               color='lightblue',
                               yerr=stds,
                               capsize=3)
                ax.set_ylabel(key)
                ax.set_title(ts.name)
                ax.set_xticks(ind)
                ax.set_xticklabels(labels)
                autolabel(rects)
        plt.show()
        return tasksets
class BoxplotPipeline(Pipeline):
    """Draw one box plot per (taskset, metric) from the raw data points,
    one box per task, sorted by ascending mean."""

    def process(self, tasksets):
        """Plot each taskset's metrics as box plots and show the figures.

        :param tasksets: iterable of taskset objects whose ``results`` map
            task name -> dict<metric name, Trails>
        :return: the tasksets, unchanged, so pipelines can be chained.
        """
        # Imported lazily so loading this module does not require a GUI.
        import matplotlib
        matplotlib.use('TkAgg')
        import matplotlib.pyplot as plt

        for ts in tasksets:
            tasks = list(ts.results.items())  # [(task name, dict<str, Trails>)]
            if not tasks:
                logger.error("No task")
                sys.exit(1)
            # Assumes every task reports the same metrics; take them from
            # the first task.
            keys = tasks[0][1].keys()
            for key in keys:
                plt.figure()
                # Boxes sorted by ascending mean of each task's series.
                items = sorted([v[key] for k, v in ts.results.items()],
                               key=lambda x: x.mean)
                labels = [x.name for x in items]
                vectors = [x.raw for x in items]
                plt.boxplot(vectors, labels=labels, patch_artist=True)
        plt.show()
        return tasksets
pipelines = {
......
#!/usr/bin/env python3
# Copyright 2017 The Australian National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Report:
    """Collection of results corresponding to a Revision."""

    def __init__(self, tasksets=None, **kwargs):
        # Fall back to an empty list so iteration in compare() is safe.
        self.tasksets = tasksets if tasksets else []
        # Attach any extra metadata (e.g. revision) as attributes.
        for key, value in kwargs.items():
            setattr(self, key, value)
        self.compare()

    def compare(self):
        """Run the pairwise comparisons declared on each taskset."""
        for ts in self.tasksets:
            for comp in ts.comparison:
                first = ts.results[comp.op1]
                second = ts.results[comp.op2]
                first.callback.compare(second.callback)
                first.t_proc.compare(second.t_proc)
......@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from mubench.models.report import Report
from mubench.models.taskset import load_file
......@@ -40,20 +39,6 @@ class Revision:
"""
pass
def generate_report(self):
    """Generate a report from history data points and store it in the
    database.

    Base-class stub; concrete revisions override this.

    :return: None
    """
    pass
def get_report(self):
    """Fetch a previously generated report from the database.

    Base-class stub; concrete revisions override this.

    :return: None
    """
    pass
class LocalRevision(Revision):
def __init__(self, file, **kwargs):
......@@ -86,8 +71,5 @@ class LocalRevision(Revision):
skipcomp_d[tup[0]] = tup[1].split(',')
return skipcomp_d
def generate_report(self):
    """Wrap this revision's tasksets in a Report tied to this revision."""
    return Report(tasksets=self.tasksets, revision=self)
def __str__(self):
    # Identify the revision by the task file it was loaded from.
    return f"LocalRevision({self.file})"
......@@ -28,8 +28,9 @@ from mubench.conf import settings
from mubench.util import expandenv, dictify, run_in_subproc, ExecutionFailure
from mubench.util import add_path_to_ld_library_path
from mubench.lang import get_lang, Language
from mubench.models.result import Result
from mubench.models.trails import Trails
from mubench.models.callback import CALLBACK_BY_NAME
from collections import defaultdict
logger = logging.getLogger(__name__)
......@@ -153,8 +154,8 @@ class TaskSet:
for task in self.tasks:
if not task.lang_cls.compiled: # interpreted
targets[task] = task.srcfile
else: # need compilation
if task.name in skipcomp_l: # skip compilation -> assume default target
else: # need compilation
if task.name in skipcomp_l: # skip compilation -> assume default target
targets[task] = task.get_default_target()
else:
task_print(task.name, 'compiling...')
......@@ -168,7 +169,8 @@ class TaskSet:
errlog_file = self.output_dir / (task.name + '.log')
e.dump(errlog_file)
task_print(task.name,
crayons.red('error output written to %s' % errlog_file))
crayons.red(
'error output written to %s' % errlog_file))
# run
data = {t: [] for t in targets} # only run tasks that have a target
......@@ -279,8 +281,15 @@ class Task:
self.stats.append(stat_d)
def get_result(self):
    """Aggregate per-iteration stats into one Trails per metric.

    ``self.stats`` is a list of dicts (one per iteration) mapping metric
    name -> measured value.  Transpose it into metric -> list of values,
    then wrap each series in a Trails named "<taskset>:<task> <metric>".

    :return: dict<str, Trails>
    """
    series = defaultdict(list)
    for stat_d in self.stats:
        for metric, value in stat_d.items():
            series[metric].append(value)
    return {metric: Trails("{}:{} {}".format(self.taskset.name,
                                             self.name,
                                             metric), values)
            for metric, values in series.items()}
def __str__(self):
    # A task is identified by its name.
    return self.name
......
......@@ -18,19 +18,19 @@ from mubench.stats import mean, median, std, significant_change, \
significant_decrease, significant_increase
class Trails:
    """
    Trails consists of raw data points and our analysis.

    It can be specified by a combination of Revision and Task.
    """

    def __init__(self, name, data_points):
        """
        :param name: human-readable label, e.g. "<taskset>:<task> <metric>"
        :param data_points: list of raw measurements
        """
        self.name = name
        self.raw = data_points
        # Summary statistics computed once, up front.
        self.mean = mean(data_points)
        self.median = median(data_points)
        self.std = std(data_points)
        # One-line colored summary: "<name> <mean> (±<std>)".
        self.description = "{} {:.9f} (±{:.9f})".format(
            crayons.blue(self.name),
            self.mean,
            self.std)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment