# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/.
import six import json import os import re import shutil import sys from abc import ABCMeta, abstractmethod, abstractproperty from argparse import ArgumentParser from collections import defaultdict
from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException from mozprocess import run_and_wait
here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)
JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build with `ac_add_options --enable-js-shell` or specify it with `--binary`. """.strip()
@six.add_metaclass(ABCMeta)
class Benchmark(object):
    """Abstract base class for a JS shell benchmark.

    Subclasses provide at least ``name`` and ``path`` (see e.g. ``Ares6``)
    and a ``collect_results`` implementation.
    """

    # Perfherder conventions: smaller scores are better, and regressions
    # should generate alerts.
    lower_is_better = True
    should_alert = True

    def _provision_benchmark_script(self):
        # Benchmark sources already present on disk — nothing to do.
        if os.path.isdir(self.path):
            return

        # Some benchmarks may have been downloaded from a fetch task, make
        # sure they get copied over.
        fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
        if fetches_dir and os.path.isdir(fetches_dir):
            fetchdir = os.path.join(fetches_dir, self.name)
            if os.path.isdir(fetchdir):
                shutil.copytree(fetchdir, self.path)

    def run(self):
        self.reset()

        # Update the environment variables
        env = os.environ.copy()
        # NOTE(review): run() appears truncated in this chunk — the actual
        # shell invocation and result collection are not visible here.
class RunOnceBenchmark(Benchmark):
    """Benchmark executed a single time, yielding one sample per subtest."""

    def collect_results(self):
        # The test runs exactly once, so every value list holds a single
        # sample; the suite value is the integer sum of all raw scores.
        total = 0
        for _bench, per_subtest in self.scores.items():
            for label, samples in per_subtest.items():
                # pylint --py3k W1619
                avg = sum(samples) / len(samples)
                self.suite["subtests"].append(
                    {"name": "{}-{}".format(self.name, label), "value": avg}
                )
                total += int(sum(samples))
        self.suite["value"] = total
class Ares6(Benchmark):
    """The ARES-6 benchmark from WebKit's PerformanceTests suite."""

    name = "ares6"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "ARES-6")
    unit = "ms"

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        overall = None
        for _bench, per_subtest in self.scores.items():
            for label, samples in per_subtest.items():
                # pylint --py3k W1619
                avg = sum(samples) / len(samples)
                self.suite["subtests"].append(
                    {
                        "lowerIsBetter": self.subtests_lower_is_better,
                        "name": "{}-{}".format(self.name, label),
                        "value": avg,
                    }
                )
                # The benchmark's own "mean" subtest doubles as the suite value.
                if label == "mean":
                    overall = avg
        self.suite["value"] = overall
def collect_results(self):
    """Aggregate single-run scores into Perfherder subtests.

    The subtest labelled ``score`` supplies the suite-level value.
    """
    # NOTE: for this benchmark we run the test once, so we have a single value array
    final_score = None
    for _bench, per_subtest in self.scores.items():
        for label, samples in per_subtest.items():
            # pylint --py3k W1619
            avg = sum(samples) / len(samples)
            self.suite["subtests"].append(
                {"name": "{}-{}".format(self.name, label), "value": avg}
            )
            if label == "score":
                final_score = avg
    self.suite["value"] = final_score
# NOTE(review): this looks like the tail of a driver function — it uses what
# appear to be parameters (`benchmark`, `binary`, `extra_args`, `perfherder`)
# and ends with `return`; its `def` line seems to have been lost from this
# chunk. Confirm against the full file.
bench = all_benchmarks.get(benchmark)(
    binary, args=extra_args, shell_name=perfherder
)
res = bench.run()
if perfherder:
    # Emit the Perfherder payload so CI log parsing can pick it up.
    print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
return res
def get_parser():
    """Build the command-line argument parser for the benchmark runner."""
    p = ArgumentParser()
    p.add_argument(
        "benchmark",
        choices=list(all_benchmarks),
        help="The name of the benchmark to run.",
    )
    p.add_argument(
        "-b",
        "--binary",
        default=None,
        help="Path to the JS shell binary to use.",
    )
    p.add_argument(
        "--arg",
        dest="extra_args",
        action="append",
        default=None,
        help="Extra arguments to pass to the JS shell.",
    )
    p.add_argument(
        "--perfherder",
        default=None,
        help="Log PERFHERDER_DATA to stdout using the given suite name.",
    )
    return p
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.