#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# print(..., file=...) below needs the function form under Python 2 as well.
from __future__ import print_function

import math
import os
import platform
import posixpath
import shlex
import subprocess
import sys
import traceback

read_input = input
if sys.version_info.major == 2:
    read_input = raw_input


def add_tests_dir_to_path():
    from os.path import dirname, exists, join, realpath

    # This script lives in js/src/jit-test; put js/src/tests on sys.path so
    # the lib helpers imported below are reachable.
    js_src_dir = dirname(dirname(realpath(sys.argv[0])))
    assert exists(join(js_src_dir, "jsapi.h"))
    sys.path.insert(0, join(js_src_dir, "tests"))


add_tests_dir_to_path()

from lib import jittests
from lib.tests import get_cpu_count, get_jitflags, valid_jitflags


def choose_item(jobs, max_items, display):
    job_count = len(jobs)

    # Don't present a choice if there are too many tests
    if job_count > max_items:
        raise Exception("Too many jobs.")

    for i, job in enumerate(jobs, 1):
        print("{}) {}".format(i, display(job)))

    item = read_input("Which one:\n")
    try:
        item = int(item)
        if item > job_count or item < 1:
            raise Exception("Input isn't between 1 and {}".format(job_count))
    except ValueError:
        raise Exception("Unrecognized input")

    return jobs[item - 1]
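

# Usage sketch (values hypothetical): choose_item(tests, 20, str) prints a
# numbered menu of up to 20 entries, reads a 1-based index from stdin, and
# returns the selected entry; invalid input raises instead of re-prompting.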


def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    import argparse

    op = argparse.ArgumentParser(description="Run jit-test JS shell tests")
op.add_argument( "-s", "--show-cmd",
dest="show_cmd",
action="store_true",
help="show js shell command run",
)
op.add_argument( "-f", "--show-failed-cmd",
dest="show_failed",
action="store_true",
help="show command lines of failed tests",
)
op.add_argument( "-o", "--show-output",
dest="show_output",
action="store_true",
help="show output from js shell",
)
op.add_argument( "-F", "--failed-only",
dest="failed_only",
action="store_true",
help="if --show-output is given, only print output for"" failed tests",
)
op.add_argument( "--no-show-failed",
dest="no_show_failed",
action="store_true",
help="don't print output for failed tests"" (no-op with --show-output)",
)
op.add_argument( "-x", "--exclude",
dest="exclude",
default=[],
action="append",
help="exclude given test dir or path",
)
op.add_argument( "--exclude-from",
dest="exclude_from",
type=str,
help="exclude each test dir or path in FILE",
)
op.add_argument( "--slow",
dest="run_slow",
action="store_true",
help="also run tests marked as slow",
)
op.add_argument( "--no-slow",
dest="run_slow",
action="store_false",
help="do not run tests marked as slow (the default)",
)
op.add_argument( "-t", "--timeout",
dest="timeout",
type=float,
default=150.0,
help="set test timeout in seconds",
)
op.add_argument( "--no-progress",
dest="hide_progress",
action="store_true",
help="hide progress bar",
)
op.add_argument( "--tinderbox",
dest="format",
action="store_const",
const="automation",
help="Use automation-parseable output format",
)
op.add_argument( "--format",
dest="format",
default="none",
choices=("automation", "none"),
help="Output format (default %(default)s).",
)
op.add_argument( "--args",
dest="shell_args",
metavar="ARGS",
default="",
help="extra args to pass to the JS shell",
)
op.add_argument( "--feature-args",
dest="feature_args",
metavar="ARGS",
default="",
help="even more args to pass to the JS shell " "(for compatibility with jstests.py)",
)
op.add_argument( "-w", "--write-failures",
dest="write_failures",
metavar="FILE",
help="Write a list of failed tests to [FILE]",
)
op.add_argument( "-C", "--check-output",
action="store_true",
dest="check_output",
help="Run tests to check output for different jit-flags",
)
op.add_argument( "-r", "--read-tests",
dest="read_tests",
metavar="FILE",
help="Run test files listed in [FILE]",
)
op.add_argument( "-R", "--retest",
dest="retest",
metavar="FILE",
help="Retest using test list file [FILE]",
)
op.add_argument( "-g", "--debug",
action="store_const",
const="gdb",
dest="debugger",
help="Run a single test under the gdb debugger",
)
op.add_argument( "-G", "--debug-rr",
action="store_const",
const="rr",
dest="debugger",
help="Run a single test under the rr debugger",
)
op.add_argument( "--debugger", type=str, help="Run a single test under the specified debugger"
)
op.add_argument( "--valgrind",
dest="valgrind",
action="store_true",
help="Enable the |valgrind| flag, if valgrind is in $PATH.",
)
op.add_argument( "--unusable-error-status",
action="store_true",
help="Ignore incorrect exit status on tests that should return nonzero.",
)
op.add_argument( "--valgrind-all",
dest="valgrind_all",
action="store_true",
help="Run all tests with valgrind, if valgrind is in $PATH.",
)
op.add_argument( "--write-failure-output",
dest="write_failure_output",
action="store_true",
help="With --write-failures=FILE, additionally write the" " output of failed tests to [FILE]",
)
op.add_argument( "--jitflags",
dest="jitflags",
default="none",
choices=valid_jitflags(),
help="IonMonkey option combinations (default %(default)s).",
)
op.add_argument( "--ion",
dest="jitflags",
action="store_const",
const="ion",
help="Run tests once with --ion-eager and once with" " --baseline-eager (equivalent to --jitflags=ion)",
)
op.add_argument( "--no-xdr",
dest="use_xdr",
action="store_false",
help="Whether to disable caching of self-hosted parsed content in XDR format.",
)
op.add_argument( "--tbpl",
dest="jitflags",
action="store_const",
const="all",
help="Run tests with all IonMonkey option combinations" " (equivalent to --jitflags=all)",
)
op.add_argument( "-j", "--worker-count",
dest="max_jobs",
type=int,
default=max(1, get_cpu_count()),
help="Number of tests to run in parallel (default %(default)s).",
)
op.add_argument( "--remote", action="store_true", help="Run tests on a remote device"
)
op.add_argument( "--deviceIP",
action="store",
type=str,
dest="device_ip",
help="IP address of remote device to test",
)
op.add_argument( "--devicePort",
action="store",
type=int,
dest="device_port",
default=20701,
help="port of remote device to test",
)
op.add_argument( "--deviceSerial",
action="store",
type=str,
dest="device_serial",
default=None,
help="ADB device serial number of remote device to test",
)
op.add_argument( "--remoteTestRoot",
dest="remote_test_root",
action="store",
type=str,
default="/data/local/tmp/test_root",
help="The remote directory to use as test root"" (e.g. %(default)s)",
)
op.add_argument( "--localLib",
dest="local_lib",
action="store",
type=str,
help="The location of libraries to push -- preferably"" stripped",
)
op.add_argument( "--repeat", type=int, default=1, help="Repeat tests the given number of times."
)
op.add_argument("--this-chunk", type=int, default=1, help="The test chunk to run.")
op.add_argument( "--total-chunks", type=int, default=1, help="The total number of test chunks."
)
op.add_argument( "--ignore-timeouts",
dest="ignore_timeouts",
metavar="FILE",
help="Ignore timeouts of tests listed in [FILE]",
)
op.add_argument( "--retry-remote-timeouts",
dest="timeout_retry",
type=int,
default=1,
help="Number of time to retry timeout on remote devices",
)
op.add_argument( "--test-reflect-stringify",
dest="test_reflect_stringify",
help="instead of running tests, use them to test the " "Reflect.stringify code in specified file",
) # --enable-webrender is ignored as it is not relevant for JIT # tests, but is required for harness compatibility.
op.add_argument( "--enable-webrender",
action="store_true",
dest="enable_webrender",
default=False,
help=argparse.SUPPRESS,
)
op.add_argument("js_shell", metavar="JS_SHELL", help="JS shell to run tests with")
op.add_argument( "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
)
op.add_argument( "--show-slow",
action="store_true",
help="Show tests taking longer than a minimum time (in seconds).",
)
op.add_argument( "--slow-test-threshold",
type=float,
default=5.0,
help="Time in seconds a test can take until it is considered slow " "(default %(default)s).",
)
ifnot (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)): if (
platform.system() != "Windows" or os.path.isfile(js_shell) ornot os.path.isfile(js_shell + ".exe") ornot os.access(js_shell + ".exe", os.X_OK)
):
op.error("shell is not executable: " + js_shell)
    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip("\n")))
            f.close()
        except IOError:
            if options.retest:
                # A missing failure list on --retest just means: run everything.
                read_all = True
            else:
                sys.stderr.write(
                    "Exception thrown trying to read test file"
                    " '{}'\n".format(options.read_tests)
                )
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude_from:
        with open(options.exclude_from) as fh:
            for line in fh:
                line_exclude = line.strip()
                # Skip comment and blank lines in the exclusion file.
                if not line_exclude.startswith("#") and len(line_exclude):
                    options.exclude.append(line_exclude)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]
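        # Worked example (hypothetical numbers): 10 tests with --total-chunks=3
        # gives tests_per_chunk = ceil(10 / 3.0) = 4.0, so chunk 1 runs [0:4],
        # chunk 2 runs [4:8], and chunk 3 runs the remainder [8:10].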

    if not test_list:
        print(
            "No tests found matching command line arguments after filtering.",
            file=sys.stderr,
        )
        sys.exit(0)

    # The full test list is ready. Now create copies for each JIT configuration.
    test_flags = get_jitflags(options.jitflags)
    test_list = [_ for test in test_list for _ in test.copy_variants(test_flags)]

    job_list = (test for test in test_list)
    job_count = len(test_list)
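
    # Note on the expansion above: get_jitflags() maps a name like "ion" to a
    # list of flag sets (per the --ion help text: one run with --ion-eager and
    # one with --baseline-eager), and copy_variants() clones each test once
    # per flag set, so job_count counts every test/flag-set combination.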

    if options.repeat:

        def repeat_copy(job_list_generator, repeat):
            job_list = list(job_list_generator)
            for i in range(repeat):
                for test in job_list:
                    if i == 0:
                        # First pass: yield the original test objects.
                        yield test
                    else:
                        # Later passes: yield fresh copies so per-run state is
                        # not shared between repeats.
                        yield test.copy()

        job_list = repeat_copy(job_list, options.repeat)
        job_count *= options.repeat