"""
This is a sample mozperftest test that we use for testing
the verification process. """
SAMPLE_TEST = """ "use strict";
async function setUp(context) {
context.log.info("setUp example!");
}
async function test(context, commands) {
context.log.info("Test with setUp/tearDown example!");
await commands.measure.start("https://www.sitespeed.io/");
await commands.measure.start("https://www.mozilla.org/en-US/");
}
async function tearDown(context) {
context.log.info("tearDown example!");
}
module.noexport = {};
module.exports = {
setUp,
tearDown,
test,
owner: "Performance Testing Team",
name: "Example",
description: "The description of the example test.",
longDescription: `
This is a longer description of the test perhaps including information
about how it should be run locally or links to relevant information.
`
}; """
DYNAMIC_SAMPLE_CONFIG = """
name: {}
manifest: None
static-only: False
suites:
suite:
description: "Performance tests from the 'suite' folder."
tests:
Example: "Performance test Example from suite."
another_suite:
description: "Performance tests from the 'another_suite' folder."
tests:
Example: "Performance test Example from another_suite." """
SAMPLE_METRICS_CONFIG = """
name: raptor
manifest: "None"
metrics: 'test':
aliases: [t1, t2]
description: a description
matcher: f.*|S.*
static-only: False
suites:
suite:
description: "Performance tests from the 'suite' folder."
tests:
Example: "Performance test Example from another_suite."
another_suite:
description: "Performance tests from the 'another_suite' folder."
tests:
Example: "Performance test Example from another_suite." """
DYNAMIC_METRICS_CONFIG = """
name: raptor
manifest: "None"{}
static-only: False
suites:
suite:
description: "Performance tests from the 'suite' folder."
tests:
Example: "Performance test Example from another_suite."
another_suite:
description: "Performance tests from the 'another_suite' folder."
tests:
Example: "Performance test Example from another_suite." """
with open(perfdocs_sample["config"], "r") as file:
filedata = file.read()
filedata = filedata.replace("Example", "DifferentName") with open(perfdocs_sample["config"], "w", newline="\n") as file:
file.write(filedata)
with open(perfdocs_sample["config"], "r") as file:
filedata = file.read()
filedata = filedata.replace("suite:", "InvalidSuite:") with open(perfdocs_sample["config"], "w", newline="\n") as file:
file.write(filedata)
with mock.patch("perfdocs.verifier.Verifier.validate_yaml", return_value=False):
verifier = Verifier(top_dir) with pytest.raises(Exception):
verifier.validate_tree()
# Check if "File validation error" log is called # and Called with a log inside perfdocs_tree(). assert logger.log.call_count == 2 assert len(logger.mock_calls) == 2
with open(perfdocs_sample["config"], "w", newline="\n") as f:
f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, "")) with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
f.write(manifest)
with open(perfdocs_sample["config"], "w", newline="\n") as f:
f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, "")) with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
f.write(manifest)
with open(perfdocs_sample["config"], "w", newline="\n") as f:
f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions)) with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
f.write(manifest)
with open(perfdocs_sample["config"], "w", newline="\n") as f:
f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, "")) with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
f.write(manifest)
# NOTE(review): fragment of a test body whose enclosing `def` is outside this
# chunk; the loop structure had been fused onto single lines and `is None`
# was written `isNone` (syntactically invalid). Reconstructed here without
# changing any operation.
# Check to make sure that every single framework
# gatherer that has been implemented produces a test list
# in every suite that contains a test with an associated
# manifest.
from perfdocs.gatherer import frameworks

for framework, gatherer in frameworks.items():
    with open(perfdocs_sample["config"], "w", newline="\n") as f:
        f.write(DYNAMIC_SAMPLE_CONFIG.format(framework))

    fg = gatherer(perfdocs_sample["config"], top_dir)
    if getattr(fg, "get_test_list", None) is None:
        # Skip framework gatherers that have not
        # implemented a method to build a test list.
        continue

    # Setup some framework-specific things here if needed
    if framework == "raptor":
        fg._manifest_path = perfdocs_sample["manifest"]["path"]
        fg._get_subtests_from_ini = mock.Mock()
        fg._get_subtests_from_ini.return_value = {
            "Example": perfdocs_sample["manifest"],
        }
    if framework == "talos":
        fg._get_ci_tasks = mock.Mock()
        for suite, suitetests in fg.get_test_list().items():
            assert suite == "Talos Tests"
            assert suitetests
        continue
    if framework == "awsy":
        for suite, suitetests in fg.get_test_list().items():
            assert suite == "Awsy tests"
            assert suitetests
        continue

    for suite, suitetests in fg.get_test_list().items():
        assert suite == "suite"
        for test, manifest in suitetests.items():
            assert test == "Example"
            assert (
                pathlib.Path(manifest["path"])
                == perfdocs_sample["manifest"]["path"]
            )
# NOTE(review): fragment of a test body whose enclosing `def` is outside this
# chunk; imports and loops had been fused onto single lines and
# `is not None` was written `isnotNone` (syntactically invalid).
# Reconstructed here without changing any operation. `fg` and `suites` are
# presumably defined earlier in the enclosing test -- confirm upstream.
from perfdocs.gatherer import frameworks
from perfdocs.generator import Generator
from perfdocs.utils import read_yaml
from perfdocs.verifier import Verifier

# This test is only for raptor
gatherer = frameworks["raptor"]

with open(perfdocs_sample["config"], "w", newline="\n") as f:
    f.write(DYNAMIC_SAMPLE_CONFIG.format("raptor"))

v = Verifier(top_dir)
gn = Generator(v, generate=True, workspace=top_dir)

# Check to make sure that if a test is present under multiple
# suites the urls are generated correctly for the test under
# every suite
for suite, suitetests in fg.get_test_list().items():
    url = fg._descriptions.get(suite)
    assert url is not None
    assert url[0]["name"] == "Example"
    assert url[0]["test_url"] == "Example_url"

# Check that the sections for each suite are generated correctly
for suite_name, suite_details in suites.items():
    gn._verifier._gatherer = mock.Mock(framework_gatherers={"raptor": gatherer})
    section = gn._verifier._gatherer.framework_gatherers[
        "raptor"
    ].build_suite_section(fg, suite_name, suites.get(suite_name)["description"])
    assert suite_name.capitalize() == section[0]
    assert suite_name in section[2]
# NOTE(review): stray non-Python boilerplate (a German website disclaimer)
# was appended to this file, breaking parsing; preserved here as a comment.
# Original text: "Die Informationen auf dieser Webseite wurden nach bestem
# Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
# noch Richtigkeit, noch Qualität der bereit gestellten Informationen
# zugesichert. Bemerkung: Die farbliche Syntaxdarstellung ist noch
# experimentell."
# Translation: "The information on this website was carefully compiled to the
# best of our knowledge. However, neither completeness, correctness, nor
# quality of the provided information is guaranteed. Note: the syntax
# colouring is still experimental."