# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import contextlib
import os
import pathlib
import shutil
import tempfile
from unittest import mock

import mozunit
import pytest

from tryselect.selectors.perf import (
    MAX_PERF_TASKS,
    Apps,
    InvalidCategoryException,
    InvalidRegressionDetectorQuery,
    PerfParser,
    Platforms,
    Suites,
    Variants,
    run,
)
from tryselect.selectors.perf_preview import plain_display
from tryselect.selectors.perfselector.classification import (
    check_for_live_sites,
    check_for_profile,
)
from tryselect.selectors.perfselector.perfpushinfo import PerfPushInfo
here = os.path.abspath(os.path.dirname(__file__))
FTG_SAMPLE_PATH = pathlib.Path(here, "full-task-graph-perf-test.json")
# The TEST_VARIANTS, and TEST_CATEGORIES are used to force
# a particular set of categories to show up in testing. Otherwise,
# every time someone adds a category, or a variant, we'll need
# to redo all the category counts. The platforms, and apps are
# not forced because they change infrequently.
TEST_VARIANTS = {
    # Bug 1837058 - Switch this back to Variants.NO_FISSION when
    # the default flips to fission on android
    Variants.FISSION.value: {
        "query": "'nofis",
        "negation": "!nofis",
        "platforms": [Platforms.ANDROID.value],
        "apps": [Apps.FENIX.value, Apps.GECKOVIEW.value],
    },
    Variants.BYTECODE_CACHED.value: {
        "query": "'bytecode",
        "negation": "!bytecode",
        "platforms": [Platforms.DESKTOP.value],
        "apps": [Apps.FIREFOX.value],
    },
    Variants.LIVE_SITES.value: {
        "query": "'live",
        "negation": "!live",
        "restriction": check_for_live_sites,
        "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
        "apps": list(PerfParser.apps.keys()),
    },
    Variants.PROFILING.value: {
        "query": "'profil",
        "negation": "!profil",
        "restriction": check_for_profile,
        "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
        "apps": [Apps.FIREFOX.value, Apps.GECKOVIEW.value, Apps.FENIX.value],
    },
    Variants.SWR.value: {
        "query": "'swr",
        "negation": "!swr",
        "platforms": [Platforms.DESKTOP.value],
        "apps": [Apps.FIREFOX.value],
    },
}
# NOTE(review): the statements below appear to be the truncated body of a
# parametrized category-expansion test whose `def` line (and its
# parametrize decorator) was lost when this file was mangled. The names
# `category_options`, `expected_counts`, `missing`, `unique_categories`,
# and `call_counts` look like test parameters — confirm against the
# upstream file and restore the missing function header.

# Expand the categories, then either check if the unique_categories,
# exist or are missing from the categories
expanded_cats = PerfParser.get_categories(**category_options)

assert len(expanded_cats) == expected_counts
assert not any(expanded_cats.get(ucat, None) is not None for ucat in missing)
assert all(
    expanded_cats.get(ucat, None) is not None for ucat in unique_categories.keys()
)

# Ensure that the queries are as expected
for cat_name, cat_query in unique_categories.items():
    # Don't use get here because these fields should always exist
    assert cat_query == expanded_cats[cat_name]["queries"]

non_shippable_count = 0
for cat_name in expanded_cats:
    queries = str(expanded_cats[cat_name].get("queries", None))
    if "!shippable !nightlyasrelease" in queries and "'shippable" not in queries:
        non_shippable_count += 1

assert non_shippable_count == call_counts
@pytest.mark.parametrize(
    "options, call_counts, log_ind, expected_log_message",
    [
        (
            {},
            [10, 2, 2, 10, 2, 1],
            2,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try&framework=13\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=revision&newProject=try&newRevision=revision&framework=13\n"
            ),
        ),
        (
            {"query": "'Pageload 'linux 'firefox"},
            [10, 2, 2, 10, 2, 1],
            2,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try&framework=13\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=revision&newProject=try&newRevision=revision&framework=13\n"
            ),
        ),
        (
            {"cached_revision": "cached_base_revision"},
            [10, 1, 1, 10, 2, 0],
            2,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=cached_base_revision&newRev=revision&"
                "baseRepo=try&newRepo=try&framework=13\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=cached_base_revision&newProject=try&newRevision=revision&framework=13\n"
            ),
        ),
        (
            {"dry_run": True},
            [10, 1, 1, 4, 2, 0],
            2,
            (
                "If you need any help, you can find us in the #perf-help Matrix channel:\n"
                "https://matrix.to/#/#perf-help:mozilla.org\n"
            ),
        ),
        (
            {"full": True},
            [1, 2, 2, 8, 2, 1],
            0,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try&framework=1\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=revision&newProject=try&newRevision=revision&framework=1\n"
            ),
        ),
        (
            {"full": True, "query": "'shippable !32 speedometer 'firefox"},
            [1, 2, 2, 8, 2, 1],
            0,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try&framework=1\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=revision&newProject=try&newRevision=revision&framework=1\n"
            ),
        ),
        (
            {"single_run": True},
            [10, 1, 1, 4, 2, 0],
            2,
            (
                "If you need any help, you can find us in the #perf-help Matrix channel:\n"
                "https://matrix.to/#/#perf-help:mozilla.org\n"
            ),
        ),
        (
            {"detect_changes": True},
            [11, 2, 2, 10, 2, 1],
            2,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try&framework=13\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=revision&newProject=try&newRevision=revision&framework=13\n"
            ),
        ),
        (
            {"tests": ["amazon"]},
            [7, 2, 2, 10, 2, 1],
            2,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try&framework=13\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=revision&newProject=try&newRevision=revision&framework=13\n"
            ),
        ),
        (
            {"tests": ["amazon"], "alert": "000"},
            [0, 2, 2, 9, 2, 1],
            1,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try&framework=1\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=revision&newProject=try&newRevision=revision&framework=1\n"
            ),
        ),
        (
            {"tests": ["amazon"], "full": True},
            [1, 2, 2, 8, 2, 1],
            0,
            (
                "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
                "here once the tests are complete (ensure you select the right framework):\n"
                " https://perf.compare/compare-results?"
                "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try&framework=1\n\n"
                " The old comparison tool is still available at this URL:\n"
                " https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
                "Revision=revision&newProject=try&newRevision=revision&framework=1\n"
            ),
        ),
    ],
)
@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
def test_full_run(options, call_counts, log_ind, expected_log_message):
    with mock.patch("tryselect.selectors.perf.push_to_try") as ptt, mock.patch(
        "tryselect.selectors.perf.run_fzf"
    ) as fzf, mock.patch(
        "tryselect.selectors.perf.get_repository_object", new=mock.MagicMock()
    ), mock.patch(
        "tryselect.selectors.perf.LogProcessor.revision",
        new_callable=mock.PropertyMock,
        return_value="revision",
    ) as logger, mock.patch(
        "tryselect.selectors.perf.PerfParser.check_cached_revision",
    ) as ccr, mock.patch(
        "tryselect.selectors.perf.PerfParser.save_revision_treeherder"
    ) as srt, mock.patch(
        "tryselect.selectors.perf.print",
    ) as perf_print, mock.patch(
        "tryselect.selectors.perf.PerfParser.set_categories_for_test"
    ) as tests_mock, mock.patch(
        "tryselect.selectors.perf.requests"
    ) as requests_mock:
        # NOTE(review): `TASKS` is not defined in this copy of the file —
        # presumably a module-level fixture lost in the mangling; confirm
        # against the upstream file.
        fzf_side_effects = [
            ["", ["Benchmarks linux"]],
            ["", TASKS],
            ["", TASKS],
            ["", TASKS],
            ["", TASKS],
            ["", TASKS],
            ["", TASKS],
            ["", TASKS],
            ["", TASKS],
            ["", TASKS],
            ["", ["Perftest Change Detector"]],
        ]
        # Number of side effects for fzf should always be greater than
        # or equal to the number of calls expected
        assert len(fzf_side_effects) >= call_counts[0]
        # NOTE(review): the remainder of this test body (wiring the fzf side
        # effects, calling run(), and asserting on call counts / log output)
        # appears to have been truncated in this copy of the file.
@pytest.mark.parametrize(
    "query, should_fail",
    [
        (
            {
                "query": {
                    # Raptor has all variants available so it
                    # should fail on this category
                    "raptor": ["browsertime 'live 'no-fission"],
                }
            },
            True,
        ),
        (
            {
                "query": {
                    # Awsy has no variants defined so it shouldn't fail
                    # on a query like this
                    "awsy": ["browsertime 'live 'no-fission"],
                }
            },
            False,
        ),
    ],
)
def test_category_rules(query, should_fail):
    # Set the categories, and variants to expand
    PerfParser.categories = {"test-live": query}
    PerfParser.variants = TEST_VARIANTS

    if should_fail:
        with pytest.raises(InvalidCategoryException):
            PerfParser.run_category_checks()
    else:
        assert PerfParser.run_category_checks()

    # Reset the categories, and variants to expand
    # NOTE(review): `TEST_CATEGORIES` is referenced here but not defined in
    # this copy of the file — presumably lost in the mangling; confirm
    # against the upstream file.
    PerfParser.categories = TEST_CATEGORIES
    PerfParser.variants = TEST_VARIANTS
@pytest.mark.parametrize(
    "apk_name, apk_content, should_fail, failure_message",
    [
        (
            "real-file",
            "file-content",
            False,
            None,
        ),
        ("bad-file", None, True, "Path does not exist:"),
    ],
)
def test_apk_upload(apk_name, apk_content, should_fail, failure_message):
    with mock.patch("tryselect.selectors.perf.subprocess") as _, mock.patch(
        "tryselect.selectors.perf.shutil"
    ) as _:
        temp_dir = None
        try:
            temp_dir = tempfile.mkdtemp()
            sample_apk = pathlib.Path(temp_dir, apk_name)

            # Only create the file when content is provided; the "bad-file"
            # case relies on the path not existing.
            if apk_content is not None:
                with sample_apk.open("w") as f:
                    f.write(apk_content)

            if should_fail:
                with pytest.raises(Exception) as exc_info:
                    PerfParser.setup_apk_upload("browsertime", str(sample_apk))
                assert failure_message in str(exc_info)
            else:
                PerfParser.setup_apk_upload("browsertime", str(sample_apk))
        finally:
            # Always clean up the temporary directory, even on failure.
            if temp_dir is not None:
                shutil.rmtree(temp_dir)
@pytest.mark.parametrize(
    "total_tasks, options, call_counts, expected_log_message, expected_failure",
    [
        (
            MAX_PERF_TASKS + 1,
            {},
            [1, 0, 0, 1],
            (
                "\n\n----------------------------------------------------------------------------------------------\n"
                f"You have selected {MAX_PERF_TASKS+1} total test runs! (selected tasks({MAX_PERF_TASKS+1}) * rebuild"
                f" count(1) \nThese tests won't be triggered as the current maximum for a single ./mach try "
                f"perf run is {MAX_PERF_TASKS}. \nIf this was unexpected, please file a bug in Testing :: Performance."
                "\n----------------------------------------------------------------------------------------------\n\n"
            ),
            True,
        ),
        (
            MAX_PERF_TASKS,
            {"full": True},
            [9, 0, 0, 8],
            (
                "For more information on the performance tests, see our "
                "PerfDocs here:\nhttps://firefox-source-docs.mozilla.org/testing/perfdocs/"
            ),
            False,
        ),
        (
            int((MAX_PERF_TASKS + 2) / 2),
            {
                "full": True,
                "try_config_params": {"try_task_config": {"rebuild": 2}},
            },
            [1, 0, 0, 1],
            (
                "\n\n----------------------------------------------------------------------------------------------\n"
                f"You have selected {int((MAX_PERF_TASKS + 2) / 2) * 2} total test runs! (selected tasks("
                f"{int((MAX_PERF_TASKS + 2) / 2)}) * rebuild"
                f" count(2) \nThese tests won't be triggered as the current maximum for a single ./mach try "
                f"perf run is {MAX_PERF_TASKS}. \nIf this was unexpected, please file a bug in Testing :: Performance."
                "\n----------------------------------------------------------------------------------------------\n\n"
            ),
            True,
        ),
        (0, {}, [1, 0, 0, 1], ("No tasks selected"), True),
    ],
)
def test_max_perf_tasks(
    total_tasks,
    options,
    call_counts,
    expected_log_message,
    expected_failure,
):
    # NOTE(review): `setup_perfparser` is not defined in this copy of the
    # file — presumably a helper lost in the mangling; confirm upstream.
    setup_perfparser()

    with mock.patch("tryselect.selectors.perf.push_to_try") as ptt, mock.patch(
        "tryselect.selectors.perf.print",
    ) as perf_print, mock.patch(
        "tryselect.selectors.perf.LogProcessor.revision",
        new_callable=mock.PropertyMock,
        return_value="revision",
    ), mock.patch(
        "tryselect.selectors.perf.PerfParser.perf_push_to_try",
        new_callable=mock.MagicMock,
    ) as perf_push_to_try_mock, mock.patch(
        "tryselect.selectors.perf.PerfParser.get_perf_tasks"
    ) as get_perf_tasks_mock, mock.patch(
        "tryselect.selectors.perf.PerfParser.get_tasks"
    ) as get_tasks_mock, mock.patch(
        "tryselect.selectors.perf.run_fzf"
    ) as fzf, mock.patch(
        "tryselect.selectors.perf.fzf_bootstrap", return_value=mock.MagicMock()
    ):
        tasks = ["a-task"] * total_tasks
        get_tasks_mock.return_value = tasks
        get_perf_tasks_mock.return_value = tasks, [], []
        PerfParser.push_info.finished_run = not expected_failure

        run(**options)
        # NOTE(review): the remainder of this test body (assertions on
        # `call_counts`, `expected_log_message`, and `expected_failure`)
        # appears to have been truncated in this copy of the file.
# NOTE(review): the following German disclaimer text appears to be
# extraneous content accidentally pasted into this Python module (it is a
# website liability notice, unrelated to these tests). It is commented out
# here so the module remains importable; it should probably be removed.
# Translation: "The information on this website was carefully compiled to
# the best of our knowledge. However, neither completeness, correctness,
# nor quality of the provided information is guaranteed. Note: the colored
# syntax highlighting and the measurement are still experimental."