# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json import logging import os import shutil import sys import time from collections import defaultdict
import yaml from redo import retry from taskgraph import create from taskgraph.create import create_tasks
# TODO: Let standalone taskgraph generate parameters instead of calling internals from taskgraph.decision import (
_determine_more_accurate_base_ref,
_determine_more_accurate_base_rev,
_get_env_prefix,
) from taskgraph.generator import TaskGraphGenerator from taskgraph.parameters import Parameters from taskgraph.taskgraph import TaskGraph from taskgraph.util.python_path import find_object from taskgraph.util.taskcluster import get_artifact from taskgraph.util.vcs import get_repository from taskgraph.util.yaml import load_yaml
from . import GECKO from .actions import render_actions_json from .files_changed import get_changed_files from .parameters import get_app_version, get_version from .try_option_syntax import parse_message from .util.backstop import ANDROID_PERFTEST_BACKSTOP_INDEX, BACKSTOP_INDEX, is_backstop from .util.bugbug import push_schedules from .util.chunking import resolver from .util.hg import get_hg_commit_message, get_hg_revision_branch from .util.partials import populate_release_history from .util.taskcluster import insert_index from .util.taskgraph import find_decision_task, find_existing_tasks_from_previous_kinds
logger = logging.getLogger(__name__)

ARTIFACTS_DIR = "artifacts"

# Per-project parameter overlays applied on top of the command-line options.
# Projects not listed here fall back to the "default" entry.
# See `taskcluster/docs/parameters.rst` for information on parameters.
PER_PROJECT_PARAMETERS = {
    "try": {
        "enable_always_target": True,
        "target_tasks_method": "try_tasks",
        "release_type": "nightly",
    },
    "kaios-try": {
        "target_tasks_method": "try_tasks",
    },
    "ash": {
        "target_tasks_method": "default",
    },
    "cedar": {
        "target_tasks_method": "default",
    },
    "holly": {
        "enable_always_target": True,
        "target_tasks_method": "holly_tasks",
    },
    "oak": {
        "target_tasks_method": "default",
        "release_type": "nightly-oak",
    },
    "graphics": {
        "target_tasks_method": "graphics_tasks",
    },
    "autoland": {
        "optimize_strategies": "gecko_taskgraph.optimize:project.autoland",
        "target_tasks_method": "autoland_tasks",
        # Remove this line to disable "manifest scheduling".
        "test_manifest_loader": "bugbug",
    },
    "mozilla-central": {
        "target_tasks_method": "mozilla_central_tasks",
        "release_type": "nightly",
    },
    "mozilla-beta": {
        "target_tasks_method": "mozilla_beta_tasks",
        "release_type": "beta",
    },
    "mozilla-release": {
        "target_tasks_method": "mozilla_release_tasks",
        "release_type": "release",
    },
    "mozilla-esr128": {
        "target_tasks_method": "mozilla_esr128_tasks",
        "release_type": "esr128",
    },
    "pine": {
        "target_tasks_method": "pine_tasks",
        "release_type": "nightly-pine",
    },
    "cypress": {
        "target_tasks_method": "cypress_tasks",
    },
    "larch": {
        "target_tasks_method": "larch_tasks",
        "release_type": "nightly-larch",
    },
    "kaios": {
        "target_tasks_method": "kaios_tasks",
    },
    "toolchains": {
        "target_tasks_method": "mozilla_central_tasks",
    },
    # the default parameters are used for projects that do not match above.
    "default": {
        "target_tasks_method": "default",
    },
}
def full_task_graph_to_runnable_jobs(full_task_json):
    """Reduce a full task-graph JSON dict to the per-label Treeherder info
    needed to display runnable jobs.

    Tasks without `task.extra.treeherder` metadata are skipped.  For the
    rest, the symbol plus any group/collection/platform details are copied.
    """
    runnable_jobs = {}
    for label, node in full_task_json.items():
        if not ("extra" in node["task"] and "treeherder" in node["task"]["extra"]):
            continue

        # NOTE(review): these two lines were missing from the mangled source;
        # reconstructed from the later uses of `th` and `runnable_jobs[label]`.
        th = node["task"]["extra"]["treeherder"]
        runnable_jobs[label] = {"symbol": th["symbol"]}

        for i in ("groupName", "groupSymbol", "collection"):
            if i in th:
                runnable_jobs[label][i] = th[i]
        if th.get("machine", {}).get("platform"):
            runnable_jobs[label]["platform"] = th["machine"]["platform"]
    return runnable_jobs
def full_task_graph_to_manifests_by_task(full_task_json):
    """Map each task label to the list of test manifests it will run.

    Tasks whose attributes carry no (or an empty) `test_manifests` entry are
    skipped.  Returns a defaultdict(list) keyed by task label.
    """
    manifests_by_task = defaultdict(list)
    for label, node in full_task_json.items():
        manifests = node["attributes"].get("test_manifests")
        if not manifests:
            continue

        # NOTE(review): the accumulation and return were missing from the
        # mangled source; reconstructed from the function's obvious intent.
        manifests_by_task[label].extend(manifests)
    return manifests_by_task
def try_syntax_from_message(message):
    """
    Parse the try syntax out of a commit message, returning '' if none is
    found.

    The syntax is the remainder of the first line containing "try:", starting
    at that marker.
    """
    try_idx = message.find("try:")
    if try_idx == -1:
        return ""
    return message[try_idx:].split("\n", 1)[0]
def taskgraph_decision(options, parameters=None):
    """
    Run the decision task.  This function implements `mach taskgraph decision`,
    and is responsible for

     * processing decision task command-line options into parameters
     * running task-graph generation exactly the same way the other `mach
       taskgraph` commands do
     * generating a set of artifacts to memorialize the graph
     * calling TaskCluster APIs to create the graph
    """

    parameters = parameters or (
        lambda graph_config: get_decision_parameters(graph_config, options)
    )

    # NOTE(review): reconstructed — the id of the decision task itself comes
    # from the TASK_ID environment variable set by Taskcluster; confirm
    # against upstream history.
    decision_task_id = os.environ["TASK_ID"]

    # create a TaskGraphGenerator instance
    tgg = TaskGraphGenerator(
        root_dir=options.get("root"),
        parameters=parameters,
        decision_task_id=decision_task_id,
        write_artifacts=True,
    )

    if not create.testing:
        # set additional index paths for the decision task
        set_decision_indexes(decision_task_id, tgg.parameters, tgg.graph_config)

    # write out the parameters used to generate this graph
    write_artifact("parameters.yml", dict(**tgg.parameters))

    # write out the public/actions.json file
    write_artifact(
        "actions.json",
        render_actions_json(tgg.parameters, tgg.graph_config, decision_task_id),
    )

    # write out the full graph for reference
    full_task_json = tgg.full_task_graph.to_json()
    write_artifact("full-task-graph.json", full_task_json)

    # write out the public/runnable-jobs.json file
    write_artifact(
        "runnable-jobs.json", full_task_graph_to_runnable_jobs(full_task_json)
    )

    # write out the public/manifests-by-task.json file
    write_artifact(
        "manifests-by-task.json.gz",
        full_task_graph_to_manifests_by_task(full_task_json),
    )

    # write out the public/tests-by-manifest.json file
    write_artifact("tests-by-manifest.json.gz", resolver.tests_by_manifest)

    # this is just a test to check whether the from_json() function is working
    _, _ = TaskGraph.from_json(full_task_json)

    # write out the target task set to allow reproducing this as input
    write_artifact("target-tasks.json", list(tgg.target_task_set.tasks.keys()))

    # write out the optimized task graph to describe what will actually happen,
    # and the map of labels to taskids
    write_artifact("task-graph.json", tgg.morphed_task_graph.to_json())
    write_artifact("label-to-taskid.json", tgg.label_to_taskid)

    # write bugbug scheduling information if it was invoked
    if len(push_schedules) > 0:
        write_artifact("bugbug-push-schedules.json", push_schedules.popitem()[1])

    # actually create the graph.  NOTE(review): `create_tasks` is imported at
    # the top of the file but was never called in the mangled source; this
    # final step is reconstructed from the docstring's stated contract
    # ("calling TaskCluster APIs to create the graph").
    create_tasks(
        tgg.graph_config,
        tgg.morphed_task_graph,
        tgg.label_to_taskid,
        tgg.parameters,
        decision_task_id=decision_task_id,
    )
def get_decision_parameters(graph_config, options):
    """
    Load parameters from the command-line options for 'taskgraph decision'.

    This also applies per-project parameters, based on the given project.

    Returns a validated `Parameters` instance.
    """
    product_dir = graph_config["product-dir"]

    # Options that map straight through to parameters.
    parameters = {
        n: options[n]
        for n in [
            "base_repository",
            "base_ref",
            "base_rev",
            "head_repository",
            "head_rev",
            "head_ref",
            "head_tag",
            "project",
            "pushlog_id",
            "pushdate",
            "owner",
            "level",
            "repository_type",
            "target_tasks_method",
            "tasks_for",
        ]
        if n in options
    }

    # NOTE(review): reconstructed — `commit_message` is required by the
    # DONTBUILD check below but its definition was dropped from this chunk.
    # Upstream reads it from the local hg checkout; best-effort here.
    try:
        commit_message = get_hg_commit_message(os.getcwd())
    except Exception:
        commit_message = ""

    # Version/branch-derived defaults.  NOTE(review): upstream sets many more
    # defaults here (files_changed, release_* fields, try_* fields, ...);
    # confirm against history if `Parameters.check()` complains.
    parameters["version"] = get_version(product_dir)
    parameters["app_version"] = get_app_version(product_dir)
    parameters["message"] = try_syntax_from_message(commit_message)
    parameters["hg_branch"] = get_hg_revision_branch(
        GECKO, revision=parameters["head_rev"]
    )

    # owner must be an email, but sometimes (e.g., for ffxbld) it is not, in which
    # case, fake it
    if "@" not in parameters["owner"]:
        parameters["owner"] += "@noreply.mozilla.org"

    # use the pushdate as build_date if given, else use current time
    parameters["build_date"] = parameters["pushdate"] or int(time.time())
    # moz_build_date is the build identifier based on build_date
    parameters["moz_build_date"] = time.strftime(
        "%Y%m%d%H%M%S", time.gmtime(parameters["build_date"])
    )

    project = parameters["project"]
    try:
        parameters.update(PER_PROJECT_PARAMETERS[project])
    except KeyError:
        logger.warning(
            "using default project parameters; add {} to "
            "PER_PROJECT_PARAMETERS in {} to customize behavior "
            "for this project".format(project, __file__)
        )
        parameters.update(PER_PROJECT_PARAMETERS["default"])

    # `target_tasks_method` has higher precedence than `project` parameters
    if options.get("target_tasks_method"):
        parameters["target_tasks_method"] = options["target_tasks_method"]

    # ..but can be overridden by the commit message: if it contains the special
    # string "DONTBUILD" and this is an on-push decision task, then use the
    # special 'nothing' target task method.
    if "DONTBUILD" in commit_message and options["tasks_for"] == "hg-push":
        parameters["target_tasks_method"] = "nothing"

    if options.get("include_push_tasks"):
        get_existing_tasks(options.get("rebuild_kinds", []), parameters, graph_config)

    # If the target method is nightly, we should build partials. This means
    # knowing what has been released previously.
    # An empty release_history is fine, it just means no partials will be built
    parameters.setdefault("release_history", dict())
    if "nightly" in parameters.get("target_tasks_method", ""):
        parameters["release_history"] = populate_release_history("Firefox", project)

    if options.get("try_task_config_file"):
        task_config_file = os.path.abspath(options.get("try_task_config_file"))
    else:
        # if try_task_config.json is present, load it
        task_config_file = os.path.join(os.getcwd(), "try_task_config.json")
    # NOTE(review): `task_config_file` has no visible consumer in this chunk —
    # the try-config loading step appears to have been dropped; confirm
    # against upstream before relying on try pushes.

    if options.get("optimize_target_tasks") is not None:
        parameters["optimize_target_tasks"] = options["optimize_target_tasks"]

    # Determine if this should be a backstop push.
    parameters["backstop"] = is_backstop(parameters)

    # For the android perf tasks, run them 50% less often
    parameters["android_perftest_backstop"] = is_backstop(
        parameters,
        push_interval=30,
        time_interval=60 * 6,
        backstop_strategy="android_perftest_backstop",
    )

    # NOTE(review): reconstructed return — callers (taskgraph_decision via
    # TaskGraphGenerator) expect a Parameters object; the mangled source had
    # no return statement at all.
    result = Parameters(**parameters)
    result.check()
    return result
def set_decision_indexes(decision_task_id, params, graph_config):
    """Insert additional index paths pointing at this decision task.

    Backstop-related indexes are inserted at the front of the list so they
    are registered as early as possible (see comment below).
    """
    index_paths = []
    if params["android_perftest_backstop"]:
        index_paths.insert(0, ANDROID_PERFTEST_BACKSTOP_INDEX)
    if params["backstop"]:
        # When two Decision tasks run at nearly the same time, it's possible
        # they both end up being backstops if the second checks the backstop
        # index before the first inserts it. Insert this index first to reduce
        # the chances of that happening.
        index_paths.insert(0, BACKSTOP_INDEX)

    # NOTE(review): reconstructed — `subs` was used below but never defined in
    # the mangled source; the index templates substitute parameter values plus
    # the graph config's trust-domain.
    subs = params.copy()
    subs["trust-domain"] = graph_config["trust-domain"]

    for index_path in index_paths:
        insert_index(index_path.format(**subs), decision_task_id, use_proxy=True)
def write_artifact(filename, data):
    """Serialize `data` into ARTIFACTS_DIR/filename based on the extension.

    Supports .yml (YAML), .json (pretty-printed JSON) and .json.gz
    (gzip-compressed JSON); raises TypeError for anything else.
    """
    # The mangled source had a literal "(unknown)" in this f-string; the
    # intended interpolation is the artifact filename.
    logger.info(f"writing artifact file `{filename}`")
    if not os.path.isdir(ARTIFACTS_DIR):
        os.mkdir(ARTIFACTS_DIR)
    path = os.path.join(ARTIFACTS_DIR, filename)
    if filename.endswith(".yml"):
        with open(path, "w") as f:
            yaml.safe_dump(data, f, allow_unicode=True, default_flow_style=False)
    elif filename.endswith(".json"):
        with open(path, "w") as f:
            json.dump(data, f, sort_keys=True, indent=2, separators=(",", ": "))
    elif filename.endswith(".json.gz"):
        import gzip

        with gzip.open(path, "wb") as f:
            f.write(json.dumps(data).encode("utf-8"))
    else:
        raise TypeError(f"Don't know how to write to {filename}")
def read_artifact(filename):
    """Read an artifact previously written by `write_artifact`.

    Deserializes ARTIFACTS_DIR/filename based on its extension (.yml, .json,
    .json.gz); raises TypeError for unsupported extensions.
    """
    path = os.path.join(ARTIFACTS_DIR, filename)
    if filename.endswith(".yml"):
        return load_yaml(path, filename)
    if filename.endswith(".json"):
        with open(path) as f:
            return json.load(f)
    if filename.endswith(".json.gz"):
        import gzip

        # The original called `f.decode("utf-8")` on the file object itself,
        # which would raise AttributeError; json.load reads the binary
        # gzip stream directly.
        with gzip.open(path, "rb") as f:
            return json.load(f)
    raise TypeError(f"Don't know how to read {filename}")
# NOTE(review): the following German disclaimer appears to be extraction
# residue from a web page, not part of this module; preserved (translated)
# as a comment so the file remains importable:
# "The information on this website has been carefully compiled to the best
# of our knowledge. However, neither completeness, correctness, nor quality
# of the provided information is guaranteed.
# Remark: the colored syntax highlighting is still experimental."