# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Combined with build/autoconf/config.status.m4, ConfigStatus is an almost
# drop-in replacement for autoconf 2.13's config.status, with features
# borrowed from autoconf > 2.5, and additional features.
import logging
import os
import sys
import time
from argparse import ArgumentParser
from itertools import chain
from multiprocessing import Pool, get_start_method
from time import process_time

from mach.logging import LoggingManager

from mozbuild.backend import backends, get_backend_class
from mozbuild.backend.configenvironment import ConfigEnvironment
from mozbuild.base import MachCommandConditions
from mozbuild.frontend.emitter import TreeMetadataEmitter
from mozbuild.frontend.reader import BuildReader
from mozbuild.mozinfo import write_mozinfo
from mozbuild.util import FileAvoidWrite
You are building GeckoView. After your build completes, you can open
the top source directory in Android Studio directly and build using Gradle.
See the documentation at
## Parallel backend setup
# Distributing each backend on a different process is costly because we need to
# copy the definitions across each process. These definitions are read-only, so
# only copy them once when each process starts.
def run(self, backends):
    """Consume the emitted definitions with every backend, in parallel.

    `backends[0]` is handled synchronously on the current process while
    the remaining backends run on the worker pool; returns once all of
    them have finished.
    """
    # We're trying to spawn a minimal number of new processes there, and
    # limit the number of times we serialize the task state. As a
    # consequence:
    # 1. we initialize each process with a copy of `definitions'
    # 2. instead of spawning as many processes as backends, we use the
    #    current process to handle one of the backends and asynchronously
    #    run the others.
    async_tasks = self.pool.map_async(BackendPool._run_worker, backends[1:])
    # Overlap one backend's work with the pool workers by running it here.
    BackendPool._run_worker(backends[0])
    # Block until every asynchronous worker has completed.
    async_tasks.wait()
@staticmethod
def _init_worker(state):
    # Pool initializer: stash the (read-only) definitions on the class so
    # each worker process copies them exactly once at startup instead of
    # re-serializing them for every task.
    BackendPool.per_process_definitions = state
Contrary to config.status, it doesn't use CONFIG_FILES or CONFIG_HEADERS
variables.
Without the -n option, this program acts as config.status and considers
the current directory as the top object directory, even when config.status is
in a different directory. It will, however, treat the directory
containing config.status as the top object directory with the -n option.
The options to this function are passed when creating the
ConfigEnvironment. These lists, as well as the actual wrapper script
around this function, are meant to be generated by configure.
See build/autoconf/config.status.m4. """
# Fail fast on autoconf-style environment overrides: unlike config.status,
# this implementation deliberately does not honor CONFIG_FILES or
# CONFIG_HEADERS (see the docstring above).
if "CONFIG_FILES" in os.environ:
    raise Exception(
        "Using the CONFIG_FILES environment variable is not " "supported."
    )
if "CONFIG_HEADERS" in os.environ:
    raise Exception(
        "Using the CONFIG_HEADERS environment variable is not " "supported."
    )

# A relative topsrcdir would make downstream path computations ambiguous.
if not os.path.isabs(topsrcdir):
    raise Exception(
        "topsrcdir must be defined as an absolute directory: " "%s" % topsrcdir
    )
# Make appropriate backend instances, defaulting to RecursiveMakeBackend,
# or what is in BUILD_BACKENDS.
# NOTE(review): the first assignment below looks immediately overwritten;
# code between the two assignments (e.g. argument-parser setup) appears to
# have been elided here — confirm nothing consumes it before removing.
default_backends = ["RecursiveMake"]
default_backends = (substs or {}).get("BUILD_BACKENDS", ["RecursiveMake"])

# Instantiate one backend object per backend name requested on the
# command line.
selected_backends = [get_backend_class(b)(env) for b in options.backend]

if options.dry_run:
    # Propagate --dry-run to every backend so no files are written.
    for b in selected_backends:
        b.dry_run = True
reader = BuildReader(env)
emitter = TreeMetadataEmitter(env)
# This won't actually do anything because of the magic of generators:
# emit() is lazy, so no moz.build reading happens until a backend
# consumes `definitions` below.
definitions = emitter.emit(reader.read_topsrcdir())

# Configure terminal logging verbosity from the command-line options.
log_level = logging.DEBUG if options.verbose else logging.INFO
log_manager.add_terminal_logging(level=log_level)
log_manager.enable_unstructured()
print("Reticulating splines...", file=sys.stderr)
# `definitions` objects are unfortunately not picklable, which is a
# requirement for the "spawn" method. It's fine under the "fork" method.
# This basically excludes Windows from our optimization; we can live with
# it.
if len(selected_backends) > 1 and get_start_method() == "fork":
    # See https://github.com/python/cpython/commit/39889864c09741909da4ec489459d0197ea8f1fc
    # for why we cap the process count. There's also an overhead to set up
    # new processes, and not that many backends anyway.
    processes = min(len(selected_backends) - 1, 4)
    pool = BackendPool(definitions, processes=processes)
    pool.run(selected_backends)
else:
    if len(selected_backends) > 1:
        # Several backends will each iterate `definitions`, and a
        # generator can only be consumed once, so materialize it first.
        definitions = list(definitions)
    for backend in selected_backends:
        backend.consume(definitions)
execution_time = 0.0
# Report each phase's summary (reader, emitter, then every backend) to
# stderr, accumulating the total execution time across all of them.
for obj in chain((reader, emitter), selected_backends):
    summary = obj.summary()
    print(summary, file=sys.stderr)
    execution_time += summary.execution_time
    # Backends that processed GYP files expose an extra summary.
    if hasattr(obj, "gyp_summary"):
        summary = obj.gyp_summary()
        print(summary, file=sys.stderr)
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.