RuntimeService.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Fragment of a pref-changed callback (enclosing function header is not in
// this chunk). Bails early when the service is gone or when the changed pref
// belongs to the mem-options branch handled by another callback.
// NOTE(review): statements that had been swallowed into trailing line
// comments by newline loss have been restored onto their own lines.
RuntimeService* rts = RuntimeService::GetService();
if (!rts) {
  // May be shutting down, just bail.
  return;
}

const nsDependentCString prefName(aPrefName);

// Several other pref branches will get included here so bail out if there is
// another callback that will handle this change.
if (StringBeginsWith(
        prefName,
        nsLiteralCString(PREF_JS_OPTIONS_PREFIX PREF_MEM_OPTIONS_PREFIX))) {
  return;
}
// Fragment of LoadJSGCMemoryOptions: isolate the pref-name suffix under
// javascript.options.mem.* that identifies which GC parameter changed.
// NOTE(review): the swallowed "return;" statements have been restored from
// the trailing comments they were fused into.
if (!rts) {
  // May be shutting down, just bail.
  return;
}

constexpr auto memPrefix =
    nsLiteralCString{PREF_JS_OPTIONS_PREFIX PREF_MEM_OPTIONS_PREFIX};
const nsDependentCString fullPrefName(aPrefName);

// Pull out the string that actually distinguishes the parameter we need to
// change.
nsDependentCSubstring memPrefName;
if (StringBeginsWith(fullPrefName, memPrefix)) {
  memPrefName.Rebind(fullPrefName, memPrefix.Length());
} else {
  NS_ERROR("Unknown pref name!");
  return;
}
// Table mapping each javascript.options.mem.* pref suffix to the SpiderMonkey
// JSGCParamKey it controls. PREF() expands to a WorkerGCPref aggregate of
// {suffix, full pref name, GC param key}.
// NOTE(review): the #ifdef NIGHTLY_BUILD / #endif directives had been fused
// onto the ends of code lines by newline loss; restored onto their own lines.
#define PREF(suffix_, key_)                                            \
  {                                                                    \
    nsLiteralCString(suffix_),                                         \
        PREF_JS_OPTIONS_PREFIX PREF_MEM_OPTIONS_PREFIX suffix_, key_   \
  }
constexpr WorkerGCPref kWorkerPrefs[] = {
    PREF("max", JSGC_MAX_BYTES),
    PREF("gc_high_frequency_time_limit_ms", JSGC_HIGH_FREQUENCY_TIME_LIMIT),
    PREF("gc_low_frequency_heap_growth", JSGC_LOW_FREQUENCY_HEAP_GROWTH),
    PREF("gc_high_frequency_large_heap_growth",
         JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH),
    PREF("gc_high_frequency_small_heap_growth",
         JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH),
    PREF("gc_small_heap_size_max_mb", JSGC_SMALL_HEAP_SIZE_MAX),
    PREF("gc_large_heap_size_min_mb", JSGC_LARGE_HEAP_SIZE_MIN),
    PREF("gc_balanced_heap_limits", JSGC_BALANCED_HEAP_LIMITS_ENABLED),
    PREF("gc_heap_growth_factor", JSGC_HEAP_GROWTH_FACTOR),
    PREF("gc_allocation_threshold_mb", JSGC_ALLOCATION_THRESHOLD),
    PREF("gc_malloc_threshold_base_mb", JSGC_MALLOC_THRESHOLD_BASE),
    PREF("gc_small_heap_incremental_limit",
         JSGC_SMALL_HEAP_INCREMENTAL_LIMIT),
    PREF("gc_large_heap_incremental_limit",
         JSGC_LARGE_HEAP_INCREMENTAL_LIMIT),
    PREF("gc_urgent_threshold_mb", JSGC_URGENT_THRESHOLD_MB),
    PREF("gc_incremental_slice_ms", JSGC_SLICE_TIME_BUDGET_MS),
    PREF("gc_min_empty_chunk_count", JSGC_MIN_EMPTY_CHUNK_COUNT),
    PREF("gc_compacting", JSGC_COMPACTING_ENABLED),
    PREF("gc_parallel_marking", JSGC_PARALLEL_MARKING_ENABLED),
    PREF("gc_parallel_marking_threshold_mb",
         JSGC_PARALLEL_MARKING_THRESHOLD_MB),
    PREF("gc_max_parallel_marking_threads", JSGC_MAX_MARKING_THREADS),
#ifdef NIGHTLY_BUILD
    PREF("gc_experimental_semispace_nursery", JSGC_SEMISPACE_NURSERY_ENABLED),
#endif
    // Note: Workers do not currently trigger eager minor GC, but if that is
    // desired the following parameters should be added:
    // javascript.options.mem.nursery_eager_collection_threshold_kb
    // javascript.options.mem.nursery_eager_collection_threshold_percent
    // javascript.options.mem.nursery_eager_collection_timeout_ms
};
#undef PREF
// Select the [pref, end) range of kWorkerPrefs to apply: every entry during
// startup, or just the single entry whose suffix matches the changed pref.
// NOTE(review): the "while"/"if" statements swallowed into trailing comments
// and the fused "#ifdef DEBUG" have been restored onto their own lines.
auto pref = kWorkerPrefs;
auto end = kWorkerPrefs + std::size(kWorkerPrefs);

if (gRuntimeServiceDuringInit) {
  // During init, we want to update every pref in kWorkerPrefs.
  MOZ_ASSERT(memPrefName.IsEmpty(),
             "Pref branch prefix only expected during init");
} else {
  // Otherwise, find the single pref that changed.
  while (pref != end) {
    if (pref->memName == memPrefName) {
      end = pref + 1;
      break;
    }
    ++pref;
  }
#ifdef DEBUG
  if (pref == end) {
    nsAutoCString message("Workers don't support the '");
    message.Append(memPrefName);
    message.AppendLiteral("' preference!");
    NS_WARNING(message.get());
  }
#endif
}
// Apply each selected pref to the workers' GC settings. Bytes/slice-time
// prefs are range-checked here; boolean toggles map to 0/1; everything else
// goes through the common helper. A Nothing() value resets the parameter to
// its default.
// NOTE(review): the fused case labels and #ifdef NIGHTLY_BUILD directives
// have been restored onto their own lines.
while (pref != end) {
  switch (pref->key) {
    case JSGC_MAX_BYTES: {
      // Pref is in MiB; reject non-positive or implausibly large (>= 4096
      // MiB) values and reset to default instead.
      int32_t prefValue = GetPref(pref->fullName, -1);
      Maybe<uint32_t> value = (prefValue <= 0 || prefValue >= 0x1000)
                                  ? Nothing()
                                  : Some(uint32_t(prefValue) * 1024 * 1024);
      UpdateOtherJSGCMemoryOption(rts, pref->key, value);
      break;
    }
    case JSGC_SLICE_TIME_BUDGET_MS: {
      // Slice budget in ms; values >= 100000 are treated as bogus.
      int32_t prefValue = GetPref(pref->fullName, -1);
      Maybe<uint32_t> value = (prefValue <= 0 || prefValue >= 100000)
                                  ? Nothing()
                                  : Some(uint32_t(prefValue));
      UpdateOtherJSGCMemoryOption(rts, pref->key, value);
      break;
    }
    case JSGC_COMPACTING_ENABLED:
    case JSGC_PARALLEL_MARKING_ENABLED:
#ifdef NIGHTLY_BUILD
    case JSGC_SEMISPACE_NURSERY_ENABLED:
#endif
    case JSGC_BALANCED_HEAP_LIMITS_ENABLED: {
      // Boolean toggles: only push a value if the pref actually exists.
      bool present;
      bool prefValue = GetPref(pref->fullName, false, &present);
      Maybe<uint32_t> value = present ? Some(prefValue ? 1 : 0) : Nothing();
      UpdateOtherJSGCMemoryOption(rts, pref->key, value);
      break;
    }
    case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
    case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
    case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
    case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
    case JSGC_SMALL_HEAP_SIZE_MAX:
    case JSGC_LARGE_HEAP_SIZE_MIN:
    case JSGC_ALLOCATION_THRESHOLD:
    case JSGC_MALLOC_THRESHOLD_BASE:
    case JSGC_SMALL_HEAP_INCREMENTAL_LIMIT:
    case JSGC_LARGE_HEAP_INCREMENTAL_LIMIT:
    case JSGC_URGENT_THRESHOLD_MB:
    case JSGC_MIN_EMPTY_CHUNK_COUNT:
    case JSGC_HEAP_GROWTH_FACTOR:
    case JSGC_PARALLEL_MARKING_THRESHOLD_MB:
    case JSGC_MAX_MARKING_THREADS:
      UpdateCommonJSGCMemoryOption(rts, pref->fullName, pref->key);
      break;
    default:
      MOZ_ASSERT_UNREACHABLE("Unknown JSGCParamKey value");
      break;
  }
  ++pref;
}
}
// Tail of the CTypes activity callback: forward each activity transition to
// the corresponding WorkerPrivate bookkeeping hook. (Function header is not
// visible in this chunk.)
switch (aType) {
  case JS::CTypesActivityType::BeginCall:
    worker->BeginCTypesCall();
    break;

  case JS::CTypesActivityType::EndCall:
    worker->EndCTypesCall();
    break;

  case JS::CTypesActivityType::BeginCallback:
    worker->BeginCTypesCallback();
    break;

  case JS::CTypesActivityType::EndCallback:
    worker->EndCTypesCallback();
    break;

  default:
    MOZ_CRASH("Unknown type flag!");
}
}
// JSDispatchableRunnables are WorkerRunnables used to dispatch JS::Dispatchable // back to their worker thread. A WorkerRunnable is used for two reasons: // // 1. The JS::Dispatchable::run() callback may run JS so we cannot use a control // runnable since they use async interrupts and break JS run-to-completion. // // 2. The DispatchToEventLoopCallback interface is *required* to fail during // shutdown (see jsapi.h) which is exactly what WorkerRunnable::Dispatch() will // do. Moreover, JS_DestroyContext() does *not* block on JS::Dispatchable::run // being called, DispatchToEventLoopCallback failure is expected to happen // during shutdown. class JSDispatchableRunnable final : public WorkerThreadRunnable {
JS::Dispatchable* mDispatchable;
void PostDispatch(WorkerPrivate* aWorkerPrivate, bool aDispatchResult) override { // For the benefit of the destructor assert. if (!aDispatchResult) {
mDispatchable = nullptr;
}
}
mDispatchable->run(GetCurrentThreadWorkerPrivate()->GetJSContext(),
JS::Dispatchable::ShuttingDown);
mDispatchable = nullptr; // mDispatchable may delete itself
return NS_OK;
}
};
staticbool DispatchToEventLoop(void* aClosure,
JS::Dispatchable* aDispatchable) { // This callback may execute either on the worker thread or a random // JS-internal helper thread.
// See comment at JS::InitDispatchToEventLoop() below for how we know the // WorkerPrivate is alive.
WorkerPrivate* workerPrivate = reinterpret_cast<WorkerPrivate*>(aClosure);
// Dispatch is expected to fail during shutdown for the reasons outlined in // the JSDispatchableRunnable comment above.
RefPtr<JSDispatchableRunnable> r = new JSDispatchableRunnable(workerPrivate, aDispatchable); return r->Dispatch(workerPrivate);
}
// This is the real place where we set the max memory for the runtime. for (constauto& setting : settings.gcSettings) { if (setting.value) {
JS_SetGCParameter(aWorkerCx, setting.key, *setting.value);
} else {
JS_ResetGCParameter(aWorkerCx, setting.key);
}
}
// A WorkerPrivate lives strictly longer than its JSRuntime so we can safely // store a raw pointer as the callback's closure argument on the JSRuntime.
JS::InitDispatchToEventLoop(aWorkerCx, DispatchToEventLoop,
(void*)aWorkerPrivate);
// When available, set the self-hosted shared memory to be read, so that we // can decode the self-hosted content instead of parsing it. auto& shm = xpc::SelfHostedShmem::GetSingleton();
JS::SelfHostedCache selfHostedContent = shm.Content();
if (!JS::InitSelfHostedCode(aWorkerCx, selfHostedContent)) {
NS_WARNING("Could not init self-hosted code!"); returnfalse;
}
class WorkerJSRuntime final : public mozilla::CycleCollectedJSRuntime { public: // The heap size passed here doesn't matter, we will change it later in the // call to JS_SetGCParameter inside InitJSContextForWorker. explicit WorkerJSRuntime(JSContext* aCx, WorkerPrivate* aWorkerPrivate)
: CycleCollectedJSRuntime(aCx), mWorkerPrivate(aWorkerPrivate) {
MOZ_COUNT_CTOR_INHERITED(WorkerJSRuntime, CycleCollectedJSRuntime);
MOZ_ASSERT(aWorkerPrivate);
{
JS::UniqueChars defaultLocale = aWorkerPrivate->AdoptDefaultLocale();
MOZ_ASSERT(defaultLocale, "failure of a WorkerPrivate to have a default locale should " "have made the worker fail to spawn");
if (!JS_SetDefaultLocale(Runtime(), defaultLocale.get())) {
NS_WARNING("failed to set workerCx's default locale");
}
}
}
void Shutdown(JSContext* cx) override { // The CC is shut down, and the superclass destructor will GC, so make sure // we don't try to CC again.
mWorkerPrivate = nullptr;
if (WorkerGlobalScope* scope = mWorkerPrivate->GlobalScope()) { if (EventListenerManager* elm = scope->GetExistingListenerManager()) {
elm->TraceListeners(aTracer);
}
}
if (WorkerDebuggerGlobalScope* debuggerScope =
mWorkerPrivate->DebuggerGlobalScope()) { if (EventListenerManager* elm =
debuggerScope->GetExistingListenerManager()) {
elm->TraceListeners(aTracer);
}
}
};
private:
WorkerPrivate* mWorkerPrivate;
};
} // anonymous namespace
} // namespace workerinternals
// CycleCollectedJSContext specialization for worker threads.
// NOTE(review): this span is garbled by extraction — the trailing line
// comments below have swallowed code (the constructor declaration at the end
// of the class line, and "return;" inside the destructor), and the statements
// from "JSContext* cx = Context();" onward belong to a later method
// (upstream: microtask dispatch) whose header was lost. Kept byte-identical;
// reconcile against the canonical source before building.
class WorkerJSContext final : public mozilla::CycleCollectedJSContext { public: // The heap size passed here doesn't matter, we will change it later in the // call to JS_SetGCParameter inside InitJSContextForWorker. explicit WorkerJSContext(WorkerPrivate* aWorkerPrivate)
: mWorkerPrivate(aWorkerPrivate) {
MOZ_COUNT_CTOR_INHERITED(WorkerJSContext, CycleCollectedJSContext);
// NOTE(review): the comment fused onto the next line explains the targeted
// microtask recursion depth of 2 set below.
MOZ_ASSERT(aWorkerPrivate); // Magical number 2. Workers have the base recursion depth 1, and normal // runnables run at level 2, and we don't want to process microtasks // at any other level.
SetTargetedMicroTaskRecursionDepth(2);
}
// MOZ_CAN_RUN_SCRIPT_BOUNDARY because otherwise we have to annotate the // SpiderMonkey JS::JobQueue's destructor as MOZ_CAN_RUN_SCRIPT, which is a // bit of a pain.
MOZ_CAN_RUN_SCRIPT_BOUNDARY ~WorkerJSContext() {
MOZ_COUNT_DTOR_INHERITED(WorkerJSContext, CycleCollectedJSContext);
// NOTE(review): the trailing comment below swallowed the closing brace flow;
// the early return guards against a failed Initialize().
JSContext* cx = MaybeContext(); if (!cx) { return; // Initialize() must have failed
}
// We expect to come here with the cycle collector already shut down. // The superclass destructor will run the GC one final time and finalize any // JSObjects that were participating in cycles that were broken during CC // shutdown. // Make sure we don't try to CC again.
mWorkerPrivate = nullptr;
}
// NOTE(review): the method header for the following statements was lost;
// they select the main vs. debugger microtask queue by current global.
JSContext* cx = Context();
NS_ASSERTION(cx, "This should never be null!");
JS::Rooted<JSObject*> global(cx, JS::CurrentGlobalOrNull(cx));
NS_ASSERTION(global, "This should never be null!");
// On worker threads, if the current global is the worker global or // ShadowRealm global, we use the main micro task queue. Otherwise, the // current global must be either the debugger global or a debugger sandbox, // and we use the debugger micro task queue instead. if (IsWorkerGlobal(global) || IsShadowRealmGlobal(global)) {
microTaskQueue = &GetMicroTaskQueue();
} else {
MOZ_ASSERT(IsWorkerDebuggerGlobal(global) ||
IsWorkerDebuggerSandbox(global));
// Runnable that drives a worker thread's primary event loop.
// NOTE(review): this declaration is truncated in this chunk — constructors
// and methods are not visible; do not infer the full interface from what is
// shown here.
class WorkerThreadPrimaryRunnable final : public Runnable {
// The worker this thread services. NOTE(review): lifetime/ownership
// semantics are not visible in this chunk — confirm against the caller.
WorkerPrivate* mWorkerPrivate;
SafeRefPtr<WorkerThread> mThread;
JSRuntime* mParentRuntime;
// Nested runnable, presumably used to finish/release the thread after the
// primary loop exits — declaration truncated here as well.
class FinishedRunnable final : public Runnable {
SafeRefPtr<WorkerThread> mThread;
// Lazily create the singleton RuntimeService; on Init() failure, tear it
// back down and report failure to the caller. (Fragment — the enclosing
// function header is not visible in this chunk.)
if (!gRuntimeService) {
  // The observer service now owns us until shutdown.
  gRuntimeService = new RuntimeService();
  if (NS_FAILED(gRuntimeService->Init())) {
    NS_WARNING("Failed to initialize!");
    gRuntimeService->Cleanup();
    gRuntimeService = nullptr;
    return nullptr;
  }
}
if (queued) {
domainInfo->mQueuedWorkers.AppendElement(&aWorkerPrivate);
// Worker spawn gets queued due to hitting max workers per domain // limit so let's log a warning.
WorkerPrivate::ReportErrorToConsole(nsIScriptError::warningFlag, "DOM"_ns,
nsContentUtils::eDOM_PROPERTIES, "HittingMaxWorkersPerDomain2"_ns);
// From here on out we must call UnregisterWorker if something fails! if (parent) { if (!parent->AddChildWorker(aWorkerPrivate)) {
UnregisterWorker(aWorkerPrivate); returnfalse;
}
} else { if (!mNavigatorPropertiesLoaded) { if (NS_FAILED(Navigator::GetAppVersion(
mNavigatorProperties.mAppVersion, aWorkerPrivate.GetDocument(), false/* aUsePrefOverriddenValue */)) ||
NS_FAILED(Navigator::GetPlatform(
mNavigatorProperties.mPlatform, aWorkerPrivate.GetDocument(), false/* aUsePrefOverriddenValue */))) {
UnregisterWorker(aWorkerPrivate); returnfalse;
}
// The navigator overridden properties should have already been read.
if (!isServiceWorker) { // Service workers are excluded since their lifetime is separate from // that of dom windows. if (auto* const windowArray = mWindowMap.GetOrInsertNew(window, 1);
!windowArray->Contains(&aWorkerPrivate)) {
windowArray->AppendElement(&aWorkerPrivate);
} else {
MOZ_ASSERT(aWorkerPrivate.IsSharedWorker());
}
}
}
if (!queued && !ScheduleWorker(aWorkerPrivate)) { returnfalse;
}
if (isServiceWorker) {
AssertIsOnMainThread();
} returntrue;
}
WorkerDomainInfo* domainInfo; if (!mDomainMap.Get(domain, &domainInfo)) {
NS_ERROR("Don't have an entry for this domain!");
}
// Remove old worker from everywhere.
uint32_t index = domainInfo->mQueuedWorkers.IndexOf(&aWorkerPrivate); if (index != kNoIndex) { // Was queued, remove from the list.
domainInfo->mQueuedWorkers.RemoveElementAt(index);
} elseif (parent) {
MOZ_ASSERT(domainInfo->mChildWorkerCount, "Must be non-zero!");
domainInfo->mChildWorkerCount--;
} elseif (aWorkerPrivate.IsServiceWorker()) {
MOZ_ASSERT(domainInfo->mActiveServiceWorkers.Contains(&aWorkerPrivate), "Don't know about this worker!");
domainInfo->mActiveServiceWorkers.RemoveElement(&aWorkerPrivate);
} else {
MOZ_ASSERT(domainInfo->mActiveWorkers.Contains(&aWorkerPrivate), "Don't know about this worker!");
domainInfo->mActiveWorkers.RemoveElement(&aWorkerPrivate);
}
// See if there's a queued worker we can schedule. if (domainInfo->ActiveWorkerCount() < gMaxWorkersPerDomain &&
!domainInfo->mQueuedWorkers.IsEmpty()) {
queuedWorker = domainInfo->mQueuedWorkers[0];
domainInfo->mQueuedWorkers.RemoveElementAt(0);
if (domainInfo->HasNoWorkers()) {
MOZ_ASSERT(domainInfo->mQueuedWorkers.IsEmpty());
mDomainMap.Remove(domain);
}
}
// NB: For Shared Workers we used to call ShutdownOnMainThread on the // RemoteWorkerController; however, that was redundant because // RemoteWorkerChild uses a WeakWorkerRef which notifies at about the // same time as us calling into the code here and would race with us.
if (parent) {
parent->RemoveChildWorker(aWorkerPrivate);
// NOTE(review): garbled span — "elseif" is a newline-loss artifact of
// "else if", "returnfalse;" of "return false;", and the lambda closed by
// "});" below lost its opening statement (upstream: an mWindowMap traversal
// that removes this shared worker from each window's worker array and prunes
// empty entries). Kept byte-identical; reconcile against the canonical
// source before building.
} elseif (aWorkerPrivate.IsSharedWorker()) {
AssertIsOnMainThread();
if (workers->RemoveElement(&aWorkerPrivate)) {
MOZ_ASSERT(!workers->Contains(&aWorkerPrivate), "Added worker more than once!");
return workers->IsEmpty();
}
returnfalse;
});
} elseif (aWorkerPrivate.IsDedicatedWorker()) { // May be null.
nsPIDOMWindowInner* window = aWorkerPrivate.GetWindow(); if (auto entry = mWindowMap.Lookup(window)) {
MOZ_ALWAYS_TRUE(entry.Data()->RemoveElement(&aWorkerPrivate)); if (entry.Data()->IsEmpty()) {
entry.Remove();
}
} else {
MOZ_ASSERT_UNREACHABLE("window is not in mWindowMap");
}
}
if (queuedWorker && !ScheduleWorker(*queuedWorker)) {
UnregisterWorker(*queuedWorker);
}
}
// Starts the worker and, when a dedicated thread is needed, creates and
// configures it. Returns false (after unregistering) on thread-creation
// failure.
// NOTE(review): the swallowed "return true;"/"return false;" statements have
// been restored; the remainder of this function is truncated from this chunk.
bool RuntimeService::ScheduleWorker(WorkerPrivate& aWorkerPrivate) {
  if (!aWorkerPrivate.Start()) {
    // This is ok, means that we didn't need to make a thread for this worker.
    return true;
  }

  const WorkerThreadFriendKey friendKey;

  SafeRefPtr<WorkerThread> thread = WorkerThread::Create(friendKey);
  if (!thread) {
    UnregisterWorker(aWorkerPrivate);
    return false;
  }

  // A failed priority tweak is non-fatal.
  if (NS_FAILED(thread->SetPriority(nsISupportsPriority::PRIORITY_NORMAL))) {
    NS_WARNING("Could not set the thread's priority!");
  }
// Fragment of RuntimeService::Init(): eagerly initialize the stream
// transport service and register observer-service notifications.
// nsIStreamTransportService is thread-safe but it must be initialized on the // main-thread. FileReader needs it, so, let's initialize it now.
nsresult rv;
nsCOMPtr<nsIStreamTransportService> sts =
do_GetService(kStreamTransportServiceCID, &rv);
NS_ENSURE_TRUE(sts, NS_ERROR_FAILURE);
// Observer registrations below are best-effort: a failure only costs us the
// corresponding notification, so each is a warning rather than an error.
if (NS_FAILED(obs->AddObserver(this, GC_REQUEST_OBSERVER_TOPIC, false))) {
NS_WARNING("Failed to register for GC request notifications!");
}
if (NS_FAILED(obs->AddObserver(this, CC_REQUEST_OBSERVER_TOPIC, false))) {
NS_WARNING("Failed to register for CC request notifications!");
}
if (NS_FAILED(
obs->AddObserver(this, MEMORY_PRESSURE_OBSERVER_TOPIC, false))) {
NS_WARNING("Failed to register for memory pressure notifications!");
}
if (NS_FAILED(
obs->AddObserver(this, NS_IOSERVICE_OFFLINE_STATUS_TOPIC, false))) {
NS_WARNING("Failed to register for offline notification event!");
}
// While this flag is set, pref callbacks update every entry in kWorkerPrefs
// instead of a single changed pref (see LoadJSGCMemoryOptions).
MOZ_ASSERT(!gRuntimeServiceDuringInit, "This should be false!");
gRuntimeServiceDuringInit = true;
MOZ_ASSERT(!mShuttingDown); // That's it, no more workers.
mShuttingDown = true;
nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
NS_WARNING_ASSERTION(obs, "Failed to get observer service?!");
// Tell anyone that cares that they're about to lose worker support. if (obs && NS_FAILED(obs->NotifyObservers(nullptr, WORKERS_SHUTDOWN_TOPIC,
nullptr))) {
NS_WARNING("NotifyObservers failed!");
}
{
AutoTArray<WorkerPrivate*, 100> workers;
{
MutexAutoLock lock(mMutex);
AddAllTopLevelWorkersToArray(workers);
}
// Cancel all top-level workers. for (constauto& worker : workers) { if (!worker->Cancel()) {
NS_WARNING("Failed to cancel worker!");
}
}
}
sDefaultJSSettings = nullptr;
}
namespace {
// Control runnable that collects crash-annotation info from a (possibly hung)
// worker thread. NOTE(review): this span is garbled — the header of the
// method whose body starts at "if (!Dispatch(mWorkerPrivate))" (upstream:
// a dispatch-and-wait helper) was lost; "returnfalse"/"returntrue" are
// newline-loss artifacts of "return false;"/"return true;"; and the trailing
// comment on the Dispatch line has swallowed the "return false;" statement.
// The MOZ_CRASH_UNSAFE lines at the bottom belong to yet another truncated
// function. Kept byte-identical; reconcile against the canonical source.
class DumpCrashInfoRunnable final : public WorkerControlRunnable { public: explicit DumpCrashInfoRunnable(WorkerPrivate* aWorkerPrivate)
: WorkerControlRunnable("DumpCrashInfoRunnable"),
mMonitor("DumpCrashInfoRunnable::mMonitor"),
mWorkerPrivate(aWorkerPrivate) {}
if (!Dispatch(mWorkerPrivate)) { // The worker is already dead but the main thread still didn't remove it // from RuntimeService's registry. returnfalse;
}
// To avoid any possibility of process hangs we never receive reports on // we give the worker 1sec to react.
lock.Wait(TimeDuration::FromMilliseconds(1000)); if (!mHasMsg) {
mMsg.Append("NoResponse");
mHasMsg.Flip();
} returntrue;
}
// This string will be leaked.
MOZ_CRASH_UNSAFE(strdup(msg.BeginReading()));
}
// This spins the event loop until all workers are finished and their threads // have been joined. void RuntimeService::Cleanup() {
AssertIsOnMainThread();
if (!mShuttingDown) {
Shutdown();
}
nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
NS_WARNING_ASSERTION(obs, "Failed to get observer service?!");
if (!workers.IsEmpty()) {
nsIThread* currentThread = NS_GetCurrentThread();
NS_ASSERTION(currentThread, "This should never be null!");
// If the loop below takes too long, we probably have a problematic // worker. MOZ_LOG some info before the parent process forcibly // terminates us so that in the event we are a content process, the log // output can provide useful context about the workers that did not // cleanly shut down.
nsCOMPtr<nsITimer> timer;
RefPtr<RuntimeService> self = this;
nsresult rv = NS_NewTimerWithCallback(
getter_AddRefs(timer),
[self](nsITimer*) { self->DumpRunningWorkers(); },
TimeDuration::FromSeconds(1), nsITimer::TYPE_ONE_SHOT, "RuntimeService::WorkerShutdownDump");
Unused << NS_WARN_IF(NS_FAILED(rv));
// And make sure all their final messages have run and all their threads // have joined. while (mDomainMap.Count()) {
MutexAutoUnlock unlock(mMutex);
if (!NS_ProcessNextEvent(currentThread)) {
NS_WARNING("Something bad happened!"); break;
}
}
if (NS_SUCCEEDED(rv)) {
timer->Cancel();
}
}
}
NS_ASSERTION(!mWindowMap.Count(), "All windows should have been released!");
if (obs) { if (NS_FAILED(obs->RemoveObserver(this, GC_REQUEST_OBSERVER_TOPIC))) {
NS_WARNING("Failed to unregister for GC request notifications!");
}
if (NS_FAILED(obs->RemoveObserver(this, CC_REQUEST_OBSERVER_TOPIC))) {
NS_WARNING("Failed to unregister for CC request notifications!");
}
if (NS_FAILED(
obs->RemoveObserver(this, MEMORY_PRESSURE_OBSERVER_TOPIC))) {
NS_WARNING("Failed to unregister for memory pressure notifications!");
}
if (NS_FAILED(
obs->RemoveObserver(this, NS_IOSERVICE_OFFLINE_STATUS_TOPIC))) {
NS_WARNING("Failed to unregister for offline notification event!");
}
obs->RemoveObserver(this, NS_XPCOM_SHUTDOWN_THREADS_OBSERVER_ID);
obs->RemoveObserver(this, NS_XPCOM_SHUTDOWN_OBSERVER_ID);
mObserved = false;
}
}
nsLayoutStatics::Release();
}
// Collects every top-level (parentless) worker into aWorkers. Debug builds
// double-check the invariant that the active lists never contain children.
// NOTE(review): "constauto" was a newline-loss artifact of "const auto" and
// the #ifdef DEBUG directive had been fused onto a code line; restored. The
// body that actually appends to aWorkers is truncated from this chunk.
void RuntimeService::AddAllTopLevelWorkersToArray(
    nsTArray<WorkerPrivate*>& aWorkers) {
  for (const auto& aData : mDomainMap.Values()) {
#ifdef DEBUG
    for (const auto& activeWorker : aData->mActiveWorkers) {
      MOZ_ASSERT(!activeWorker->GetParent(),
                 "Shouldn't have a parent in this list!");
    }
    for (const auto& activeServiceWorker : aData->mActiveServiceWorkers) {
      MOZ_ASSERT(!activeServiceWorker->GetParent(),
                 "Shouldn't have a parent in this list!");
    }
#endif
// Returns the processor count reported to workers, spoofed to 2 under
// fingerprinting resistance.
// NOTE(review): the function header and the "#if defined(XP_MACOSX)"
// directive had been fused into comment/code lines by newline loss; restored.
// The final clamping/return is truncated from this chunk.
uint32_t RuntimeService::ClampedHardwareConcurrency(
    bool aShouldResistFingerprinting) const {
  // The Firefox Hardware Report says 70% of Firefox users have exactly 2
  // cores. When the resistFingerprinting pref is set, we want to blend into
  // the crowd so spoof navigator.hardwareConcurrency = 2 to reduce user
  // uniqueness.
  if (MOZ_UNLIKELY(aShouldResistFingerprinting)) {
    return 2;
  }

  // This needs to be atomic, because multiple workers, and even mainthread,
  // could race to initialize it at once.
  static Atomic<uint32_t> unclampedHardwareConcurrency;

  // No need to loop here: if compareExchange fails, that just means that some
  // other worker has initialized numberOfProcessors, so we're good to go.
  if (!unclampedHardwareConcurrency) {
    int32_t numberOfProcessors = 0;
#if defined(XP_MACOSX)
    if (nsMacUtilsImpl::IsTCSMAvailable()) {
      // On failure, zero is returned from GetPhysicalCPUCount()
      // and we fallback to PR_GetNumberOfProcessors below.
      numberOfProcessors = nsMacUtilsImpl::GetPhysicalCPUCount();
    }
#endif
    if (numberOfProcessors == 0) {
      numberOfProcessors = PR_GetNumberOfProcessors();
    }
    if (numberOfProcessors <= 0) {
      numberOfProcessors = 1;  // Must be one there somewhere
    }

    Unused << unclampedHardwareConcurrency.compareExchange(0,
                                                           numberOfProcessors);
  }
// Fragment of RuntimeService::Observe(): dispatch on the notification topic.
// NOTE(review): the "if (data.EqualsLiteral(LOW_MEMORY_ONGOING_DATA))" check
// and its "return NS_OK;" had been swallowed into the preceding comment by
// newline loss; restored.
if (!strcmp(aTopic, NS_XPCOM_SHUTDOWN_OBSERVER_ID)) {
  Shutdown();
  return NS_OK;
}
if (!strcmp(aTopic, NS_XPCOM_SHUTDOWN_THREADS_OBSERVER_ID)) {
  Cleanup();
  return NS_OK;
}
if (!strcmp(aTopic, GC_REQUEST_OBSERVER_TOPIC)) {
  GarbageCollectAllWorkers(/* shrinking = */ false);
  return NS_OK;
}
if (!strcmp(aTopic, CC_REQUEST_OBSERVER_TOPIC)) {
  CycleCollectAllWorkers();
  return NS_OK;
}
if (!strcmp(aTopic, MEMORY_PRESSURE_OBSERVER_TOPIC)) {
  nsDependentString data(aData);
  // Don't continue to GC/CC if we are in an ongoing low-memory state since
  // its very slow and it likely won't help us anyway.
  if (data.EqualsLiteral(LOW_MEMORY_ONGOING_DATA)) {
    return NS_OK;
  }
  if (data.EqualsLiteral(LOW_MEMORY_DATA)) {
    SetLowMemoryStateAllWorkers(true);
  }
  GarbageCollectAllWorkers(/* shrinking = */ true);
  CycleCollectAllWorkers();
  MemoryPressureAllWorkers();
  return NS_OK;
}
if (!strcmp(aTopic, MEMORY_PRESSURE_STOP_OBSERVER_TOPIC)) {
  SetLowMemoryStateAllWorkers(false);
  return NS_OK;
}
if (!strcmp(aTopic, NS_IOSERVICE_OFFLINE_STATUS_TOPIC)) {
  SendOfflineStatusChangeEventToAllWorkers(NS_IsOffline());
  return NS_OK;
}
// Logs all currently running workers via MOZ_LOG, used to diagnose shutdown
// hangs. NOTE(review): truncated in this chunk — the logging loop and the
// restoration of the previous LogLevel are not visible.
void RuntimeService::DumpRunningWorkers() { // Temporarily set the LogLevel high enough to be certain the messages are // visible.
LogModule* module = gWorkerShutdownDumpLog;
LogLevel prevLevel = module->Level();
// Raise to Debug for the duration of the dump (restored later, per the
// comment above — the restore itself is outside this chunk).
if (prevLevel < LogLevel::Debug) {
module->SetLevel(LogLevel::Debug);
--> --------------------
--> maximum size reached
--> --------------------
¤ Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell.
(vorverarbeitet)