/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef Mutex_h
#define Mutex_h

// Platform lock primitive: CRITICAL_SECTION on Windows, pthread mutexes
// elsewhere, plus os_unfair_lock on Darwin.
#if defined(XP_WIN)
#  include <windows.h>
#else
#  include <pthread.h>
#endif
#if defined(XP_DARWIN)
#  include <os/lock.h>
#endif
// Mutexes based on spinlocks. We can't use normal pthread spinlocks in all // places, because they require malloc()ed memory, which causes bootstrapping // issues in some cases. We also can't use constructors, because for statics, // they would fire after the first use of malloc, resetting the locks. struct MOZ_CAPABILITY("mutex") Mutex { #ifdefined(XP_WIN)
CRITICAL_SECTION mMutex; #elifdefined(XP_DARWIN)
os_unfair_lock mMutex; #else
pthread_mutex_t mMutex; #endif
inlinevoid Lock() MOZ_CAPABILITY_ACQUIRE() { #ifdefined(XP_WIN)
EnterCriticalSection(&mMutex); #elifdefined(XP_DARWIN) // We rely on a non-public function to improve performance here. // The OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION flag informs the kernel that // the calling thread is able to make progress even in absence of actions // from other threads and the OS_UNFAIR_LOCK_ADAPTIVE_SPIN one causes the // kernel to spin on a contested lock if the owning thread is running on // the same physical core (presumably only on x86 CPUs given that ARM // macs don't have cores capable of SMT).
os_unfair_lock_lock_with_options(
&mMutex,
OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | OS_UNFAIR_LOCK_ADAPTIVE_SPIN); #else
pthread_mutex_lock(&mMutex); #endif
}
// Mutex that can be used for static initialization.
// On Windows, CRITICAL_SECTION requires a function call to be initialized,
// but for the initialization lock, a static initializer calling the
// function would be called too late. We need no-function-call
// initialization, which SRWLock provides.
// Ideally, we'd use the same type of locks everywhere, but SRWLocks
// everywhere incur a performance penalty. See bug 1418389.
#if defined(XP_WIN)
struct MOZ_CAPABILITY("mutex") StaticMutex {
  SRWLOCK mMutex;

  // SRWLOCK needs no runtime initialization (SRWLOCK_INIT is a constant),
  // which is the whole point of this type.
  inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
    AcquireSRWLockExclusive(&mMutex);
  }

  inline void Unlock() MOZ_CAPABILITY_RELEASE() {
    ReleaseSRWLockExclusive(&mMutex);
  }
};
#endif  // defined(XP_WIN)
#ifndef XP_WIN // Re initialise after fork(), assumes that mDoLock is already initialised. void Reinit(pthread_t aForkingThread) { if (mDoLock == MUST_LOCK) {
Mutex::Init(); return;
} # ifdef MOZ_DEBUG // If this is an eluded lock we can only safely re-initialise it if the // thread that called fork is the one that owns the lock. if (pthread_equal(mThreadId, aForkingThread)) {
mThreadId = GetThreadId();
Mutex::Init();
} else { // We can't guantee that whatever resource this lock protects (probably a // jemalloc arena) is in a consistent state.
mDeniedAfterFork = true;
} # endif
} #endif
inlinevoid Lock() MOZ_CAPABILITY_ACQUIRE() { if (ShouldLock()) {
Mutex::Lock();
}
}
inlinevoid Unlock() MOZ_CAPABILITY_RELEASE() { if (ShouldLock()) {
Mutex::Unlock();
}
}
// Return true if we can use this resource from this thread, either because
// we'll use the lock or because this is the only thread that will access the
// protected resource.
#ifdef MOZ_DEBUG
bool SafeOnThisThread() const {
  if (mDoLock == MUST_LOCK) {
    return true;
  }
  return ThreadIdEqual(GetThreadId(), mThreadId);
}
#endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.