/* Race-detector (e.g. TSan/Helgrind) annotations describing reader-writer
   lock lifecycle events.  Each macro forwards the call site's file/line so
   the detector can attribute events to source locations. */

/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
    AnnotateRWLockCreate(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
    AnnotateRWLockDestroy(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" has been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
    AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)

/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
    AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)

/* Optionally declare the annotation functions with weak linkage so a race
   detector's strong definitions can override the no-op fallbacks.
   Note: the scraped text read "#ifdefined", which is not a valid
   preprocessor directive; it must be "#if defined(...)". */
#if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
    #if defined(__GNUC__)
        #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
    #else
        /* TODO(glider): for Windows support we may want to change this macro in order
           to prepend __declspec(selectany) to the annotations' declarations. */
        #error weak annotations are not supported for your compiler
    #endif
#else
    #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
#endif
// Debug-build helper: a small set of thread IDs used to validate which
// threads currently hold (or are waiting on) this mutex.  Linear scans are
// fine here — the set only ever holds the handful of contending threads.
//
// NOTE(review): the scraped text had fused tokens ("returntrue",
// "returnfalse") and was missing the member declaration and the closing
// "};" (the next line in the file begins an out-of-class definition, so the
// class must have been closed here in the original).  The container type is
// inferred from the append(1, &id)/remove(i)/size()/operator[] usage below
// (SkTDArray-style API) — confirm against the original source.
class SkSharedMutex::ThreadIDSet {
public:
    // Returns true if threadID is in the set.
    bool find(SkThreadID threadID) const {
        for (auto& t : fThreadIDs) {
            if (t == threadID) return true;
        }
        return false;
    }

    // Returns true if did not already exist.
    bool tryAdd(SkThreadID threadID) {
        for (auto& t : fThreadIDs) {
            if (t == threadID) return false;
        }
        fThreadIDs.append(1, &threadID);
        return true;
    }

    // Returns true if already exists in Set.
    bool tryRemove(SkThreadID threadID) {
        for (int i = 0; i < fThreadIDs.size(); ++i) {
            if (fThreadIDs[i] == threadID) {
                fThreadIDs.remove(i);
                return true;
            }
        }
        return false;
    }

    // Number of thread IDs currently in the set (callers elsewhere in this
    // file use count() on these sets).
    int count() const { return fThreadIDs.size(); }

private:
    SkTDArray<SkThreadID> fThreadIDs;
};
// NOTE(review): the span below is a garbled transcription that interleaves
// fragments of SEVERAL SkSharedMutex member functions: the body of a
// debug-build release() (exclusive-release bookkeeping under fMu), the
// interior of a shared-release path (tryRemove on fCurrentShared), stray
// documentation for the fQueueCounts bit layout whose declaration has been
// fused into the trailing comment text ("staticconstint kLogThreadCount = 10;"
// is inside a // comment and never compiles), and the tail of an
// exclusive-acquire path that references an oldQueueCounts variable not
// declared in this scope.  Statements that follow "//" on the same physical
// line are commented out.  Recover the original from the upstream project
// source rather than editing this transcription in place.
// Implementation Detail: // The shared threads need two separate queues to keep the threads that were added after the // exclusive lock separate from the threads added before. void SkSharedMutex::release() {
ANNOTATE_RWLOCK_RELEASED(this, 1);
SkThreadID threadID(SkGetThreadID()); int sharedWaitingCount; int exclusiveWaitingCount; int sharedQueueSelect;
// (debug path) Under fMu: assert no shared holders remain, verify this
// thread actually held the exclusive lock, snapshot waiter counts, and
// swap the waiting-shared queue in as the new current-shared set.
{
SkAutoMutexExclusive l(fMu);
SkASSERT(0 == fCurrentShared->count()); if (!fWaitingExclusive->tryRemove(threadID)) {
SkDEBUGFAILF("Thread %" PRIx64 " did not have the lock held.\n",
(uint64_t)threadID);
}
exclusiveWaitingCount = fWaitingExclusive->count();
sharedWaitingCount = fWaitingShared->count();
fWaitingShared.swap(fCurrentShared);
sharedQueueSelect = fSharedQueueSelect; if (sharedWaitingCount > 0) {
fSharedQueueSelect = 1 - fSharedQueueSelect;
}
}
// NOTE(review): from here on the text belongs to a DIFFERENT function
// (a shared-release path) — it removes this thread from the current-shared
// set rather than the exclusive-waiter set.
int currentSharedCount; int waitingExclusiveCount;
{
SkAutoMutexExclusive l(fMu); if (!fCurrentShared->tryRemove(threadID)) {
SkDEBUGFAILF("Thread %" PRIx64 " does not hold a shared lock.\n",
(uint64_t)threadID);
}
currentSharedCount = fCurrentShared->count();
waitingExclusiveCount = fWaitingExclusive->count();
}
// NOTE(review): the "if (..." at the end of the next comment line is
// absorbed into the comment, leaving fExclusiveQueue.wait() unguarded;
// the wait/ANNOTATE pair below reads like the tail of an exclusive
// acquire, not of release().
// The fQueueCounts fields holds many counts in an int32_t in order to make managing them atomic. // These three counts must be the same size, so each gets 10 bits. The 10 bits represent // the log of the count which is 1024. // // The three counts held in fQueueCounts are: // * Shared - the number of shared lock holders currently running. // * WaitingExclusive - the number of threads waiting for an exclusive lock. // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread // to finish. staticconstint kLogThreadCount = 10;
// If there are no other exclusive waiters and no shared threads are running then run // else wait. if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
fExclusiveQueue.wait();
}
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
// NOTE(review): this is the TAIL of a non-debug exclusive-release function —
// the function header, the declarations of oldQueueCounts/newQueueCounts/
// waitingShared, and the opening "do {" of the CAS loop closed by the
// "} while (...)" below are all missing from this transcription.  Also note
// the fused token "elseif" (must be "else if") and that the "if
// (waitingShared > 0) {" at the end of the comment two lines down is
// absorbed into that comment.  Recover the full function from the upstream
// project source.
// The number of threads waiting to acquire a shared lock.
waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
// If there are any move the counts of all the shared waiters to actual shared. They are // going to run next. if (waitingShared > 0) {
// Set waiting shared to zero.
newQueueCounts &= ~kWaitingSharedMask;
// Because this is the exclusive release, then there are zero readers. So, the bits // for shared locks should be zero. Since those bits are zero, we can just |= in the // waitingShared count instead of clearing with an &= and then |= the count.
newQueueCounts |= waitingShared << kSharedOffset;
}
// Publish the new counts; memory_order_release makes the critical section's
// writes visible to the next acquirer.
} while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
std::memory_order_release,
std::memory_order_relaxed));
// Wake whoever runs next: all pending shared waiters, else one exclusive.
if (waitingShared > 0) { // Run all the shared.
fSharedQueue.signal(waitingShared);
} elseif ((newQueueCounts & kWaitingExclusiveMask) > 0) { // Run a single exclusive waiter.
fExclusiveQueue.signal();
}
}
void SkSharedMutex::acquireShared() {
int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
int32_t newQueueCounts; do {
newQueueCounts = oldQueueCounts; // If there are waiting exclusives then this shared lock waits else it runs. if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
newQueueCounts += 1 << kWaitingSharedOffset;
} else {
newQueueCounts += 1 << kSharedOffset;
}
} while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
std::memory_order_acquire,
std::memory_order_relaxed));
// If there are waiting exclusives, then this shared waits until after it runs. if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
fSharedQueue.wait();
}
ANNOTATE_RWLOCK_ACQUIRED(this, 0);
// If shared count is going to zero (because the old count == 1) and there are exclusive // waiters, then run a single exclusive waiter. if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
&& (oldQueueCounts & kWaitingExclusiveMask) > 0) {
fExclusiveQueue.signal();
}
}
#endif
Messung V0.5
• Dauer der Verarbeitung: 0.11 Sekunden
(vorverarbeitet)
•
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.