// vim:set sw=2 sts=2 et cin: /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
NS_ASSERTION(!gSocketTransportService, "must not instantiate twice");
gSocketTransportService = this;
// The Poll list always has an entry at [0]. The rest of the // list is a duplicate of the Active list's PRFileDesc file descriptors.
PRPollDesc entry = {nullptr, PR_POLL_READ | PR_POLL_EXCEPT, 0};
mPollList.InsertElementAt(0, entry);
}
// Reverse the array to make later rules override earlier rules. for (autoconst& portMapping : Reversed(*mPortRemapping)) { if (*aPort < std::get<0>(portMapping)) { continue;
} if (*aPort > std::get<1>(portMapping)) { continue;
}
tokenizer.SkipWhites(); if (tokenizer.CheckChar(',')) { continue; // another port or port range is expected
}
if (tokenizer.CheckChar('=')) {
uint16_t targetPort;
tokenizer.SkipWhites(); if (!tokenizer.ReadInteger(&targetPort)) { break;
}
// Storing reversed, because the most common cases (like 443) will very // likely be listed as first, less common cases will be added to the end // of the list mapping to the same port. As we iterate the whole // remapping array from the end, this may have a small perf win by // hitting the most common cases earlier. for (autoconst& range : Reversed(ranges)) {
portRemapping.AppendElement(std::make_tuple(
std::get<0>(range), std::get<1>(range), targetPort));
}
ranges.Clear();
tokenizer.SkipWhites(); if (tokenizer.CheckChar(';')) { continue; // more mappings (or EOF) expected
} if (tokenizer.CheckEOF()) { returntrue;
}
}
// Anything else is unexpected. break;
}
// 'break' from the parsing loop means ill-formed preference
portRemapping.Clear(); returnfalse;
};
bool rv = consumePreference();
if (!IsOnCurrentThread()) {
nsCOMPtr<nsIThread> thread = GetThreadSafely(); if (!thread) { // Init hasn't been called yet. Could probably just assert. // If shutdown, the dispatch below will just silently fail.
NS_ASSERTION(false, "ApplyPortRemapPreference before STS::Init"); returnfalse;
}
thread->Dispatch(NewRunnableMethod<TPortRemapping>( "net::ApplyPortRemapping", this,
&nsSocketTransportService::ApplyPortRemapPreference, portRemapping));
} else {
ApplyPortRemapPreference(portRemapping);
}
nsCOMPtr<nsIThread> thread = GetThreadSafely();
nsresult rv;
rv = thread ? thread->Dispatch(event_ref.forget(), flags)
: NS_ERROR_NOT_INITIALIZED; if (rv == NS_ERROR_UNEXPECTED) { // Thread is no longer accepting events. We must have just shut it // down on the main thread. Pretend we never saw it.
rv = NS_ERROR_NOT_INITIALIZED;
} return rv;
}
MOZ_ASSERT(OnSocketThread(), "not on socket thread");
if (!CanAttachSocket()) { return NS_ERROR_NOT_AVAILABLE;
}
SocketContext sock{fd, handler, 0};
AddToIdleList(&sock); return NS_OK;
}
// the number of sockets that can be attached at any given time is // limited. this is done because some operating systems (e.g., Win9x) // limit the number of sockets that can be created by an application. // AttachSocket will fail if the limit is exceeded. consumers should // call CanAttachSocket and check the result before creating a socket.
bool nsSocketTransportService::CanAttachSocket() {
MOZ_ASSERT(!mShuttingDown);
uint32_t total = mActiveList.Length() + mIdleList.Length(); bool rv = total < gMaxCount;
{ // inform the handler that this socket is going away
sock->mHandler->OnSocketDetached(sock->mFD);
}
mSentBytesCount += sock->mHandler->ByteCountSent();
mReceivedBytesCount += sock->mHandler->ByteCountReceived();
// // notify the first element on the pending socket queue... //
nsCOMPtr<nsIRunnable> event;
LinkedRunnableEvent* runnable = mPendingSocketQueue.getFirst(); if (runnable) {
event = runnable->TakeEvent();
runnable->remove(); delete runnable;
} if (event) { // move event from pending queue to dispatch queue return Dispatch(event, NS_DISPATCH_NORMAL);
} return NS_OK;
}
// Returns the index of a SocketContext within a list, or -1 if it's
// not a pointer to a list element
// NOTE: this could be supplied by nsTArray<>
int64_t nsSocketTransportService::SockIndex(SocketContextList& aList,
                                            SocketContext* aSock) {
  ptrdiff_t offset = -1;
  if (!aList.IsEmpty()) {
    // Pointer arithmetic against the first element gives the candidate index.
    offset = aSock - &aList[0];
    // Anything outside [0, Length()) is not an element of this list.
    if (offset < 0 || (size_t)offset + 1 > aList.Length()) {
      offset = -1;
    }
  }
  return (int64_t)offset;
}
// If there are pending events for this thread then // DoPollIteration() should service the network without blocking. bool pendingEvents = false;
mRawThread->HasPendingEvents(&pendingEvents);
if ((ts - mLastNetworkLinkChangeTime) < mNetworkLinkChangeBusyWaitPeriod) { // Being here means we are few seconds after a network change has // been detected.
PRIntervalTime to = mNetworkLinkChangeBusyWaitTimeout; if (to) {
pollTimeout = std::min(to, pollTimeout);
SOCKET_LOG((" timeout shorthened after network change event"));
}
}
TimeStamp pollStart; if (Telemetry::CanRecordPrereleaseData()) {
pollStart = TimeStamp::NowLoRes();
}
int32_t n;
{ #ifdef MOZ_GECKO_PROFILER
TimeStamp startTime = TimeStamp::Now(); if (pollTimeout != PR_INTERVAL_NO_WAIT) { // There will be an actual non-zero wait, let the profiler know about it // by marking thread as sleeping around the polling call.
profiler_thread_sleep();
} #endif
n = PR_Poll(firstPollEntry, pollCount, pollTimeout);
const uint32_t kWindowsThreadStackSize = 512 * 1024; // We can remove this custom stack size when DEFAULT_STACK_SIZE is increased.
static_assert(kWindowsThreadStackSize > nsIThreadManager::DEFAULT_STACK_SIZE); return kWindowsThreadStackSize; #else return nsIThreadManager::DEFAULT_STACK_SIZE; #endif
}
// called from main thread only
//
// Creates (or adopts) the socket thread, installs it under mLock, and
// registers the observer-service topics this service listens to. Idempotent
// once mInitialized is set; fails after shutdown has started.
NS_IMETHODIMP
nsSocketTransportService::Init() {
  if (!NS_IsMainThread()) {
    NS_ERROR("wrong thread");
    return NS_ERROR_UNEXPECTED;
  }

  if (mInitialized) {
    return NS_OK;
  }

  if (mShuttingDown) {
    return NS_ERROR_UNEXPECTED;
  }

  nsCOMPtr<nsIThread> thread;
  if (!XRE_IsContentProcess() ||
      StaticPrefs::network_allow_raw_sockets_in_content_processes_AtStartup()) {
    // Since we Poll, we can't use normal LongTask support in Main Process
    nsresult rv = NS_NewNamedThread(
        "Socket Thread", getter_AddRefs(thread), this,
        {GetThreadStackSize(), false, false, Some(SOCKET_THREAD_LONGTASK_MS)});
    NS_ENSURE_SUCCESS(rv, rv);
  } else {
    // In the child process, we just want a regular nsThread with no socket
    // polling. So we don't want to run the nsSocketTransportService runnable
    // on it.
    nsresult rv =
        NS_NewNamedThread("Socket Thread", getter_AddRefs(thread), nullptr,
                          {nsIThreadManager::DEFAULT_STACK_SIZE, false, false,
                           Some(SOCKET_THREAD_LONGTASK_MS)});
    NS_ENSURE_SUCCESS(rv, rv);

    // Set up some of the state that nsSocketTransportService::Run would set.
    PRThread* prthread = nullptr;
    thread->GetPRThread(&prthread);
    gSocketThread = prthread;
    mRawThread = thread;
  }

  {
    MutexAutoLock lock(mLock);
    // Install our mThread, protecting against concurrent readers
    thread.swap(mThread);
    mDirectTaskDispatcher = do_QueryInterface(mThread);
    MOZ_DIAGNOSTIC_ASSERT(
        mDirectTaskDispatcher,
        "Underlying thread must support direct task dispatching");
  }

  nsCOMPtr<nsIObserverService> obsSvc = services::GetObserverService();
  // Note that the observer notifications are forwarded from parent process to
  // socket process. We have to make sure the topics registered below are also
  // registered in nsIObserver::Init().
  if (obsSvc) {
    obsSvc->AddObserver(this, "last-pb-context-exited", false);
    obsSvc->AddObserver(this, NS_WIDGET_SLEEP_OBSERVER_TOPIC, true);
    obsSvc->AddObserver(this, NS_WIDGET_WAKE_OBSERVER_TOPIC, true);
    obsSvc->AddObserver(this, "xpcom-shutdown-threads", false);
    obsSvc->AddObserver(this, NS_NETWORK_LINK_TOPIC, false);
  }

  // We can now dispatch tasks to the socket thread.
  mInitialized = true;
  return NS_OK;
}
// called from main thread only
NS_IMETHODIMP
nsSocketTransportService::Shutdown(bool aXpcomShutdown) {
SOCKET_LOG(("nsSocketTransportService::Shutdown\n"));
NS_ENSURE_STATE(NS_IsMainThread());
if (!mInitialized || mShuttingDown) { // We never inited, or shutdown has already started return NS_OK;
}
{ auto observersCopy = mShutdownObservers; for (auto& observer : observersCopy) {
observer->Observe();
}
}
mShuttingDown = true;
{
MutexAutoLock lock(mLock);
if (mPollableEvent) {
mPollableEvent->Signal();
}
}
// If we're shutting down due to going offline (rather than due to XPCOM // shutdown), also tear down the thread. The thread will be shutdown during // xpcom-shutdown-threads if during xpcom-shutdown proper. if (!aXpcomShutdown) {
ShutdownThread();
}
// join with thread
nsCOMPtr<nsIThread> thread = GetThreadSafely();
thread->Shutdown();
{
MutexAutoLock lock(mLock); // Drop our reference to mThread and make sure that any concurrent readers // are excluded
mThread = nullptr;
mDirectTaskDispatcher = nullptr;
}
// Toggles the service's offline state under mLock. Going offline asks the
// socket thread (via mGoingOffline + a pollable-event signal) to detach all
// sockets on its next iteration.
NS_IMETHODIMP
nsSocketTransportService::SetOffline(bool offline) {
  MutexAutoLock lock(mLock);
  if (!mOffline && offline) {
    // signal the socket thread to go offline, so it will detach sockets
    mGoingOffline = true;
    mOffline = true;
  } else if (mOffline && !offline) {
    mOffline = false;
  }
  // Wake the socket thread out of poll() so it notices the state change.
  if (mPollableEvent) {
    mPollableEvent->Signal();
  }
  return NS_OK;
}
// nsIThreadObserver hook: an event was dispatched to the socket thread.
// Signals the pollable event so a blocked poll() wakes up to service it.
NS_IMETHODIMP
nsSocketTransportService::OnDispatchedEvent() {
#ifndef XP_WIN
  // On windows poll can hang and this became worse when we introduced the
  // patch for bug 698882 (see also bug 1292181), therefore we reverted the
  // behavior on windows to be as before bug 698882, e.g. write to the socket
  // also if an event dispatch is on the socket thread and writing to the
  // socket for each event.
  if (OnSocketThread()) {
    // this check is redundant to one done inside ::Signal(), but
    // we can do it here and skip obtaining the lock - given that
    // this is a relatively common occurrence it's worth the
    // redundant code
    SOCKET_LOG(("OnDispatchedEvent Same Thread Skip Signal\n"));
    return NS_OK;
  }
#else
  if (gIOService->IsNetTearingDown()) {
    // Poll can hang sometimes. If we are in shutdown, we are going to
    // start a watchdog. If we do not exit poll within
    // REPAIR_POLLABLE_EVENT_TIME signal a pollable event again.
    StartPollWatchdog();
  }
#endif

  MutexAutoLock lock(mLock);
  if (mPollableEvent) {
    mPollableEvent->Signal();
  }
  return NS_OK;
}
#ifdefined(XP_WIN) // see bug 1361495, gethostname() triggers winsock initialization. // so do it here (on parent and child) to protect against it being done first // accidentally on the main thread.. especially via PR_GetSystemInfo(). This // will also improve latency of first real winsock operation // .. // If STS-thread is no longer needed this should still be run before exiting
{ // See bug 1843384: // Avoid blocking the main thread by allocating the PollableEvent outside // the mutex. Still has the potential to hang the socket thread, but the // main thread remains responsive.
PollableEvent* pollable = new PollableEvent();
MutexAutoLock lock(mLock);
mPollableEvent.reset(pollable);
// // NOTE: per bug 190000, this failure could be caused by Zone-Alarm // or similar software. // // NOTE: per bug 191739, this failure could also be caused by lack // of a loopback device on Windows and OS/2 platforms (it creates // a loopback socket pair on these platforms to implement a pollable // event object). if we can't create a pollable event, then we'll // have to "busy wait" to implement the socket event queue :-( // if (!mPollableEvent->Valid()) {
mPollableEvent = nullptr;
NS_WARNING("running socket transport thread without a pollable event");
SOCKET_LOG(("running socket transport thread without a pollable event"));
}
// Ensure a call to GetCurrentSerialEventTarget() returns this event target.
SerialEventTargetGuard guard(this);
// hook ourselves up to observe event processing for this thread
nsCOMPtr<nsIThreadInternal> threadInt = do_QueryInterface(mRawThread);
threadInt->SetObserver(this);
// make sure the pseudo random number generator is seeded on this thread
srand(static_cast<unsigned>(PR_Now()));
// For the calculation of the duration of the last cycle (i.e. the last // for-loop iteration before shutdown).
TimeStamp startOfCycleForLastCycleCalc;
// For measuring of the poll iteration duration without time spent blocked // in poll().
TimeStamp pollCycleStart; // Time blocked in poll().
TimeDuration singlePollDuration;
// For calculating the time needed for a new element to run.
TimeStamp startOfIteration;
TimeStamp startOfNextIteration;
// If there is too many pending events queued, we will run some poll() // between them and the following variable is cumulative time spent // blocking in poll().
TimeDuration pollDuration;
for (;;) { bool pendingEvents = false; if (Telemetry::CanRecordPrereleaseData()) {
startOfCycleForLastCycleCalc = TimeStamp::NowLoRes();
startOfNextIteration = TimeStamp::NowLoRes();
}
pollDuration = nullptr; // We pop out to this loop when there are no pending events. // If we don't reset these, we may not re-enter ProcessNextEvent() // until we have events to process, and it may seem like we have // an event running for a very long time.
mRawThread->SetRunningEventDelay(TimeDuration(), TimeStamp());
do { if (Telemetry::CanRecordPrereleaseData()) {
pollCycleStart = TimeStamp::NowLoRes();
}
mRawThread->HasPendingEvents(&pendingEvents); if (pendingEvents) { if (!mServingPendingQueue) {
nsresult rv = Dispatch(
NewRunnableMethod( "net::nsSocketTransportService::" "MarkTheLastElementOfPendingQueue", this,
&nsSocketTransportService::MarkTheLastElementOfPendingQueue),
nsIEventTarget::DISPATCH_NORMAL); if (NS_FAILED(rv)) {
NS_WARNING( "Could not dispatch a new event on the " "socket thread.");
} else {
mServingPendingQueue = true;
}
if (Telemetry::CanRecordPrereleaseData()) {
startOfIteration = startOfNextIteration; // Everything that comes after this point will // be served in the next iteration. If no even // arrives, startOfNextIteration will be reset at the // beginning of each for-loop.
startOfNextIteration = TimeStamp::NowLoRes();
}
}
TimeStamp eventQueueStart = TimeStamp::NowLoRes(); do {
NS_ProcessNextEvent(mRawThread);
pendingEvents = false;
mRawThread->HasPendingEvents(&pendingEvents);
} while (pendingEvents && mServingPendingQueue &&
((TimeStamp::NowLoRes() - eventQueueStart).ToMilliseconds() <
mMaxTimePerPollIter));
bool goingOffline = false; // now that our event queue is empty, check to see if we should exit if (mShuttingDown) { if (Telemetry::CanRecordPrereleaseData() &&
!startOfCycleForLastCycleCalc.IsNull()) {
glean::sts::poll_and_event_the_last_cycle.AccumulateRawDuration(
TimeStamp::NowLoRes() - startOfCycleForLastCycleCalc);
} break;
}
{
MutexAutoLock lock(mLock); if (mGoingOffline) {
mGoingOffline = false;
goingOffline = true;
}
} // Avoid potential deadlock if (goingOffline) {
Reset(true);
}
}
SOCKET_LOG(("STS shutting down thread\n"));
// detach all sockets, including locals
Reset(false);
// We don't clear gSocketThread so that OnSocketThread() won't be a false // alarm for events generated by stopping the SSL threads during shutdown.
psm::StopSSLServerCertVerificationThreads();
// Final pass over the event queue. This makes sure that events posted by // socket detach handlers get processed.
NS_ProcessPendingEvents(mRawThread);
// We can't have more than int32_max sockets in use
int32_t i, count; // // poll loop // // walk active list backwards to see if any sockets should actually be // idle, then walk the idle list backwards to see if any idle sockets // should become active. take care to check only idle sockets that // were idle to begin with ;-) //
count = mIdleList.Length(); for (i = mActiveList.Length() - 1; i >= 0; --i) { //---
SOCKET_LOG((" active [%u] { handler=%p condition=%" PRIx32 " pollflags=%hu }\n",
i, mActiveList[i].mHandler.get(), static_cast<uint32_t>(mActiveList[i].mHandler->mCondition),
mActiveList[i].mHandler->mPollFlags)); //--- if (NS_FAILED(mActiveList[i].mHandler->mCondition)) {
DetachSocket(mActiveList, &mActiveList[i]);
} else {
uint16_t in_flags = mActiveList[i].mHandler->mPollFlags; if (in_flags == 0) {
MoveToIdleList(&mActiveList[i]);
} else { // update poll flags
mPollList[i + 1].in_flags = in_flags;
mPollList[i + 1].out_flags = 0;
mActiveList[i].EnsureTimeout(now);
}
}
} for (i = count - 1; i >= 0; --i) { //---
SOCKET_LOG((" idle [%u] { handler=%p condition=%" PRIx32 " pollflags=%hu }\n",
i, mIdleList[i].mHandler.get(), static_cast<uint32_t>(mIdleList[i].mHandler->mCondition),
mIdleList[i].mHandler->mPollFlags)); //--- if (NS_FAILED(mIdleList[i].mHandler->mCondition)) {
DetachSocket(mIdleList, &mIdleList[i]);
} elseif (mIdleList[i].mHandler->mPollFlags != 0) {
MoveToPollList(&mIdleList[i]);
}
}
{
MutexAutoLock lock(mLock); if (mPollableEvent) { // we want to make sure the timeout is measured from the time // we enter poll(). This method resets the timestamp to 'now', // if we were first signalled between leaving poll() and here. // If we didn't do this and processing events took longer than // the allowed signal timeout, we would detect it as a // false-positive. AdjustFirstSignalTimestamp is then a no-op // until mPollableEvent->Clear() is called.
mPollableEvent->AdjustFirstSignalTimestamp();
}
}
// Measures seconds spent while blocked on PR_Poll
int32_t n = 0;
*pollDuration = nullptr;
if (!gIOService->IsNetTearingDown()) { // Let's not do polling during shutdown. #ifdefined(XP_WIN)
StartPolling(); #endif
n = Poll(pollDuration, now); #ifdefined(XP_WIN)
EndPolling(); #endif
}
now = PR_IntervalNow(); #ifdef MOZ_GECKO_PROFILER
TimeStamp startTime; bool profiling = profiler_thread_is_being_profiled_for_markers(); if (profiling) {
startTime = TimeStamp::Now();
} #endif
if (n < 0) {
SOCKET_LOG((" PR_Poll error [%d] os error [%d]\n", PR_GetError(),
PR_GetOSError()));
} else { // // service "active" sockets... // for (i = 0; i < int32_t(mActiveList.Length()); ++i) {
PRPollDesc& desc = mPollList[i + 1];
SocketContext& s = mActiveList[i]; if (n > 0 && desc.out_flags != 0) {
s.DisengageTimeout();
s.mHandler->OnSocketReady(desc.fd, desc.out_flags);
} elseif (s.IsTimedOut(now)) {
SOCKET_LOG(("socket %p timed out", s.mHandler.get()));
s.DisengageTimeout();
s.mHandler->OnSocketReady(desc.fd, -1);
} else {
s.MaybeResetEpoch();
}
} // // check for "dead" sockets and remove them (need to do this in // reverse order obviously). // for (i = mActiveList.Length() - 1; i >= 0; --i) { if (NS_FAILED(mActiveList[i].mHandler->mCondition)) {
DetachSocket(mActiveList, &mActiveList[i]);
}
}
{
MutexAutoLock lock(mLock); // acknowledge pollable event (should not block) if (n != 0 &&
(mPollList[0].out_flags & (PR_POLL_READ | PR_POLL_EXCEPT)) &&
mPollableEvent &&
((mPollList[0].out_flags & PR_POLL_EXCEPT) ||
!mPollableEvent->Clear())) { // On Windows, the TCP loopback connection in the // pollable event may become broken when a laptop // switches between wired and wireless networks or // wakes up from hibernation. We try to create a // new pollable event. If that fails, we fall back // on "busy wait".
TryRepairPollableEvent();
}
// If the pref is set, honor it. 0 means use OS defaults.
nsresult rv = Preferences::GetInt(SEND_BUFFER_PREF, &bufferSize); if (NS_SUCCEEDED(rv)) {
mSendBufferSize = bufferSize; return;
}
// Notify each socket that keepalive has been en/disabled globally. for (int32_t i = mActiveList.Length() - 1; i >= 0; --i) {
NotifyKeepaliveEnabledPrefChange(&mActiveList[i]);
} for (int32_t i = mIdleList.Length() - 1; i >= 0; --i) {
NotifyKeepaliveEnabledPrefChange(&mIdleList[i]);
}
}
void nsSocketTransportService::NotifyKeepaliveEnabledPrefChange(
SocketContext* sock) {
MOZ_ASSERT(sock, "SocketContext cannot be null!");
MOZ_ASSERT(sock->mHandler, "SocketContext does not have a handler!");
// Detaches every socket marked private (private-browsing) from both the
// active and idle lists. Socket-thread only.
void nsSocketTransportService::ClosePrivateConnections() {
  MOZ_ASSERT(IsOnCurrentThread(), "Must be called on the socket thread");

  // Iterate backwards so DetachSocket() can remove the current element
  // without invalidating the indices still to be visited.
  for (int32_t idx = mActiveList.Length() - 1; idx >= 0; --idx) {
    if (mActiveList[idx].mHandler->mIsPrivate) {
      DetachSocket(mActiveList, &mActiveList[idx]);
    }
  }
  for (int32_t idx = mIdleList.Length() - 1; idx >= 0; --idx) {
    if (mIdleList[idx].mHandler->mIsPrivate) {
      DetachSocket(mIdleList, &mIdleList[idx]);
    }
  }
}
#ifdefined(XP_UNIX) && !defined(AIX) && !defined(NEXTSTEP) && !defined(QNX) // On unix and os x network sockets and file // descriptors are the same. OS X comes defaulted at 256, // most linux at 1000. We can reliably use [sg]rlimit to // query that and raise it if needed.
struct rlimit rlimitData{}; if (getrlimit(RLIMIT_NOFILE, &rlimitData) == -1) { // rlimit broken - use min return PR_SUCCESS;
}
if (rlimitData.rlim_cur >= SOCKET_LIMIT_TARGET) { // larger than target!
gMaxCount = SOCKET_LIMIT_TARGET; return PR_SUCCESS;
}
int32_t maxallowed = rlimitData.rlim_max; if ((uint32_t)maxallowed <= SOCKET_LIMIT_MIN) { return PR_SUCCESS; // so small treat as if rlimit is broken
}
if ((maxallowed == -1) || // no hard cap - ok to set target
((uint32_t)maxallowed >= SOCKET_LIMIT_TARGET)) {
maxallowed = SOCKET_LIMIT_TARGET;
}
#elifdefined(XP_WIN) && !defined(WIN_CE) // >= XP is confirmed to have at least 1000
static_assert(SOCKET_LIMIT_TARGET <= 1000, "SOCKET_LIMIT_TARGET max value is 1000");
gMaxCount = SOCKET_LIMIT_TARGET; #else // other platforms are harder to test - so leave at safe legacy value #endif
return PR_SUCCESS;
}
// Used to return connection info to Dashboard.cpp void nsSocketTransportService::AnalyzeConnection(nsTArray<SocketInfo>* data,
SocketContext* context, bool aActive) { if (context->mHandler->mIsPrivate) { return;
}
PRFileDesc* aFD = context->mFD;
#ifdefined(XP_WIN) void nsSocketTransportService::StartPollWatchdog() { // Start off the timer from a runnable off of the main thread in order to // avoid a deadlock, see bug 1370448.
RefPtr<nsSocketTransportService> self(this);
NS_DispatchToMainThread(NS_NewRunnableFunction( "nsSocketTransportService::StartPollWatchdog", [self] {
MutexAutoLock lock(self->mLock);
// Poll can hang sometimes. If we are in shutdown, we are going to start // a watchdog. If we do not exit poll within REPAIR_POLLABLE_EVENT_TIME // signal a pollable event again. if (gIOService->IsNetTearingDown() && self->mPolling &&
!self->mPollRepairTimer) {
NS_NewTimerWithObserver(getter_AddRefs(self->mPollRepairTimer), self,
REPAIR_POLLABLE_EVENT_TIME,
nsITimer::TYPE_REPEATING_SLACK);
}
}));
}
PollableEvent* pollable = nullptr;
{ // Bug 1719046: In certain cases PollableEvent constructor can hang // when callign PR_NewTCPSocketPair. // We unlock the mutex to prevent main thread hangs acquiring the lock.
MutexAutoUnlock unlock(mLock);
pollable = new PollableEvent();
}
NS_WARNING("Trying to repair mPollableEvent");
mPollableEvent.reset(pollable); if (!mPollableEvent->Valid()) {
mPollableEvent = nullptr;
}
SOCKET_LOG(
("running socket transport thread without " "a pollable event now valid=%d",
!!mPollableEvent));
mPollList[0].fd = mPollableEvent ? mPollableEvent->PollableFD() : nullptr;
mPollList[0].in_flags = PR_POLL_READ | PR_POLL_EXCEPT;
mPollList[0].out_flags = 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.