/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
using mozilla::dom::quota::ClientDirectoryLock; using mozilla::dom::quota::CloneFileAndAppend;
namespace {
/** * Note: The aCommitHook argument will be invoked while a lock is held. Callers * should be careful not to pass a hook that might lock on something else and * trigger a deadlock.
*/ template <typename Callable>
nsresult MaybeUpdatePaddingFile(nsIFile* aBaseDir, mozIStorageConnection* aConn, const int64_t aIncreaseSize, const int64_t aDecreaseSize,
Callable aCommitHook) {
MOZ_ASSERT(!NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(aBaseDir);
MOZ_DIAGNOSTIC_ASSERT(aConn);
MOZ_DIAGNOSTIC_ASSERT(aIncreaseSize >= 0);
MOZ_DIAGNOSTIC_ASSERT(aDecreaseSize >= 0);
// An Action that is executed when a Context is first created. It ensures that // the directory and database are setup properly. This lets other actions // not worry about these details. class SetupAction final : public SyncDBAction { public:
SetupAction() : SyncDBAction(DBAction::Create) {}
// executes in its own transaction
QM_TRY(MOZ_TO_RESULT(db::CreateOrMigrateSchema(*aDBDir, *aConn)));
// If the Context marker file exists, then the last session was // not cleanly shutdown. In these cases sqlite will ensure that // the database is valid, but we might still orphan data. Both // Cache objects and body files can be referenced by DOM objects // after they are "removed" from their parent. So we need to // look and see if any of these late access objects have been // orphaned. // // Note, this must be done after any schema version updates to // ensure our DBSchema methods work correctly. if (MarkerFileExists(aDirectoryMetadata)) {
NS_WARNING("Cache not shutdown cleanly! Cleaning up stale data...");
mozStorageTransaction trans(aConn, false,
mozIStorageConnection::TRANSACTION_IMMEDIATE);
QM_TRY(MOZ_TO_RESULT(trans.Start()));
// Clean up orphaned Cache objects
QM_TRY_INSPECT(constauto& orphanedCacheIdList,
db::FindOrphanedCacheIds(*aConn));
// Commit() explicitly here, because we want to ensure the padding file // has the correct content. // We'll restore padding file below, so just warn here if failure happens. // // XXX Before, if MaybeUpdatePaddingFile failed but we didn't enter the if // body below, we would have propagated the MaybeUpdatePaddingFile // failure, but if we entered it and RestorePaddingFile succeeded, we // would have returned NS_OK. Now, we will never propagate a // MaybeUpdatePaddingFile failure.
QM_WARNONLY_TRY(QM_TO_RESULT(
MaybeUpdatePaddingFile(aDBDir, aConn, /* aIncreaceSize */ 0,
overallDeletedPaddingSize.value(),
[&trans]() { return trans.Commit(); })));
}
if (DirectoryPaddingFileExists(*aDBDir, DirPaddingFile::TMP_FILE) ||
!DirectoryPaddingFileExists(*aDBDir, DirPaddingFile::FILE)) {
QM_TRY(MOZ_TO_RESULT(RestorePaddingFile(aDBDir, aConn)));
}
// Action that is executed when we determine that content has stopped using // a body file that has been orphaned. class DeleteOrphanedBodyAction final : public Action { public: using DeletedBodyIdList = AutoTArray<nsID, 64>;
// Singleton class to track Manager instances and ensure there is only // one for each unique ManagerId. class Manager::Factory { public: friendclass StaticAutoPtr<Manager::Factory>;
// If we get here during/after quota manager shutdown, we bail out.
MOZ_ASSERT(AppShutdown::GetCurrentShutdownPhase() <
ShutdownPhase::AppShutdownQM); if (AppShutdown::GetCurrentShutdownPhase() >=
ShutdownPhase::AppShutdownQM) {
NS_WARNING( "Attempt to AcquireCreateIfNonExistent a Manager during QM " "shutdown."); return Err(NS_ERROR_ILLEGAL_DURING_SHUTDOWN);
}
// Ensure there is a factory instance. This forces the Acquire() call // below to use the same factory.
QM_TRY(MOZ_TO_RESULT(MaybeCreateInstance()));
SafeRefPtr<Manager> ref = Acquire(*aManagerId); if (!ref) { // TODO: replace this with a thread pool (bug 1119864) // XXX Can't use QM_TRY_INSPECT because that causes a clang-plugin // error of the NoNewThreadsChecker.
nsCOMPtr<nsIThread> ioThread;
QM_TRY(MOZ_TO_RESULT(
NS_NewNamedThread("DOMCacheThread", getter_AddRefs(ioThread))));
// There may be an old manager for this origin in the process of // cleaning up. We need to tell the new manager about this so // that it won't actually start until the old manager is done. const SafeRefPtr<Manager> oldManager = Acquire(*aManagerId, Closing);
ref->Init(oldManager.maybeDeref());
// This might both happen in late shutdown such that this event // is executed even after the QuotaManager singleton passed away // or if the QuotaManager has not yet been created.
quota::QuotaManager::SafeMaybeRecordQuotaClientShutdownStep(
quota::Client::DOMCACHE, "Manager removed"_ns);
// clean up the factory singleton if there are no more managers
MaybeDestroyInstance();
}
AbortMatching([&aDirectoryLockIds](constauto& manager) { // Check if the Manager holds an acquired DirectoryLock. Origin clearing // can't be blocked by this Manager if there is no acquired DirectoryLock. // If there is an acquired DirectoryLock, check if the table contains the // lock for the Manager. return Client::IsLockForObjectAcquiredAndContainedInLockTable(
manager, aDirectoryLockIds);
});
}
{ // Note that we are synchronously calling shutdown code here. If any // of the shutdown code synchronously decides to delete the Factory // we need to delay that delete until the end of this method.
AutoRestore<bool> restore(sFactory->mInSyncAbortOrShutdown);
sFactory->mInSyncAbortOrShutdown = true;
for (constauto& manager : sFactory->mManagerList.ForwardRange()) { auto pinnedManager =
SafeRefPtr{manager.get(), AcquireStrongRefFromRawPtr{}};
pinnedManager->Shutdown();
}
}
for (constauto& manager : sFactory->mManagerList.NonObservingRange()) {
manager->Stringify(data);
}
data.Append(kStringifyEndSet); if (sFactory->mPotentiallyUnreleasedCSCP.Length() > 0) {
data.Append( "There have been CSCP instances whose" "Send__delete__ might not have freed them.");
}
}
if (!sFactory) { // We cannot use ClearOnShutdown() here because we're not on the main // thread. Instead, we delete sFactory in Factory::Remove() after the // last manager is removed. ShutdownObserver ensures this happens // before shutdown.
sFactory = new Factory();
}
// Never return sFactory to code outside Factory. We need to delete it // out from under ourselves just before we return from Remove(). This // would be (even more) dangerous if other code had a pointer to the // factory itself.
// If the factory is is still in use then we cannot delete yet. This // could be due to managers still existing or because we are in the // middle of aborting or shutting down. We need to be careful not to delete // ourself synchronously during shutdown. if (!sFactory->mManagerList.IsEmpty() || sFactory->mInSyncAbortOrShutdown) { return;
}
// Iterate in reverse to find the most recent, matching Manager. This // is important when looking for a Closing Manager. If a new Manager // chains to an old Manager we want it to be the most recent one. constauto range = Reversed(sFactory->mManagerList.NonObservingRange()); constauto foundIt = std::find_if(
range.begin(), range.end(), [aState, &aManagerId](constauto& manager) { return aState == manager->GetState() &&
*manager->mManagerId == aManagerId;
}); return foundIt != range.end()
? SafeRefPtr{foundIt->get(), AcquireStrongRefFromRawPtr{}}
: nullptr;
}
{ // Note that we are synchronously calling abort code here. If any // of the shutdown code synchronously decides to delete the Factory // we need to delay that delete until the end of this method.
AutoRestore<bool> restore(sFactory->mInSyncAbortOrShutdown);
sFactory->mInSyncAbortOrShutdown = true;
for (constauto& manager : sFactory->mManagerList.ForwardRange()) { if (aCondition(*manager)) { auto pinnedManager =
SafeRefPtr{manager.get(), AcquireStrongRefFromRawPtr{}};
pinnedManager->Abort();
}
}
}
MaybeDestroyInstance();
}
// Singleton created on demand and deleted when last Manager is cleared // in Remove(). // PBackground thread only. static StaticAutoPtr<Factory> sFactory;
// Weak references as we don't want to keep Manager objects alive forever. // When a Manager is destroyed it calls Factory::Remove() to clear itself. // PBackground thread only.
nsTObserverArray<NotNull<Manager*>> mManagerList;
// This flag is set when we are looping through the list and calling Abort() // or Shutdown() on each Manager. We need to be careful not to synchronously // trigger the deletion of the factory while still executing this loop. bool mInSyncAbortOrShutdown;
// Abstract class to help implement the various Actions. The vast majority // of Actions are synchronous and need to report back to a Listener on the // Manager. class Manager::BaseAction : public SyncDBAction { protected:
BaseAction(SafeRefPtr<Manager> aManager, ListenerId aListenerId)
: SyncDBAction(DBAction::Existing),
mManager(std::move(aManager)),
mListenerId(aListenerId) {}
// Action that is executed when we determine that content has stopped using // a Cache object that has been orphaned. class Manager::DeleteOrphanedCacheAction final : public SyncDBAction { public:
DeleteOrphanedCacheAction(SafeRefPtr<Manager> aManager, CacheId aCacheId)
: SyncDBAction(DBAction::Existing),
mManager(std::move(aManager)),
mCacheId(aCacheId) {}
virtualvoid CompleteOnInitiatingThread(nsresult aRv) override { // If the transaction fails, we shouldn't delete the body files and decrease // their padding size. if (NS_FAILED(aRv)) {
mDeletionInfo.mDeletedBodyIdList.Clear();
mDeletionInfo.mDeletedPaddingSize = 0;
}
// If we entered shutdown on the main thread while we were doing IO, // bail out now. if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdownQM)) { if (stream) {
stream->Close();
} return NS_ERROR_ABORT;
}
// If we entered shutdown on the main thread while we were doing IO, // bail out now. if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdownQM)) { if (stream) {
stream->Close();
} return NS_ERROR_ABORT;
}
// This is the most complex Action. It puts a request/response pair into the // Cache. It does not complete until all of the body data has been saved to // disk. This means its an asynchronous Action. class Manager::CachePutAllAction final : public DBAction { public:
// Queue a batch of request/response pairs (plus their body streams) for
// insertion into the Cache identified by aCacheId. The three input arrays
// are parallel: element i of aPutList pairs with stream i of each stream
// list. Streams may be null when an entry has no body.
CachePutAllAction(
    SafeRefPtr<Manager> aManager, ListenerId aListenerId, CacheId aCacheId,
    const nsTArray<CacheRequestResponse>& aPutList,
    const nsTArray<nsCOMPtr<nsIInputStream>>& aRequestStreamList,
    const nsTArray<nsCOMPtr<nsIInputStream>>& aResponseStreamList)
    : DBAction(DBAction::Existing),
      mManager(std::move(aManager)),
      mListenerId(aListenerId),
      mCacheId(aCacheId),
      mList(aPutList.Length()),
      mExpectedAsyncCopyCompletions(1),
      mAsyncResult(NS_OK),
      mMutex("cache::Manager::CachePutAllAction"),
      mUpdatedPaddingSize(0),
      mDeletedPaddingSize(0) {
  MOZ_DIAGNOSTIC_ASSERT(!aPutList.IsEmpty());
  MOZ_DIAGNOSTIC_ASSERT(aPutList.Length() == aRequestStreamList.Length());
  MOZ_DIAGNOSTIC_ASSERT(aPutList.Length() == aResponseStreamList.Length());

  // Copy each put tuple into our own entry list so the Action owns all
  // of the data it needs once it leaves the initiating thread.
  for (uint32_t index = 0; index < aPutList.Length(); ++index) {
    Entry* const newEntry = mList.AppendElement();
    newEntry->mRequest = aPutList[index].request();
    newEntry->mRequestStream = aRequestStreamList[index];
    newEntry->mResponse = aPutList[index].response();
    newEntry->mResponseStream = aResponseStreamList[index];
  }
}
// We should be pre-initialized to expect one async completion. This is // the "manual" completion we call at the end of this method in all // cases.
MOZ_DIAGNOSTIC_ASSERT(mExpectedAsyncCopyCompletions == 1);
// File bodies are streamed to disk via asynchronous copying. Start // this copying now. Each copy will eventually result in a call // to OnAsyncCopyComplete(). const nsresult rv = [this, &aDirectoryMetadata]() -> nsresult {
QM_TRY(CollectEachInRange(
mList, [this, &aDirectoryMetadata](auto& entry) -> nsresult {
QM_TRY(MOZ_TO_RESULT(
StartStreamCopy(aDirectoryMetadata, entry, RequestStream,
&mExpectedAsyncCopyCompletions)));
// Always call OnAsyncCopyComplete() manually here. This covers the // case where there is no async copying and also reports any startup // errors correctly. If we hit an error, then OnAsyncCopyComplete() // will cancel any async copying.
OnAsyncCopyComplete(rv);
}
// Called once for each asynchronous file copy whether it succeeds or // fails. If a file copy is canceled, it still calls this method with // an error code. void OnAsyncCopyComplete(nsresult aRv) {
MOZ_ASSERT(mTarget->IsOnCurrentThread());
MOZ_DIAGNOSTIC_ASSERT(mConn);
MOZ_DIAGNOSTIC_ASSERT(mResolver);
MOZ_DIAGNOSTIC_ASSERT(mExpectedAsyncCopyCompletions > 0);
// Explicitly check for cancellation here to catch a race condition. // Consider: // // 1) NS_AsyncCopy() executes on IO thread, but has not saved its // copy context yet. // 2) CancelAllStreamCopying() occurs on PBackground thread // 3) Copy context from (1) is saved on IO thread. // // Checking for cancellation here catches this condition when we // first call OnAsyncCopyComplete() manually from RunWithDBOnTarget(). // // This explicit cancellation check also handles the case where we // are canceled just after all stream copying completes. We should // abort the synchronous DB operations in this case if we have not // started them yet. if (NS_SUCCEEDED(aRv) && IsCanceled()) {
aRv = NS_ERROR_ABORT;
}
// If any of the async copies fail, we need to still wait for them all to // complete. Cancel any other streams still working and remember the // error. All canceled streams will call OnAsyncCopyComplete(). if (NS_FAILED(aRv) && NS_SUCCEEDED(mAsyncResult)) {
CancelAllStreamCopying();
mAsyncResult = aRv;
}
// Check to see if async copying is still on-going. If so, then simply // return for now. We must wait for a later OnAsyncCopyComplete() call.
mExpectedAsyncCopyCompletions -= 1; if (mExpectedAsyncCopyCompletions > 0) { return;
}
// We have finished with all async copying. Indicate this by clearing all // our copy contexts.
{
MutexAutoLock lock(mMutex);
mCopyContextList.Clear();
}
// An error occurred while async copying. Terminate the Action. // DoResolve() will clean up any files we may have written. if (NS_FAILED(mAsyncResult)) {
DoResolve(mAsyncResult); return;
}
for (uint32_t i = 0; i < mList.Length(); ++i) {
mList[i].mRequestStream = nullptr;
mList[i].mResponseStream = nullptr;
}
// If the transaction fails, we shouldn't delete the body files and decrease // their padding size. if (NS_FAILED(aRv)) {
mDeletedBodyIdList.Clear();
mDeletedPaddingSize = 0;
}
if (copyContext) {
MutexAutoLock lock(mMutex);
mCopyContextList.AppendElement(copyContext);
}
*aCopyCountOut += 1;
return NS_OK;
}
void CancelAllStreamCopying() { // May occur on either owning thread or target thread
MutexAutoLock lock(mMutex); for (uint32_t i = 0; i < mCopyContextList.Length(); ++i) {
MOZ_DIAGNOSTIC_ASSERT(mCopyContextList[i]);
BodyCancelWrite(*mCopyContextList[i]);
}
mCopyContextList.Clear();
}
staticvoid AsyncCopyCompleteFunc(void* aClosure, nsresult aRv) { // May be on any thread, including STS event target.
MOZ_DIAGNOSTIC_ASSERT(aClosure); // Weak ref as we are guaranteed to the action is alive until // CompleteOnInitiatingThread is called.
CachePutAllAction* action = static_cast<CachePutAllAction*>(aClosure);
action->CallOnAsyncCopyCompleteOnTargetThread(aRv);
}
// Bounce an async copy result over to OnAsyncCopyComplete() on the target
// thread. Callable from any thread, including the STS event target. A
// non-owning runnable is sufficient here because the Action is guaranteed
// to survive until CompleteOnInitiatingThread is called.
void CallOnAsyncCopyCompleteOnTargetThread(nsresult aRv) {
  nsCOMPtr<nsIRunnable> completionRunnable =
      NewNonOwningRunnableMethod<nsresult>(
          "dom::cache::Manager::CachePutAllAction::OnAsyncCopyComplete",
          this, &CachePutAllAction::OnAsyncCopyComplete, aRv);
  MOZ_ALWAYS_SUCCEEDS(mTarget->Dispatch(completionRunnable.forget(),
                                        nsIThread::DISPATCH_NORMAL));
}
// DoResolve() must not be called until all async copying has completed. #ifdef DEBUG
{
MutexAutoLock lock(mMutex);
MOZ_ASSERT(mCopyContextList.IsEmpty());
} #endif
// Clean up any files we might have written before hitting the error. if (NS_FAILED(aRv)) {
BodyDeleteFiles(*mDirectoryMetadata, *mDBDir, mBodyIdWrittenList); if (mUpdatedPaddingSize > 0) {
DecreaseUsageForDirectoryMetadata(*mDirectoryMetadata,
mUpdatedPaddingSize);
}
}
// Must be released on the target thread where it was opened.
mConn = nullptr;
// Drop our ref to the target thread as we are done with this thread. // Also makes our thread assertions catch any incorrect method calls // after resolve.
mTarget = nullptr;
// Make sure to de-ref the resolver per the Action API contract.
SafeRefPtr<Action::Resolver> resolver = std::move(mResolver);
resolver->Resolve(aRv);
}
// initiating thread only
SafeRefPtr<Manager> mManager; const ListenerId mListenerId;
// Set on initiating thread, read on target thread. State machine guarantees // these are not modified while being read by the target thread. const CacheId mCacheId;
nsTArray<Entry> mList;
uint32_t mExpectedAsyncCopyCompletions;
// Written to on target thread, accessed on initiating thread after target // thread activity is guaranteed complete
nsTArray<nsID> mDeletedBodyIdList;
// accessed from any thread while mMutex locked
Mutex mMutex MOZ_UNANNOTATED;
nsTArray<nsCOMPtr<nsISupports>> mCopyContextList;
Maybe<CacheDirectoryMetadata> mDirectoryMetadata; // Track how much pad amount has been added for new entries so that it can be // removed if an error occurs.
int64_t mUpdatedPaddingSize; // Track any pad amount associated with overwritten entries.
int64_t mDeletedPaddingSize;
};
virtualvoid Complete(Listener* aListener, ErrorResult&& aRv) override { // If the transaction fails, we shouldn't delete the body files and decrease // their padding size. if (aRv.Failed()) {
mDeletionInfo.mDeletedBodyIdList.Clear();
mDeletionInfo.mDeletedPaddingSize = 0;
}
// If we entered shutdown on the main thread while we were doing IO, // bail out now. if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdownQM)) { if (stream) {
stream->Close();
} return NS_ERROR_ABORT;
}
// If we entered shutdown on the main thread while we were doing IO, // bail out now. if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdownQM)) { if (stream) {
stream->Close();
} return NS_ERROR_ABORT;
}
// Don't delete the removing padding size here, we'll delete it on // DeleteOrphanedCacheAction.
QM_TRY(
MOZ_TO_RESULT(db::StorageForgetCache(*aConn, mNamespace, mArgs.key())));
QM_TRY(MOZ_TO_RESULT(trans.Commit()));
mCacheDeleted = true; return NS_OK;
}
virtualvoid Complete(Listener* aListener, ErrorResult&& aRv) override { if (mCacheDeleted) { // If content is referencing this cache, mark it orphaned to be // deleted later. if (!mManager->SetCacheIdOrphanedIfRefed(mCacheId)) { // no outstanding references, delete immediately constauto pinnedContext =
SafeRefPtr{mManager->mContext, AcquireStrongRefFromRawPtr{}};
virtualvoid Complete(Listener* aListener, ErrorResult&& aRv) override { if (aRv.Failed()) { // Ignore the reason for fail and just pass a null input stream to let it // fail.
aRv.SuppressException();
mResolver(nullptr);
} else {
mResolver(std::move(mBodyStream));
}
// Detach aListener from this Manager and allow the Context to close if it
// was the last user. The listener may legitimately be absent already: an
// actor can be killed before it performs any async request on the Manager,
// so a failed removal is not an error.
void Manager::RemoveListener(Listener* aListener) {
  NS_ASSERT_OWNINGTHREAD(Manager);

  const ListenerEntryListenerComparator comparator;
  mListeners.RemoveElement(aListener, comparator);
  MOZ_ASSERT(!mListeners.Contains(aListener, comparator));

  MaybeAllowContextToClose();
}
// Whether the Context destruction was triggered from the Manager going // idle or the underlying storage being invalidated, we should know we // are closing before the Context is destroyed.
MOZ_DIAGNOSTIC_ASSERT(mState == Closing);
// Before forgetting the Context, check to see if we have any outstanding // cache or body objects waiting for deletion. If so, note that we've // orphaned data so it will be cleaned up on the next open. if (std::any_of(
mCacheIdRefs.cbegin(), mCacheIdRefs.cend(),
[](constauto& cacheIdRef) { return cacheIdRef.mOrphaned; }) ||
std::any_of(mBodyIdRefs.cbegin(), mBodyIdRefs.cend(),
[](constauto& bodyIdRef) { return bodyIdRef.mOrphaned; })) {
aContext.NoteOrphanedData();
}
mContext = nullptr;
// Once the context is gone, we can immediately remove ourself from the // Factory list. We don't need to block shutdown by staying in the list // any more.
Factory::Remove(*this);
}
// Record that this Manager is shutting down. Reached through several
// different paths, so repeated invocations are expected and harmless.
void Manager::NoteClosing() {
  NS_ASSERT_OWNINGTHREAD(Manager);
  mState = Closing;
}
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit noch
Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.