/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
 * Seeds the FileIds table from the Files table: every file entry receives a
 * file id equal to its own handle. Rows that already exist are left
 * untouched (INSERT OR IGNORE).
 *
 * @param aConn  open database connection to run the statement on
 * @return result code of ExecuteSimpleSQL
 */
nsresult PopulateFileIds(ResultConnection& aConn) {
  const nsLiteralCString populateFileIdsQuery =
      "INSERT OR IGNORE INTO FileIds ( fileId, handle ) "
      "SELECT handle, handle FROM Files "
      ";"_ns;
  return aConn->ExecuteSimpleSQL(populateFileIdsQuery);
}
/**
 * Seeds the MainFiles table from the Files table: every file entry receives
 * a main-file mapping whose fileId equals its own handle. Rows that already
 * exist are left untouched (INSERT OR IGNORE).
 *
 * @param aConn  open database connection to run the statement on
 * @return result code of ExecuteSimpleSQL
 */
nsresult PopulateMainFiles(ResultConnection& aConn) {
  const nsLiteralCString populateMainFilesQuery =
      "INSERT OR IGNORE INTO MainFiles ( fileId, handle ) "
      "SELECT handle, handle FROM Files "
      ";"_ns;
  return aConn->ExecuteSimpleSQL(populateMainFilesQuery);
}
/**
 * Clears FileIds rows that have become invalid (judging by the name and the
 * comment below, only stale ids are removed, not the whole table).
 *
 * Fixes in this revision: the paste artifact `constauto` is restored to
 * `const auto`, and the comment typo "cant't" is corrected.
 *
 * NOTE(review): this definition is truncated in this chunk — the lambda body
 * and the remainder of the function continue outside the visible range.
 */
Result<Ok, QMResult> ClearInvalidFileIds(
    ResultConnection& aConn, data::FileSystemFileManager& aFileManager) {
  // We can't just clear all file ids because if a file was accessed using
  // writable file stream a new file id was created which is not the same as
  // entry id.

  // Get all file ids first.
  QM_TRY_INSPECT(
      const auto& allFileIds,
      ([&aConn]() -> Result<nsTArray<FileId>, QMResult> {
        const nsLiteralCString allFileIdsQuery =
            "SELECT fileId FROM FileIds;"_ns;
/**
 * Clears MainFiles rows that have become invalid (judging by the name and
 * the comment below, only stale mappings are removed, not the whole table).
 *
 * Fixes in this revision: the paste artifact `constauto` is restored to
 * `const auto`, and the comment typo "cant't" is corrected.
 *
 * NOTE(review): this definition is truncated in this chunk — the lambda body
 * and the remainder of the function continue outside the visible range.
 */
Result<Ok, QMResult> ClearInvalidMainFiles(
    ResultConnection& aConn, data::FileSystemFileManager& aFileManager) {
  // We can't just clear all main files because if a file was accessed using
  // writable file stream a new main file was created which is not the same
  // as entry id.

  // Get all main files first.
  QM_TRY_INSPECT(
      const auto& allMainFiles,
      ([&aConn]() -> Result<nsTArray<std::pair<EntryId, FileId>>, QMResult> {
        const nsLiteralCString allMainFilesQuery =
            "SELECT handle, fileId FROM MainFiles;"_ns;
/**
 * Creates the EntryNames view if it does not exist yet. The view joins
 * Entries with the union of Files (isFile = 1) and Directories (isFile = 0)
 * on the shared handle column, so each row exposes isFile, handle, parent
 * and name.
 *
 * @param aConn  open database connection to run the statement on
 * @return result code of ExecuteSimpleSQL
 */
nsresult CreateEntryNamesView(ResultConnection& aConn) {
  const nsLiteralCString createEntryNamesViewQuery =
      "CREATE VIEW IF NOT EXISTS EntryNames AS "
      "SELECT isFile, handle, parent, name FROM Entries INNER JOIN ( "
      "SELECT 1 AS isFile, handle, name FROM Files UNION "
      "SELECT 0, handle, name FROM Directories ) "
      "USING (handle) "
      ";"_ns;
  return aConn->ExecuteSimpleSQL(createEntryNamesViewQuery);
}
// Queries used by the entry-rehashing migration. They operate on a scratch
// table, EntryMigrationTable, that maps each old entry `handle` to its new
// `hash` (rows with hash == handle need no migration and are skipped).
//
// Fix in this revision: in the original paste, the declarations of
// `updateEntryMappingsQuery` and `cleanupTemporaries` had been fused onto
// preceding `//` comment lines and were therefore commented out; they are
// restored as real code here. All SQL text is unchanged.

// Register the temporary parent placeholder as a directory named 'temp'.
const nsLiteralCString flagTemporaryParentAsDir =
    "INSERT INTO Directories ( handle, name ) "
    "VALUES ( :tempParent, 'temp' ) ;"_ns;

// Insert the rehashed entries under the temporary parent.
const nsLiteralCString insertNewEntriesQuery =
    "INSERT INTO Entries ( handle, parent ) "
    "SELECT hash, :tempParent FROM EntryMigrationTable WHERE hash != handle "
    ";"_ns;

// Recreate migrated directories, shallowest first (ORDER BY depth).
const nsLiteralCString insertNewDirectoriesQuery =
    "INSERT INTO Directories ( handle, name ) "
    "SELECT hash, name FROM EntryMigrationTable "
    "WHERE isFile = 0 AND hash != handle "
    "ORDER BY depth "
    ";"_ns;

// Recreate migrated files, copying the old type over to the new handle.
const nsLiteralCString insertNewFilesQuery =
    "INSERT INTO Files ( handle, type, name ) "
    "SELECT EntryMigrationTable.hash, Files.type, EntryMigrationTable.name "
    "FROM EntryMigrationTable INNER JOIN Files USING (handle) "
    "WHERE EntryMigrationTable.isFile = 1 AND hash != handle "
    ";"_ns;

// Point FileIds rows at the new (hashed) handles.
const nsLiteralCString updateFileMappingsQuery =
    "UPDATE FileIds SET handle = hash "
    "FROM ( SELECT handle, hash FROM EntryMigrationTable WHERE hash != "
    "handle ) "
    "AS replacement WHERE FileIds.handle = replacement.handle "
    ";"_ns;

// Point MainFiles rows at the new (hashed) handles.
const nsLiteralCString updateMainFilesQuery =
    "UPDATE MainFiles SET handle = hash "
    "FROM ( SELECT handle, hash FROM EntryMigrationTable WHERE hash != "
    "handle ) "
    "AS replacement WHERE MainFiles.handle = replacement.handle "
    ";"_ns;

// Now fix the parents: reattach migrated entries from the temporary parent
// to their real (rehashed) parents, shallowest first.
const nsLiteralCString updateEntryMappingsQuery =
    "UPDATE Entries SET parent = hash "
    "FROM ( SELECT Lhs.hash AS handle, Rhs.hash AS hash, Lhs.depth AS depth "
    "FROM EntryMigrationTable AS Lhs "
    "INNER JOIN EntryMigrationTable AS Rhs "
    "ON Rhs.handle = Lhs.parent ORDER BY depth ) AS replacement "
    "WHERE Entries.handle = replacement.handle "
    "AND Entries.parent = :tempParent "
    ";"_ns;

// Remove the old (pre-hash) entries that were migrated.
const nsLiteralCString cleanupOldEntriesQuery =
    "DELETE FROM Entries WHERE handle IN "
    "( SELECT handle FROM EntryMigrationTable WHERE hash != handle ) "
    ";"_ns;

// Remove the temporary parent placeholder itself.
const nsLiteralCString cleanupTemporaryParent =
    "DELETE FROM Entries WHERE handle = :tempParent ;"_ns;

const nsLiteralCString dropIndexByDepthQuery = "DROP INDEX indexOnDepth ; "_ns;

// Index is automatically deleted
const nsLiteralCString cleanupTemporaries =
    "DROP TABLE EntryMigrationTable ;"_ns;
// NOTE(review): the statements below are fragments of one or more enclosing
// functions whose headers are not visible in this chunk (L30-L37 use
// `aConnection`, L38-L41 use `aConn` and `wasEmpty`, so they likely belong
// to different functions) — confirm against the full file.

// Register the project-defined SQL function "hashEntry" (2 arguments) on the
// connection so the migration queries can call it.
nsCOMPtr<mozIStorageFunction> rehashFunction = new data::FileSystemHashStorageFunction();
QM_TRY(MOZ_TO_RESULT(aConnection->CreateFunction("hashEntry"_ns, /* number of arguments */ 2,
// Scope guard: unregister "hashEntry" on every exit path; failures are
// only warned about (QM_WARNONLY_TRY), not propagated.
rehashFunction))); auto finallyRemoveFunction = MakeScopeExit([&aConnection]() {
QM_WARNONLY_TRY(MOZ_TO_RESULT(aConnection->RemoveFunction("hashEntry"_ns)));
});
// We need this to make sure the old entries get removed
QM_TRY(MOZ_TO_RESULT(
aConnection->ExecuteSimpleSQL("PRAGMA foreign_keys = ON;"_ns)));
// Reclaim file space after the migration, but only when the database was
// not empty to begin with.
if (!wasEmpty) {
QM_TRY(QM_TO_RESULT(aConn->ExecuteSimpleSQL("VACUUM;"_ns)));
}
}
// The upgrade from version 1 to version 2 was buggy, so we have to check if // the Usages table still references the Files table which is a sign that // the upgrade wasn't complete. This extra query has only negligible perf // impact. See bug 1847989. auto UsagesTableRefsFilesTable = [&aConn]() -> Result<bool, QMResult> { const nsLiteralCString query = "SELECT pragma_foreign_key_list.'table'=='Files' " "FROM pragma_foreign_key_list('Usages');"_ns;
if (usagesTableRefsFilesTable) {
QM_TRY_UNWRAP(auto transaction, StartedTransaction::Create(aConn));
// The buggy upgrade didn't call PopulateFileIds, ConnectUsagesToFileIds // and PopulateMainFiles was completely missing. Since invalid file ids // and main files could be inserted when the profile was broken, we need // to clear them before populating.
QM_TRY(ClearInvalidFileIds(aConn, aFileManager));
QM_TRY(QM_TO_RESULT(PopulateFileIds(aConn)));
QM_TRY(QM_TO_RESULT(ConnectUsagesToFileIds(aConn)));
QM_TRY(ClearInvalidMainFiles(aConn, aFileManager));
QM_TRY(QM_TO_RESULT(PopulateMainFiles(aConn)));
// In schema version 001, entryId was unique but not necessarily related to // a path. For schema 002, we have to fix all entryIds to be derived from // the underlying path. auto OneTimeRehashingDone = [&aConn]() -> Result<bool, QMResult> { const nsLiteralCString query = "SELECT EXISTS (SELECT 1 FROM sqlite_master " "WHERE type='table' AND name='RehashedFrom001to002' ) ;"_ns;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert. (0.14) Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.