/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Tags a storage key as belonging to the memory-only storage.  DEL (0x7f)
// is used because it is the very last ASCII-7 character usable in the
// comma-separated attribute list, so it cannot collide with any regular
// attribute character.
void AppendMemoryStorageTag(nsAutoCString& key) {
  // Append the DEL tag and the attribute separator in a single call;
  // behaviorally identical to two single-character Append() calls.
  key.AppendLiteral("\x7f,");
}
} // namespace
// Not defining as static or class member of CacheStorageService since
// it would otherwise need to include CacheEntry.h and that then would
// need to be exported to make nsNetModule.cpp compilable.
using GlobalEntryTables = nsClassHashtable<nsCStringHashKey, CacheEntryTable>;

/**
 * Keeps tables of entries. There is one entries table for each distinct load
 * context type. The distinction is based on following load context info
 * states: <isPrivate|isAnon|inIsolatedMozBrowser> which builds a mapping
 * key.
 *
 * Thread-safe to access, protected by the service mutex.
 */
static GlobalEntryTables* sGlobalEntryTables;
// NOTE(review): this is the tail of a class whose head is not visible in
// this chunk (presumably WalkCacheRunnable, given the derived classes
// below).
// Two flags packed into one atomic byte; their meanings are inferred from
// the Load/Store accessors used in the Run() methods below — TODO confirm:
//  - NotifyStorage: OnCacheStorageInfo still has to be delivered.
//  - VisitEntries: individual entries are to be visited as well.
// clang-format off
MOZ_ATOMIC_BITFIELDS(mAtomicBitfields, 8, (
(bool, NotifyStorage, 1),
(bool, VisitEntries, 1)
))
// clang-format on
// Set to interrupt a running visit; polled from the worker loops.
Atomic<bool> mCancel{false};
};
// WalkMemoryCacheRunnable
// Responsible to visit memory storage and walk
// all entries on it asynchronously.
class WalkMemoryCacheRunnable : public WalkCacheRunnable { public:
// Must be created on the main thread (asserted below).  Derives the
// context key from aLoadInfo; visitor and the visit-entries flag are
// handled by the WalkCacheRunnable base class.
WalkMemoryCacheRunnable(nsILoadContextInfo* aLoadInfo, bool aVisitEntries,
nsICacheStorageVisitor* aVisitor)
: WalkCacheRunnable(aVisitor, aVisitEntries) {
CacheFileUtils::AppendKeyPrefix(aLoadInfo, mContextKey);
MOZ_ASSERT(NS_IsMainThread());
}
private:
// Runs first on the cache management thread to collect the memory-only
// entries, then redispatches itself to the main thread for notification.
// NOTE(review): this chunk appears garbled by extraction — `entry` is
// declared twice in the same scope below, the main-thread notification
// branch seems to have been fused into the collection loop, and
// `constauto` is missing a space.  Compare with upstream
// CacheStorageService.cpp before editing.
NS_IMETHOD Run() override { if (CacheStorageService::IsOnManagementThread()) {
LOG(("WalkMemoryCacheRunnable::Run - collecting [this=%p]", this));
// First, walk, count and grab all entries from the storage
if (!CacheStorageService::IsRunning()) return NS_ERROR_NOT_INITIALIZED;
// Count the entries to allocate the array memory all at once.
size_t numEntries = 0; for (constauto& entries : sGlobalEntryTables->Values()) { if (entries->Type() != CacheEntryTable::MEMORY_ONLY) { continue;
}
numEntries += entries->Values().Count();
}
mEntryArray.SetCapacity(numEntries);
// Collect the entries.
for (constauto& entries : sGlobalEntryTables->Values()) { if (entries->Type() != CacheEntryTable::MEMORY_ONLY) { continue;
}
for (CacheEntry* entry : entries->Values()) {
MOZ_ASSERT(!entry->IsUsingDisk());
// Third, notify each entry until depleted or canceled.
if (mNextEntryIdx >= mEntryArray.Length() || mCancel) {
mCallback->OnCacheEntryVisitCompleted(); return NS_OK; // done
}
// Grab the next entry.
RefPtr<CacheEntry> entry = std::move(mEntryArray[mNextEntryIdx++]);
// Invokes this->OnEntryInfo, that calls the callback with all
// information of the entry.
CacheStorageService::GetCacheEntryInfo(entry, this);
}
} else {
MOZ_CRASH("Bad thread"); return NS_ERROR_FAILURE;
}
NS_DispatchToMainThread(this); return NS_OK;
}
// Proxies the release of the visitor callback to the main thread when it
// is still held at destruction time.
virtual ~WalkMemoryCacheRunnable() {
  if (!mCallback) {
    return;
  }
  ProxyReleaseMainThread("WalkMemoryCacheRunnable::mCallback", mCallback);
}
// WalkDiskCacheRunnable
// Using the cache index information to get the list of files per context.
class WalkDiskCacheRunnable : public WalkCacheRunnable { public:
// Keeps aLoadInfo for filtering the index; starts the state machine in the
// stats-collection pass with a zero entry count.
WalkDiskCacheRunnable(nsILoadContextInfo* aLoadInfo, bool aVisitEntries,
nsICacheStorageVisitor* aVisitor)
: WalkCacheRunnable(aVisitor, aVisitEntries),
mLoadInfo(aLoadInfo),
mPass(COLLECT_STATS),
mCount(0) {}
// Kicks off the walk by dispatching this runnable to the cache I/O thread.
// NOTE(review): the body of Walk() is truncated in this chunk — the actual
// dispatch call after obtaining `thread` is missing.
nsresult Walk() {
// TODO, bug 998693
// Initial index build should be forced here so that about:cache soon
// after startup gives some meaningfull results.
// Dispatch to the INDEX level in hope that very recent cache entries
// information gets to the index list before we grab the index iterator
// for the first time. This tries to avoid miss of entries that has
// been created right before the visit is required.
RefPtr<CacheIOThread> thread = CacheFileIOManager::IOThread();
NS_ENSURE_TRUE(thread, NS_ERROR_NOT_INITIALIZED);
private:
// Invokes OnCacheEntryInfo callback for each single found entry.
// There is one instance of this class per one entry.
class OnCacheEntryInfoRunnable : public Runnable { public: explicit OnCacheEntryInfoRunnable(WalkDiskCacheRunnable* aWalker)
: Runnable("net::WalkDiskCacheRunnable::OnCacheEntryInfoRunnable"),
mWalker(aWalker) {}
// The state-machine main loop: on the management thread it first collects
// cache stats (COLLECT_STATS) and then iterates the index metadata
// (ITERATE_METADATA); on the main thread it delivers the storage-info or
// visit-completed callbacks to the visitor.
// NOTE(review): garbled by extraction — `hash` is used below without a
// visible declaration (the iterator advance that fills it is missing), and
// `elseif` is missing a space.  Compare with upstream before editing.
NS_IMETHOD Run() override {
// The main loop
nsresult rv;
if (CacheStorageService::IsOnManagementThread()) { switch (mPass) { case COLLECT_STATS:
// Get quickly the cache stats.
uint32_t size;
rv = CacheIndex::GetCacheStats(mLoadInfo, &size, &mCount); if (NS_FAILED(rv)) { if (LoadVisitEntries()) { // both onStorageInfo and onCompleted are expected
NS_DispatchToMainThread(this);
} return NS_DispatchToMainThread(this);
}
// The index reports kilobytes; convert to bytes.
mSize = static_cast<uint64_t>(size) << 10;
// Invoke onCacheStorageInfo with valid information.
NS_DispatchToMainThread(this);
if (!LoadVisitEntries()) { return NS_OK; // done
}
mPass = ITERATE_METADATA;
[[fallthrough]];
case ITERATE_METADATA:
// Now grab the context iterator.
if (!mIter) {
rv =
CacheIndex::GetIterator(mLoadInfo, true, getter_AddRefs(mIter)); if (NS_FAILED(rv)) {
// Invoke onCacheEntryVisitCompleted now
return NS_DispatchToMainThread(this);
}
}
while (!mCancel && !CacheObserver::ShuttingDown()) { if (CacheIOThread::YieldAndRerun()) return NS_OK;
// This synchronously invokes OnEntryInfo on this class where we
// redispatch to the main thread for the consumer callback.
CacheFileIOManager::GetEntryInfo(&hash, this);
}
// Invoke onCacheEntryVisitCompleted on the main thread
NS_DispatchToMainThread(this);
}
} elseif (NS_IsMainThread()) { if (LoadNotifyStorage()) {
nsCOMPtr<nsIFile> dir;
CacheFileIOManager::GetCacheDirectory(getter_AddRefs(dir));
uint64_t capacity = CacheObserver::DiskCacheCapacity();
capacity <<= 10; // kilobytes to bytes
mCallback->OnCacheStorageInfo(mCount, mSize, capacity, dir);
StoreNotifyStorage(false);
} else {
mCallback->OnCacheEntryVisitCompleted();
}
} else {
MOZ_CRASH("Bad thread"); return NS_ERROR_FAILURE;
}
// NOTE(review): tail of WalkDiskCacheRunnable::OnEntryInfo — the signature
// declaring aURISpec/aIdEnhance/etc. is not visible in this chunk.
// Packages one entry's information into a runnable delivered on the main
// thread; one runnable per entry.
// Invoke onCacheEntryInfo on the main thread for this entry.
RefPtr<OnCacheEntryInfoRunnable> info = new OnCacheEntryInfoRunnable(this);
info->mURISpec = aURISpec;
info->mIdEnhance = aIdEnhance;
info->mDataSize = aDataSize;
info->mAltDataSize = aAltDataSize;
info->mFetchCount = aFetchCount;
info->mLastModifiedTime = aLastModifiedTime;
info->mExpirationTime = aExpirationTime;
info->mPinned = aPinned;
info->mInfo = aInfo;
NS_DispatchToMainThread(info);
}
// The load context whose entries are being visited.
RefPtr<nsILoadContextInfo> mLoadInfo;
// Two-pass state of Run() above: stats first, then (optionally) metadata.
enum {
// First, we collect stats for the load context.
COLLECT_STATS,
// Second, if demanded, we iterate over the entries gethered
// from the iterator and call CacheFileIOManager::GetEntryInfo
// for each found entry.
ITERATE_METADATA,
} mPass;
// NOTE(review): fragment of a disk-eviction routine; the enclosing function
// signature is not visible in this chunk.
// Tell the index to block notification to AsyncGetDiskConsumption.
// Will be allowed again from CacheFileContextEvictor::EvictEntries()
// when all the context have been removed from disk.
CacheIndex::OnAsyncEviction(true);
// Passing null as a load info means to evict all contexts.
// EvictByContext() respects the entry pinning. EvictAll() does not.
rv = CacheFileIOManager::EvictByContext(nullptr, false, u""_ns);
NS_ENSURE_SUCCESS(rv, rv);
// NOTE(review): fragment of a clear-by-base-domain routine — `aBaseDomain`
// and `cBaseDomain` are declared in the unseen head of the enclosing
// function, and `constauto` below is a fused extraction artifact.
nsTArray<nsCString> keys; for (constauto& globalEntry : *sGlobalEntryTables) {
// Match by partitionKey base domain. This should cover most cache entries
// because we statically partition the cache. Most first party cache
// entries will also have a partitionKey set where the partitionKey base
// domain will match the entry URI base domain.
const nsACString& key = globalEntry.GetKey();
nsCOMPtr<nsILoadContextInfo> info =
CacheFileUtils::ParseKey(globalEntry.GetKey());
if (info &&
StoragePrincipalHelper::PartitionKeyHasBaseDomain(
info->OriginAttributesPtr()->mPartitionKey, aBaseDomain)) {
keys.AppendElement(key); continue;
}
// If we didn't get a partitionKey match, try to match by entry URI. This
// requires us to iterate over all entries.
CacheEntryTable* table = globalEntry.GetWeak();
MOZ_ASSERT(table);
nsTArray<RefPtr<CacheEntry>> entriesToDelete;
for (CacheEntry* entry : table->Values()) {
nsCOMPtr<nsIURI> uri;
nsresult rv = NS_NewURI(getter_AddRefs(uri), entry->GetURI()); if (NS_WARN_IF(NS_FAILED(rv))) { continue;
}
nsAutoCString host;
rv = uri->GetHost(host);
// Some entries may not have valid hosts. We can skip them.
if (NS_FAILED(rv) || host.IsEmpty()) { continue;
}
bool hasRootDomain = false;
rv = HasRootDomain(host, cBaseDomain, &hasRootDomain); if (NS_WARN_IF(NS_FAILED(rv))) { continue;
} if (hasRootDomain) {
entriesToDelete.AppendElement(entry);
}
}
// NOTE(review): fragment — `what` and `aWhat` are declared in the unseen
// head of the enclosing PurgeFromMemory-style method.
// Map the public purge flag onto the CacheEntry purge mode.
switch (aWhat) { case PURGE_DISK_DATA_ONLY:
what = CacheEntry::PURGE_DATA_ONLY_DISK_BACKED; break;
case PURGE_DISK_ALL:
what = CacheEntry::PURGE_WHOLE_ONLY_DISK_BACKED; break;
case PURGE_EVERYTHING:
what = CacheEntry::PURGE_WHOLE; break;
default: return NS_ERROR_INVALID_ARG;
}
// Run the actual purge asynchronously on the cache I/O thread.
nsCOMPtr<nsIRunnable> event = new PurgeFromMemoryRunnable(this, what);
return Dispatch(event);
}
// On the main thread: broadcasts the purge-memory-pools notification to
// observers.  Otherwise (cache I/O thread): purges both memory pools.
// NOTE(review): the method's final return and closing brace are missing
// from this chunk.
NS_IMETHODIMP CacheStorageService::PurgeFromMemoryRunnable::Run() { if (NS_IsMainThread()) {
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService(); if (observerService) {
observerService->NotifyObservers(
nullptr, "cacheservice:purge-memory-pools", nullptr);
}
return NS_OK;
}
if (mService) {
// Note that we seem to come here only in the case of "memory-pressure"
// being notified (or in case of tests), so we start from purging in-memory
// entries first and ignore minprogress for disk entries.
// TODO not all flags apply to both pools.
mService->Pool(MemoryPool::EType::MEMORY)
.PurgeAll(mWhat, StaticPrefs::network_cache_purge_minprogress_memory());
mService->Pool(MemoryPool::EType::DISK).PurgeAll(mWhat, 0);
mService = nullptr;
}
// NOTE(review): tail of an AsyncVisitAllStorages-style method; its head
// (declaring aVisitEntries/aVisitor) is not visible in this chunk.
// Walking the disk cache also walks the memory cache.
RefPtr<WalkDiskCacheRunnable> event = new WalkDiskCacheRunnable(nullptr, aVisitEntries, aVisitor); return event->Walk();
}
// Methods used by CacheEntry for management of in-memory structures.
// NOTE(review): truncated — the body after the shutdown check is missing
// from this chunk.
void CacheStorageService::RecordMemoryOnlyEntry(CacheEntry* aEntry, bool aOnlyInMemory, bool aOverwrite) {
LOG(
("CacheStorageService::RecordMemoryOnlyEntry [entry=%p, memory=%d, " "overwrite=%d]",
aEntry, aOnlyInMemory, aOverwrite));
// This method is responsible to put this entry to a special record hashtable
// that contains only entries that are stored in memory.
// Keep in mind that every entry, regardless of whether is in-memory-only or
// not is always recorded in the storage master hash table, the one identified
// by CacheEntry.StorageID().
mLock.AssertCurrentThreadOwns();
if (mShutdown) {
LOG((" after shutdown")); return;
}
// Checks if a cache entry is forced valid (will be loaded directly from cache // without further validation) - see nsICacheEntry.idl for further details bool CacheStorageService::IsForcedValidEntry(nsACString const& aContextKey,
nsACString const& aEntryKey) { return IsForcedValidEntry(aContextKey + aEntryKey);
}
// Allows a cache entry to be loaded directly from cache without further
// validation - see nsICacheEntry.idl for further details
// NOTE(review): truncated — the insertion of `data` into the forced-valid
// table is not visible in this chunk.
void CacheStorageService::ForceEntryValidFor(nsACString const& aContextKey,
nsACString const& aEntryKey,
uint32_t aSecondsToTheFuture) {
mozilla::MutexAutoLock lock(mForcedValidEntriesLock);
TimeStamp now = TimeStamp::NowLoRes();
// Opportunistically expire stale forced-valid records while we hold the
// lock anyway.
ForcedValidEntriesPrune(now);
ForcedValidData data;
data.validUntil = now + TimeDuration::FromSeconds(aSecondsToTheFuture);
data.viewed = false;
// Cleans out the old entries in mForcedValidEntries
void CacheStorageService::ForcedValidEntriesPrune(TimeStamp& now) {
// Function-local statics rate-limit the pruning to at most once a minute;
// dontPruneUntil is initialized only on the first call.
static TimeDuration const oneMinute = TimeDuration::FromSeconds(60);
static TimeStamp dontPruneUntil = now + oneMinute;
if (now < dontPruneUntil) return;
for (auto iter = mForcedValidEntries.Iter(); !iter.Done(); iter.Next()) { if (iter.Data().validUntil < now) { if (!iter.Data().viewed) {
// Telemetry: the record expired without ever being used.
glean::predictor::prefetch_use_status
.EnumGet(glean::predictor::PrefetchUseStatusLabel::eWaitedtoolong)
.Add();
}
iter.Remove();
}
}
dontPruneUntil = now + oneMinute;
}
// NOTE(review): fragment — the head of the enclosing method that schedules
// the purge timer is not visible in this chunk.  The fused preprocessor
// directives below have been restored to separate lines.
// It's likely the timer has already been set when we get here,
// check outside the lock to save resources.
#ifdef MOZ_TSAN
if (mPurgeTimerActive) {
#else
if (mPurgeTimer) {
#endif
return;
}
// We don't know if this is called under the service lock or not,
// hence rather dispatch.
RefPtr<nsIEventTarget> cacheIOTarget = Thread(); if (!cacheIOTarget) return;
// Dispatch as a priority task, we want to set the purge timer
// ASAP to prevent vain redispatch of this event.
nsCOMPtr<nsIRunnable> event = NewRunnableMethod( "net::CacheStorageService::SchedulePurgeOverMemoryLimit", this,
&CacheStorageService::SchedulePurgeOverMemoryLimit);
cacheIOTarget->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
}
// NOTE(review): fragment — the head of the enclosing purge method is not
// visible in this chunk.
// Throttle: never run a purge pass more often than every four seconds.
static TimeDuration const kFourSeconds = TimeDuration::FromSeconds(4);
TimeStamp now = TimeStamp::NowLoRes();
if (!mLastPurgeTime.IsNull() && now - mLastPurgeTime < kFourSeconds) {
LOG((" bypassed, too soon")); return;
}
mLastPurgeTime = now;
// We start purging memory entries first as we care more about RAM over
// disk space beeing freed in case we are interrupted.
Pool(MemoryPool::EType::MEMORY).PurgeExpiredOrOverMemoryLimit();
Pool(MemoryPool::EType::DISK).PurgeExpiredOrOverMemoryLimit();
}
// NOTE(review): fragment of MemoryPool::PurgeExpiredOrOverMemoryLimit —
// `minprogress`, `memoryLimit` and `start` are declared in the unseen head
// of the function.
// We always purge expired entries, even if under our limit.
size_t numExpired = PurgeExpired(minprogress); if (numExpired > 0) {
LOG((" found and purged %zu expired entries", numExpired));
}
// Count the expired purges towards the requested minimum progress.
minprogress = (minprogress > numExpired) ? minprogress - numExpired : 0;
// If we are still under pressure, purge LFU entries until we aren't.
if (mMemorySize > memoryLimit) {
// Do not enter PurgeByFrecency if we reached the minimum and are asked to
// deliver entries.
if (minprogress == 0 && CacheIOThread::YieldAndRerun()) { return;
}
auto r = PurgeByFrecency(minprogress); if (MOZ_LIKELY(r.isOk())) {
size_t numPurged = r.unwrap();
LOG(( " memory data consumption over the limit, abandoned %zu LFU entries",
numPurged));
} else {
// If we hit an error (OOM), do an emergency PurgeAll.
size_t numPurged = PurgeAll(CacheEntry::PURGE_WHOLE, minprogress);
LOG(
(" memory data consumption over the limit, emergency purged all %zu " "entries",
numPurged));
}
}
LOG((" purging took %1.2fms", (TimeStamp::Now() - start).ToMilliseconds()));
}
// This function purges ALL expired entries.
// NOTE(review): truncated — the function's trailing `return` and closing
// brace are not visible in this chunk.
size_t CacheStorageService::MemoryPool::PurgeExpired(size_t minprogress) {
MOZ_ASSERT(IsOnManagementThread());
uint32_t now = NowInSeconds();
size_t numPurged = 0;
// Scan for items to purge. mManagedEntries is not sorted but comparing just
// one integer should be faster than anything else, so go scan.
RefPtr<CacheEntry> entry = mManagedEntries.getFirst(); while (entry) {
// Get the next entry before we may be removed from our list.
RefPtr<CacheEntry> nextEntry = entry->getNext();
if (entry->GetExpirationTime() <= now) {
// Purge will modify our mManagedEntries list but we are prepared for it.
if (entry->Purge(CacheEntry::PURGE_WHOLE)) {
numPurged++;
LOG((" purged expired, entry=%p, exptime=%u (now=%u)", entry.get(),
entry->GetExpirationTime(), now));
}
}
entry = std::move(nextEntry);
// To have some progress even under load, we do the check only after
// purging at least minprogress items if under pressure.
if ((numPurged >= minprogress || mMemorySize <= Limit()) &&
CacheIOThread::YieldAndRerun()) { break;
}
}
// NOTE(review): fragment of MemoryPool::PurgeByFrecency — its head and tail
// are not visible; `mayPurgeSorted` and the `mayPurgeEntry` type come from
// the unseen part, and `constauto` below is an extraction artifact.
// Pretend the limit is 10% lower so that we get rid of more entries at one
// shot and save the sorting below.
uint32_t const memoryLimit = (uint32_t)(Limit() * 0.9); if (mMemorySize <= memoryLimit) { return 0;
}
for (constauto& entry : mManagedEntries) {
// Referenced items cannot be purged and we deliberately want to not look
// at '0' frecency entries, these are new entries and can be ignored.
if (!entry->IsReferenced() && entry->GetFrecency() > 0.0) {
mayPurgeEntry copy(entry);
mayPurgeSorted.AppendElement(std::move(copy));
}
}
} if (mayPurgeSorted.Length() == 0) { return 0;
}
mayPurgeSorted.Sort();
size_t numPurged = 0;
for (auto& checkPurge : mayPurgeSorted) { if (mMemorySize <= memoryLimit) { break;
}
// NOTE(review): fragment of an AddStorageEntry-style method — `entryExists`,
// `replace`, `entry`, `entries`, `entryKey`, `handle` and the `a*`
// parameters are declared in the unseen head of the function.
if (entryExists && !replace) {
// check whether we want to turn this entry to a memory-only.
if (MOZ_UNLIKELY(!aWriteToDisk) && MOZ_LIKELY(entry->IsUsingDisk())) {
LOG((" entry is persistent but we want mem-only, replacing it"));
replace = true;
}
}
// If truncate is demanded, delete and doom the current entry
if (entryExists && replace) {
entries->Remove(entryKey);
LOG((" dooming entry %p for %s because of OPEN_TRUNCATE", entry.get(),
entryKey.get()));
// On purpose called under the lock to prevent races of doom and open on
// I/O thread No need to remove from both memory-only and all-entries
// tables. The new entry will overwrite the shadow entry in its ctor.
entry->DoomAlreadyRemoved();
entry = nullptr;
entryExists = false;
// Would only lead to deleting force-valid timestamp again. We don't need
// the replace information anymore after this point anyway.
replace = false;
}
// Ensure entry for the particular URL
if (!entryExists) {
// When replacing with a new entry, always remove the current force-valid
// timestamp, this is the only place to do it.
if (replace) {
RemoveEntryForceValid(aContextKey, entryKey);
}
// Entry is not in the hashtable or has just been truncated...
entry = new CacheEntry(aContextKey, aURI, aIdExtension, aWriteToDisk,
aSkipSizeCheck, aPin);
entries->InsertOrUpdate(entryKey, RefPtr{entry});
LOG((" new entry %p for %s", entry.get(), entryKey.get()));
}
if (entry) {
// Here, if this entry was not for a long time referenced by any consumer,
// gets again first 'handles count' reference.
handle = entry->NewHandle();
}
}
// NOTE(review): fragment of an entry-exists lookup — the head declaring
// `contextKey`, `entryKey`, `aResult` and `rv` is not visible in this
// chunk.  Checks the in-memory hash tables first, then the disk index.
CacheEntryTable* entries; if ((*aResult = sGlobalEntryTables->Get(contextKey, &entries)) &&
entries->GetWeak(entryKey, aResult)) {
LOG((" found in hash tables")); return NS_OK;
}
}
if (!aStorage->WriteToDisk()) {
// Memory entry, nothing more to do.
LOG((" not found in hash tables")); return NS_OK;
}
// Disk entry, not found in the hashtable, check the index.
nsAutoCString fileKey;
rv = CacheEntry::HashingKey(contextKey, aIdExtension, aURI, fileKey);
CacheIndex::EntryStatus status;
rv = CacheIndex::HasEntry(fileKey, &status); if (NS_FAILED(rv) || status == CacheIndex::DO_NOT_KNOW) {
LOG((" index doesn't know, rv=0x%08" PRIx32, static_cast<uint32_t>(rv))); return NS_ERROR_NOT_AVAILABLE;
}
*aResult = status == CacheIndex::EXISTS;
LOG((" %sfound in index", *aResult ? "" : "not ")); return NS_OK;
}
// NOTE(review): fragment of an AsyncDoom-style method — the head declaring
// `contextKey`, `entryKey`, `entry` and `aCallback` is not visible in this
// chunk, and the code after the final AppendKeyPrefix call is missing.
CacheEntryTable* entries; if (sGlobalEntryTables->Get(contextKey, &entries)) { if (entries->Get(entryKey, getter_AddRefs(entry))) { if (aStorage->WriteToDisk() || !entry->IsUsingDisk()) {
// When evicting from disk storage, purge
// When evicting from memory storage and the entry is memory-only,
// purge
LOG(
(" purging entry %p for %s [storage use disk=%d, entry use " "disk=%d]",
entry.get(), entryKey.get(), aStorage->WriteToDisk(),
entry->IsUsingDisk()));
entries->Remove(entryKey);
} else {
// Otherwise, leave it
LOG(
(" leaving entry %p for %s [storage use disk=%d, entry use " "disk=%d]",
entry.get(), entryKey.get(), aStorage->WriteToDisk(),
entry->IsUsingDisk()));
entry = nullptr;
}
}
}
if (!entry) {
RemoveEntryForceValid(contextKey, entryKey);
}
}
if (entry) {
LOG((" dooming entry %p for %s", entry.get(), entryKey.get())); return entry->AsyncDoom(aCallback);
}
LOG((" no entry loaded for %s", entryKey.get()));
if (aStorage->WriteToDisk()) {
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
// NOTE(review): fragment of DoomStorageEntries-style logic — the enclosing
// function's head/tail and parts of the loop bodies are missing from this
// chunk (braces do not balance here).
if (aDiskStorage) {
LOG((" dooming disk+memory storage of %s", aContextKey.BeginReading()));
// Walk one by one and remove entries according their pin status
CacheEntryTable *diskEntries, *memoryEntries; if (sGlobalEntryTables->Get(aContextKey, &diskEntries)) {
sGlobalEntryTables->Get(memoryStorageID, &memoryEntries);
for (auto iter = diskEntries->Iter(); !iter.Done(); iter.Next()) { auto entry = iter.Data(); if (entry->DeferOrBypassRemovalOnPinStatus(aPinned)) { continue;
}
if (aContext && !aContext->IsPrivate()) {
LOG((" dooming disk entries"));
CacheFileIOManager::EvictByContext(aContext, aPinned, u""_ns);
}
} else {
LOG((" dooming memory-only storage of %s", aContextKey.BeginReading()));
// Remove the memory entries table from the global tables.
// Since we store memory entries also in the disk entries table
// we need to remove the memory entries from the disk table one
// by one manually.
mozilla::UniquePtr<CacheEntryTable> memoryEntries;
sGlobalEntryTables->Remove(memoryStorageID, &memoryEntries);
// An artificial callback. This is a candidate for removal tho. In the new
// cache any 'doom' or 'evict' function ensures that the entry or entries
// being doomed is/are not accessible after the function returns. So there is
// probably no need for a callback - has no meaning. But for compatibility
// with the old cache that is still in the tree we keep the API similar to be
// able to make tests as well as other consumers work for now.
class Callback : public Runnable { public: explicit Callback(nsICacheEntryDoomCallback* aCallback)
: mozilla::Runnable("Callback"), mCallback(aCallback) {}
// Notifies the consumer that dooming finished; always reports NS_OK.
NS_IMETHOD Run() override {
mCallback->OnCacheEntryDoomed(NS_OK); return NS_OK;
}
nsCOMPtr<nsICacheEntryDoomCallback> mCallback;
};
if (aCallback) {
RefPtr<Runnable> callback = new Callback(aCallback); return NS_DispatchToMainThread(callback);
}
// NOTE(review): fragment — the head of the enclosing function (declaring
// `contextKey`, `entryKey`, `entries`, `entry`) is not visible in this
// chunk.  Handles a file-doomed notification from the lower layer.
if (sGlobalEntryTables->Get(contextKey, &entries) &&
entries->Get(entryKey, getter_AddRefs(entry))) { if (entry->IsFileDoomed()) {
// Need to remove under the lock to avoid possible race leading
// to duplication of the entry per its key.
RemoveExactEntry(entries, entryKey, entry, false);
entry->DoomAlreadyRemoved();
}
// Entry found, but it's not the entry that has been found doomed
// by the lower eviction layer. Just leave everything unchanged.
return;
}
// NOTE(review): the following German text is extraction residue (website
// boilerplate, roughly: "the information on this website was compiled to
// the best of our knowledge; neither completeness, correctness nor quality
// is guaranteed; note: the colored syntax highlighting is still
// experimental").  It is not C++ and breaks compilation; preserved as a
// comment pending removal:
// Die Informationen auf dieser Webseite wurden nach bestem Wissen
// sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
// Richtigkeit, noch Qualität der bereit gestellten Informationen
// zugesichert.
// Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell.