// SPDX-License-Identifier: GPL-2.0
/*
 * Simple file system for zoned block devices exposing zones as files.
 *
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/magic.h>
#include <linux/iomap.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/statfs.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/crc32.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fs_parser.h>
#include <linux/fs_context.h>

#include "zonefs.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
/*
 * Get the name of a zone group directory.
 *
 * Returns a static string ("cnv" or "seq") naming the directory that
 * groups zone files of the given type. An unknown type triggers a
 * one-shot warning and returns the placeholder "???".
 */
static const char *zonefs_zgroup_name(enum zonefs_ztype ztype)
{
	switch (ztype) {
	case ZONEFS_ZTYPE_CNV:
		return "cnv";
	case ZONEFS_ZTYPE_SEQ:
		return "seq";
	default:
		WARN_ON_ONCE(1);
		return "???";
	}
}
/*
 * Manage the active zone count: update the superblock's count of active
 * sequential zones (sbi->s_active_seq_files) and the per-zone
 * ZONEFS_ZONE_ACTIVE flag so they stay in sync with the zone state.
 */
static void zonefs_account_active(struct super_block *sb, struct zonefs_zone *z)
{
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);

	/* Only sequential zones are accounted as active. */
	if (zonefs_zone_is_cnv(z))
		return;

	/*
	 * For zones that transitioned to the offline or readonly condition,
	 * we only need to clear the active state.
	 */
	if (z->z_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
		goto out;

	/*
	 * If the zone is active, that is, if it is explicitly open or
	 * partially written, check if it was already accounted as active.
	 */
	if ((z->z_flags & ZONEFS_ZONE_OPEN) ||
	    (z->z_wpoffset > 0 && z->z_wpoffset < z->z_capacity)) {
		if (!(z->z_flags & ZONEFS_ZONE_ACTIVE)) {
			z->z_flags |= ZONEFS_ZONE_ACTIVE;
			atomic_inc(&sbi->s_active_seq_files);
		}
		return;
	}

out:
	/* The zone is not active. If it was, update the active count */
	if (z->z_flags & ZONEFS_ZONE_ACTIVE) {
		z->z_flags &= ~ZONEFS_ZONE_ACTIVE;
		atomic_dec(&sbi->s_active_seq_files);
	}
}
/* * Manage the active zone count. Called with zi->i_truncate_mutex held.
*/ void zonefs_inode_account_active(struct inode *inode)
{
lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
/*
 * NOTE(review): the remainder of this function body (and its closing
 * brace) is missing from this copy of the file -- the text jumps
 * straight into the next function's header comment. Presumably it
 * delegates to zonefs_account_active() for this inode's zone, mirroring
 * zonefs_inode_zone_mgmt() below -- recover the original body from the
 * upstream zonefs sources before building.
 */
/*
 * Execute a zone management operation (open/close/finish/reset) on the
 * device zone backing @z. Returns 0 on success or the negative error
 * code returned by the block layer.
 */
static int zonefs_zone_mgmt(struct super_block *sb, struct zonefs_zone *z,
			    enum req_op op)
{
	int ret;

	/*
	 * With ZNS drives, closing an explicitly open zone that has not been
	 * written will change the zone state to "closed", that is, the zone
	 * will remain active. Since this can then cause failure of explicit
	 * open operation on other zones if the drive active zone resources
	 * are exceeded, make sure that the zone does not remain active by
	 * resetting it.
	 */
	if (op == REQ_OP_ZONE_CLOSE && !z->z_wpoffset)
		op = REQ_OP_ZONE_RESET;

	trace_zonefs_zone_mgmt(sb, z, op);
	ret = blkdev_zone_mgmt(sb->s_bdev, op, z->z_sector,
			       z->z_size >> SECTOR_SHIFT);
	if (ret) {
		zonefs_err(sb, "Zone management operation %s at %llu failed %d\n",
			   blk_op_str(op), z->z_sector, ret);
		return ret;
	}

	return 0;
}
/* Execute a zone management operation on the zone backing @inode. */
int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op)
{
lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
/*
 * NOTE(review): the remainder of this function body (and its closing
 * brace) is missing from this copy -- the text jumps into an unrelated
 * code fragment. Presumably it returns
 * zonefs_zone_mgmt(inode->i_sb, zonefs_inode_zone(inode), op);
 * recover the original body from the upstream sources before building.
 */
/*
 * NOTE(review): the lines below are two orphaned code fragments whose
 * enclosing function heads are missing from this copy of the file.
 * The first (the "full zone" check) appears to be the tail of an
 * inode-size update helper; the second (new_isize/old_isize block
 * accounting ending in spin_unlock) appears to be the tail of
 * zonefs_update_stats(). None of the referenced locals (isize, z,
 * new_isize, old_isize, nr_blocks, sb) are declared here, so this
 * will not compile as-is -- recover the missing function heads from
 * the upstream sources.
 */
/* * A full zone is no longer open/active and does not need * explicit closing.
*/ if (isize >= z->z_capacity) { struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
/* * This may be called for an update after an IO error. * So beware of the values seen.
*/ if (new_isize < old_isize) {
nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits; if (sbi->s_used_blocks > nr_blocks)
sbi->s_used_blocks -= nr_blocks; else
sbi->s_used_blocks = 0;
} else {
sbi->s_used_blocks +=
(new_isize - old_isize) >> sb->s_blocksize_bits; if (sbi->s_used_blocks > sbi->s_blocks)
sbi->s_used_blocks = sbi->s_blocks;
}
spin_unlock(&sbi->s_lock);
}
/* * Check a zone condition. Return the amount of written (and still readable) * data in the zone.
*/ static loff_t zonefs_check_zone_condition(struct super_block *sb, struct zonefs_zone *z, struct blk_zone *zone)
{ switch (zone->cond) { case BLK_ZONE_COND_OFFLINE:
zonefs_warn(sb, "Zone %llu: offline zone\n",
z->z_sector);
z->z_flags |= ZONEFS_ZONE_OFFLINE; return 0; case BLK_ZONE_COND_READONLY: /* * The write pointer of read-only zones is invalid, so we cannot * determine the zone wpoffset (inode size). We thus keep the * zone wpoffset as is, which leads to an empty file * (wpoffset == 0) on mount. For a runtime error, this keeps * the inode size as it was when last updated so that the user * can recover data.
*/
zonefs_warn(sb, "Zone %llu: read-only zone\n",
z->z_sector);
z->z_flags |= ZONEFS_ZONE_READONLY; if (zonefs_zone_is_cnv(z)) return z->z_capacity; return z->z_wpoffset; case BLK_ZONE_COND_FULL: /* The write pointer of full zones is invalid. */ return z->z_capacity; default: if (zonefs_zone_is_cnv(z)) return z->z_capacity; return (zone->wp - zone->start) << SECTOR_SHIFT;
}
}
/* * Check a zone condition and adjust its inode access permissions for * offline and readonly zones.
*/ staticvoid zonefs_inode_update_mode(struct inode *inode)
{ struct zonefs_zone *z = zonefs_inode_zone(inode);
if (z->z_flags & ZONEFS_ZONE_OFFLINE) { /* Offline zones cannot be read nor written */
inode->i_flags |= S_IMMUTABLE;
inode->i_mode &= ~0777;
} elseif (z->z_flags & ZONEFS_ZONE_READONLY) { /* Readonly zones cannot be written */
inode->i_flags |= S_IMMUTABLE; if (z->z_flags & ZONEFS_ZONE_INIT_MODE)
inode->i_mode &= ~0777; else
inode->i_mode &= ~0222;
}
/*
 * NOTE(review): this function is corrupted in this copy: "staticvoid"
 * and "elseif" are fused tokens that will not compile, and the function
 * tail (including its closing brace) is missing -- the text jumps into
 * another function's comment. The ZONEFS_ZONE_INIT_MODE flag checked
 * above is presumably cleared in the missing tail; recover the original
 * from the upstream sources before building.
 */
/*
 * NOTE(review): headless fragment. The enclosing function's signature
 * and local declarations (inode, sb, sbi, z, zone, write, data_size,
 * isize) are missing from this copy. Judging by the logic -- checking
 * the zone condition after an IO error and applying the errors= mount
 * option policy -- this appears to be the body of an IO error handler
 * (presumably zonefs_handle_io_error(); confirm against upstream).
 * Several "elseif" fused tokens below will not compile.
 */
/* * Check the zone condition: if the zone is not "bad" (offline or * read-only), read errors are simply signaled to the IO issuer as long * as there is no inconsistency between the inode size and the amount of * data writen in the zone (data_size).
*/
data_size = zonefs_check_zone_condition(sb, z, zone);
isize = i_size_read(inode); if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
!write && isize == data_size) return;
/* * At this point, we detected either a bad zone or an inconsistency * between the inode size and the amount of data written in the zone. * For the latter case, the cause may be a write IO error or an external * action on the device. Two error patterns exist: * 1) The inode size is lower than the amount of data in the zone: * a write operation partially failed and data was writen at the end * of the file. This can happen in the case of a large direct IO * needing several BIOs and/or write requests to be processed. * 2) The inode size is larger than the amount of data in the zone: * this can happen with a deferred write error with the use of the * device side write cache after getting successful write IO * completions. Other possibilities are (a) an external corruption, * e.g. an application reset the zone directly, or (b) the device * has a serious problem (e.g. firmware bug). * * In all cases, warn about inode size inconsistency and handle the * IO error according to the zone condition and to the mount options.
*/ if (isize != data_size)
zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
inode->i_ino, isize, data_size);
/* * First handle bad zones signaled by hardware. The mount options * errors=zone-ro and errors=zone-offline result in changing the * zone condition to read-only and offline respectively, as if the * condition was signaled by the hardware.
*/ if ((z->z_flags & ZONEFS_ZONE_OFFLINE) ||
(sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)) {
zonefs_warn(sb, "inode %lu: read/write access disabled\n",
inode->i_ino); if (!(z->z_flags & ZONEFS_ZONE_OFFLINE))
z->z_flags |= ZONEFS_ZONE_OFFLINE;
zonefs_inode_update_mode(inode);
data_size = 0;
} elseif ((z->z_flags & ZONEFS_ZONE_READONLY) ||
(sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)) {
zonefs_warn(sb, "inode %lu: write access disabled\n",
inode->i_ino); if (!(z->z_flags & ZONEFS_ZONE_READONLY))
z->z_flags |= ZONEFS_ZONE_READONLY;
zonefs_inode_update_mode(inode);
data_size = isize;
} elseif (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
data_size > isize) { /* Do not expose garbage data */
data_size = isize;
}
/* * If the filesystem is mounted with the explicit-open mount option, we * need to clear the ZONEFS_ZONE_OPEN flag if the zone transitioned to * the read-only or offline condition, to avoid attempting an explicit * close of the zone when the inode file is closed.
*/ if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)))
z->z_flags &= ~ZONEFS_ZONE_OPEN;
/* * If error=remount-ro was specified, any error result in remounting * the volume as read-only.
*/ if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) {
zonefs_warn(sb, "remounting filesystem read-only\n");
sb->s_flags |= SB_RDONLY;
}
/* * Update block usage stats and the inode size to prevent access to * invalid data.
*/
zonefs_update_stats(inode, data_size);
zonefs_i_size_write(inode, data_size);
z->z_wpoffset = data_size;
zonefs_inode_account_active(inode);
}
/* * When an file IO error occurs, check the file zone to see if there is a change * in the zone condition (e.g. offline or read-only). For a failed write to a * sequential zone, the zone write pointer position must also be checked to * eventually correct the file size and zonefs inode write pointer offset * (which can be out of sync with the drive due to partial write failures).
*/ void __zonefs_io_error(struct inode *inode, bool write)
{ struct zonefs_zone *z = zonefs_inode_zone(inode); struct super_block *sb = inode->i_sb; unsignedint noio_flag; struct blk_zone zone; int ret;
/* * Conventional zone have no write pointer and cannot become read-only * or offline. So simply fake a report for a single or aggregated zone * and let zonefs_handle_io_error() correct the zone inode information * according to the mount options.
*/ if (!zonefs_zone_is_seq(z)) {
zone.start = z->z_sector;
zone.len = z->z_size >> SECTOR_SHIFT;
zone.wp = zone.start + zone.len;
zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
zone.cond = BLK_ZONE_COND_NOT_WP;
zone.capacity = zone.len; goto handle_io_error;
}
/* * Memory allocations in blkdev_report_zones() can trigger a memory * reclaim which may in turn cause a recursion into zonefs as well as * struct request allocations for the same device. The former case may * end up in a deadlock on the inode truncate mutex, while the latter * may prevent IO forward progress. Executing the report zones under * the GFP_NOIO context avoids both problems.
*/
noio_flag = memalloc_noio_save();
ret = blkdev_report_zones(sb->s_bdev, z->z_sector, 1,
zonefs_io_error_cb, &zone);
memalloc_noio_restore(noio_flag);
if (ret != 1) {
zonefs_err(sb, "Get inode %lu zone information failed %d\n",
inode->i_ino, ret);
zonefs_warn(sb, "remounting filesystem read-only\n");
sb->s_flags |= SB_RDONLY; return;
}
/*
 * NOTE(review): this function is truncated here. The "handle_io_error:"
 * label targeted by the goto above, the call that handles the error for
 * the reported zone, and the function's closing brace are all missing
 * from this copy. "unsignedint" in the declarations is also a fused
 * token that will not compile. Recover the original tail from the
 * upstream sources before building.
 */
/*
 * NOTE(review): headless fragment. The enclosing function's signature
 * and the declarations of dentry, iattr, inode and ret are missing from
 * this copy. The setattr_prepare()/setattr_copy() calls suggest this is
 * the body of the inode ->setattr operation (confirm against upstream).
 * The fragment is also truncated at the end: the S_ISREG block opened
 * on the last line is never closed here.
 */
ret = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (ret) return ret;
/* * Since files and directories cannot be created nor deleted, do not * allow setting any write attributes on the sub-directories grouping * files by zone type.
*/ if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
(iattr->ia_mode & 0222)) return -EPERM;
if (((iattr->ia_valid & ATTR_UID) &&
!uid_eq(iattr->ia_uid, inode->i_uid)) ||
((iattr->ia_valid & ATTR_GID) &&
!gid_eq(iattr->ia_gid, inode->i_gid))) {
ret = dquot_transfer(&nop_mnt_idmap, inode, iattr); if (ret) return ret;
}
if (iattr->ia_valid & ATTR_SIZE) {
ret = zonefs_file_truncate(inode, iattr->ia_size); if (ret) return ret;
}
setattr_copy(&nop_mnt_idmap, inode, iattr);
if (S_ISREG(inode->i_mode)) { struct zonefs_zone *z = zonefs_inode_zone(inode);
/*
 * NOTE(review): two headless directory-operation fragments. The first
 * (name-length check plus zone-group name match) appears to belong to a
 * directory lookup helper; the second (dir_emit loop) to the readdir
 * implementation for a zone group directory. The locals referenced
 * (dentry, ztype, sbi, name, ctx, file, fname, fname_len, f, zgroup, z,
 * ino, inode) are not declared here -- the function heads are missing
 * from this copy; recover them from the upstream sources.
 */
/* * We only need to check for the "seq" directory and * the "cnv" directory if we have conventional zones.
*/ if (dentry->d_name.len != 3) return ERR_PTR(-ENOENT);
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) { if (sbi->s_zgroup[ztype].g_nr_zones &&
memcmp(name, zonefs_zgroup_name(ztype), 3) == 0) break;
} if (ztype == ZONEFS_ZTYPE_MAX) return ERR_PTR(-ENOENT);
/* * The size of zone group directories is equal to the number * of zone files in the group and does note include the "." and * ".." entries. Hence the "+ 2" here.
*/ if (ctx->pos >= inode->i_size + 2) return 0;
if (!dir_emit_dots(file, ctx)) return 0;
fname = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL); if (!fname) return -ENOMEM;
for (f = ctx->pos - 2; f < zgroup->g_nr_zones; f++) {
z = &zgroup->g_zones[f];
ino = z->z_sector >> sbi->s_zone_sectors_shift;
fname_len = snprintf(fname, ZONEFS_NAME_MAX - 1, "%u", f); if (!dir_emit(ctx, fname, fname_len, ino, DT_REG)) break;
ctx->pos++;
}
/*
 * NOTE(review): headless fragment of the zone report callback
 * (presumably zonefs_get_zone_info_cb, which L203 below passes to
 * blkdev_report_zones). The signature and the declarations of zone,
 * idx, zd and sbi are missing, and the callback's tail (storing the
 * zone and returning 0) is also absent from this copy.
 */
/* * We do not care about the first zone: it contains the super block * and not exposed as a file.
*/ if (!idx) return 0;
/* * Count the number of zones that will be exposed as files. * For sequential zones, we always have as many files as zones. * FOr conventional zones, the number of files depends on if we have * conventional zones aggregation enabled.
*/ switch (zone->type) { case BLK_ZONE_TYPE_CONVENTIONAL: if (sbi->s_features & ZONEFS_F_AGGRCNV) { /* One file per set of contiguous conventional zones */ if (!(sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones) ||
zone->start != zd->cnv_zone_start)
sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
zd->cnv_zone_start = zone->start + zone->len;
} else { /* One file per zone */
sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
} break; case BLK_ZONE_TYPE_SEQWRITE_REQ: case BLK_ZONE_TYPE_SEQWRITE_PREF:
sbi->s_zgroup[ZONEFS_ZTYPE_SEQ].g_nr_zones++; break; default:
zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
zone->type); return -EIO;
}
/*
 * NOTE(review): headless fragment, presumably the body of
 * zonefs_get_zone_info() (called from zonefs_init_zgroups below):
 * allocate a full zone array and fill it via blkdev_report_zones.
 * The signature and the declarations of zd, bdev and ret are missing,
 * as is the success return and closing brace after the report-count
 * check.
 */
zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone),
GFP_KERNEL); if (!zd->zones) return -ENOMEM;
/* Get zones information from the device */
ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
zonefs_get_zone_info_cb, zd); if (ret < 0) {
zonefs_err(zd->sb, "Zone report failed %d\n", ret); return ret;
}
if (ret != bdev_nr_zones(bdev)) {
zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
ret, bdev_nr_zones(bdev)); return -EIO;
}
/* * Create a zone group and populate it with zone files.
*/ staticint zonefs_init_zgroup(struct super_block *sb, struct zonefs_zone_data *zd, enum zonefs_ztype ztype)
{ struct zonefs_sb_info *sbi = ZONEFS_SB(sb); struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype]; struct blk_zone *zone, *next, *end; struct zonefs_zone *z; unsignedint n = 0; int ret;
/* Allocate the zone group. If it is empty, we have nothing to do. */ if (!zgroup->g_nr_zones) return 0;
zgroup->g_zones = kvcalloc(zgroup->g_nr_zones, sizeof(struct zonefs_zone), GFP_KERNEL); if (!zgroup->g_zones) return -ENOMEM;
/* * Initialize the zone groups using the device zone information. * We always skip the first zone as it contains the super block * and is not use to back a file.
*/
end = zd->zones + bdev_nr_zones(sb->s_bdev); for (zone = &zd->zones[1]; zone < end; zone = next) {
next = zone + 1; if (zonefs_zone_type(zone) != ztype) continue;
if (WARN_ON_ONCE(n >= zgroup->g_nr_zones)) return -EINVAL;
/* * For conventional zones, contiguous zones can be aggregated * together to form larger files. Note that this overwrites the * length of the first zone of the set of contiguous zones * aggregated together. If one offline or read-only zone is * found, assume that all zones aggregated have the same * condition.
*/ if (ztype == ZONEFS_ZTYPE_CNV &&
(sbi->s_features & ZONEFS_F_AGGRCNV)) { for (; next < end; next++) { if (zonefs_zone_type(next) != ztype) break;
zone->len += next->len;
zone->capacity += next->capacity; if (next->cond == BLK_ZONE_COND_READONLY &&
zone->cond != BLK_ZONE_COND_OFFLINE)
zone->cond = BLK_ZONE_COND_READONLY; elseif (next->cond == BLK_ZONE_COND_OFFLINE)
zone->cond = BLK_ZONE_COND_OFFLINE;
}
}
z = &zgroup->g_zones[n]; if (ztype == ZONEFS_ZTYPE_CNV)
z->z_flags |= ZONEFS_ZONE_CNV;
z->z_sector = zone->start;
z->z_size = zone->len << SECTOR_SHIFT; if (z->z_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
!(sbi->s_features & ZONEFS_F_AGGRCNV)) {
zonefs_err(sb, "Invalid zone size %llu (device zone sectors %llu)\n",
z->z_size,
bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT); return -EINVAL;
}
/*
 * NOTE(review): code appears to be missing here -- z->z_capacity and
 * z->z_wpoffset are read elsewhere in this file but never initialized
 * in this copy; presumably their setup (and related superblock stat
 * updates) was dropped by the extraction. Confirm against upstream.
 */
/* * Let zonefs_inode_update_mode() know that we will need * special initialization of the inode mode the first time * it is accessed.
*/
z->z_flags |= ZONEFS_ZONE_INIT_MODE;
/* * For sequential zones, make sure that any open zone is closed * first to ensure that the initial number of open zones is 0, * in sync with the open zone accounting done when the mount * option ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
*/ if (ztype == ZONEFS_ZTYPE_SEQ &&
(zone->cond == BLK_ZONE_COND_IMP_OPEN ||
zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
ret = zonefs_zone_mgmt(sb, z, REQ_OP_ZONE_CLOSE); if (ret) return ret;
}
zonefs_account_active(sb, z);
n++;
}
if (WARN_ON_ONCE(n != zgroup->g_nr_zones)) return -EINVAL;
zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
zonefs_zgroup_name(ztype),
zgroup->g_nr_zones,
str_plural(zgroup->g_nr_zones));
/*
 * NOTE(review): the function's success return and closing brace are
 * missing from this copy (presumably "return 0; }"). "staticint",
 * "unsignedint" and "elseif" above are fused tokens that will not
 * compile. Recover the original from the upstream sources.
 */
/*
 * Get the device zone information and use it to allocate and initialize
 * all zone groups. On failure, any partially created groups are freed.
 */
static int zonefs_init_zgroups(struct super_block *sb)
{
	struct zonefs_zone_data zd;
	enum zonefs_ztype ztype;
	int ret;

	/* First get the device zone information */
	memset(&zd, 0, sizeof(struct zonefs_zone_data));
	zd.sb = sb;
	ret = zonefs_get_zone_info(&zd);
	if (ret)
		goto cleanup;

	/* Allocate and initialize the zone groups */
	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
		ret = zonefs_init_zgroup(sb, &zd, ztype);
		if (ret) {
			zonefs_info(sb, "Zone group \"%s\" initialization failed\n",
				    zonefs_zgroup_name(ztype));
			break;
		}
	}

cleanup:
	/* The raw zone report is no longer needed in either case. */
	zonefs_free_zone_info(&zd);
	if (ret)
		zonefs_free_zgroups(sb);

	return ret;
}
/* * Read super block information from the device.
*/ staticint zonefs_read_super(struct super_block *sb)
{ struct zonefs_sb_info *sbi = ZONEFS_SB(sb); struct zonefs_super *super;
u32 crc, stored_crc; int ret;
super = kmalloc(ZONEFS_SUPER_SIZE, GFP_KERNEL); if (!super) return -ENOMEM;
ret = bdev_rw_virt(sb->s_bdev, 0, super, ZONEFS_SUPER_SIZE,
REQ_OP_READ); if (ret) goto free_super;
ret = -EINVAL; if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC) goto free_super;
/*
 * NOTE(review): zonefs_read_super() is truncated here -- the CRC
 * verification (crc/stored_crc are declared above but never used in
 * this copy), feature/option parsing, the "free_super:" label targeted
 * by the gotos above, and the function's closing brace are all missing.
 * The loop below iput()-ing the zone group inodes belongs to a
 * different, headless function (presumably a zone-group inode release
 * helper). "staticint" is a fused token that will not compile.
 * Recover both functions from the upstream sources.
 */
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) { if (sbi->s_zgroup[ztype].g_inode) {
iput(sbi->s_zgroup[ztype].g_inode);
sbi->s_zgroup[ztype].g_inode = NULL;
}
}
}
/* * Check that the device is zoned. If it is, get the list of zones and create * sub-directories and files according to the device zone configuration and * format options.
*/ staticint zonefs_fill_super(struct super_block *sb, struct fs_context *fc)
{ struct zonefs_sb_info *sbi; struct zonefs_context *ctx = fc->fs_private; struct inode *inode; enum zonefs_ztype ztype; int ret;
if (!bdev_is_zoned(sb->s_bdev)) {
zonefs_err(sb, "Not a zoned block device\n"); return -EINVAL;
}
/* * Initialize super block information: the maximum file size is updated * when the zone files are created so that the format option * ZONEFS_F_AGGRCNV which increases the maximum file size of a file * beyond the zone size is taken into account.
*/
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) return -ENOMEM;
/*
 * NOTE(review): code appears to be missing here -- sbi is allocated but
 * never attached to sb (no sb->s_fs_info assignment is visible in this
 * copy), and fields read below (s_max_wro_seq_files,
 * s_max_active_seq_files) are never initialized. Presumably lock/list
 * init, superblock field setup and zonefs_read_super() were dropped by
 * the extraction; confirm against upstream.
 */
/* * The block size is set to the device zone write granularity to ensure * that write operations are always aligned according to the device * interface constraints.
*/
sb_set_blocksize(sb, bdev_zone_write_granularity(sb->s_bdev));
sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
sbi->s_uid = GLOBAL_ROOT_UID;
sbi->s_gid = GLOBAL_ROOT_GID;
sbi->s_perm = 0640;
sbi->s_mount_opts = ctx->s_mount_opts;
if (!sbi->s_max_wro_seq_files &&
!sbi->s_max_active_seq_files &&
sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
zonefs_info(sb, "No open and active zone limits. Ignoring explicit_open mount option\n");
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
}
/* Initialize the zone groups */
ret = zonefs_init_zgroups(sb); if (ret) goto cleanup;
/* Create the root directory inode */
ret = -ENOMEM;
inode = new_inode(sb); if (!inode) goto cleanup;
/*
 * NOTE(review): root inode setup (ino/mode/links/size/ops assignment)
 * appears to be missing between new_inode() and d_make_root() in this
 * copy; confirm against upstream.
 */
sb->s_root = d_make_root(inode); if (!sb->s_root) goto cleanup;
/* * Take a reference on the zone groups directory inodes * to keep them in the inode cache.
*/
ret = zonefs_get_zgroup_inodes(sb); if (ret) goto cleanup;
ret = zonefs_sysfs_register(sb); if (ret) goto cleanup;
/*
 * NOTE(review): the function is truncated here -- the success return,
 * the "cleanup:" error path targeted by the gotos above, and the
 * closing brace are all missing from this copy. "staticint" is a fused
 * token that will not compile. Recover the tail from upstream.
 */
/*
 * Destroy the zonefs inode slab cache, after making sure no inode free
 * is still pending in an RCU grace period.
 */
static void zonefs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the inode cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(zonefs_inode_cachep);
}
/* Module metadata and module init/exit hook registration. */
MODULE_AUTHOR("Damien Le Moal");
MODULE_DESCRIPTION("Zone file system for zoned block devices");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS("zonefs");
module_init(zonefs_init);
module_exit(zonefs_exit);
/*
 * NOTE(review): the following lines are web-page footer residue (German
 * boilerplate from the code-display site this file was extracted from),
 * not source code. They are fenced in a comment so they cannot be parsed
 * as C; they should be removed entirely when the file is restored.
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.18 Sekunden
 * (vorverarbeitet)
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */