len = dprintf(fd, "%d", score); if (len < 0) {
close(fd); return len;
}
close(fd); return 0;
}
/* * This test creates two nested cgroups with and without enabling * the memory controller.
*/ staticint test_memcg_subtree_control(constchar *root)
{ char *parent, *child, *parent2 = NULL, *child2 = NULL; int ret = KSFT_FAIL; char buf[PAGE_SIZE];
/* Create two nested cgroups with the memory controller enabled */
parent = cg_name(root, "memcg_test_0");
child = cg_name(root, "memcg_test_0/memcg_test_1"); if (!parent || !child) goto cleanup_free;
if (cg_create(parent)) goto cleanup_free;
if (cg_write(parent, "cgroup.subtree_control", "+memory")) goto cleanup_parent;
if (cg_create(child)) goto cleanup_parent;
if (cg_read_strstr(child, "cgroup.controllers", "memory")) goto cleanup_child;
/* Create two nested cgroups without enabling memory controller */
parent2 = cg_name(root, "memcg_test_1");
child2 = cg_name(root, "memcg_test_1/memcg_test_1"); if (!parent2 || !child2) goto cleanup_free2;
if (cg_create(parent2)) goto cleanup_free2;
if (cg_create(child2)) goto cleanup_parent2;
if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf))) goto cleanup_all;
if (!cg_read_strstr(child2, "cgroup.controllers", "memory")) goto cleanup_all;
if (!values_close(file, current, 10)) goto cleanup;
ret = 0;
cleanup:
close(fd); return ret;
}
/* * This test create a memory cgroup, allocates * some anonymous memory and some pagecache * and checks memory.current, memory.peak, and some memory.stat values.
*/ staticint test_memcg_current_peak(constchar *root)
{ int ret = KSFT_FAIL; long current, peak, peak_reset; char *memcg; bool fd2_closed = false, fd3_closed = false, fd4_closed = false; int peak_fd = -1, peak_fd2 = -1, peak_fd3 = -1, peak_fd4 = -1; struct stat ss;
/* NOTE(review): peak_reset, fd2_closed and peak_fd2..4 are declared here but the
 * code that writes/opens them is missing from this copy -- lines appear to have
 * been lost during extraction. Restore from the upstream selftest before use. */
memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup;
if (cg_create(memcg)) goto cleanup;
/* a freshly created cgroup must report zero usage and zero peak */
current = cg_read_long(memcg, "memory.current"); if (current != 0) goto cleanup;
peak = cg_read_long(memcg, "memory.peak"); if (peak != 0) goto cleanup;
if (cg_run(memcg, alloc_anon_50M_check, NULL)) goto cleanup;
/* after a 50M anon allocation the watermark must be at least 50M */
peak = cg_read_long(memcg, "memory.peak"); if (peak < MB(50)) goto cleanup;
/* * We'll open a few FDs for the same memory.peak file to exercise the free-path * We need at least three to be closed in a different order than writes occurred to test * the linked-list handling.
*/
peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
/* ENOENT means this kernel predates memory.peak -- skip, don't fail */
if (peak_fd == -1) { if (errno == ENOENT)
ret = KSFT_SKIP; goto cleanup;
}
/* * Before we try to use memory.peak's fd, try to figure out whether * this kernel supports writing to that file in the first place. (by * checking the writable bit on the file's st_mode)
*/ if (fstat(peak_fd, &ss)) goto cleanup;
if ((ss.st_mode & S_IWUSR) == 0) {
ret = KSFT_SKIP; goto cleanup;
}
if (cg_run(memcg, alloc_pagecache_50M_check, NULL)) goto cleanup;
peak = cg_read_long(memcg, "memory.peak"); if (peak < MB(50)) goto cleanup;
/* Make sure everything is back to normal */
peak = cg_read_long_fd(peak_fd); if (peak < MB(50)) goto cleanup;
/* NOTE(review): peak_fd4 (and peak_fd2/peak_fd3 below) are read/closed but
 * never opened in the visible code -- the cg_open calls are missing. */
peak = cg_read_long_fd(peak_fd4); if (peak < MB(50)) goto cleanup;
fd3_closed = true; if (close(peak_fd3)) goto cleanup;
fd4_closed = true; if (close(peak_fd4)) goto cleanup;
ret = KSFT_PASS;
cleanup:
close(peak_fd); if (!fd2_closed)
close(peak_fd2); if (!fd3_closed)
close(peak_fd3); if (!fd4_closed)
close(peak_fd4);
cg_destroy(memcg);
free(memcg);
return ret;
}
/*
 * NOTE(review): helper that presumably creates 50M of pagecache and stays
 * resident until its parent exits -- the body after these declarations is
 * missing from this copy (truncated extraction); restore from upstream.
 */
staticint alloc_pagecache_50M_noexit(constchar *cgroup, void *arg)
{ int fd = (long)arg; int ppid = getppid();
/*
 * Poll "cgroup.procs" until the OOM killer has asynchronously reaped
 * every process in @cgroup. Gives up after 10 polls (~1 second).
 * Returns 0 once the cgroup is empty, -1 on timeout.
 */
static int cg_test_proc_killed(const char *cgroup)
{
	int attempt = 0;

	while (attempt++ < 10) {
		if (!cg_read_strcmp(cgroup, "cgroup.procs", ""))
			return 0;
		usleep(100000);
	}
	return -1;
}
staticbool reclaim_until(constchar *memcg, long goal);
/* * First, this test creates the following hierarchy: * A memory.min = 0, memory.max = 200M * A/B memory.min = 50M * A/B/C memory.min = 75M, memory.current = 50M * A/B/D memory.min = 25M, memory.current = 50M * A/B/E memory.min = 0, memory.current = 50M * A/B/F memory.min = 500M, memory.current = 0 * * (or memory.low if we test soft protection) * * Usages are pagecache and the test keeps a running * process in every leaf cgroup. * Then it creates A/G and creates a significant * memory pressure in A. * * Then it checks actual memory usages and expects that: * A/B memory.current ~= 50M * A/B/C memory.current ~= 29M [memory.events:low > 0] * A/B/D memory.current ~= 21M [memory.events:low > 0] * A/B/E memory.current ~= 0 [memory.events:low == 0 if !memory_recursiveprot, * undefined otherwise] * A/B/F memory.current = 0 [memory.events:low == 0] * (for origin of the numbers, see model in memcg_protection.m.) * * After that it tries to allocate more than there is * unprotected memory in A available, and checks that: * a) memory.min protects pagecache even in this case, * b) memory.low allows reclaiming page cache with low events. * * Then we try to reclaim from A/B/C using memory.reclaim until its * usage reaches 10M. * This makes sure that: * (a) We ignore the protection of the reclaim target memcg. * (b) The previously calculated emin value (~29M) should be dismissed.
*/ staticint test_memcg_protection(constchar *root, bool min)
{ int ret = KSFT_FAIL, rc; char *parent[3] = {NULL}; char *children[4] = {NULL}; constchar *attribute = min ? "memory.min" : "memory.low"; long c[4]; long current; int i, attempts; int fd;
/* NOTE(review): rc, c[], and attempts are declared but never used in the
 * visible code -- the allocation/verification passages that use them appear
 * to be missing from this copy. */
fd = get_temp_fd(); if (fd < 0) goto cleanup;
parent[0] = cg_name(root, "memcg_test_0"); if (!parent[0]) goto cleanup;
parent[1] = cg_name(parent[0], "memcg_test_1"); if (!parent[1]) goto cleanup;
parent[2] = cg_name(parent[0], "memcg_test_2"); if (!parent[2]) goto cleanup;
if (cg_create(parent[0])) goto cleanup;
/* older kernels may lack memory.min entirely: skip rather than fail */
if (cg_read_long(parent[0], attribute)) { /* No memory.min on older kernels is fine */ if (min)
ret = KSFT_SKIP; goto cleanup;
}
if (cg_write(parent[0], "cgroup.subtree_control", "+memory")) goto cleanup;
if (cg_write(parent[0], "memory.max", "200M")) goto cleanup;
if (cg_write(parent[0], "memory.swap.max", "0")) goto cleanup;
if (cg_create(parent[1])) goto cleanup;
if (cg_write(parent[1], "cgroup.subtree_control", "+memory")) goto cleanup;
if (cg_create(parent[2])) goto cleanup;
for (i = 0; i < ARRAY_SIZE(children); i++) {
/* NOTE(review): the remainder of this loop body (cg_create of each child,
 * the memory.min/low writes, and the pagecache allocations) is missing --
 * the loop is never closed before the usage check below. */
children[i] = cg_name_indexed(parent[1], "child_memcg", i); if (!children[i]) goto cleanup;
current = min ? MB(50) : MB(30); if (!values_close(cg_read_long(parent[1], "memory.current"), current, 3)) goto cleanup;
if (!reclaim_until(children[0], MB(10))) goto cleanup;
if (min) {
ret = KSFT_PASS; goto cleanup;
}
/* * Child 2 has memory.low=0, but some low protection may still be * distributed down from its parent with memory.low=50M if cgroup2 * memory_recursiveprot mount option is enabled. Ignore the low * event count in this case.
*/ for (i = 0; i < ARRAY_SIZE(children); i++) { int ignore_low_events_index = has_recursiveprot ? 2 : -1; int no_low_events_index = 1; long low, oom;
/* NOTE(review): low and oom are read here but never assigned -- the
 * cg_read_key_long calls on memory.events are missing from this copy. */
if (oom) goto cleanup; if (i == ignore_low_events_index) continue; if (i <= no_low_events_index && low <= 0) goto cleanup; if (i > no_low_events_index && low) goto cleanup;
}
ret = KSFT_PASS;
cleanup: for (i = ARRAY_SIZE(children) - 1; i >= 0; i--) { if (!children[i]) continue;
cg_destroy(children[i]);
free(children[i]);
}
/* NOTE(review): function is truncated here -- destruction of parent[],
 * close(fd) and the final return are missing; the text fuses directly
 * into the next function definition. */
for (i = ARRAY_SIZE(parent) - 1; i >= 0; i--) { if (!parent[i]) continue;
/*
 * Try to create 50M of pagecache inside @cgroup while either memory.high
 * or memory.max is set to 30M, then verify the cgroup's usage settled
 * near the 30M limit (within 5%). @arg is unused.
 * Returns 0 on success, -1 on failure.
 */
static int alloc_pagecache_max_30M(const char *cgroup, void *arg)
{
	long high, max, usage;
	int tmp_fd;
	int err = -1;

	high = cg_read_long(cgroup, "memory.high");
	max = cg_read_long(cgroup, "memory.max");
	/* the caller must have configured one of the two limits to 30M */
	if (high != MB(30) && max != MB(30))
		return -1;

	tmp_fd = get_temp_fd();
	if (tmp_fd < 0)
		return -1;

	if (alloc_pagecache(tmp_fd, MB(50)))
		goto out;

	usage = cg_read_long(cgroup, "memory.current");
	if (values_close(usage, MB(30), 5))
		err = 0;

out:
	close(tmp_fd);
	return err;
}
/* * This test checks that memory.high limits the amount of * memory which can be consumed by either anonymous memory * or pagecache.
*/ staticint test_memcg_high(constchar *root)
{ int ret = KSFT_FAIL; char *memcg; long high;
memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup;
if (cg_create(memcg)) goto cleanup;
/* a fresh cgroup must start with memory.high == "max" */
if (cg_read_strcmp(memcg, "memory.high", "max\n")) goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0")) goto cleanup;
if (cg_write(memcg, "memory.high", "30M")) goto cleanup;
/* 31M of anon should still succeed: memory.high throttles, it doesn't kill */
if (cg_run(memcg, alloc_anon, (void *)MB(31))) goto cleanup;
/* the 50M pagecache usage check is expected to FAIL under a 30M high */
if (!cg_run(memcg, alloc_pagecache_50M_check, NULL)) goto cleanup;
if (cg_run(memcg, alloc_pagecache_max_30M, NULL)) goto cleanup;
/* at least one "high" event must have been recorded */
high = cg_read_key_long(memcg, "memory.events", "high "); if (high <= 0) goto cleanup;
/* NOTE(review): function is truncated here -- ret = KSFT_PASS, the cleanup
 * label, cg_destroy/free and the return are missing from this copy. */
/* * This test checks that memory.high is able to throttle big single shot * allocation i.e. large allocation within one kernel entry.
*/ staticint test_memcg_high_sync(constchar *root)
{ int ret = KSFT_FAIL, pid, fd = -1; char *memcg; long pre_high, pre_max; long post_high, post_max;
memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup;
/* NOTE(review): the middle of this function is missing from this copy --
 * pid and fd are never assigned, and pre_high/pre_max/post_high/post_max
 * are read below without ever being set. Restore from upstream. */
if (pre_high == post_high || pre_max != post_max) goto cleanup;
ret = KSFT_PASS;
cleanup: if (fd >= 0)
close(fd);
cg_destroy(memcg);
free(memcg);
return ret;
}
/* * This test checks that memory.max limits the amount of * memory which can be consumed by either anonymous memory * or pagecache.
*/ staticint test_memcg_max(constchar *root)
{ int ret = KSFT_FAIL; char *memcg; long current, max;
memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup;
if (cg_create(memcg)) goto cleanup;
if (cg_read_strcmp(memcg, "memory.max", "max\n")) goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0")) goto cleanup;
if (cg_write(memcg, "memory.max", "30M")) goto cleanup;
/* Should be killed by OOM killer */ if (!cg_run(memcg, alloc_anon, (void *)MB(100))) goto cleanup;
if (cg_run(memcg, alloc_pagecache_max_30M, NULL)) goto cleanup;
current = cg_read_long(memcg, "memory.current"); if (current > MB(30) || !current) goto cleanup;
max = cg_read_key_long(memcg, "memory.events", "max "); if (max <= 0) goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
/* * Reclaim from @memcg until usage reaches @goal by writing to * memory.reclaim. * * This function will return false if the usage is already below the * goal. * * This function assumes that writing to memory.reclaim is the only * source of change in memory.current (no concurrent allocations or * reclaim). * * This function makes sure memory.reclaim is sane. It will return * false if memory.reclaim's error codes do not make sense, even if * the usage goal was satisfied.
*/ staticbool reclaim_until(constchar *memcg, long goal)
{ char buf[64]; int retries, err; long current, to_reclaim; bool reclaimed = false;
for (retries = 5; retries > 0; retries--) {
current = cg_read_long(memcg, "memory.current");
/* done once usage is at or within 3% of the goal */
if (current < goal || values_close(current, goal, 3)) break; /* Did memory.reclaim return 0 incorrectly? */ elseif (reclaimed) returnfalse;
/* NOTE(review): function is truncated here -- the to_reclaim computation,
 * the snprintf into buf, the cg_write to memory.reclaim, the err handling,
 * the loop close and the final return are all missing from this copy. */
/* * This test checks that memory.reclaim reclaims the given * amount of memory (from both anon and file, if possible).
*/ staticint test_memcg_reclaim(constchar *root)
{ int ret = KSFT_FAIL; int fd = -1; int retries; char *memcg; long current, expected_usage;
memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup;
if (cg_create(memcg)) goto cleanup;
/* the cgroup must start empty */
current = cg_read_long(memcg, "memory.current"); if (current != 0) goto cleanup;
/* NOTE(review): the pagecache side of the setup (opening the temp fd and
 * spawning the 50M pagecache allocator) is missing from this copy -- fd
 * stays -1 and only the anon allocation below survives. */
/* * If swap is enabled, try to reclaim from both anon and file, else try * to reclaim from file only.
*/ if (is_swap_enabled()) {
cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(50));
expected_usage = MB(100);
} else
expected_usage = MB(50);
/* * Wait until current usage reaches the expected usage (or we run out of * retries).
*/
retries = 5; while (!values_close(cg_read_long(memcg, "memory.current"),
expected_usage, 10)) { if (retries--) {
sleep(1); continue;
} else {
fprintf(stderr, "failed to allocate %ld for memcg reclaim test\n",
expected_usage); goto cleanup;
}
}
/* * Reclaim until current reaches 30M, this makes sure we hit both anon * and file if swap is enabled.
*/ if (!reclaim_until(memcg, MB(30))) goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
close(fd);
return ret;
}
/*
 * NOTE(review): helper that presumably allocates 50M of anon memory and
 * verifies memory.current/memory.swap.current against @arg (the memory
 * limit) -- the body after these declarations is missing from this copy.
 */
staticint alloc_anon_50M_check_swap(constchar *cgroup, void *arg)
{ long mem_max = (long)arg;
size_t size = MB(50); char *buf, *ptr; long mem_current, swap_current; int ret = -1;
/* * This test checks that memory.swap.max limits the amount of * anonymous memory which can be swapped out. Additionally, it verifies that * memory.swap.peak reflects the high watermark and can be reset.
*/ staticint test_memcg_swap_max_peak(constchar *root)
{ int ret = KSFT_FAIL; char *memcg; long max, peak; struct stat ss; int swap_peak_fd = -1, mem_peak_fd = -1;
/* any non-empty string resets */ staticconstchar reset_string[] = "foobarbaz";
if (!is_swap_enabled()) return KSFT_SKIP;
memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup;
if (cg_create(memcg)) goto cleanup;
/* pre-existing swap usage would invalidate the test -- skip */
if (cg_read_long(memcg, "memory.swap.current")) {
ret = KSFT_SKIP; goto cleanup;
}
/* NOTE(review): swap_peak_fd is tested here but the cg_open call that
 * assigns it (and the one for mem_peak_fd) is missing from this copy. */
if (swap_peak_fd == -1) { if (errno == ENOENT)
ret = KSFT_SKIP; goto cleanup;
}
/* * Before we try to use memory.swap.peak's fd, try to figure out * whether this kernel supports writing to that file in the first * place. (by checking the writable bit on the file's st_mode)
*/ if (fstat(swap_peak_fd, &ss)) goto cleanup;
if ((ss.st_mode & S_IWUSR) == 0) {
ret = KSFT_SKIP; goto cleanup;
}
if (cg_read_strcmp(memcg, "memory.max", "max\n")) goto cleanup;
if (cg_read_strcmp(memcg, "memory.swap.max", "max\n")) goto cleanup;
if (cg_write(memcg, "memory.swap.max", "30M")) goto cleanup;
if (cg_write(memcg, "memory.max", "30M")) goto cleanup;
/* Should be killed by OOM killer */ if (!cg_run(memcg, alloc_anon, (void *)MB(100))) goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom ") != 1) goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1) goto cleanup;
/* both watermarks must reflect the ~30M peak, via file and via fd */
peak = cg_read_long(memcg, "memory.peak"); if (peak < MB(29)) goto cleanup;
peak = cg_read_long(memcg, "memory.swap.peak"); if (peak < MB(29)) goto cleanup;
peak = cg_read_long_fd(mem_peak_fd); if (peak < MB(29)) goto cleanup;
peak = cg_read_long_fd(swap_peak_fd); if (peak < MB(29)) goto cleanup;
/* * open, reset and close the peak swap on another FD to make sure * multiple extant fds don't corrupt the linked-list
*/
/* NOTE(review): peak_reset is used below but is not declared in the visible
 * declarations above, and the second-FD open/reset/close block described by
 * the preceding comment is missing -- lines lost in this copy. */
peak_reset = cg_write(memcg, "memory.swap.peak", (char *)reset_string); if (peak_reset) goto cleanup;
peak_reset = cg_write(memcg, "memory.peak", (char *)reset_string); if (peak_reset) goto cleanup;
/* actually reset on the fds */
peak_reset = write(swap_peak_fd, reset_string, sizeof(reset_string)); if (peak_reset != sizeof(reset_string)) goto cleanup;
peak = cg_read_long_fd(swap_peak_fd); if (peak > MB(10)) goto cleanup;
/* * The cgroup is now empty, but there may be a page or two associated * with the open FD accounted to it.
*/
peak = cg_read_long_fd(mem_peak_fd); if (peak > MB(1)) goto cleanup;
/* the file-level (non-fd) watermarks must still show the old peak */
if (cg_read_long(memcg, "memory.peak") < MB(29)) goto cleanup;
if (cg_read_long(memcg, "memory.swap.peak") < MB(29)) goto cleanup;
if (cg_run(memcg, alloc_anon_50M_check_swap, (void *)MB(30))) goto cleanup;
max = cg_read_key_long(memcg, "memory.events", "max "); if (max <= 0) goto cleanup;
peak = cg_read_long(memcg, "memory.peak"); if (peak < MB(29)) goto cleanup;
peak = cg_read_long(memcg, "memory.swap.peak"); if (peak < MB(29)) goto cleanup;
peak = cg_read_long_fd(mem_peak_fd); if (peak < MB(29)) goto cleanup;
peak = cg_read_long_fd(swap_peak_fd); if (peak < MB(19)) goto cleanup;
ret = KSFT_PASS;
cleanup: if (mem_peak_fd != -1 && close(mem_peak_fd))
ret = KSFT_FAIL; if (swap_peak_fd != -1 && close(swap_peak_fd))
ret = KSFT_FAIL;
cg_destroy(memcg);
free(memcg);
return ret;
}
/* * This test disables swapping and tries to allocate anonymous memory * up to OOM. Then it checks for oom and oom_kill events in * memory.events.
*/ staticint test_memcg_oom_events(constchar *root)
{ int ret = KSFT_FAIL; char *memcg;
memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup;
if (cg_create(memcg)) goto cleanup;
if (cg_write(memcg, "memory.max", "30M")) goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0")) goto cleanup;
if (!cg_run(memcg, alloc_anon, (void *)MB(100))) goto cleanup;
if (cg_read_strcmp(memcg, "cgroup.procs", "")) goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom ") != 1) goto cleanup;
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1) goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
/*
 * Arguments handed to the forked TCP server process.
 * (Fixed fused tokens: "unsignedshort" is not valid C.)
 */
struct tcp_server_args {
	unsigned short port;	/* port the server binds to */
	int ctl[2];		/* pipe fds: status is read from ctl[0], written via ctl[1] */
};
/* * This test checks socket memory accounting. * The test forks a TCP server listens on a random port between 1000 * and 61000. Once it gets a client connection, it starts writing to * its socket. * The TCP client interleaves reads from the socket with check whether * memory.current and memory.stat.sock are similar.
*/ staticint test_memcg_sock(constchar *root)
{ int bind_retries = 5, ret = KSFT_FAIL, pid, err; unsignedshort port; char *memcg;
memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup;
if (cg_create(memcg)) goto cleanup;
while (bind_retries--) { struct tcp_server_args args;
/* NOTE(review): the port selection, args/pipe initialization and the
 * cg_run_nowait fork of the server (assigning pid) are missing from this
 * copy -- args, port and pid are used below without being set. */
close(args.ctl[1]); if (read(args.ctl[0], &err, sizeof(err)) != sizeof(err)) goto cleanup;
close(args.ctl[0]);
/* err == 0: server bound successfully; EADDRINUSE: retry another port */
if (!err) break; if (err != EADDRINUSE) goto cleanup;
waitpid(pid, NULL, 0);
}
if (err == EADDRINUSE) {
ret = KSFT_SKIP; goto cleanup;
}
if (tcp_client(memcg, port) != KSFT_PASS) goto cleanup;
waitpid(pid, &err, 0); if (WEXITSTATUS(err)) goto cleanup;
if (cg_read_long(memcg, "memory.current") < 0) goto cleanup;
/* after the connection is gone, no socket memory should remain accounted */
if (cg_read_key_long(memcg, "memory.stat", "sock ")) goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_destroy(memcg);
free(memcg);
return ret;
}
/* * This test disables swapping and tries to allocate anonymous memory * up to OOM with memory.group.oom set. Then it checks that all * processes in the leaf were killed. It also checks that oom_events * were propagated to the parent level.
*/ staticint test_memcg_oom_group_leaf_events(constchar *root)
{ int ret = KSFT_FAIL; char *parent, *child; long parent_oom_events;
/* NOTE(review): the setup (cg_name/cg_create for parent and child, the
 * memory.max / memory.oom.group configuration and the OOM-triggering
 * allocation) is missing from this copy -- parent and child are read
 * below without ever being assigned. */
if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0) goto cleanup;
parent_oom_events = cg_read_key_long(
parent, "memory.events", "oom_kill "); /* * If memory_localevents is not enabled (the default), the parent should * count OOM events in its children groups. Otherwise, it should not * have observed any events.
*/ if (has_localevents && parent_oom_events != 0) goto cleanup; elseif (!has_localevents && parent_oom_events <= 0) goto cleanup;
ret = KSFT_PASS;
cleanup: if (child)
cg_destroy(child); if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
/* * This test disables swapping and tries to allocate anonymous memory * up to OOM with memory.group.oom set. Then it checks that all * processes in the parent and leaf were killed.
*/ staticint test_memcg_oom_group_parent_events(constchar *root)
{ int ret = KSFT_FAIL; char *parent, *child;
/* NOTE(review): the setup (cg_name/cg_create of parent and child, memory
 * limits, enabling memory.oom.group on the parent) is missing from this
 * copy -- child and parent are used below without being assigned. */
if (!cg_run(child, alloc_anon, (void *)MB(100))) goto cleanup;
/* with group OOM enabled on the parent, both levels must be emptied */
if (cg_test_proc_killed(child)) goto cleanup; if (cg_test_proc_killed(parent)) goto cleanup;
ret = KSFT_PASS;
cleanup: if (child)
cg_destroy(child); if (parent)
cg_destroy(parent);
free(child);
free(parent);
return ret;
}
/* * This test disables swapping and tries to allocate anonymous memory * up to OOM with memory.group.oom set. Then it checks that all * processes were killed except those set with OOM_SCORE_ADJ_MIN
*/ staticint test_memcg_oom_group_score_events(constchar *root)
{ int ret = KSFT_FAIL; char *memcg; int safe_pid;
memcg = cg_name(root, "memcg_test_0");
if (!memcg) goto cleanup;
if (cg_create(memcg)) goto cleanup;
if (cg_write(memcg, "memory.max", "50M")) goto cleanup;
if (cg_write(memcg, "memory.swap.max", "0")) goto cleanup;
if (cg_write(memcg, "memory.oom.group", "1")) goto cleanup;
/* NOTE(review): function is truncated here -- spawning the protected
 * (OOM_SCORE_ADJ_MIN) process into safe_pid, the OOM-triggering
 * allocation, the event checks, the cleanup label and the return are
 * all missing; the text fuses directly into main(). */
/*
 * Entry point: locate the unified cgroup v2 hierarchy, verify the memory
 * controller is usable there, detect the memory_recursiveprot and
 * memory_localevents mount options, then run every registered test and
 * report results through the kselftest harness.
 */
int main(int argc, char **argv)
{
	char root[PATH_MAX];
	int i, proc_status, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root), NULL))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

	/*
	 * Check that memory controller is available:
	 * memory is listed in cgroup.controllers
	 */
	if (cg_read_strstr(root, "cgroup.controllers", "memory"))
		ksft_exit_skip("memory controller isn't available\n");

	/* delegate the memory controller to children if not done already */
	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
		if (cg_write(root, "cgroup.subtree_control", "+memory"))
			ksft_exit_skip("Failed to set memory controller\n");

	proc_status = proc_mount_contains("memory_recursiveprot");
	if (proc_status < 0)
		ksft_exit_skip("Failed to query cgroup mount option\n");
	has_recursiveprot = proc_status;

	proc_status = proc_mount_contains("memory_localevents");
	if (proc_status < 0)
		ksft_exit_skip("Failed to query cgroup mount option\n");
	has_localevents = proc_status;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		int rc = tests[i].fn(root);

		if (rc == KSFT_PASS) {
			ksft_test_result_pass("%s\n", tests[i].name);
		} else if (rc == KSFT_SKIP) {
			ksft_test_result_skip("%s\n", tests[i].name);
		} else {
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
		}
	}

	return ret;
}
/*
 * Trailing note (translated from German; residue from an HTML
 * syntax-highlighting tool, not part of the original source file):
 * Measurement V0.5 -- processing duration: 0.16 seconds (preprocessed).
 * The information on this web page was compiled carefully to the best of
 * our knowledge; however, neither completeness, correctness, nor quality
 * of the provided information is guaranteed.
 * Note: the colored syntax display and the timing measurement are still
 * experimental.
 */