/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:	the struct trace_kprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 *
 * Walks the global dyn_event list and visits only the entries that are
 * trace_kprobes, converting the cursor to the containing trace_kprobe.
 */
#define for_each_trace_kprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
/* Return 0 if it fails to find the symbol address */ static nokprobe_inline unsignedlong trace_kprobe_address(struct trace_kprobe *tk)
{ unsignedlong addr;
staticinlineint __enable_trace_kprobe(struct trace_kprobe *tk)
{ int ret = 0;
if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) { if (trace_kprobe_is_return(tk))
ret = enable_kretprobe(&tk->rp); else
ret = enable_kprobe(&tk->rp.kp);
}
list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) { if (!trace_kprobe_is_registered(tk)) continue; if (trace_kprobe_is_return(tk))
disable_kretprobe(&tk->rp); else
disable_kprobe(&tk->rp.kp);
}
}
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
static int enable_trace_kprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_kprobe *tk;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	/* Already armed by a previous trace/perf user: nothing more to do. */
	if (enabled)
		return 0;

	/*
	 * Arm every sibling kprobe sharing this trace_probe.  "enabled"
	 * now tracks whether at least one was armed, so a partial failure
	 * can be rolled back below.
	 */
	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
		if (trace_kprobe_has_gone(tk))
			continue;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			break;
		enabled = true;
	}

	if (ret) {
		/* Failed to enable one of them. Roll back all */
		if (enabled)
			__disable_trace_kprobe(tp);
		if (file)
			trace_probe_remove_file(tp, file);
		else
			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	return ret;
}
/* * Disable trace_probe * if the file is NULL, disable "perf" handler, or disable "trace" handler.
*/ staticint disable_trace_kprobe(struct trace_event_call *call, struct trace_event_file *file)
{ struct trace_probe *tp;
tp = trace_probe_primary_from_call(call); if (WARN_ON_ONCE(!tp)) return -ENODEV;
if (file) { if (!trace_probe_get_file_link(tp, file)) return -ENOENT; if (!trace_probe_has_single_file(tp)) goto out;
trace_probe_clear_flag(tp, TP_FLAG_TRACE);
} else
trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
if (!trace_probe_is_enabled(tp))
__disable_trace_kprobe(tp);
out: if (file) /* * Synchronization is done in below function. For perf event, * file == NULL and perf_trace_event_unreg() calls * tracepoint_synchronize_unregister() to ensure synchronize * event. We don't need to care about it.
*/
trace_probe_remove_file(tp, file);
if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset)) returnfalse;
/* Get the entry address of the target function */
addr -= offset;
/* * Since ftrace_location_range() does inclusive range check, we need * to subtract 1 byte from the end address.
*/ return !ftrace_location_range(addr, addr + size - 1);
}
/* Check if the address is on a suffixed-symbol */ if (!lookup_symbol_name(addr, symname)) {
p = strchr(symname, '.'); if (!p) returntrue;
*p = '\0';
addr = (unsignedlong)kprobe_lookup_name(symname, 0); if (addr) return __within_notrace_func(addr);
}
/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_kprobe_is_registered(tk))
		return -EINVAL;

	/* Refuse to probe functions excluded from ftrace/kprobe tracing. */
	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %ps\n",
			(void *)trace_kprobe_address(tk));
		return -EINVAL;
	}

	/* Re-resolve argument fetch offsets before arming the probe. */
	for (i = 0; i < tk->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tk->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	return ret;
}
/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_kprobe_is_registered(tk)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		/* Cleanup kprobe for reuse and mark it unregistered */
		INIT_HLIST_NODE(&tk->rp.kp.hlist);
		INIT_LIST_HEAD(&tk->rp.kp.list);
		/*
		 * A symbol-based probe is marked unregistered by clearing
		 * the resolved address; it will be re-resolved on the next
		 * __register_trace_kprobe().
		 */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}
/* Unregister a trace_probe and probe_event */ staticint unregister_trace_kprobe(struct trace_kprobe *tk)
{ /* If other probes are on the event, just unregister kprobe */ if (trace_probe_has_sibling(&tk->tp)) goto unreg;
/* Enabled event can not be unregistered */ if (trace_probe_is_enabled(&tk->tp)) return -EBUSY;
/* If there's a reference to the dynamic event */ if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp))) return -EBUSY;
/* Will fail if probe is being used by ftrace or perf */ if (unregister_kprobe_event(tk)) return -EBUSY;
/* * trace_probe_compare_arg_type() ensured that nr_args and * each argument name and type are same. Let's compare comm.
*/ for (i = 0; i < orig->tp.nr_args; i++) { if (strcmp(orig->tp.args[i].comm,
comp->tp.args[i].comm)) break;
}
if (i == orig->tp.nr_args) returntrue;
}
returnfalse;
}
/*
 * Append @tk to the existing event @to: the two must have identical
 * argument types and must not target the same probe point.  On success
 * the new kprobe is registered and added to the dyn_event list.
 * Returns 0 on success, -EEXIST on a conflicting definition, or a
 * negative error from probe registration.
 */
static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_kprobe_has_same_kprobe(to, tk)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tk->tp, &to->tp);
	if (ret)
		return ret;

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret)
		trace_probe_unlink(&tk->tp);
	else
		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));

	return ret;
}
/* Register a trace_probe and probe_event */ staticint register_trace_kprobe(struct trace_kprobe *tk)
{ struct trace_kprobe *old_tk; int ret;
/* Register new event */
ret = register_kprobe_event(tk); if (ret) { if (ret == -EEXIST) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret); return ret;
}
/* Register k*probe */
ret = __register_trace_kprobe(tk); if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
ret = 0;
}
if (ret < 0)
unregister_kprobe_event(tk); else
dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
/*
 * Re-register a probe whose symbol lives in @mod, after the module has
 * been loaded.  If the probe's symbol spec carries a "MOD:" prefix, the
 * symbol part is validated against the module first.
 */
static int register_module_trace_kprobe(struct module *mod, struct trace_kprobe *tk)
{
	const char *p;
	int ret = 0;

	p = strchr(trace_kprobe_symbol(tk), ':');
	if (p)
		ret = validate_module_probe_symbol(module_name(mod), p + 1);
	if (!ret)
		ret = __register_trace_kprobe(tk);
	return ret;
}
if (val != MODULE_STATE_COMING) return NOTIFY_DONE;
/* Update probes on coming module */
guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) { if (trace_kprobe_within_module(tk, mod)) { /* Don't need to check busy - this should have gone. */
__unregister_trace_kprobe(tk);
ret = register_module_trace_kprobe(mod, tk); if (ret)
pr_warn("Failed to re-register probe %s on %s: %d\n",
trace_probe_name(&tk->tp),
module_name(mod), ret);
}
}
if (count > 1) { /* * Users should use ADDR to remove the ambiguity of * using KSYM only.
*/ return -EADDRNOTAVAIL;
} elseif (count == 0) { /* * We can return ENOENT earlier than when register the * kprobe.
*/ return -ENOENT;
} return 0;
}
#ifdef CONFIG_MODULES
/* Return NULL if the module is not loaded or under unloading. */
static struct module *try_module_get_by_name(const char *name)
{
	struct module *mod;

	/* RCU protects the module list walk in find_module(). */
	guard(rcu)();
	mod = find_module(name);
	/* try_module_get() fails if the module is going away. */
	if (mod && !try_module_get(mod))
		mod = NULL;
	return mod;
}
#else
#define try_module_get_by_name(name)	(NULL)
#endif
/*
 * Validate a probe symbol spec of the form "symbol" or "module:symbol".
 * The string is split in place at the ':' for the check and restored
 * before returning.  A module reference is held across the validation
 * so the module cannot unload in the middle of it; if the module is
 * not loaded yet, validation is skipped (returns 0, deferred).
 */
static int validate_probe_symbol(char *symbol)
{
	struct module *mod = NULL;
	char *modname = NULL, *p;
	int ret = 0;

	p = strchr(symbol, ':');
	if (p) {
		modname = symbol;
		symbol = p + 1;
		*p = '\0';
		mod = try_module_get_by_name(modname);
		if (!mod)
			goto out;
	}

	ret = validate_module_probe_symbol(modname, symbol);
out:
	if (p)
		*p = ':';	/* restore the caller's string */
	if (mod)
		module_put(mod);
	return ret;
}
event = strchr(&argv[0][1], ':'); if (event)
event++;
if (isdigit(argv[0][1])) { char *buf __free(kfree) = NULL;
if (!is_return) {
trace_probe_log_err(1, BAD_MAXACT_TYPE); return -EINVAL;
} if (event)
len = event - &argv[0][1] - 1; else
len = strlen(&argv[0][1]); if (len > MAX_EVENT_NAME_LEN - 1) {
trace_probe_log_err(1, BAD_MAXACT); return -EINVAL;
}
buf = kmemdup(&argv[0][1], len + 1, GFP_KERNEL); if (!buf) return -ENOMEM;
buf[len] = '\0';
ret = kstrtouint(buf, 0, &maxactive); if (ret || !maxactive) {
trace_probe_log_err(1, BAD_MAXACT); return -EINVAL;
} /* kretprobes instances are iterated over via a list. The * maximum should stay reasonable.
*/ if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
trace_probe_log_err(1, MAXACT_TOO_BIG); return -EINVAL;
}
}
/* try to parse an address. if that fails, try to read the
* input as a symbol. */ if (kstrtoul(argv[1], 0, (unsignedlong *)&addr)) {
trace_probe_log_set_index(1); /* Check whether uprobe event specified */ if (strchr(argv[1], '/') && strchr(argv[1], ':')) return -ECANCELED;
/* a symbol specified */
symbol = kstrdup(argv[1], GFP_KERNEL); if (!symbol) return -ENOMEM;
ret = traceprobe_expand_dentry_args(argc, argv, &dbuf); if (ret) return ret;
/* setup a probe */
tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
argc, is_return); if (IS_ERR(tk)) {
ret = PTR_ERR(tk); /* This must return -ENOMEM, else there is a bug */
WARN_ON_ONCE(ret != -ENOMEM); return ret; /* We know tk is not allocated */
}
/* parse arguments */ for (i = 0; i < argc; i++) {
trace_probe_log_set_index(i + 2);
ctx->offset = 0;
ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], ctx); if (ret) return ret; /* This can be -ENOMEM */
} /* entry handler for kretprobe */ if (is_return && tk->tp.entry_arg) {
tk->rp.entry_handler = trace_kprobe_entry_handler;
tk->rp.data_size = traceprobe_get_entry_data_size(&tk->tp);
}
ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
ret = traceprobe_set_print_fmt(&tk->tp, ptype); if (ret < 0) return ret;
ret = register_trace_kprobe(tk); if (ret) {
trace_probe_log_set_index(1); if (ret == -EILSEQ)
trace_probe_log_err(0, BAD_INSN_BNDRY); elseif (ret == -ENOENT)
trace_probe_log_err(0, BAD_PROBE_ADDR); elseif (ret != -ENOMEM && ret != -EEXIST)
trace_probe_log_err(0, FAIL_REG_PROBE); return ret;
} /* * Here, 'tk' has been registered to the list successfully, * so we don't need to free it.
*/
tk = NULL;
/**
 * kprobe_event_cmd_init - Initialize a kprobe event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a kprobe event command object. Use this before
 * calling any of the other kprobe_event functions.
 */
void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
			  trace_kprobe_run_command);
}
EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
/** * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list * @cmd: A pointer to the dynevent_cmd struct representing the new event * @kretprobe: Is this a return probe? * @name: The name of the kprobe event * @loc: The location of the kprobe event * @...: Variable number of arg (pairs), one pair for each field * * NOTE: Users normally won't want to call this function directly, but * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically * adds a NULL to the end of the arg list. If this function is used * directly, make sure the last arg in the variable arg list is NULL. * * Generate a kprobe event command to be executed by * kprobe_event_gen_cmd_end(). This function can be used to generate the * complete command or only the first part of it; in the latter case, * kprobe_event_add_fields() can be used to add more fields following this. * * Unlikely the synth_event_gen_cmd_start(), @loc must be specified. This * returns -EINVAL if @loc == NULL. * * Return: 0 if successful, error otherwise.
*/ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe, constchar *name, constchar *loc, ...)
{ char buf[MAX_EVENT_NAME_LEN]; struct dynevent_arg arg;
va_list args; int ret;
if (cmd->type != DYNEVENT_TYPE_KPROBE) return -EINVAL;
/** * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list * @cmd: A pointer to the dynevent_cmd struct representing the new event * @...: Variable number of arg (pairs), one pair for each field * * NOTE: Users normally won't want to call this function directly, but * rather use the kprobe_event_add_fields() wrapper, which * automatically adds a NULL to the end of the arg list. If this * function is used directly, make sure the last arg in the variable * arg list is NULL. * * Add probe fields to an existing kprobe command using a variable * list of args. Fields are added in the same order they're listed. * * Return: 0 if successful, error otherwise.
*/ int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{ struct dynevent_arg arg;
va_list args; int ret = 0;
if (cmd->type != DYNEVENT_TYPE_KPROBE) return -EINVAL;
dynevent_arg_init(&arg, 0);
va_start(args, cmd); for (;;) { constchar *field;
field = va_arg(args, constchar *); if (!field) break;
if (++cmd->n_fields > MAX_TRACE_ARGS) {
ret = -EINVAL; break;
}
arg.str = field;
ret = dynevent_arg_add(cmd, &arg, NULL); if (ret) break;
}
va_end(args);
/** * kprobe_event_delete - Delete a kprobe event * @name: The name of the kprobe event to delete * * Delete a kprobe event with the give @name from kernel code rather * than directly from the command line. * * Return: 0 if successful, error otherwise.
*/ int kprobe_event_delete(constchar *name)
{ char buf[MAX_EVENT_NAME_LEN];
/* Note that we don't verify it, since the code does not come from user space */ staticint
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata, void *dest, void *base)
{ struct pt_regs *regs = rec; unsignedlong val; int ret;
retry: /* 1st stage: get value from context */ switch (code->op) { case FETCH_OP_REG:
val = regs_get_register(regs, code->param); break; case FETCH_OP_STACK:
val = regs_get_kernel_stack_nth(regs, code->param); break; case FETCH_OP_STACKP:
val = kernel_stack_pointer(regs); break; case FETCH_OP_RETVAL:
val = regs_return_value(regs); break; #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API case FETCH_OP_ARG:
val = regs_get_kernel_argument(regs, code->param); break; case FETCH_OP_EDATA:
val = *(unsignedlong *)((unsignedlong)edata + code->offset); break; #endif case FETCH_NOP_SYMBOL: /* Ignore a place holder */
code++; goto retry; default:
ret = process_common_fetch_insn(code, &val); if (ret < 0) return ret;
}
code++;
/* * There is a small chance that get_kretprobe(ri) returns NULL when * the kretprobe is unregister on another CPU between kretprobe's * trampoline_handler and this function.
*/ if (unlikely(!rp)) return -ENOENT;
tk = container_of(rp, struct trace_kprobe, rp);
/* store argument values into ri->data as entry data */ if (tk->tp.entry_arg)
store_trace_entry_data(ri->data, &tk->tp, regs);
if (bpf_prog_array_valid(call)) { unsignedlong orig_ip = instruction_pointer(regs); int ret;
ret = trace_call_bpf(call, regs);
/* * We need to check and see if we modified the pc of the * pt_regs, and if so return 1 so that we don't do the * single stepping.
*/ if (orig_ip != instruction_pointer(regs)) return 1; if (!ret) return 0;
}
head = this_cpu_ptr(call->perf_events); if (hlist_empty(head)) return 0;
/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(event, NULL);
	/* perf open/close/add/del need no per-event action here. */
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
/* * There is a small chance that get_kretprobe(ri) returns NULL when * the kretprobe is unregister on another CPU between kretprobe's * trampoline_handler and this function.
*/ if (unlikely(!rp)) return 0;
/* create a trace_kprobe, but don't add it to global lists */ struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsignedlong offs, bool is_return)
{ enum probe_print_type ptype; struct trace_kprobe *tk __free(free_trace_kprobe) = NULL; int ret; char *event;
if (func) {
ret = validate_probe_symbol(func); if (ret) return ERR_PTR(ret);
}
/* * local trace_kprobes are not added to dyn_event, so they are never * searched in find_trace_kprobe(). Therefore, there is no concern of * duplicated name here.
*/
event = func ? func : "DUMMY_EVENT";
while (cmd && *cmd != '\0') {
p = strchr(cmd, ';'); if (p)
*p++ = '\0';
ret = create_or_delete_trace_kprobe(cmd); if (ret)
pr_warn("Failed to add event(%d): %s\n", ret, cmd);
cmd = p;
}
enable_boot_kprobe_events();
}
/* * Register dynevent at core_initcall. This allows kernel to setup kprobe * events in postcore_initcall without tracefs.
*/ static __init int init_kprobe_trace_early(void)
{ int ret;
ret = dyn_event_register(&trace_kprobe_ops); if (ret) return ret;
if (trace_kprobe_register_module_notifier()) return -EINVAL;
list_for_each_entry(file, &tr->events, list) if (file->event_call == trace_probe_event_call(&tk->tp)) return file;
return NULL;
}
/* * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this * stage, we can do this lockless.
*/ static __init int kprobe_trace_self_tests_init(void)
{ int ret, warn = 0; int (*target)(int, int, int, int, int, int); struct trace_kprobe *tk; struct trace_event_file *file;
if (tracing_is_disabled()) return -ENODEV;
if (tracing_selftest_disabled) return 0;
target = kprobe_trace_selftest_target;
pr_info("Testing kprobe tracing: ");
ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)"); if (WARN_ONCE(ret, "error on probing function entry.")) {
warn++;
} else { /* Enable trace point */
tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); if (WARN_ONCE(tk == NULL, "error on probing function entry.")) {
warn++;
} else {
file = find_trace_probe_file(tk, top_trace_array()); if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
warn++;
} else
enable_trace_kprobe(
trace_probe_event_call(&tk->tp), file);
}
}
ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval"); if (WARN_ONCE(ret, "error on probing function return.")) {
warn++;
} else { /* Enable trace point */
tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); if (WARN_ONCE(tk == NULL, "error on getting 2nd new probe.")) {
warn++;
} else {
file = find_trace_probe_file(tk, top_trace_array()); if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
warn++;
} else
enable_trace_kprobe(
trace_probe_event_call(&tk->tp), file);
}
}
if (warn) goto end;
ret = target(1, 2, 3, 4, 5, 6);
/* * Not expecting an error here, the check is only to prevent the * optimizer from removing the call to target() as otherwise there * are no side-effects and the call is never performed.
*/ if (ret != 21)
warn++;
/* Disable trace points before removing it */
tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); if (WARN_ONCE(tk == NULL, "error on getting test probe.")) {
warn++;
} else { if (WARN_ONCE(trace_kprobe_nhit(tk) != 1, "incorrect number of testprobe hits."))
warn++;
tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); if (WARN_ONCE(tk == NULL, "error on getting 2nd test probe.")) {
warn++;
} else { if (WARN_ONCE(trace_kprobe_nhit(tk) != 1, "incorrect number of testprobe2 hits."))
warn++;
ret = create_or_delete_trace_kprobe("-:testprobe"); if (WARN_ONCE(ret, "error on deleting a probe."))
warn++;
ret = create_or_delete_trace_kprobe("-:testprobe2"); if (WARN_ONCE(ret, "error on deleting a probe."))
warn++;
end: /* * Wait for the optimizer work to finish. Otherwise it might fiddle * with probes in already freed __init text.
*/
wait_for_kprobe_optimizer(); if (warn)
pr_cont("NG: Some tests are failed. Please check them.\n"); else
pr_cont("OK\n"); return 0;
}
late_initcall(kprobe_trace_self_tests_init);
#endif
Messung V0.5
¤ Dauer der Verarbeitung: 0.25 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.