Read a C language source or header FILE and extract embedded
documentation comments """
import sys import re from pprint import pformat
from kdoc_re import NestedMatch, KernRe from kdoc_item import KdocItem
#
# Regular expressions used to parse kernel-doc markups at KernelDoc class.
#
# Let's declare them in lowercase outside any class to make easier to
# convert from the python script.
#
# As those are evaluated at the beginning, no need to cache them
#

# Allow whitespace at end of comment start.
doc_start = KernRe(r'^/\*\*\s*$', cache=False)

#
# Tests for the beginning of a kerneldoc block in its various forms.
#
doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False)
doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)",
                        cache=False)
doc_begin_func = KernRe(str(doc_com) +                 # initial " * "
                        r"(?:\w+\s*\*\s*)?" +          # type (not captured)
                        r'(?:define\s+)?' +            # possible "define" (not captured)
                        r'(\w+)\s*(?:\(\w*\))?\s*' +   # name and optional "(...)"
                        r'(?:[-:].*)?$',               # description (not captured)
                        cache=False)

#
# A little helper to get rid of excess white space
#
multi_space = KernRe(r'\s\s+')

def trim_whitespace(s):
    """Strip leading/trailing whitespace and collapse internal runs to one space."""
    return multi_space.sub(' ', s.strip())
class state:
    """
    State machine enums
    """

    # Parser states
    NORMAL          = 0  # normal code
    NAME            = 1  # looking for function name
    DECLARATION     = 2  # We have seen a declaration which might not be done
    BODY            = 3  # the body of the comment
    SPECIAL_SECTION = 4  # doc section ending with a blank line
    PROTO           = 5  # scanning prototype
    DOCBLOCK        = 6  # documentation block
    INLINE_NAME     = 7  # gathering doc outside main block
    INLINE_TEXT     = 8  # reading the body of inline docs
# Delegate warning output to output logic, as this way it # will report warnings/info only for symbols that are output
self.warnings.append(log_msg) return
#
# Begin a new section.
#
def begin_section(self, line_no, title=SECTION_DEFAULT, dump=False):
    """
    Start accumulating a new (named) section at source line `line_no`.

    If `dump` is True, flush the currently-accumulated section first.
    `title` defaults to the module-level SECTION_DEFAULT constant
    (declared elsewhere in this file).
    """
    if dump:
        self.dump_section(start_new=True)

    self.section = title
    self.new_start_line = line_no
def dump_section(self, start_new=True):
    """
    Dumps section contents to arrays/hashes intended for that purpose.
    """
    #
    # If we have accumulated no contents in the default ("description")
    # section, don't bother.
    #
    if self.section == SECTION_DEFAULT and not self._contents:
        return

    name = self.section
    contents = self.contents()

    if type_param.match(name):
        # An "@param"-style section: keep only the parameter name.
        name = type_param.group(1)
        # NOTE(review): the extracted source shows no storage of `contents`
        # for this branch (e.g. into a parameter-description map); lines may
        # have been lost in extraction — confirm against the original file.
    else:
        if name in self.sections and self.sections[name] != "":
            # Only warn on user-specified duplicate section names
            if name != SECTION_DEFAULT:
                self.emit_msg(self.new_start_line,
                              f"duplicate section name '{name}'\n")
            # Treat as a new paragraph - add a blank line
            self.sections[name] += '\n' + contents
        else:
            self.sections[name] = contents
            self.section_start_lines[name] = self.new_start_line
            self.new_start_line = 0
# Initial state for the state machines
self.state = state.NORMAL
# Store entry currently being processed
self.entry = None
# Place all potential outputs into an array
self.entries = []
# # We need Python 3.7 for its "dicts remember the insertion # order" guarantee # if sys.version_info.major == 3 and sys.version_info.minor < 7:
self.emit_msg(0, 'Python 3.7 or later is required for correct results')
def emit_msg(self, ln, msg, warning=True): """Emit a message"""
log_msg = f"{self.fname}:{ln} {msg}"
if self.entry:
self.entry.emit_msg(log_msg, warning) return
if warning:
self.config.log.warning(log_msg) else:
self.config.log.info(log_msg)
def dump_section(self, start_new=True):
    """
    Dumps section contents to arrays/hashes intended for that purpose.

    Thin wrapper: forwards to the current entry, if any.
    """
    if self.entry:
        self.entry.dump_section(start_new)
# TODO: rename it to store_declaration after removal of kernel-doc.pl def output_declaration(self, dtype, name, **args): """
Stores the entry into an entry array.
The actual output and output filters will be handled elsewhere """
if dtype == ""and param.endswith("..."): if KernRe(r'\w\.\.\.$').search(param): # For named variable parameters of the form `x...`, # remove the dots
param = param[:-3] else: # Handles unnamed variable parameters
param = "..."
if param notin self.entry.parameterdescs or \ not self.entry.parameterdescs[param]:
# Handle cache group enforcing variables: they do not need # to be described in header files elif"__cacheline_group"in param: # Ignore __cacheline_group_begin and __cacheline_group_end return
# Warn if parameter has no description # (but ignore ones starting with # as these are not parameters # but inline preprocessor statements) if param notin self.entry.parameterdescs andnot param.startswith("#"):
self.entry.parameterdescs[param] = self.undescribed
self.emit_msg(ln,
f"{dname} '{param}' not described in '{declaration_name}'")
# Strip spaces from param so that it is one continuous string on # parameterlist. This fixes a problem where check_sections() # cannot find a parameter like "addr[6 + 2]" because it actually # appears as "addr[6", "+", "2]" on the parameter list. # However, it's better to maintain the param string unchanged for # output, so just weaken the string compare in check_sections() # to ignore "[blah" in a parameter string.
if arg.startswith('#'): # Treat preprocessor directive as a typeless variable just to fill # corresponding data structures "correctly". Catch it later in # output_* subs.
# Treat preprocessor directive as a typeless variable
self.push_parameter(ln, decl_type, arg, "", "", declaration_name)
for param in args: if KernRe(r'^(\*+)\s*(.*)').match(param):
r = KernRe(r'^(\*+)\s*(.*)') ifnot r.match(param):
self.emit_msg(ln, f"Invalid param: {param}") continue
def check_sections(self, ln, decl_name, decl_type):
    """
    Check for errors inside sections, emitting warnings if not-found
    parameters are described.
    """
    for section in self.entry.sections:
        # A section that is neither a real parameter/member nor one of the
        # well-known section names (known_sections, declared elsewhere in
        # this file) is excess documentation.
        if section not in self.entry.parameterlist and \
           not known_sections.search(section):
            if decl_type == 'function':
                dname = f"{decl_type} parameter"
            else:
                dname = f"{decl_type} member"

            self.emit_msg(ln,
                          f"Excess {dname} '{section}' description in '{decl_name}'")
def check_return_section(self, ln, declaration_name, return_type):
    """
    If the function doesn't return void, warns about the lack of a
    return description.
    """
    if not self.config.wreturn:
        return

    # Ignore an empty return type (It's a macro)
    # Ignore functions with a "void" return type (but not "void *")
    if not return_type or KernRe(r'void\s*\w*\s*$').search(return_type):
        return

    if not self.entry.sections.get("Return", None):
        self.emit_msg(ln,
                      f"No description found for return value of '{declaration_name}'")
def dump_struct(self, ln, proto): """
Store an entry for an struct or union """
if self.entry.identifier != declaration_name:
self.emit_msg(ln,
f"expecting prototype for {decl_type} {self.entry.identifier}. Prototype was for {decl_type} {declaration_name} instead\n") return
# Unwrap struct_group macros based on this definition: # __struct_group(TAG, NAME, ATTRS, MEMBERS...) # which has variants like: struct_group(NAME, MEMBERS...) # Only MEMBERS arguments require documentation. # # Parsing them happens on two steps: # # 1. drop struct group arguments that aren't at MEMBERS, # storing them as STRUCT_GROUP(MEMBERS) # # 2. remove STRUCT_GROUP() ancillary macro. # # The original logic used to remove STRUCT_GROUP() using an # advanced regex: # # \bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*; # # with two patterns that are incompatible with # Python re module, as it has: # # - a recursive pattern: (?1) # - an atomic grouping: (?>...) # # I tried a simpler version: but it didn't work either: # \bSTRUCT_GROUP\(([^\)]+)\)[^;]*; # # As it doesn't properly match the end parenthesis on some cases. # # So, a better solution was crafted: there's now a NestedMatch # class that ensures that delimiters after a search are properly # matched. So, the implementation to drop STRUCT_GROUP() will be # handled in separate.
# Replace macros # # TODO: use NestedMatch for FOO($1, $2, ...) matches # # it is better to also move those to the NestedMatch logic, # to ensure that parenthesis will be properly matched.
for search, sub in sub_prefixes:
members = search.sub(sub, members)
nested = NestedMatch()
for search, sub in sub_nested_prefixes:
members = nested.sub(search, sub, members)
# Keeps the original declaration as-is
declaration = members
# Split nested struct/union elements # # This loop was simpler at the original kernel-doc perl version, as # while ($members =~ m/$struct_members/) { ... } # reads 'members' string on each interaction. # # Python behavior is different: it parses 'members' only once, # creating a list of tuples from the first interaction. # # On other words, this won't get nested structs. # # So, we need to have an extra loop on Python to override such # re limitation.
# Ignore members marked private
proto = KernRe(r'\/\*\s*private:.*?\/\*\s*public:.*?\*\/', flags=re.S).sub('', proto)
proto = KernRe(r'\/\*\s*private:.*}', flags=re.S).sub('}', proto)
# Strip comments
proto = KernRe(r'\/\*.*?\*\/', flags=re.S).sub('', proto)
# # Parse out the name and members of the enum. Typedef form first. #
r = KernRe(r'typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;') if r.search(proto):
declaration_name = r.group(2)
members = r.group(1).rstrip() # # Failing that, look for a straight enum # else:
r = KernRe(r'enum\s+(\w*)\s*\{(.*)\}') if r.match(proto):
declaration_name = r.group(1)
members = r.group(2).rstrip() # # OK, this isn't going to work. # else:
self.emit_msg(ln, f"{proto}: error: Cannot parse enum!") return # # Make sure we found what we were expecting. # if self.entry.identifier != declaration_name: if self.entry.identifier == "":
self.emit_msg(ln,
f"{proto}: wrong kernel-doc identifier on prototype") else:
self.emit_msg(ln,
f"expecting prototype for enum {self.entry.identifier}. "
f"Prototype was for enum {declaration_name} instead") return
ifnot declaration_name:
declaration_name = "(anonymous)" # # Parse out the name of each enum member, and verify that we # have a description for it. #
member_set = set()
members = KernRe(r'\([^;)]*\)').sub('', members) for arg in members.split(','): ifnot arg: continue
arg = KernRe(r'^\s*(\w+).*').sub(r'\1', arg)
self.entry.parameterlist.append(arg) if arg notin self.entry.parameterdescs:
self.entry.parameterdescs[arg] = self.undescribed
self.emit_msg(ln,
f"Enum value '{arg}' not described in enum '{declaration_name}'")
member_set.add(arg) # # Ensure that every described member actually exists in the enum. # for k in self.entry.parameterdescs: if k notin member_set:
self.emit_msg(ln,
f"Excess enum value '%{k}' description in '{declaration_name}'")
# It seems that Python support for re.X is broken: # At least for me (Python 3.13), this didn't work # (r""" # __attribute__\s*\(\( # (?: # [\w\s]+ # attribute name # (?:\([^)]*\))? # attribute arguments # \s*,? # optional comma at the end # )+ # \)\)\s+ # """, "", re.X),
# So, remove whitespaces and comments from it
(r"__attribute__\s*\(\((?:[\w\s]+(?:\([^)]*\))?\s*,?)+\)\)\s+", "", 0),
]
for search, sub, flags in sub_prefixes:
prototype = KernRe(search, flags).sub(sub, prototype)
# Macros are a special case, as they change the prototype format
new_proto = KernRe(r"^#\s*define\s+").sub("", prototype) if new_proto != prototype:
is_define_proto = True
prototype = new_proto else:
is_define_proto = False
# Yes, this truly is vile. We are looking for: # 1. Return type (may be nothing if we're looking at a macro) # 2. Function name # 3. Function parameters. # # All the while we have to watch out for function pointer parameters # (which IIRC is what the two sections are for), C types (these # regexps don't even start to express all the possibilities), and # so on. # # If you mess with these regexps, it's a good idea to check that # the following functions' documentation still comes out right: # - parport_register_device (function pointer parameters) # - atomic_set (macro) # - pci_match_device, __copy_to_user (long return type)
found = True break ifnot found:
self.emit_msg(ln,
f"cannot understand function prototype: '{prototype}'") return
if self.entry.identifier != declaration_name:
self.emit_msg(ln,
f"expecting prototype for {self.entry.identifier}(). Prototype was for {declaration_name}() instead") return
if self.entry.identifier != declaration_name:
self.emit_msg(ln,
f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n") return
# Handle nested parentheses or brackets
r = KernRe(r'(\(*.\)\s*|\[*.\]\s*);$') while r.search(proto):
proto = r.sub('', proto)
# Parse simple typedefs
r = KernRe(r'typedef.*\s+(\w+)\s*;') if r.match(proto):
declaration_name = r.group(1)
if self.entry.identifier != declaration_name:
self.emit_msg(ln,
f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n") return
@staticmethod
def process_export(function_set, line):
    """
    process EXPORT_SYMBOL* tags

    This method doesn't use any variable from the class, so declare it
    with a staticmethod decorator.

    Returns True (and adds the symbol to `function_set`) if `line`
    contains an export; False otherwise.
    """
    # We support documenting some exported symbols with different
    # names. A horrible hack.
    suffixes = ['_noprof']

    # Note: it accepts only one EXPORT_SYMBOL* per line, as having
    # multiple export lines would violate Kernel coding style.

    if export_symbol.search(line):
        symbol = export_symbol.group(2)
    elif export_symbol_ns.search(line):
        symbol = export_symbol_ns.group(2)
    else:
        return False

    #
    # Found an export, trim out any special suffixes
    #
    for suffix in suffixes:
        # Be backward compatible with Python < 3.9
        if symbol.endswith(suffix):
            symbol = symbol[:-len(suffix)]

    function_set.add(symbol)
    return True
def process_normal(self, ln, line):
    """
    STATE_NORMAL: looking for the /** to begin everything.
    """
    if not doc_start.match(line):
        return

    # start a new entry
    self.reset_state(ln)

    # next line is always the function name
    self.state = state.NAME
def process_name(self, ln, line):
    """
    STATE_NAME: Looking for the "name - description" line
    """
    #
    # Check for a DOC: block and handle them specially.
    #
    if doc_block.search(line):
        # NOTE(review): the extracted source jumps straight to setting the
        # identifier; setup of self.entry.section from the DOC: title may
        # have been lost in extraction — confirm against the original file.
        self.entry.identifier = self.entry.section
        self.state = state.DOCBLOCK
    #
    # Otherwise we're looking for a normal kerneldoc declaration line.
    #
    elif doc_decl.search(line):
        self.entry.identifier = doc_decl.group(1)

        # Test for data declaration
        if doc_begin_data.search(line):
            self.entry.decl_type = doc_begin_data.group(1)
            self.entry.identifier = doc_begin_data.group(2)
        #
        # Look for a function description
        #
        elif doc_begin_func.search(line):
            self.entry.identifier = doc_begin_func.group(1)
            self.entry.decl_type = "function"
        #
        # We struck out.
        #
        else:
            self.emit_msg(ln,
                          f"This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst\n{line}")
            self.state = state.NORMAL
            return
        #
        # OK, set up for a new kerneldoc entry.
        #
        self.state = state.BODY
        self.entry.identifier = self.entry.identifier.strip(" ")
        # if there's no @param blocks need to set up default section here
        self.entry.begin_section(ln + 1)
        #
        # Find the description portion, which *should* be there but
        # isn't always.
        # (We should be able to capture this from the previous parsing - someday)
        #
        r = KernRe("[-:](.*)")
        if r.search(line):
            self.entry.declaration_purpose = trim_whitespace(r.group(1))
            self.state = state.DECLARATION
        else:
            self.entry.declaration_purpose = ""

        if not self.entry.declaration_purpose and self.config.wshort_desc:
            self.emit_msg(ln,
                          f"missing initial short description on line:\n{line}")

        if not self.entry.identifier and self.entry.decl_type != "enum":
            self.emit_msg(ln,
                          f"wrong kernel-doc identifier on line:\n{line}")
            self.state = state.NORMAL

        if self.config.verbose:
            self.emit_msg(ln,
                          f"Scanning doc for {self.entry.decl_type} {self.entry.identifier}",
                          warning=False)
    #
    # Failed to find an identifier. Emit a warning
    #
    else:
        self.emit_msg(ln, f"Cannot find identifier on line:\n{line}")
#
# Helper function to determine if a new section is being started.
#
def is_new_section(self, ln, line):
    """
    If `line` begins a new kerneldoc section, switch to it and return
    True; otherwise return False.  Callers (process_decl, process_special,
    process_body) branch on the result.
    """
    if doc_sect.search(line):
        self.state = state.BODY
        #
        # Pick out the name of our new section, tweaking it if need be.
        #
        newsection = doc_sect.group(1)
        if newsection.lower() == 'description':
            newsection = 'Description'
        elif newsection.lower() == 'context':
            newsection = 'Context'
            self.state = state.SPECIAL_SECTION
        elif newsection.lower() in ["@return", "@returns", "return", "returns"]:
            newsection = "Return"
            self.state = state.SPECIAL_SECTION
        elif newsection[0] == '@':
            self.state = state.SPECIAL_SECTION
        #
        # Initialize the contents, and get the new section going.
        #
        newcontents = doc_sect.group(2)
        if not newcontents:
            newcontents = ""
        # NOTE(review): `newcontents` is computed but never stored in the
        # extracted source — an add_text() call appears to have been lost
        # in extraction; confirm against the original file.

        self.dump_section()
        self.entry.begin_section(ln, newsection)
        self.entry.leading_space = None
        return True
    return False
#
# Helper function to detect (and effect) the end of a kerneldoc comment.
#
def is_comment_end(self, ln, line):
    """
    If `line` terminates the kerneldoc comment, flush the current section
    and return True; otherwise return False.  Callers branch on the result.
    """
    if doc_end.search(line):
        self.dump_section()

        # Look for doc_com + <text> + doc_end:
        r = KernRe(r'\s*\*\s*[a-zA-Z_0-9:\.]+\*/')
        if r.match(line):
            self.emit_msg(ln, f"suspicious ending line: {line}")

        # NOTE(review): the extracted source shows no transition into
        # prototype scanning here (e.g. setting state.PROTO); those lines
        # may have been lost in extraction — confirm against the original.
        return True
    return False
def process_decl(self, ln, line):
    """
    STATE_DECLARATION: We've seen the beginning of a declaration
    """
    if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
        return
    #
    # Look for anything with the " * " line beginning.
    #
    if doc_content.search(line):
        cont = doc_content.group(1)
        #
        # A blank line means that we have moved out of the declaration
        # part of the comment (without any "special section" parameter
        # descriptions).
        #
        if cont == "":
            self.state = state.BODY
        #
        # Otherwise we have more of the declaration section to soak up.
        #
        else:
            self.entry.declaration_purpose = \
                trim_whitespace(self.entry.declaration_purpose + ' ' + cont)
    else:
        # Unknown line, ignore
        self.emit_msg(ln, f"bad line: {line}")
def process_special(self, ln, line):
    """
    STATE_SPECIAL_SECTION: a section ending with a blank line
    """
    #
    # If we have hit a blank line (only the " * " marker), then this
    # section is done.
    #
    if KernRe(r"\s*\*\s*$").match(line):
        self.entry.begin_section(ln, dump=True)
        self.state = state.BODY
        return
    #
    # Not a blank line, look for the other ways to end the section.
    #
    if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
        return
    #
    # OK, we should have a continuation of the text for this section.
    #
    if doc_content.search(line):
        cont = doc_content.group(1)
        #
        # If the lines of text after the first in a special section have
        # leading white space, we need to trim it out or Sphinx will get
        # confused.  For the second line (the None case), see what we
        # find there and remember it.
        #
        if self.entry.leading_space is None:
            r = KernRe(r'^(\s+)')
            if r.match(cont):
                self.entry.leading_space = len(r.group(1))
            else:
                self.entry.leading_space = 0
        #
        # Otherwise, before trimming any leading chars, be *sure*
        # that they are white space.  We should maybe warn if this
        # isn't the case.
        #
        # NOTE(review): cont[i] can raise IndexError if a continuation
        # line is shorter than leading_space — confirm whether that can
        # happen in practice before hardening.
        #
        for i in range(0, self.entry.leading_space):
            if cont[i] != " ":
                self.entry.leading_space = i
                break
        #
        # Add the trimmed result to the section and we're done.
        #
        self.entry.add_text(cont[self.entry.leading_space:])
    else:
        # Unknown line, ignore
        self.emit_msg(ln, f"bad line: {line}")
def process_body(self, ln, line): """
STATE_BODY: the bulk of a kerneldoc comment. """ if self.is_new_section(ln, line) or self.is_comment_end(ln, line): return
# Strip newlines/CR's
proto = re.sub(r'[\r\n]+', ' ', proto)
# Check if it's a SYSCALL_DEFINE0 if'SYSCALL_DEFINE0'in proto:
is_void = True
# Replace SYSCALL_DEFINE with correct return type & function name
proto = KernRe(r'SYSCALL_DEFINE.*\(').sub('long sys_', proto)
r = KernRe(r'long\s+(sys_.*?),') if r.search(proto):
proto = KernRe(',').sub('(', proto, count=1) elif is_void:
proto = KernRe(r'\)').sub('(void)', proto, count=1)
# Now delete all of the odd-numbered commas in the proto # so that argument types & names don't have a comma between them
count = 0
length = len(proto)
if is_void:
length = 0 # skip the loop if is_void
for ix in range(length): if proto[ix] == ',':
count += 1 if count % 2 == 1:
proto = proto[:ix] + ' ' + proto[ix + 1:]
def process_proto_function(self, ln, line):
    """Ancillary routine to process a function prototype"""

    # strip C99-style comments to end of line
    line = KernRe(r"\/\/.*$", re.S).sub('', line)
    #
    # Soak up the line's worth of prototype text, stopping at { or ; if present.
    #
    if KernRe(r'\s*#\s*define').match(line):
        self.entry.prototype = line
    elif not line.startswith('#'):
        # skip other preprocessor stuff
        r = KernRe(r'([^\{]*)')
        if r.match(line):
            self.entry.prototype += r.group(1) + " "
    #
    # If we now have the whole prototype, clean it up and declare victory.
    #
    if '{' in line or ';' in line or KernRe(r'\s*#\s*define').match(line):
        # strip comments and surrounding spaces
        self.entry.prototype = KernRe(r'/\*.*\*/').sub('', self.entry.prototype).strip()
        #
        # Handle self.entry.prototypes for function pointers like:
        #     int (*pcs_config)(struct foo)
        # by turning it into
        #     int pcs_config(struct foo)
        #
        r = KernRe(r'^(\S+\s+)\(\s*\*(\S+)\)')
        self.entry.prototype = r.sub(r'\1\2', self.entry.prototype)
        #
        # Handle special declaration syntaxes
        #
        if 'SYSCALL_DEFINE' in self.entry.prototype:
            self.entry.prototype = self.syscall_munge(ln,
                                                      self.entry.prototype)
        else:
            r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT')
            if r.search(self.entry.prototype):
                self.entry.prototype = self.tracepoint_munge(ln,
                                                             self.entry.prototype)
        #
        # ... and we're done
        #
        self.dump_function(ln, self.entry.prototype)
        self.reset_state(ln)
def process_proto_type(self, ln, line):
    """Ancillary routine to process a type"""

    # Strip C99-style comments and surrounding whitespace
    line = KernRe(r"//.*$", re.S).sub('', line).strip()
    if not line:
        return  # nothing to see here

    # To distinguish preprocessor directive from regular declaration later.
    if line.startswith('#'):
        line += ";"
    #
    # Split the declaration on any of { } or ;, and accumulate pieces
    # until we hit a semicolon while not inside {brackets}
    #
    r = KernRe(r'(.*?)([{};])')
    for chunk in r.split(line):
        if chunk:  # Ignore empty matches
            self.entry.prototype += chunk
            #
            # This cries out for a match statement ... someday after we can
            # drop Python 3.9 ...
            #
            if chunk == '{':
                self.entry.brcount += 1
            elif chunk == '}':
                self.entry.brcount -= 1
            elif chunk == ';' and self.entry.brcount <= 0:
                self.dump_declaration(ln, self.entry.prototype)
                self.reset_state(ln)
                return
    #
    # We hit the end of the line while still in the declaration; put
    # in a space to represent the newline.
    #
    self.entry.prototype += ' '
def process_proto(self, ln, line):
    """STATE_PROTO: reading a function/whatever prototype."""
    # Handle one-line inline documentation comments.
    if doc_inline_oneline.search(line):
        self.entry.begin_section(ln, doc_inline_oneline.group(1))
        self.entry.add_text(doc_inline_oneline.group(2))
        self.dump_section()
    # NOTE(review): the extracted source ends here; the original handler
    # presumably also dispatches to the prototype-accumulation helpers
    # (process_proto_function / process_proto_type) — lines appear to have
    # been lost in extraction; confirm against the original file.
def parse_export(self): """
Parses EXPORT_SYMBOL* macros from a single Kernel source file. """
export_table = set()
try: with open(self.fname, "r", encoding="utf8",
errors="backslashreplace") as fp:
for line in fp:
self.process_export(export_table, line)
except IOError: returnNone
return export_table
#
# The state/action table telling us which function to invoke in
# each state.
#
state_actions = {
    state.NORMAL:          process_normal,
    state.NAME:            process_name,
    state.BODY:            process_body,
    state.DECLARATION:     process_decl,
    state.SPECIAL_SECTION: process_special,
    state.INLINE_NAME:     process_inline_name,
    state.INLINE_TEXT:     process_inline_text,
    state.PROTO:           process_proto,
    state.DOCBLOCK:        process_docblock,
}
def parse_kdoc(self):
    """
    Open and process each line of a C source file.

    The parsing is controlled via a state machine, and the line is passed
    to a different process function depending on the state. The process
    function may update the state as needed.

    Besides parsing kernel-doc tags, it also parses export symbols.

    Returns a tuple (export_table, entries).
    """
    prev = ""
    prev_ln = None
    export_table = set()

    try:
        with open(self.fname, "r", encoding="utf8",
                  errors="backslashreplace") as fp:
            for ln, line in enumerate(fp):
                line = line.expandtabs().strip("\n")

                # Group continuation lines on prototypes
                if self.state == state.PROTO:
                    if line.endswith("\\"):
                        prev += line.rstrip("\\")
                        # BUGFIX: the sentinel is None, not falsy — a
                        # continuation starting at line 0 would otherwise
                        # keep overwriting prev_ln and misreport line numbers.
                        if prev_ln is None:
                            prev_ln = ln
                        continue

                    if prev:
                        ln = prev_ln
                        line = prev + line
                        prev = ""
                        prev_ln = None

                # This is an optimization over the original script.
                # There, when export_file was used for the same file,
                # it was read twice. Here, we use the already-existing
                # loop to parse exported symbols as well.
                #
                if (self.state != state.NORMAL) or \
                   not self.process_export(export_table, line):
                    # Hand this line to the appropriate state handler
                    self.state_actions[self.state](self, ln, line)
    except OSError:
        self.config.log.error(f"Error: Cannot open file {self.fname}")

    return export_table, self.entries
Messung V0.5
¤ Dauer der Verarbeitung: 0.52 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.