/*
 * pNFS functions to call and manage layout drivers.
 *
 * Copyright (c) 2002 [year of first publication]
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */
/* NOTE(review): fragment — the enclosing function's signature is missing
 * from this chunk. The code below reads as the tail of a layout-driver
 * lookup helper: it searches the registered-driver list under
 * pnfs_spinlock and pins the matching driver's module with
 * try_module_get(), returning NULL if the module reference cannot be
 * taken. TODO recover the lost function header. */
spin_lock(&pnfs_spinlock);
local = find_pnfs_driver_locked(id); if (local != NULL && !try_module_get(local->owner)) {
dprintk("%s: Could not grab reference on module\n", __func__);
local = NULL;
}
spin_unlock(&pnfs_spinlock); return local;
}
void pnfs_put_layoutdriver(conststruct pnfs_layoutdriver_type *ld)
{ if (ld)
module_put(ld->owner);
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{ if (nfss->pnfs_curr_ld) { if (nfss->pnfs_curr_ld->clear_layoutdriver)
nfss->pnfs_curr_ld->clear_layoutdriver(nfss); /* Decrement the MDS count. Purge the deviceid cache if zero */ if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
nfs4_deviceid_purge_client(nfss->nfs_client);
module_put(nfss->pnfs_curr_ld->owner);
}
nfss->pnfs_curr_ld = NULL;
}
/* * When the server sends a list of layout types, we choose one in the order * given in the list below. * * FIXME: should this list be configurable in some fashion? module param? * mount option? something else?
*/ staticconst u32 ld_prefs[] = {
LAYOUT_SCSI,
LAYOUT_BLOCK_VOLUME,
LAYOUT_OSD2_OBJECTS,
LAYOUT_FLEX_FILES,
LAYOUT_NFSV4_1_FILES,
0
};
/* NOTE(review): fragment — the signature of the enclosing comparison
 * helper is missing from this chunk. Given ld_prefs[] above, this tail
 * ranks two layout-type ids by their position in ld_prefs (the id found
 * first sorts ahead, i.e. returns -1/1); ids not listed compare equal.
 * TODO recover the lost function header. */
for (i = 0; ld_prefs[i] != 0; i++) { if (ld1 == ld_prefs[i]) return -1;
if (ld2 == ld_prefs[i]) return 1;
} return 0;
}
/* * Try to set the server's pnfs module to the pnfs layout type specified by id. * Currently only one pNFS layout driver per filesystem is supported. * * @ids array of layout types supported by MDS.
*/ void
set_pnfs_layoutdriver(struct nfs_server *server, conststruct nfs_fh *mntfh, struct nfs_fsinfo *fsinfo)
{ struct pnfs_layoutdriver_type *ld_type = NULL;
u32 id; int i;
/* Walk the MDS-advertised layout types in order and take the first one
 * whose driver is registered, attempting a module load on a miss. */
for (i = 0; i < fsinfo->nlayouttypes; i++) {
id = fsinfo->layouttype[i];
ld_type = find_pnfs_driver(id); if (!ld_type) {
request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
id);
ld_type = find_pnfs_driver(id);
} if (ld_type) break;
}
if (!ld_type) {
dprintk("%s: No pNFS module found!\n", __func__); goto out_no_driver;
}
/* NOTE(review): truncated — the rest of this function (attaching the
 * chosen driver to the server and the out_no_driver label) is missing
 * from this chunk. The fused token "conststruct" in the signature is
 * extraction garbling, left byte-identical here. */
/*
 * pnfs_get_layout_hdr - take a reference on a layout header.
 * @lo: layout header whose plh_refcount is incremented.
 *
 * Need to hold i_lock if caller does not already hold a reference.
 */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	refcount_inc(&lo->plh_refcount);
}
/*
 * Update the seqid of a layout stateid after receiving NFS4ERR_OLD_STATEID.
 * @dst: stateid the caller attempted to use; updated on success.
 * @dst_range: replaced with the full-file range when lsegs are returned.
 * @inode: inode owning the layout.
 *
 * Returns true if @dst was refreshed and the operation may be retried.
 */
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
		struct pnfs_layout_range *dst_range,
		struct inode *inode)
{
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct pnfs_layout_hdr *lo;
	bool ret = false;
	LIST_HEAD(head);
	int err;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo && pnfs_layout_is_valid(lo) &&
	    nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
		/* Is our call using the most recent seqid? If so, bump it */
		if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
			nfs4_stateid_seqid_inc(dst);
			ret = true;
			goto out;
		}
		/* Try to update the seqid to the most recent */
		err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
		if (err != -EBUSY) {
			dst->seqid = lo->plh_stateid.seqid;
			*dst_range = range;
			ret = true;
		}
	}
out:
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	return ret;
}
/* * Mark a pnfs_layout_hdr and all associated layout segments as invalid * * In order to continue using the pnfs_layout_hdr, a full recovery * is required. * Note that caller must hold inode->i_lock.
*/ int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo, struct list_head *lseg_list)
{ struct pnfs_layout_range range = {
.iomode = IOMODE_ANY,
.offset = 0,
.length = NFS4_MAX_UINT64,
}; struct pnfs_layout_segment *lseg, *next;
/* NOTE(review): fusion point — the real body of
 * pnfs_mark_layout_stateid_invalid is missing from this chunk. The
 * lines below reference fail_bit/end/start, which are not declared in
 * this scope; they appear to be the tail of a separate layoutget
 * fail-bit test helper that was fused in during extraction. The tokens
 * "returnfalse"/"returntrue" are garbling, left byte-identical. */
if (test_bit(fail_bit, &lo->plh_flags) == 0) returnfalse;
end = jiffies;
start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT; if (!time_in_range(lo->plh_retry_timestamp, start, end)) { /* It is time to retry the failed layoutgets */
pnfs_layout_clear_fail_bit(lo, fail_bit); returnfalse;
} returntrue;
}
/*
 * mark_lseg_invalid - clear a layout segment's VALID bit and try to drop it.
 * @lseg: segment to invalidate.
 * @tmp_list: list the segment is moved to if its refcount hits zero.
 *
 * Returns 1 if lseg is removed from list, 0 otherwise.
 *
 * Fix: the original had the fused token "staticint" (garbled
 * "static int"), which does not compile.
 */
static int
mark_lseg_invalid(struct pnfs_layout_segment *lseg, struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			refcount_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}
/** * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later * @lo: layout header containing the lsegs * @tmp_list: list head where doomed lsegs should go * @recall_range: optional recall range argument to match (may be NULL) * @seq: only invalidate lsegs obtained prior to this sequence (may be 0) * * Walk the list of lsegs in the layout header, and tear down any that should * be destroyed. If "recall_range" is specified then the segment must match * that range. If "seq" is non-zero, then only match segments that were handed * out at or before that sequence. * * Returns number of matching invalid lsegs remaining in list after scanning * it and purging them.
*/ int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, struct list_head *tmp_list, conststruct pnfs_layout_range *recall_range,
u32 seq)
{ struct pnfs_layout_segment *lseg, *next; struct nfs_server *server = NFS_SERVER(lo->plh_inode); int remaining = 0;
/* NOTE(review): truncated — the walk over the segment list and the
 * return of "remaining" are missing from this chunk; only the
 * declarations survive. "conststruct" in the signature is garbling,
 * left byte-identical. */
/* note free_me must contain lsegs from a single layout_hdr */ void
pnfs_free_lseg_list(struct list_head *free_me)
{ struct pnfs_layout_segment *lseg, *tmp;
/* NOTE(review): truncated — the loop that detaches and frees each lseg
 * on @free_me is missing from this chunk. */
/* * Called by the state manager to remove all layouts established under an * expired lease.
*/ void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
nfs4_deviceid_mark_client_invalid(clp);
nfs4_deviceid_purge_client(clp);
/* NOTE(review): fusion point — pnfs_destroy_all_layouts has lost its
 * closing lines, and the code below (seqid/barrier logic referencing
 * lo/new/newseq/oldseq/update_barrier, none declared in this scope) is
 * the tail of a different function, apparently the layout stateid
 * update helper. TODO recover the lost code. */
if (pnfs_seqid_is_newer(newseq, oldseq))
nfs4_stateid_copy(&lo->plh_stateid, new);
if (update_barrier) {
pnfs_barrier_update(lo, newseq); return;
} /* * Because of wraparound, we want to keep the barrier * "close" to the current seqids. We really only want to * get here from a layoutget call.
*/ if (atomic_read(&lo->plh_outstanding) == 1)
pnfs_barrier_update(lo, be32_to_cpu(lo->plh_stateid.seqid));
}
/* lget is set to 1 if called from inside send_layoutget call chain */ staticbool
pnfs_layoutgets_blocked(conststruct pnfs_layout_hdr *lo)
{ return lo->plh_block_lgets ||
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}
/* NOTE(review): fragment — the enclosing function's signature is missing
 * from this chunk; inode/lo are not declared here. The body checks the
 * RETURN_REQUESTED flag and, under i_lock, prepares and sends an
 * asynchronous layoutreturn, which matches a "layoutreturn before
 * putting the layout header" style helper. TODO recover the header.
 * "conststruct" is garbling, left byte-identical. */
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) return;
spin_lock(&inode->i_lock); if (pnfs_layout_need_return(lo)) { conststruct cred *cred;
nfs4_stateid stateid; enum pnfs_iomode iomode; bool send;
send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
spin_unlock(&inode->i_lock); if (send) { /* Send an async layoutreturn so we dont deadlock */
pnfs_send_layoutreturn(lo, &stateid, &cred, iomode,
PNFS_FL_LAYOUTRETURN_ASYNC);
}
} else
spin_unlock(&inode->i_lock);
}
/* * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr * when the layout segment list is empty. * * Note that a pnfs_layout_hdr can exist with an empty layout segment * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the * deviceid is marked invalid.
*/ int
_pnfs_return_layout(struct inode *ino)
{ struct pnfs_layout_hdr *lo = NULL; struct nfs_inode *nfsi = NFS_I(ino); struct pnfs_layout_range range = {
.iomode = IOMODE_ANY,
.offset = 0,
.length = NFS4_MAX_UINT64,
};
LIST_HEAD(tmp_list); conststruct cred *cred;
nfs4_stateid stateid; int status = 0; bool send, valid_layout;
dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
spin_lock(&ino->i_lock);
lo = nfsi->layout; if (!lo) {
spin_unlock(&ino->i_lock);
dprintk("NFS: %s no layout to return\n", __func__); goto out;
} /* Reference matched in nfs4_layoutreturn_release */
pnfs_get_layout_hdr(lo); /* Is there an outstanding layoutreturn ? */ if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
spin_unlock(&ino->i_lock); if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
TASK_UNINTERRUPTIBLE)) goto out_put_layout_hdr;
spin_lock(&ino->i_lock);
}
valid_layout = pnfs_layout_is_valid(lo);
pnfs_clear_layoutcommit(ino, &tmp_list);
pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
/* Don't send a LAYOUTRETURN if list was initially empty */ if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
!valid_layout) {
spin_unlock(&ino->i_lock);
dprintk("NFS: %s no layout segments to return\n", __func__); goto out_wait_layoutreturn;
}
/* NOTE(review): truncated — the remainder of this function (preparing
 * and sending the layoutreturn, the out_wait_layoutreturn /
 * out_put_layout_hdr / out labels, and the return of "status") is
 * missing from this chunk. "conststruct" above is garbling, left
 * byte-identical. */
/*
 * pnfs_commit_and_return_layout - flush data, layoutcommit, then return
 * the layout for an inode.
 * @inode: inode whose layout is being committed and returned.
 *
 * New layoutgets and DS I/O are blocked (plh_block_lgets) for the
 * duration. Returns 0 when there is no layout, otherwise the status of
 * the layoutcommit or of _pnfs_return_layout().
 */
int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);

	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);

	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}
/* NOTE(review): fragment — the signature and opening declarations of
 * this function are missing from this chunk; ino/nfsi/ctx/state/lseg/
 * skip_read/roc/layoutreturn/stateid/cred/lc_cred/iomode/args are all
 * undeclared here. The shape (ROC lseg scan, delegation/open-state
 * checks, optional compound layoutreturn) matches a pnfs_roc-style
 * return-on-close helper. The fused tokens "returnfalse"/"returntrue"
 * are garbling, left byte-identical. TODO recover the lost header. */
if (!nfs_have_layout(ino)) returnfalse;
retry:
rcu_read_lock();
spin_lock(&ino->i_lock);
lo = nfsi->layout; if (!lo || !pnfs_layout_is_valid(lo) ||
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
lo = NULL; goto out_noroc;
}
pnfs_get_layout_hdr(lo); if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
spin_unlock(&ino->i_lock);
rcu_read_unlock();
wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
TASK_UNINTERRUPTIBLE);
pnfs_put_layout_hdr(lo); goto retry;
}
/* no roc if we hold a delegation */ if (nfs4_check_delegation(ino, FMODE_READ)) { if (nfs4_check_delegation(ino, FMODE_WRITE)) goto out_noroc;
skip_read = true;
}
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
state = ctx->state; if (state == NULL) continue; /* Don't return layout if there is open file state */ if (state->state & FMODE_WRITE) goto out_noroc; if (state->state & FMODE_READ)
skip_read = true;
}
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) { if (skip_read && lseg->pls_range.iomode == IOMODE_READ) continue; /* If we are sending layoutreturn, invalidate all valid lsegs */ if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags)) continue; /* * Note: mark lseg for return so pnfs_layout_remove_lseg * doesn't invalidate the layout for us.
*/
set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); if (!mark_lseg_invalid(lseg, &lo->plh_return_segs)) continue;
pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
}
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) goto out_noroc;
/* ROC in two conditions: * 1. there are ROC lsegs * 2. we don't send layoutreturn
*/ /* lo ref dropped in pnfs_roc_release() */
layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode); /* If the creds don't match, we can't compound the layoutreturn */ if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0) goto out_noroc;
/* NOTE(review): lines setting "roc" and the assembly of "args" appear to
 * have been lost between the check above and the label below. */
out_noroc:
spin_unlock(&ino->i_lock);
rcu_read_unlock();
pnfs_layoutcommit_inode(ino, true); if (roc) { struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld; if (ld->prepare_layoutreturn)
ld->prepare_layoutreturn(args);
pnfs_put_layout_hdr(lo); returntrue;
} if (layoutreturn)
pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, 0);
pnfs_put_layout_hdr(lo); returnfalse;
}
/*
 * pnfs_roc_done - post-process a compounded LAYOUTRETURN result.
 * @task: the RPC task that carried the layoutreturn.
 * @argpp: in/out layoutreturn arguments; cleared unless a retry is needed.
 * @respp: in/out layoutreturn results; cleared unless a retry is needed.
 * @ret: in/out NFS status of the layoutreturn operation.
 *
 * Returns 0 when the caller should proceed, -EAGAIN to retry the
 * layoutreturn, or -EIO on a fatal network error (layout released).
 */
int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
		  struct nfs4_layoutreturn_res **respp, int *ret)
{
	struct nfs4_layoutreturn_args *arg = *argpp;
	int retval = -EAGAIN;

	if (!arg)
		return 0;
	/* Handle Layoutreturn errors */
	switch (*ret) {
	case 0:
		retval = 0;
		break;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		/* Was there an RPC level error? If not, retry */
		if (task->tk_rpc_status == 0)
			break;
		/*
		 * Is there a fatal network level error?
		 * If so release the layout, but flag the error.
		 */
		if ((task->tk_rpc_status == -ENETDOWN ||
		     task->tk_rpc_status == -ENETUNREACH) &&
		    task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
			*ret = 0;
			(*respp)->lrs_present = 0;
			retval = -EIO;
			break;
		}
		/* If the call was not sent, let caller handle it */
		if (!RPC_WAS_SENT(task))
			return 0;
		/*
		 * Otherwise, assume the call succeeded and
		 * that we need to release the layout
		 */
		*ret = 0;
		(*respp)->lrs_present = 0;
		retval = 0;
		break;
	case -NFS4ERR_DELAY:
		/* Let the caller handle the retry */
		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
		return 0;
	case -NFS4ERR_OLD_STATEID:
		if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
						     &arg->range, arg->inode))
			break;
		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
		return -EAGAIN;
	}
	*argpp = NULL;
	*respp = NULL;
	return retval;
}
/* NOTE(review): fragment — the enclosing function's signature is missing
 * from this chunk; ino/nfsi/task/sleep are not declared here. The body
 * puts the RPC task to sleep on the server's roc waitqueue while a
 * layoutreturn is outstanding, which matches a "should this task wait"
 * drain helper. TODO recover the lost header. */
/* we might not have grabbed lo reference. so need to check under
* i_lock */
spin_lock(&ino->i_lock);
lo = nfsi->layout; if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
sleep = true;
}
spin_unlock(&ino->i_lock); return sleep;
}
/* * Compare two layout segments for sorting into layout cache. * We want to preferentially return RW over RO layouts, so ensure those * are seen first.
*/ static s64
pnfs_lseg_range_cmp(conststruct pnfs_layout_range *l1, conststruct pnfs_layout_range *l2)
{
s64 d;
/* high offset > low offset */
d = l1->offset - l2->offset; if (d) return d;
/* short length > long length */
d = l2->length - l1->length; if (d) return d;
/* NOTE(review): truncated — the final tie-break (presumably comparing
 * iomode so RW sorts ahead of RO, per the header comment) and the
 * closing brace are missing from this chunk. "conststruct" is garbling,
 * left byte-identical. */
/* * Use mdsthreshold hints set at each OPEN to determine if I/O should go * to the MDS or over pNFS * * The nfs_inode read_io and write_io fields are cumulative counters reset * when there are no layout segments. Note that in pnfs_update_layout iomode * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a * WRITE request. * * A return of true means use MDS I/O. * * From rfc 5661: * If a file's size is smaller than the file size threshold, data accesses * SHOULD be sent to the metadata server. If an I/O request has a length that * is below the I/O size threshold, the I/O SHOULD be sent to the metadata * server. If both file size and I/O size are provided, the client SHOULD * reach or exceed both thresholds before sending its read or write * requests to the data server.
*/ staticbool pnfs_within_mdsthreshold(struct nfs_open_context *ctx, struct inode *ino, int iomode)
{ struct nfs4_threshold *t = ctx->mdsthreshold; struct nfs_inode *nfsi = NFS_I(ino);
loff_t fsize = i_size_read(ino); bool size = false, size_set = false, io = false, io_set = false, ret = false;
/* NOTE(review): truncated — the threshold evaluation against @t and the
 * return of "ret" are missing from this chunk; only the declarations
 * survive. "staticbool" is garbling, left byte-identical. */
/*
 * pnfs_prepare_to_retry_layoutget - wait for an outstanding layoutreturn
 * before retrying a layoutget.
 * @lo: layout header being waited on.
 *
 * Returns the (killable) wait status from wait_on_bit_action().
 *
 * Fix: the original had the fused token "staticint" (garbled
 * "static int"), which does not compile.
 */
static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				  nfs_wait_bit_killable,
				  TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
}
/* NOTE(review): fragment — the enclosing function's signature and opening
 * logic are missing from this chunk; clp/lo/server are not declared
 * here. The surviving tail links the layout header onto the per-server
 * list under the client lock so that CB_LAYOUTRECALL processing can find
 * it. TODO recover the lost header. */
/* The lo must be on the clp list if there is any * chance of a CB_LAYOUTRECALL(FILE) coming in.
*/
spin_lock(&clp->cl_lock);
list_add_tail_rcu(&lo->plh_layouts, &server->layouts);
spin_unlock(&clp->cl_lock);
}
}
/* * Layout segment is retreived from the server if not cached. * The appropriate layout segment is referenced and returned to the caller.
*/ struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
loff_t pos,
u64 count, enum pnfs_iomode iomode, bool strict_iomode,
gfp_t gfp_flags)
{ struct pnfs_layout_range arg = {
.iomode = iomode,
.offset = pos,
.length = count,
}; unsigned pg_offset; struct nfs_server *server = NFS_SERVER(ino); struct nfs_client *clp = server->nfs_client; struct pnfs_layout_hdr *lo = NULL; struct pnfs_layout_segment *lseg = NULL; struct nfs4_layoutget *lgp;
nfs4_stateid stateid; struct nfs4_exception exception = {
.inode = ino,
}; unsignedlong giveup = jiffies + (clp->cl_lease_time << 1); bool first;
lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp)); if (IS_ERR(lseg)) goto out;
first = false;
spin_lock(&ino->i_lock);
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); if (lo == NULL) {
spin_unlock(&ino->i_lock);
lseg = ERR_PTR(-ENOMEM);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_NOMEM); goto out;
}
/* Do we even need to bother with this? */ if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_BULK_RECALL);
dprintk("%s matches recall, use MDS\n", __func__); goto out_unlock;
}
/* if LAYOUTGET already failed once we don't try again */ if (pnfs_layout_io_test_failed(lo, iomode)) {
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_IO_TEST_FAIL); goto out_unlock;
}
/* * If the layout segment list is empty, but there are outstanding * layoutget calls, then they might be subject to a layoutrecall.
*/ if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
atomic_read(&lo->plh_outstanding) != 0) {
spin_unlock(&ino->i_lock);
lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN,
TASK_KILLABLE)); if (IS_ERR(lseg)) goto out_put_layout_hdr;
pnfs_put_layout_hdr(lo); goto lookup_again;
}
/* * Because we free lsegs when sending LAYOUTRETURN, we need to wait * for LAYOUTRETURN.
*/ if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
spin_unlock(&ino->i_lock);
dprintk("%s wait for layoutreturn\n", __func__);
lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo)); if (!IS_ERR(lseg)) {
pnfs_put_layout_hdr(lo);
dprintk("%s retrying\n", __func__);
trace_pnfs_update_layout(ino, pos, count, iomode, lo,
lseg,
PNFS_UPDATE_LAYOUT_RETRY); goto lookup_again;
}
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_RETURN); goto out_put_layout_hdr;
}
/* * Choose a stateid for the LAYOUTGET. If we don't have a layout * stateid, or it has been invalidated, then we must use the open * stateid.
*/ if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) { int status;
/* * The first layoutget for the file. Need to serialize per * RFC 5661 Errata 3208.
*/ if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
&lo->plh_flags)) {
spin_unlock(&ino->i_lock);
lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
NFS_LAYOUT_FIRST_LAYOUTGET,
TASK_KILLABLE)); if (IS_ERR(lseg)) goto out_put_layout_hdr;
pnfs_put_layout_hdr(lo);
dprintk("%s retrying\n", __func__); goto lookup_again;
}
/* Heuristic: don't send layoutget if we have cached data */ if (rng.iomode == IOMODE_READ &&
(i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0)) return;
lo = _pnfs_grab_empty_layout(ino, ctx); if (!lo) return;
lgp = pnfs_alloc_init_layoutget_args(ino, ctx, ¤t_stateid, &rng,
nfs_io_gfp_mask()); if (!lgp) {
pnfs_clear_first_layoutget(lo);
nfs_layoutget_end(lo);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Diese beiden folgenden Angebotsgruppen bietet das Unternehmen0.47Angebot
Wie Sie bei der Firma Beratungs- und Dienstleistungen beauftragen können
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.