// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Copyright 2020-2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE.
*/
/* Long enough to ensure no retry fault comes after svm range is restored and * page table is updated.
*/ #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC) #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) #define dynamic_svm_range_dump(svms) \
_dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms) #else #define dynamic_svm_range_dump(svms) \ do { if (0) svm_range_debug_dump(svms); } while (0) #endif
/* NOTE(review): the preprocessor directives above have lost their original
 * line breaks (#define/#if/#else/#endif fused onto shared lines). Restore one
 * directive per line against the upstream kfd_svm.c before compiling.
 */
/* Giant svm range split into smaller ranges based on this, it is decided using * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to * power of 2MB.
*/ static uint64_t max_svm_range_pages;
/** * svm_range_unlink - unlink svm_range from lists and interval tree * @prange: svm range structure to be removed * * Remove the svm_range from the svms and svm_bo lists and the svms * interval tree. * * Context: The caller must hold svms->lock
*/ staticvoid svm_range_unlink(struct svm_range *prange)
{
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange, prange->start, prange->last);
/* Drop this range from its svm_bo's range list under the list lock. */
if (prange->svm_bo) {
spin_lock(&prange->svm_bo->list_lock);
list_del(&prange->svm_bo_list);
spin_unlock(&prange->svm_bo->list_lock);
}
/* NOTE(review): 'mm' is not defined in this function, and inserting an MMU
 * notifier contradicts the "unlink" purpose documented above. This call
 * appears to have been spliced in from svm_range_add_notifier_locked() by a
 * broken extraction; the upstream unlink removes the range from the
 * interval tree instead — confirm against upstream kfd_svm.c.
 */
mmu_interval_notifier_insert_locked(&prange->notifier, mm,
prange->start << PAGE_SHIFT,
prange->npages << PAGE_SHIFT,
&svm_range_mn_ops);
}
/** * svm_range_add_to_svms - add svm range to svms * @prange: svm range structure to be added * * Add the svm range to svms interval tree and link list * * Context: The caller must hold svms->lock
*/ staticvoid svm_range_add_to_svms(struct svm_range *prange)
{
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange, prange->start, prange->last);
/* NOTE(review): everything from here down references 'svm_bo', 'mm', 'p'
 * and 'pdd', none of which are defined in this function. This body belongs
 * to svm_range_bo_release() in the upstream file; the real
 * svm_range_add_to_svms body (list_move_tail + interval_tree_insert) is
 * missing. The source was truncated/fused by a broken extraction.
 */
/* Detach every range still referencing this BO before freeing it. */
spin_lock(&svm_bo->list_lock); while (!list_empty(&svm_bo->range_list)) { struct svm_range *prange =
list_first_entry(&svm_bo->range_list, struct svm_range, svm_bo_list); /* list_del_init tells a concurrent svm_range_vram_node_new when * it's safe to reuse the svm_bo pointer and svm_bo_list head.
*/
list_del_init(&prange->svm_bo_list);
spin_unlock(&svm_bo->list_lock);
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange->start, prange->last);
mutex_lock(&prange->lock);
prange->svm_bo = NULL; /* prange should not hold vram page now */
WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
mutex_unlock(&prange->lock);
mm = svm_bo->eviction_fence->mm; /* * The forked child process takes svm_bo device pages ref, svm_bo could be * released after parent process is gone.
*/
p = kfd_lookup_process_by_mm(mm); if (p) {
pdd = kfd_get_process_device_data(svm_bo->node, p); if (pdd)
atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
kfd_unref_process(p);
}
mmput(mm);
}
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) /* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
/* Release the eviction fence, the backing BO, and the svm_bo itself. */
dma_fence_put(&svm_bo->eviction_fence->base);
amdgpu_bo_unref(&svm_bo->bo);
kfree(svm_bo);
}
/*
 * svm_range_bo_unref - drop one reference on an SVM BO
 * @svm_bo: SVM BO to release a reference on; may be NULL (no-op)
 *
 * When the last reference is dropped, kref_put() invokes
 * svm_range_bo_release to tear the BO down.
 */
static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (!svm_bo)
		return;

	kref_put(&svm_bo->kref, svm_range_bo_release);
}
/* Check whether prange's existing svm_bo can be reused for @node. */
staticbool
svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
{
mutex_lock(&prange->lock); if (!prange->svm_bo) {
mutex_unlock(&prange->lock); returnfalse;
} if (prange->ttm_res) { /* We still have a reference, all is well */
mutex_unlock(&prange->lock); returntrue;
} if (svm_bo_ref_unless_zero(prange->svm_bo)) { /* * Migrate from GPU to GPU, remove range from source svm_bo->node * range list, and return false to allocate svm_bo from destination * node.
*/ if (prange->svm_bo->node != node) {
mutex_unlock(&prange->lock);
svm_range_bo_unref(prange->svm_bo); returnfalse;
} if (READ_ONCE(prange->svm_bo->evicting)) { struct dma_fence *f; struct svm_range_bo *svm_bo; /* The BO is getting evicted, * we need to get a new one
*/
mutex_unlock(&prange->lock);
svm_bo = prange->svm_bo;
f = dma_fence_get(&svm_bo->eviction_fence->base);
svm_range_bo_unref(prange->svm_bo); /* wait for the fence to avoid long spin-loop * at list_empty_careful
*/
dma_fence_wait(f, false);
dma_fence_put(f);
} else { /* The BO was still around and we got * a new reference to it
*/
mutex_unlock(&prange->lock);
pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
/* We need a new svm_bo. Spin-loop to wait for concurrent * svm_range_bo_release to finish removing this range from * its range list and set prange->svm_bo to null. After this, * it is safe to reuse the svm_bo pointer and svm_bo_list head.
*/ while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
cond_resched();
/* NOTE(review): from here on the code references 'gpu_id', 'nattr',
 * 'attrs', 'update_mapping' etc. that are not parameters of this function.
 * These are fragments of svm_range_get_pdd_by_node(),
 * svm_range_check_attrs(), svm_range_apply_attrs() and
 * svm_range_is_same_attrs() fused together by a broken extraction; the
 * remainder of svm_range_validate_svm_bo is missing. Restore from the
 * upstream kfd_svm.c.
 */
p = container_of(prange->svms, struct kfd_process, svms);
pdd = kfd_process_device_data_by_id(p, gpu_id); if (!pdd) {
pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id); return NULL;
}
/* Fragment: validate each attribute's GPU id (svm_range_check_attrs). */
for (i = 0; i < nattr; i++) {
uint32_t val = attrs[i].value; int gpuidx = MAX_GPU_INSTANCE;
switch (attrs[i].type) { case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
gpuidx = kfd_process_gpuidx_from_gpuid(p, val); break; case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
gpuidx = kfd_process_gpuidx_from_gpuid(p, val); break; case KFD_IOCTL_SVM_ATTR_ACCESS: case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
gpuidx = kfd_process_gpuidx_from_gpuid(p, val); break; case KFD_IOCTL_SVM_ATTR_SET_FLAGS: break; case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: break; case KFD_IOCTL_SVM_ATTR_GRANULARITY: break; default:
pr_debug("unknown attr type 0x%x\n", attrs[i].type); return -EINVAL;
}
/* Fragment: copy attribute values into prange (svm_range_apply_attrs). */
for (i = 0; i < nattr; i++) { switch (attrs[i].type) { case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
prange->preferred_loc = attrs[i].value; break; case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
prange->prefetch_loc = attrs[i].value; break; case KFD_IOCTL_SVM_ATTR_ACCESS: case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: case KFD_IOCTL_SVM_ATTR_NO_ACCESS: if (!p->xnack_enabled)
*update_mapping = true;
/* Fragment: compare attrs with prange's state (svm_range_is_same_attrs). */
for (i = 0; i < nattr; i++) { switch (attrs[i].type) { case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: if (prange->preferred_loc != attrs[i].value) returnfalse; break; case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: /* Prefetch should always trigger a migration even * if the value of the attribute didn't change.
*/ returnfalse; case KFD_IOCTL_SVM_ATTR_ACCESS: case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
gpuidx = kfd_process_gpuidx_from_gpuid(p,
attrs[i].value); if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) { if (test_bit(gpuidx, prange->bitmap_access) ||
test_bit(gpuidx, prange->bitmap_aip)) returnfalse;
} elseif (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) { if (!test_bit(gpuidx, prange->bitmap_access)) returnfalse;
} else { if (!test_bit(gpuidx, prange->bitmap_aip)) returnfalse;
} break; case KFD_IOCTL_SVM_ATTR_SET_FLAGS: if ((prange->flags & attrs[i].value) != attrs[i].value) returnfalse; break; case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: if ((prange->flags & attrs[i].value) != 0) returnfalse; break; case KFD_IOCTL_SVM_ATTR_GRANULARITY: if (prange->granularity != attrs[i].value) returnfalse; break; default:
WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
}
}
returntrue;
}
/** * svm_range_debug_dump - print all range information from svms * @svms: svm range list header * * debug output svm range start, end, prefetch location from svms * interval tree and link list * * Context: The caller must hold svms->lock
*/ staticvoid svm_range_debug_dump(struct svm_range_list *svms)
{ struct interval_tree_node *node; struct svm_range *prange;
/* NOTE(review): the body below references 'vram_pages', 'num_elements',
 * 'dst' and 'src', none of which exist in this function — it is the tail of
 * svm_range_copy_array() fused onto the svm_range_debug_dump header by a
 * broken extraction. The real debug_dump body (interval-tree walk with
 * pr_debug) is missing; restore from the upstream kfd_svm.c.
 */
*vram_pages = 0; for (i = 0; i < num_elements; i++) {
dma_addr_t *temp;
temp = (dma_addr_t *)dst + i;
/* Count entries whose DMA address carries the VRAM-domain flag bit. */
*temp = *((dma_addr_t *)src + i); if (*temp&SVM_RANGE_VRAM_DOMAIN)
(*vram_pages)++;
}
return (void *)dst;
}
/* Copy the per-GPU DMA address arrays from @src to @dst.
 * NOTE(review): this function is truncated — the trailing 'return 0;' and
 * closing brace are missing from the extraction; compare with upstream.
 */
staticint
svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
{ int i;
/* Skip GPU instances that have no DMA array allocated in the source. */
for (i = 0; i < MAX_GPU_INSTANCE; i++) { if (!src->dma_addr[i]) continue;
dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i], sizeof(*src->dma_addr[i]), src->npages, 0, NULL); if (!dst->dma_addr[i]) return -ENOMEM;
}
/** * svm_range_split_adjust - split range and adjust * * @new: new range * @old: the old range * @start: the old range adjust to start address in pages * @last: the old range adjust to last address in pages * * Copy system memory dma_addr or vram ttm_res in old range to new * range from new_start up to size new->npages, the remaining old range is from * start to last * * Return: * 0 - OK, -ENOMEM - out of memory
*/ staticint
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
uint64_t start, uint64_t last)
{ int r;
pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
new->svms, new->start, old->start, old->last, start, last);
/* The new range must be fully contained within the old one. */
if (new->start < old->start ||
new->last > old->last) {
WARN_ONCE(1, "invalid new range start or last\n"); return -EINVAL;
}
r = svm_range_split_pages(new, old, start, last); if (r) return r;
/* VRAM-backed ranges also need their TTM nodes split. */
if (old->actual_loc && old->ttm_res) {
r = svm_range_split_nodes(new, old, start, last); if (r) return r;
}
/* NOTE(review): function is truncated here — the upstream version goes on
 * to shrink the old range and return 0; restore from upstream kfd_svm.c.
 */
/** * svm_range_split - split a range in 2 ranges * * @prange: the svm range to split * @start: the remaining range start address in pages * @last: the remaining range last address in pages * @new: the result new range generated * * Two cases only: * case 1: if start == prange->start * prange ==> prange[start, last] * new range [last + 1, prange->last] * * case 2: if last == prange->last * prange ==> prange[start, last] * new range [prange->start, start - 1] * * Return: * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
*/ staticint
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last, struct svm_range **new)
{
uint64_t old_start = prange->start;
uint64_t old_last = prange->last; struct svm_range_list *svms; int r = 0;
/* NOTE(review): the loop below uses 'offset', 'npages', 'dma_addr',
 * 'pdd', 'adev', 'vm' etc. that do not exist in svm_range_split — it is a
 * fragment of the GPU page-table mapping helper (svm_range_map_to_gpu)
 * plus the tail of the page-map-owner lookup, fused in by a broken
 * extraction. The real svm_range_split body is missing.
 */
for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
/* Collect all pages in the same address range and memory domain * that can be mapped with a single call to update mapping.
*/ if (i < offset + npages - 1 &&
last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN)) continue;
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain); if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
/* For dGPU mode, we use same vm_manager to allocate VRAM for * different memory partition based on fpfn/lpfn, we should use * same vm_manager.vram_base_offset regardless memory partition.
*/
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
NULL, last_start, prange->start + i,
pte_flags,
(last_start - prange->start) << PAGE_SHIFT,
bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
NULL, dma_addr, &vm->last_update);
/* Fragment: page-map owner lookup tail (svm_range_get_pdd / owner). */
pdd = kfd_process_device_from_gpuidx(p, gpuidx); if (!pdd) return NULL;
return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}
/* * Validation+GPU mapping with concurrent invalidation (MMU notifiers) * * To prevent concurrent destruction or change of range attributes, the * svm_read_lock must be held. The caller must not hold the svm_write_lock * because that would block concurrent evictions and lead to deadlocks. To * serialize concurrent migrations or validations of the same range, the * prange->migrate_mutex must be held. * * For VRAM ranges, the SVM BO must be allocated and valid (protected by its * eviction fence. * * The following sequence ensures race-free validation and GPU mapping: * * 1. Reserve page table (and SVM BO if range is in VRAM) * 2. hmm_range_fault to get page addresses (if system memory) * 3. DMA-map pages (if system memory) * 4-a. Take notifier lock * 4-b. Check that pages still valid (mmu_interval_read_retry) * 4-c. Check that the range was not split or otherwise invalidated * 4-d. Update GPU page table * 4.e. Release notifier lock * 5. Release page table (and SVM BO) reservation
*/ staticint svm_range_validate_and_map(struct mm_struct *mm, unsignedlong map_start, unsignedlong map_last, struct svm_range *prange, int32_t gpuidx, bool intr, bool wait, bool flush_tlb)
{ struct svm_validate_context *ctx; unsignedlong start, end, addr; struct kfd_process *p; void *owner;
int32_t idx; int r = 0;
/* NOTE(review): ctx allocation and the 'if (gpuidx < MAX_GPU_INSTANCE)'
 * branch head are missing between here and the actual_loc check below —
 * the source was truncated; compare with upstream kfd_svm.c.
 */
/* If prefetch range to GPU, or GPU retry fault migrate range to * GPU, which has ACCESS attribute to the range, create mapping * on that GPU.
*/ if (prange->actual_loc) {
gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
prange->actual_loc);
 if (gpuidx < 0) {
WARN_ONCE(1, "failed get device by id 0x%x\n",
prange->actual_loc);
r = -EINVAL; goto free_ctx;
} if (test_bit(gpuidx, prange->bitmap_access))
bitmap_set(ctx->bitmap, gpuidx, 1);
}
/* * If prange is already mapped or with always mapped flag, * update mapping on GPUs with ACCESS attribute
*/ if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { if (prange->mapped_to_gpu ||
prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
}
} else {
bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
r = 0; goto free_ctx;
}
if (prange->actual_loc && !prange->ttm_res) { /* This should never happen. actual_loc gets set by * svm_migrate_ram_to_vram after allocating a BO.
*/
WARN_ONCE(1, "VRAM BO missing during validation\n");
r = -EINVAL; goto free_ctx;
}
r = svm_range_reserve_bos(ctx, intr); if (r) goto free_ctx;
/* NOTE(review): the per-VMA for-loop head and vma lookup are missing here;
 * the lines below ('vma', 'next', 'readonly', 'hmm_range', 'offset') are
 * the interior of that loop — truncated source, compare with upstream.
 */
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT; /* HMM requires at least READ permissions. If provided with PROT_NONE, * unmap the memory. If it's not already mapped, this is a no-op * If PROT_WRITE is provided without READ, warn first then unmap
*/ if (!(vma->vm_flags & VM_READ)) { unsignedlong e, s;
svm_range_lock(prange); if (vma->vm_flags & VM_WRITE)
pr_debug("VM_WRITE without VM_READ is not supported");
s = max(start, prange->start);
e = min(end, prange->last); if (e >= s)
r = svm_range_unmap_from_gpus(prange, s, e,
KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
svm_range_unlock(prange); /* If unmap returns non-zero, we'll bail on the next for loop * iteration, so just leave r and continue
*/
addr = next; continue;
}
WRITE_ONCE(p->svms.faulting_task, current);
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
readonly, owner, NULL,
&hmm_range);
WRITE_ONCE(p->svms.faulting_task, NULL); if (r)
pr_debug("failed %d to get svm range pages\n", r);
} else {
r = -EFAULT;
}
if (!r) {
offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
hmm_range->hmm_pfns); if (r)
pr_debug("failed %d to dma map range\n", r);
}
svm_range_lock(prange);
/* Free backing memory of hmm_range if it was initialized * Overrride return value to TRY AGAIN only if prior returns * were successful
*/ if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
if (!r && !list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
r = -EAGAIN;
}
/* NOTE(review): the lines below are an interior fragment of the
 * svm_range_restore_work() delayed-work handler (its function header,
 * svms/evicted_ranges setup, and the range-list loop head are missing from
 * this extraction). Compare with upstream kfd_svm.c before reuse.
 */
p = container_of(svms, struct kfd_process, svms);
process_info = p->kgd_process_info;
/* Keep mm reference when svm_range_validate_and_map ranges */
mm = get_task_mm(p->lead_thread); if (!mm) {
pr_debug("svms 0x%p process mm gone\n", svms); return;
}
/* * If range is migrating, wait for migration is done.
*/
mutex_lock(&prange->migrate_mutex);
r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
MAX_GPU_INSTANCE, false, true, false); if (r)
pr_debug("failed %d to map 0x%lx to gpus\n", r,
prange->start);
mutex_unlock(&prange->migrate_mutex); if (r) goto out_reschedule;
if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid) goto out_reschedule;
}
/* Only clear the evicted count if no new eviction raced with us. */
if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
evicted_ranges) goto out_reschedule;
evicted_ranges = 0;
r = kgd2kfd_resume_mm(mm); if (r) { /* No recovery from this failure. Probably the CP is * hanging. No point trying again.
*/
pr_debug("failed %d to resume KFD\n", r);
}
/* If validation failed, reschedule another attempt */ if (evicted_ranges) {
pr_debug("reschedule to restore svm range\n");
queue_delayed_work(system_freezable_wq, &svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
/** * svm_range_evict - evict svm range * @prange: svm range structure * @mm: current process mm_struct * @start: starting process queue number * @last: last process queue number * @event: mmu notifier event when range is evicted or migrated * * Stop all queues of the process to ensure GPU doesn't access the memory, then * return to let CPU evict the buffer and proceed CPU pagetable update. * * Don't need use lock to sync cpu pagetable invalidation with GPU execution. * If invalidation happens while restore work is running, restore work will * restart to ensure to get the latest CPU pages mapping to GPU, then start * the queues.
*/ staticint
svm_range_evict(struct svm_range *prange, struct mm_struct *mm, unsignedlong start, unsignedlong last, enum mmu_notifier_event event)
{ struct svm_range_list *svms = prange->svms; struct svm_range *pchild; struct kfd_process *p; int r = 0;
/* NOTE(review): the body of svm_range_evict is missing — the extraction
 * jumps straight from its local declarations into the kerneldoc and body
 * fragments of svm_range_add() below. Restore from upstream kfd_svm.c.
 */
/** * svm_range_add - add svm range and handle overlap * @p: the range add to this process svms * @start: page size aligned * @size: page size aligned * @nattr: number of attributes * @attrs: array of attributes * @update_list: output, the ranges need validate and update GPU mapping * @insert_list: output, the ranges need insert to svms * @remove_list: output, the ranges are replaced and need remove from svms * @remap_list: output, remap unaligned svm ranges * * Check if the virtual address range has overlap with any existing ranges, * split partly overlapping ranges and add new ranges in the gaps. All changes * should be applied to the range_list and interval tree transactionally. If * any range split or allocation fails, the entire update fails. Therefore any * existing overlapping svm_ranges are cloned and the original svm_ranges left * unchanged. * * If the transaction succeeds, the caller can update and insert clones and * new ranges, then free the originals. * * Otherwise the caller can free the clones and new ranges, while the old * svm_ranges remain unchanged. * * Context: Process context, caller must hold svms->lock * * Return: * 0 - OK, otherwise error code
*/ staticint
svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, struct list_head *update_list, struct list_head *insert_list, struct list_head *remove_list, struct list_head *remap_list)
{ unsignedlong last = start + size - 1UL; struct svm_range_list *svms = &p->svms; struct interval_tree_node *node; struct svm_range *prange; struct svm_range *tmp; struct list_head new_list; int r = 0;
/* NOTE(review): the interval-tree iteration loop head that defines 'node',
 * 'next' and 'next_start' is missing here; the lines below are the loop
 * interior — truncated source.
 */
if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
prange->mapped_to_gpu) { /* nothing to do */
} elseif (node->start < start || node->last > last) { /* node intersects the update range and its attributes * will change. Clone and split it, apply updates only * to the overlapping part
*/ struct svm_range *old = prange;
prange = svm_range_clone(old); if (!prange) {
r = -ENOMEM; goto out;
}
if (node->start < start) {
pr_debug("change old range start\n");
r = svm_range_split_head(prange, start,
insert_list, remap_list); if (r) goto out;
} if (node->last > last) {
pr_debug("change old range last\n");
r = svm_range_split_tail(prange, last,
insert_list, remap_list); if (r) goto out;
}
} else { /* The node is contained within start..last, * just update it
*/
list_add(&prange->update_list, update_list);
}
/* insert a new node if needed */ if (node->start > start) {
r = svm_range_split_new(svms, start, node->start - 1,
READ_ONCE(max_svm_range_pages),
&new_list, update_list); if (r) goto out;
}
node = next;
start = next_start;
}
/* add a final range at the end if needed */ if (start <= last)
r = svm_range_split_new(svms, start, last,
READ_ONCE(max_svm_range_pages),
&new_list, update_list);
/*
 * NOTE(review): scraped website footer, not part of the kernel source.
 * Translated from German: "The information on this website has been compiled
 * carefully to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the syntax highlighting and the measurement are still experimental."
 * Remove this trailer when restoring the file from upstream.
 */