/* * We may see a timer interrupt after vcpu_put() has been called which * sets the CPU's vcpu pointer to NULL, because even though the timer * has been disabled in timer_save_state(), the hardware interrupt * signal may not have been retired from the interrupt controller yet.
*/ if (!vcpu) return IRQ_HANDLED;
/* * Returns the earliest expiration time in ns among guest timers. * Note that it will return 0 if none of timers can fire.
*/ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
u64 min_delta = ULLONG_MAX; int i;
for (i = 0; i < nr_timers(vcpu); i++) { struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
/* * Check that the timer has really expired from the guest's * PoV (NTP on the host may have forced it to expire * early). If we should have slept longer, restart it.
*/
ns = kvm_timer_earliest_exp(vcpu); if (unlikely(ns)) {
hrtimer_forward_now(hrt, ns_to_ktime(ns)); return HRTIMER_RESTART;
}
/* * Check that the timer has really expired from the guest's * PoV (NTP on the host may have forced it to expire * early). If not ready, schedule for a later time.
*/
ns = kvm_timer_compute_delta(ctx); if (unlikely(ns)) {
hrtimer_forward_now(hrt, ns_to_ktime(ns)); return HRTIMER_RESTART;
}
/* Populate the device bitmap with the timer states */
regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
KVM_ARM_DEV_EL1_PTIMER); if (kvm_timer_should_fire(vtimer))
regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER; if (kvm_timer_should_fire(ptimer))
regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}
/*
 * Publish the interrupt status (ISTATUS) bit of an EL1 timer into the
 * guest-visible CTL value for a vCPU currently running in a hyp context.
 */
static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
{
	/*
	 * Paper over NV2 brokenness by publishing the interrupt status
	 * bit. This still results in a poor quality of emulation (guest
	 * writes will have no effect until the next exit).
	 *
	 * But hey, it's fast, right?
	 */
	if (is_hyp_ctxt(ctx->vcpu) &&
	    (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
		unsigned long val = timer_get_ctl(ctx);

		__assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
		timer_set_ctl(ctx, val);
	}
}
/* Only called for a fully emulated timer */ staticvoid timer_emulate(struct arch_timer_context *ctx)
{ bool should_fire = kvm_timer_should_fire(ctx);
trace_kvm_timer_emulate(ctx, should_fire);
if (should_fire != ctx->irq.level)
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
kvm_timer_update_status(ctx, should_fire);
/* * If the timer can fire now, we don't need to have a soft timer * scheduled for the future. If the timer cannot fire at all, * then we also don't need a soft timer.
*/ if (should_fire || !kvm_timer_irq_can_fire(ctx)) return;
case TIMER_VTIMER: case TIMER_HVTIMER:
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
cval = read_sysreg_el0(SYS_CNTV_CVAL);
if (has_broken_cntvoff())
cval -= timer_get_offset(ctx);
timer_set_cval(ctx, cval);
/* Disable the timer */
write_sysreg_el0(0, SYS_CNTV_CTL);
isb();
/* * The kernel may decide to run userspace after * calling vcpu_put, so we reset cntvoff to 0 to * ensure a consistent read between user accesses to * the virtual counter and kernel access to the * physical counter of non-VHE case. * * For VHE, the virtual counter uses a fixed virtual * offset of zero, so no need to zero CNTVOFF_EL2 * register, but this is actually useful when switching * between EL1/vEL2 with NV. * * Do it unconditionally, as this is either unavoidable * or dirt cheap.
*/
set_cntvoff(0); break; case TIMER_PTIMER: case TIMER_HPTIMER:
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
cval = read_sysreg_el0(SYS_CNTP_CVAL);
cval -= timer_get_offset(ctx);
timer_set_cval(ctx, cval);
/* Disable the timer */
write_sysreg_el0(0, SYS_CNTP_CTL);
isb();
set_cntpoff(0); break; case NR_KVM_TIMERS:
BUG();
}
/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_vtimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest time will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}
/* * Update the timer output so that it is likely to match the * state we're about to restore. If the timer expires between * this point and the register restoration, we'll take the * interrupt anyway.
*/
kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
if (irqchip_in_kernel(vcpu->kvm))
phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
/* * Update the timer output so that it is likely to match the * state we're about to restore. If the timer expires between * this point and the register restoration, we'll take the * interrupt anyway.
*/
kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
/* * When using a userspace irqchip with the architected timers and a * host interrupt controller that doesn't support an active state, we * must still prevent continuously exiting from the guest, and * therefore mask the physical interrupt by disabling it on the host * interrupt controller when the virtual level is high, such that the * guest can make forward progress. Once we detect the output level * being de-asserted, we unmask the interrupt again so that we exit * from the guest when the timer fires.
*/ if (vtimer->irq.level)
disable_percpu_irq(host_vtimer_irq); else
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
/* If _pred is true, set bit in _set, otherwise set it in _clr */
#define assign_clear_set_bit(_pred, _bit, _clr, _set)			\
	do {								\
		if (_pred)						\
			(_set) |= (_bit);				\
		else							\
			(_clr) |= (_bit);				\
	} while (0)
/* * We only ever unmap the vtimer irq on a VHE system that runs nested * virtualization, in which case we have both a valid emul_vtimer, * emul_ptimer, direct_vtimer, and direct_ptimer. * * Since this is called from kvm_timer_vcpu_load(), a change between * vEL2 and vEL1/0 will have just happened, and the timer_map will * represent this, and therefore we switch the emul/direct mappings * below.
*/
hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer)); if (hw < 0) {
kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer));
kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer));
ret = kvm_vgic_map_phys_irq(vcpu,
map->direct_vtimer->host_timer_irq,
timer_irq(map->direct_vtimer),
&arch_timer_irq_ops);
WARN_ON_ONCE(ret);
ret = kvm_vgic_map_phys_irq(vcpu,
map->direct_ptimer->host_timer_irq,
timer_irq(map->direct_ptimer),
&arch_timer_irq_ops);
WARN_ON_ONCE(ret);
}
}
/* * No trapping gets configured here with nVHE. See * __timer_enable_traps(), which is where the stuff happens.
*/ if (!has_vhe()) return;
/* * Our default policy is not to trap anything. As we progress * within this function, reality kicks in and we start adding * traps based on emulation requirements.
*/
tvt = tpt = tvc = tpc = false;
tvt02 = tpt02 = false;
/* * NV2 badly breaks the timer semantics by redirecting accesses to * the EL1 timer state to memory, so let's call ECV to the rescue if * available: we trap all CNT{P,V}_{CTL,CVAL,TVAL}_EL0 accesses. * * The treatment slightly varies depending whether we run a nVHE or * VHE guest: nVHE will use the _EL0 registers directly, while VHE * will use the _EL02 accessors. This translates in different trap * bits. * * None of the trapping is required when running in non-HYP context, * unless required by the L1 hypervisor settings once we advertise * ECV+NV in the guest, or that we need trapping for other reasons.
*/ if (cpus_have_final_cap(ARM64_HAS_ECV) && is_hyp_ctxt(vcpu)) { if (vcpu_el2_e2h_is_set(vcpu))
tvt02 = tpt02 = true; else
tvt = tpt = true;
}
/* * We have two possibility to deal with a physical offset: * * - Either we have CNTPOFF (yay!) or the offset is 0: * we let the guest freely access the HW * * - or neither of these condition apply: * we trap accesses to the HW, but still use it * after correcting the physical offset
*/ if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
tpt = tpc = true;
/* * For the poor sods that could not correctly substract one value * from another, trap the full virtual timer and counter.
*/ if (has_broken_cntvoff() && timer_get_offset(map->direct_vtimer))
tvt = tvc = true;
/* * Apply the enable bits that the guest hypervisor has requested for * its own guest. We can only add traps that wouldn't have been set * above. * Implementation choices: we do not support NV when E2H=0 in the * guest, and we don't support configuration where E2H is writable * by the guest (either FEAT_VHE or FEAT_E2H0 is implemented, but * not both). This simplifies the handling of the EL1NV* bits.
*/ if (is_nested_ctxt(vcpu)) {
u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
/* Use the VHE format for mental sanity */ if (!vcpu_el2_e2h_is_set(vcpu))
val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;
if (static_branch_likely(&has_gic_active_state)) { if (vcpu_has_nv(vcpu))
kvm_timer_vcpu_load_nested_switch(vcpu, &map);
kvm_timer_vcpu_load_gic(map.direct_vtimer); if (map.direct_ptimer)
kvm_timer_vcpu_load_gic(map.direct_ptimer);
} else {
kvm_timer_vcpu_load_nogic(vcpu);
}
kvm_timer_unblocking(vcpu);
timer_restore_state(map.direct_vtimer); if (map.direct_ptimer)
timer_restore_state(map.direct_ptimer); if (map.emul_vtimer)
timer_emulate(map.emul_vtimer); if (map.emul_ptimer)
timer_emulate(map.emul_ptimer);
timer_save_state(map.direct_vtimer); if (map.direct_ptimer)
timer_save_state(map.direct_ptimer);
/* * Cancel soft timer emulation, because the only case where we * need it after a vcpu_put is in the context of a sleeping VCPU, and * in that case we already factor in the deadline for the physical * timer when scheduling the bg_timer. * * In any case, we re-schedule the hrtimer for the physical timer when * coming back to the VCPU thread in kvm_timer_vcpu_load().
*/ if (map.emul_vtimer)
soft_timer_cancel(&map.emul_vtimer->hrtimer); if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
if (kvm_vcpu_is_blocking(vcpu))
kvm_timer_blocking(vcpu);
}
void kvm_timer_sync_nested(struct kvm_vcpu *vcpu)
{ /* * When NV2 is on, guest hypervisors have their EL1 timer register * accesses redirected to the VNCR page. Any guest action taken on * the timer is postponed until the next exit, leading to a very * poor quality of emulation. * * This is an unmitigated disaster, only papered over by FEAT_ECV, * which allows trapping of the timer registers even with NV2. * Still, this is still worse than FEAT_NV on its own. Meh.
*/ if (!cpus_have_final_cap(ARM64_HAS_ECV)) { /* * For a VHE guest hypervisor, the EL2 state is directly * stored in the host EL1 timers, while the emulated EL1 * state is stored in the VNCR page. The latter could have * been updated behind our back, and we must reset the * emulation of the timers. * * A non-VHE guest hypervisor doesn't have any direct access * to its timers: the EL2 registers trap despite being * notionally direct (we use the EL1 HW, as for VHE), while * the EL1 registers access memory. * * In both cases, process the emulated timers on each guest * exit. Boo.
*/ struct timer_map map;
get_timer_map(vcpu, &map);
/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}
/* * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8 * and to 0 for ARMv7. We provide an implementation that always * resets the timer to be disabled and unmasked and is compliant with * the ARMv7 architecture.
*/ for (int i = 0; i < nr_timers(vcpu); i++)
timer_set_ctl(vcpu_get_timer(vcpu, i), 0);
/* * A vcpu running at EL2 is in charge of the offset applied to * the virtual timer, so use the physical VM offset, and point * the vcpu offset to CNTVOFF_EL2.
*/ if (vcpu_has_nv(vcpu)) { struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
for (int i = 0; i < NR_KVM_TIMERS; i++)
timer_context_init(vcpu, i);
/* Synchronize offsets across timers of a VM if not already provided */ if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
timer_set_offset(vcpu_ptimer(vcpu), 0);
}
switch (regid) { case KVM_REG_ARM_TIMER_CTL:
timer = vcpu_vtimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); break; case KVM_REG_ARM_TIMER_CNT: if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
&vcpu->kvm->arch.flags)) {
timer = vcpu_vtimer(vcpu);
timer_set_offset(timer, kvm_phys_timer_read() - value);
} break; case KVM_REG_ARM_TIMER_CVAL:
timer = vcpu_vtimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); break; case KVM_REG_ARM_PTIMER_CTL:
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); break; case KVM_REG_ARM_PTIMER_CNT: if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
&vcpu->kvm->arch.flags)) {
timer = vcpu_ptimer(vcpu);
timer_set_offset(timer, kvm_phys_timer_read() - value);
} break; case KVM_REG_ARM_PTIMER_CVAL:
timer = vcpu_ptimer(vcpu);
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); break;
default: return -1;
}
return 0;
}
static u64 read_timer_ctl(struct arch_timer_context *timer)
{ /* * Set ISTATUS bit if it's expired. * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit * regardless of ENABLE bit for our implementation convenience.
*/
u32 ctl = timer_get_ctl(timer);
if (!kvm_timer_compute_delta(timer))
ctl |= ARCH_TIMER_CTRL_IT_STAT;
return ctl;
}
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_context *ctx;
	int treg;

	/* Map the userspace register ID onto a timer context and register. */
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		ctx = vcpu_vtimer(vcpu);
		treg = TIMER_REG_CTL;
		break;
	case KVM_REG_ARM_TIMER_CNT:
		ctx = vcpu_vtimer(vcpu);
		treg = TIMER_REG_CNT;
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		ctx = vcpu_vtimer(vcpu);
		treg = TIMER_REG_CVAL;
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		ctx = vcpu_ptimer(vcpu);
		treg = TIMER_REG_CTL;
		break;
	case KVM_REG_ARM_PTIMER_CNT:
		ctx = vcpu_ptimer(vcpu);
		treg = TIMER_REG_CNT;
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		ctx = vcpu_ptimer(vcpu);
		treg = TIMER_REG_CVAL;
		break;
	default:
		/* Unknown register ID */
		return (u64)-1;
	}

	return kvm_arm_timer_read(vcpu, ctx, treg);
}
if (kvm_vgic_global_state.no_hw_deactivation) { struct fwnode_handle *fwnode; struct irq_data *data;
fwnode = irq_domain_alloc_named_fwnode("kvm-timer"); if (!fwnode) return -ENOMEM;
/* Assume both vtimer and ptimer in the same parent */
data = irq_get_irq_data(host_vtimer_irq);
domain = irq_domain_create_hierarchy(data->domain, 0,
NR_KVM_TIMERS, fwnode,
&timer_domain_ops, NULL); if (!domain) {
irq_domain_free_fwnode(fwnode); return -ENOMEM;
}
/* * CNTVOFF_EL2 is broken on some implementations. For those, we trap * all virtual timer/counter accesses, requiring FEAT_ECV. * * However, a hypervisor supporting nesting is likely to mitigate the * erratum at L0, and not require other levels to mitigate it (which * would otherwise be a terrible performance sink due to trap * amplification). * * Given that the affected HW implements both FEAT_VHE and FEAT_E2H0, * and that NV is likely not to (because of limitations of the * architecture), only enable the workaround when FEAT_VHE and * FEAT_E2H0 are both detected. Time will tell if this actually holds.
*/
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
mmfr4 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR4_EL1); if (SYS_FIELD_GET(ID_AA64MMFR1_EL1, VH, mmfr1) &&
!SYS_FIELD_GET(ID_AA64MMFR4_EL1, E2H0, mmfr4) &&
SYS_FIELD_GET(ID_AA64MMFR0_EL1, ECV, mmfr0) &&
(has_vhe() || has_hvhe()) &&
cpus_have_final_cap(ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF)) {
static_branch_enable(&broken_cntvoff_key);
kvm_info("Broken CNTVOFF_EL2, trapping virtual timer\n");
}
}
int __init kvm_timer_hyp_init(bool has_gic)
{ struct arch_timer_kvm_info *info; int err;
info = arch_timer_get_kvm_info();
timecounter = &info->timecounter;
if (!timecounter->cc) {
kvm_err("kvm_arch_timer: uninitialized timecounter\n"); return -ENODEV;
}
/* A timer IRQ has fired, but no matching timer was found? */
WARN_RATELIMIT(1, "timer INTID%d unknown\n", vintid);
returnfalse;
}
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	/* Nothing to do if the timers are already wired up. */
	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    timer_irq(map.direct_vtimer),
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    timer_irq(map.direct_ptimer),
					    &arch_timer_irq_ops);
		if (ret)
			return ret;
	}

no_vgic:
	timer->enabled = 1;
	return 0;
}
/*
 * If we have CNTPOFF, permanently set CNTHCTL_EL2.ECV so that the
 * physical offset register is usable.
 */
void kvm_timer_init_vhe(void)
{
	if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
		sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
}
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{ int __user *uaddr = (int __user *)(long)attr->addr; int irq, idx, ret = 0;
if (!irqchip_in_kernel(vcpu->kvm)) return -EINVAL;
if (get_user(irq, uaddr)) return -EFAULT;
if (!(irq_is_ppi(irq))) return -EINVAL;
mutex_lock(&vcpu->kvm->arch.config_lock);
if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
&vcpu->kvm->arch.flags)) {
ret = -EBUSY; goto out;
}
switch (attr->attr) { case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
idx = TIMER_VTIMER; break; case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
idx = TIMER_PTIMER; break; case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
idx = TIMER_HVTIMER; break; case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
idx = TIMER_HPTIMER; break; default:
ret = -ENXIO; goto out;
}
/* * We cannot validate the IRQ unicity before we run, so take it at * face value. The verdict will be given on first vcpu run, for each * vcpu. Yes this is late. Blame it on the stupid API.
*/
vcpu->kvm->arch.timer_data.ppi[idx] = irq;
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	/* Only the per-timer IRQ attributes are supported. */
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		return 0;
	default:
		return -ENXIO;
	}
}
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset)
{
	int err = 0;

	/* Reject requests with reserved bits set. */
	if (offset->reserved)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (kvm_trylock_all_vcpus(kvm)) {
		/* Some vCPU could be acquired; bail out. */
		err = -EBUSY;
	} else {
		set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);

		/*
		 * If userspace decides to set the offset using this
		 * API rather than merely restoring the counter
		 * values, the offset applies to both the virtual and
		 * physical views.
		 */
		kvm->arch.timer_data.voffset = offset->counter_offset;
		kvm->arch.timer_data.poffset = offset->counter_offset;

		kvm_unlock_all_vcpus(kvm);
	}

	mutex_unlock(&kvm->lock);

	return err;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.20 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.