/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
    !v->using_unified_queue) {
	struct dpg_pause_state new_state;

	/* We can safely return early here because we've cancelled the
	 * delayed work so there is no one else to set it to false
	 * and we don't care if someone else sets it to true.
	 */

	mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_UNGATE);
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
    !adev->vcn.inst[ring->me].using_unified_queue)
	atomic_dec(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

/**
 * vcn_v2_5_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			adev->vcn.inst[i].num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
			adev->vcn.inst[i].num_enc_rings = 2;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	}

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
				      &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
					      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
					      &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				      VCN_2_6__SRCID_UVD_POISON,
				      &adev->vcn.inst[j].ras_poison_irq);
		if (r)
			return r;

		r = amdgpu_vcn_sw_init(adev, j);
		if (r)
			return r;

		/* Override the work func */
		adev->vcn.inst[j].idle_work.work.func = vcn_v2_5_idle_work_handler;
	}
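
	/* Advertise soft/full reset support; per-queue reset is only exposed
	 * when not running under SR-IOV.
	 */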
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
adev->vcn.ip_dump = NULL;
} else {
adev->vcn.ip_dump = ptr;
}
	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, r, idx;
	struct amdgpu_device *adev = ip_block->adev;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
}
drm_dev_exit(idx);
}
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
amdgpu_vcn_sysfs_reset_mask_fini(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;

		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
}
kfree(adev->vcn.ip_dump);
return 0;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

if (amdgpu_sriov_vf(adev))
r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		ring = &adev->vcn.inst[j].ring_dec;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}
	}

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

cancel_delayed_work_sync(&vinst->idle_work);
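
		/* Gate the instance if DPG is in use, or if it has not been
		 * gated yet and the engine still reports activity.
		 */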
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(vinst->cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
}
return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v2_5_hw_fini(ip_block);
	if (r)
		return r;

for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
}
return 0;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
}

	if (adev->vcn.harvest_config & (1 << i))
		return;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
/* set the write pointer delay */
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for ring buffer */
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
return 0;
}

static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared =
		adev->vcn.inst[i].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, true, i);
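
	/* With dynamic power gating the start sequence is driven through the
	 * DPG path (optionally via indirect SRAM) instead of the direct
	 * register programming below.
	 */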
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

/* VCN global tiling registers */
WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable LMI MC and UMC channels */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
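
	/* Poll UVD_STATUS for the VCPU report bit; if the VCPU does not come
	 * up, pulse BLK_RST and retry, for up to 10 attempts.
	 */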
	for (k = 0; k < 10; ++k) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
			if (status & 2)
				break;
			if (amdgpu_emu_mode == 1)
				msleep(500);
			else
				mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
r = -1;
}
if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
}
/* enable master interrupt */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* clear the busy bit of VCN_STATUS */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

	ring = &adev->vcn.inst[i].ring_dec;
	/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
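
	/* Flag the decode queue for a ring reset while its ring buffer is
	 * reprogrammed; the flag is cleared again once programming is done.
	 */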
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* program the RB_BASE for ring buffer */
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
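
	/* Program encode ring 0 (the general purpose encode queue). */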
ring = &adev->vcn.inst[i].ring_enc[0];
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
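
	/* Program encode ring 1 (the low latency encode queue). */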
ring = &adev->vcn.inst[i].ring_enc[1];
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
RREG32_SOC15(VCN, i, mmUVD_STATUS);

	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 * memory descriptor location
	 */
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
/* 2, update vmid of descriptor */
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
/* 3, notify mmsch about the size of this descriptor */
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
/* 4, set resp to zero */
WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
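
	/* Poll the mailbox response, 100us per try for up to 10 tries, until
	 * the MMSCH firmware acknowledges init completion (0x10000002).
	 */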
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
udelay(100);
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
}
if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
}

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
return 0;
}

static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	uint32_t tmp;
	int r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_5_stop_dpg_mode(vinst);
		goto done;
}
/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;
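
	/* Wait for the VCPU and LMI read/write channels to go clean before
	 * stalling the UMC arbiter below.
	 */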
tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
UVD_LMI_STATUS__READ_CLEAN_MASK |
UVD_LMI_STATUS__WRITE_CLEAN_MASK |
UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

/* block LMI UMC channel */
tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
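
	/* With the arbiter stalled, wait for outstanding UMC reads and writes
	 * to drain.
	 */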
tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
RREG32_SOC15(VCN, i, mmUVD_STATUS);

done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return r;
}

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
adev->vcn.inst[i].ring_dec.me = i;
}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
}
}

static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
{
	int r;

	r = vcn_v2_5_stop(vinst);
	if (r)
		return r;

	return vcn_v2_5_start(vinst);
}

static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
return ret;
}

static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
					 UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}