/* * Copyright (c) 2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* Vport UC/MC hash node: per-MAC bookkeeping entry for a vport. */
struct vport_addr {
	struct l2addr_node node;
	u8 action;
	u16 vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFs */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};
/* NOTE(review): fragment — the enclosing function signatures are not visible
 * in this chunk, so the code below is left byte-identical.  It appears to be
 * two spliced command-builder bodies: first a MODIFY_NIC_VPORT_CONTEXT
 * request (change_event field select), then a MODIFY_ESW_VPORT_CONTEXT
 * request programming cvlan strip/insert.  TODO: confirm against the full
 * file before editing.
 */
MLX5_SET(modify_nic_vport_context_in, in,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
/* other_vport must be set when targeting a vport other than our own */
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); if (vport || mlx5_core_is_ecpf(dev))
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
/* --- second fragment: esw vport context cvlan strip/insert setup --- */
if (set_flags & SET_VLAN_STRIP)
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_strip, 1);
if (set_flags & SET_VLAN_INSERT) { if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) { /* insert either if vlan exist in packet or not */
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_insert,
MLX5_VPORT_CVLAN_INSERT_ALWAYS);
} else { /* insert only if no vlan in packet */
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_insert,
MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
}
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.cvlan_pcp, qos);
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.cvlan_id, vlan);
}
/* Mark both strip and insert fields as being modified by this command */
MLX5_SET(modify_esw_vport_context_in, in,
field_select.vport_cvlan_strip, 1);
MLX5_SET(modify_esw_vport_context_in, in,
field_select.vport_cvlan_insert, 1);
/* NOTE(review): fragment — spliced bodies of the UC/MC address add/del
 * helpers (signatures not visible in this chunk); left byte-identical.
 * First part: add a UC MAC to the L2 (MPFS) table and legacy FDB.
 */
/* Skip mlx5_mpfs_add_mac for eswitch_managers, * it is already done by its netdev in mlx5e_execute_l2_action
*/ if (mlx5_esw_is_manager_vport(esw, vport)) goto fdb_add;
err = mlx5_mpfs_add_mac(esw->dev, mac); if (err) {
esw_warn(esw->dev, "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
mac, vport, err); return err;
}
vaddr->mpfs = true;
fdb_add: /* SRIOV is enabled: Forward UC MAC to vport */ if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) {
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
/* NOTE(review): second fragment — UC MAC deletion path. */
/* Skip mlx5_mpfs_del_mac for eswitch managers, * it is already done by its netdev in mlx5e_execute_l2_action
*/ if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport)) goto fdb_del;
err = mlx5_mpfs_del_mac(esw->dev, mac); if (err)
esw_warn(esw->dev, "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
mac, vport, err);
vaddr->mpfs = false;
fdb_del: if (vaddr->flow_rule)
mlx5_del_flow_rules(vaddr->flow_rule);
vaddr->flow_rule = NULL;
/* NOTE(review): third fragment — MC MAC add path (refcounted). */
esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
/* Add this multicast mac to all the mc promiscuous vports */
update_allmulti_vports(esw, vaddr, esw_mc);
add: /* If the multicast mac is added as a result of mc promiscuous vport, * don't increment the multicast ref count
*/ if (!vaddr->mc_promisc)
esw_mc->refcnt++;
/* Forward MC MAC to vport */
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
vport, mac, vaddr->flow_rule,
esw_mc->refcnt, esw_mc->uplink_rule); return 0;
}
/* NOTE(review): fourth fragment — MC MAC delete path; drops refcount and
 * tears down rules once the last non-promisc user is gone.
 */
esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); if (!esw_mc) {
esw_warn(esw->dev, "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
mac, vport); return -EINVAL;
}
esw_debug(esw->dev, "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
vport, mac, vaddr->flow_rule, esw_mc->refcnt,
esw_mc->uplink_rule);
if (vaddr->flow_rule)
mlx5_del_flow_rules(vaddr->flow_rule);
vaddr->flow_rule = NULL;
/* If the multicast mac is added as a result of mc promiscuous vport, * don't decrement the multicast ref count.
*/ if (vaddr->mc_promisc || (--esw_mc->refcnt > 0)) return 0;
/* Remove this multicast mac from all the mc promiscuous vports */
update_allmulti_vports(esw, vaddr, esw_mc);
if (esw_mc->uplink_rule)
mlx5_del_flow_rules(esw_mc->uplink_rule);
l2addr_hash_del(esw_mc); return 0;
}
/* NOTE(review): fragment — the declaration below is truncated (no body end)
 * and is spliced with bodies of other functions; left byte-identical.
 * FIXME: "staticvoid" is a fused-keyword typo for "static void".
 */
/* Apply vport UC/MC list to HW l2 table and FDB table */ staticvoid esw_apply_vport_addr_list(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int list_type)
{ bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
vport_addr_action vport_addr_add;
vport_addr_action vport_addr_del; struct vport_addr *addr; struct l2addr_node *node; struct hlist_head *hash; struct hlist_node *tmp; int hi;
/* NOTE(review): loop below uses i/size/mac_list/vport_num which are not
 * declared in this chunk — presumably from a different (update-list)
 * function body spliced in here; confirm against the full file.
 */
for (i = 0; i < size; i++) { if (is_uc && !is_valid_ether_addr(mac_list[i])) continue;
if (!is_uc && !is_multicast_ether_addr(mac_list[i])) continue;
addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); if (addr) {
addr->action = MLX5_ACTION_NONE; /* If this mac was previously added because of allmulti * promiscuous rx mode, its now converted to be original * vport mac.
*/ if (addr->mc_promisc) { struct esw_mc_addr *esw_mc =
l2addr_hash_find(esw->mc_table,
mac_list[i], struct esw_mc_addr); if (!esw_mc) {
esw_warn(esw->dev, "Failed to MAC(%pM) in mcast DB\n",
mac_list[i]); continue;
}
esw_mc->refcnt++;
addr->mc_promisc = false;
} continue;
}
/* NOTE(review): next fragment appears to come from the vport-enable path
 * (restores MAC/GUID, marks vport enabled/trusted, sets vhca mapping).
 */
/* Host PF has its own mac/guid. */ if (vport_num) {
mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
vport->info.mac);
mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
vport->info.node_guid);
}
ret = esw_vport_setup(esw, vport); if (ret) goto done;
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true; if (vport->vport != MLX5_VPORT_PF &&
(vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
esw->enabled_ipsec_vf_count++;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well * in smartNIC as it's a vport group manager.
*/ if (mlx5_esw_is_manager_vport(esw, vport_num) ||
(!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
ret = mlx5_esw_vport_vhca_id_set(esw, vport_num); if (ret) goto err_vhca_mapping;
}
/* External controller host PF has factory programmed MAC. * Read it from the device.
*/ if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev:	Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns functions changed
 * raw output memory pointer from device on success. Otherwise returns ERR_PTR.
 * Caller must free the memory using kvfree() when valid pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	if (!err)
		return out;

	/* BUGFIX: the original fragment ended here without freeing the
	 * buffer or returning on the error path (leak + unterminated
	 * function body).
	 */
	kvfree(out);
	return ERR_PTR(err);
}
/* Enable the external host PF HCA on ECPF devices; no-op elsewhere.
 * Returns 0 on success or a negative errno from the firmware command.
 * BUGFIX: "staticint" (fused keywords, compile error) -> "static int".
 */
static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* Once vport and representor are ready, take out the external host PF
	 * out of initializing state. Enabling HCA clears the iser->initializing
	 * bit and host PF driver loading can progress.
	 */
	return mlx5_cmd_host_pf_enable_hca(dev);
}
/* Disable the external host PF HCA on ECPF devices; no-op elsewhere.
 * BUGFIX: "staticvoid" (fused keywords, compile error) -> "static void".
 */
static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return;

	mlx5_cmd_host_pf_disable_hca(dev);
}
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs * whichever are present on the eswitch.
*/ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, enum mlx5_eswitch_vport_event enabled_events)
{ bool pf_needed; int ret;
/* Enable PF vport */ if (pf_needed) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
enabled_events); if (ret) return ret;
}
/* Enable external host PF HCA */
ret = host_pf_enable_hca(esw->dev); if (ret) goto pf_hca_err;
/* Enable ECPF vport */ if (mlx5_ecpf_vport_exists(esw->dev)) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events); if (ret) goto ecpf_err;
}
/* Enable ECVF vports */ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
ret = mlx5_eswitch_load_ec_vf_vports(esw,
esw->esw_funcs.num_ec_vfs,
enabled_events); if (ret) goto ec_vf_err;
}
/* Enable VF vports */
ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
enabled_events); if (ret) goto vf_err; return 0;
vf_err: if (mlx5_core_ec_sriov_enabled(esw->dev))
mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
ec_vf_err: if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
host_pf_disable_hca(esw->dev);
pf_hca_err: if (pf_needed)
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF); return ret;
}
/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
 * whichever are previously enabled on the eswitch.
 *
 * Teardown runs in the reverse order of mlx5_eswitch_enable_pf_vf_vports().
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;

	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_core_ec_sriov_enabled(dev))
		mlx5_eswitch_unload_ec_vf_vports(esw,
						 esw->esw_funcs.num_ec_vfs);

	if (mlx5_ecpf_vport_exists(dev))
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);

	host_pf_disable_hca(dev);

	if (mlx5_core_is_ecpf_esw_manager(dev) ||
	    esw->mode == MLX5_ESWITCH_LEGACY)
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
/* NOTE(review): fragment — the declaration below is spliced with the body
 * of a different function (it references "dev"/"total_vports" that are not
 * declared here, and returns values from a void function); left
 * byte-identical.  FIXME: "staticvoid" is a fused-keyword typo.
 */
staticvoid mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{ struct devlink *devlink = priv_to_devlink(esw->dev); union devlink_param_value val; int err;
/* NOTE(review): body below looks like ACL namespaces init — presumably
 * mlx5_esw_acls_ns_init(); confirm against the full file.
 */
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
err = mlx5_fs_egress_acls_init(dev, total_vports); if (err) return err;
} else {
esw_warn(dev, "egress ACL is not supported by FW\n");
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
err = mlx5_fs_ingress_acls_init(dev, total_vports); if (err) goto err;
} else {
esw_warn(dev, "ingress ACL is not supported by FW\n");
}
esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED; return 0;
err: if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
mlx5_fs_egress_acls_cleanup(dev); return err;
}
/* NOTE(review): next fragment — ACL namespaces cleanup tail (enclosing
 * function header not visible).
 */
esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED; if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
mlx5_fs_ingress_acls_cleanup(dev); if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
mlx5_fs_egress_acls_cleanup(dev);
}
/** * mlx5_eswitch_enable_locked - Enable eswitch * @esw: Pointer to eswitch * @num_vfs: Enable eswitch for given number of VFs. This is optional. * Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS. * Caller should pass num_vfs > 0 when enabling eswitch for * vf vports. Caller should pass num_vfs = 0, when eswitch * is enabled without sriov VFs or when caller * is unaware of the sriov state of the host PF on ECPF based * eswitch. Caller should pass < 0 when num_vfs should be * completely ignored. This is typically the case when eswitch * is enabled without sriov regardless of PF/ECPF system. * mlx5_eswitch_enable_locked() Enables eswitch in either legacy or offloads * mode. If num_vfs >=0 is provided, it setup VF related eswitch vports. * It returns 0 on success or error code on failure.
*/ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{ int err;
devl_assert_locked(priv_to_devlink(esw->dev));
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); return -EOPNOTSUPP;
}
mlx5_eswitch_get_devlink_param(esw);
/* NOTE(review): function is truncated here — rest of the body is not in
 * this chunk.
 */
err = mlx5_esw_acls_ns_init(esw); if (err) return err;
/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch switch for given number of VFs.
 *		Caller must pass num_vfs > 0 when enabling eswitch for
 *		vf vports.
 * mlx5_eswitch_enable() returns 0 on success or error code on failure.
 */
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	bool toggle_lag;
	int ret = 0;

	if (!mlx5_esw_allowed(esw))
		return 0;

	devl_assert_locked(priv_to_devlink(esw->dev));

	toggle_lag = !mlx5_esw_is_fdb_created(esw);

	if (toggle_lag)
		mlx5_lag_disable_change(esw->dev);

	if (!mlx5_esw_is_fdb_created(esw)) {
		ret = mlx5_eswitch_enable_locked(esw, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
					MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		/* If this is the ECPF the number of host VFs is managed via the
		 * eswitch function change event handler, and any num_vfs provided
		 * here are intended to be EC VFs.
		 */
		if (!mlx5_core_is_ecpf(esw->dev)) {
			ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
			if (!ret)
				esw->esw_funcs.num_vfs = num_vfs;
		} else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
			/* BUGFIX: "elseif" (fused keywords) -> "else if" */
			ret = mlx5_eswitch_load_ec_vf_vports(esw, num_vfs, vport_events);
			if (!ret)
				esw->esw_funcs.num_ec_vfs = num_vfs;
		}
	}

	if (toggle_lag)
		mlx5_lag_enable_change(esw->dev);

	return ret;
}
/* When disabling sriov, free driver level resources. */
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!mlx5_esw_allowed(esw))
		return;

	devl_assert_locked(priv_to_devlink(esw->dev));
	/* If driver is unloaded, this function is called twice by remove_one()
	 * and mlx5_unload(). Prevent the second call.
	 */
	if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
		return;

	if (!mlx5_core_is_ecpf(esw->dev)) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
		if (clear_vf)
			mlx5_eswitch_clear_vf_vports_info(esw);
	} else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
		/* BUGFIX: "elseif" (fused keywords) -> "else if" */
		mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
		if (clear_vf)
			mlx5_eswitch_clear_ec_vf_vports_info(esw);
	}

	if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
		struct devlink *devlink = priv_to_devlink(esw->dev);

		devl_rate_nodes_destroy(devlink);
	}
	/* Destroy legacy fdb when disabling sriov in legacy mode. */
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		mlx5_eswitch_disable_locked(esw);

	if (!mlx5_core_is_ecpf(esw->dev))
		esw->esw_funcs.num_vfs = 0;
	else
		esw->esw_funcs.num_ec_vfs = 0;
}
/* NOTE(review): fragment — mlx5_eswitch_disable_locked() below is spliced
 * with the tail of an unrelated QUERY_HCA_CAP command builder; left
 * byte-identical.
 */
/* Free resources for corresponding eswitch mode. It is called by devlink * when changing eswitch mode or modprobe when unloading driver.
*/ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
{ struct devlink *devlink = priv_to_devlink(esw->dev);
/* Notify eswitch users that it is exiting from current mode. * So that it can do necessary cleanup before the eswitch is disabled.
*/
mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
/* NOTE(review): lines below reference "in"/"out"/"opmod"/"dev" that are
 * not declared here — body of a different (query-hca-cap) helper.
 */
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
MLX5_SET(query_hca_cap_in, in, other_function, true); return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
/* NOTE(review): mlx5_esw_sf_max_hpf_functions() below is truncated — its
 * body is cut short and fused with fragments of other functions.
 */
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
{ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); void *query_ctx; void *hca_caps; int err;
if (!mlx5_core_is_ecpf(dev)) {
*max_sfs = 0; return 0;
}
query_ctx = kzalloc(query_out_sz, GFP_KERNEL); if (!query_ctx) return -ENOMEM;
err = mlx5_query_hca_cap_host_pf(dev, query_ctx); if (err) goto out_free;
/* NOTE(review): fragment — eswitch init info print (different function). */
esw_info(dev, "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
esw->total_vports,
MLX5_MAX_UC_PER_VPORT(dev),
MLX5_MAX_MC_PER_VPORT(dev)); return 0;
/* NOTE(review): fragment — set-vport-MAC path: warns on spoofchk with an
 * invalid MAC, programs MAC and derived node GUID.
 */
if (evport->info.spoofchk && !is_valid_ether_addr(mac))
mlx5_core_warn(esw->dev, "Set invalid MAC while spoofchk is on, vport(%d)\n",
vport_num);
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac); if (err) {
mlx5_core_warn(esw->dev, "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
vport_num, err); return err;
}
node_guid_gen_from_mac(&node_guid, mac);
err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid); if (err)
mlx5_core_warn(esw->dev, "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
vport_num, err);
/* NOTE(review): fragment — QUERY_VPORT_COUNTER command build (presumably
 * from the get-vport-stats path; confirm against the full file).
 */
out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM;
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out); if (err) goto free_out;
/** * mlx5_esw_hold() - Try to take a read lock on esw mode lock. * @mdev: mlx5 core device. * * Should be called by esw resources callers. * * Return: true on success or false.
*/ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
{ struct mlx5_eswitch *esw = mdev->priv.eswitch;
/* e.g. VF doesn't have eswitch so nothing to do */ if (!mlx5_esw_allowed(esw)) returntrue;
if (down_read_trylock(&esw->mode_lock) != 0) { if (esw->eswitch_operation_in_progress) {
up_read(&esw->mode_lock); returnfalse;
} returntrue;
}
/* NOTE(review): fragment — tail of a lock-release helper (presumably
 * mlx5_esw_release/unlock; enclosing signature not visible); left
 * byte-identical.
 */
if (mlx5_esw_allowed(esw))
atomic64_dec_if_positive(&esw->user_count);
}
/** * mlx5_esw_try_lock() - Take a write lock on esw mode lock. * @esw: eswitch device. * * Should be called by esw mode change routine. * * Return: * * 0 - esw mode if successfully locked and refcount is 0. * * -EBUSY - refcount is not 0. * * -EINVAL - In the middle of switching mode or lock is already held.
*/ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
{ if (down_write_trylock(&esw->mode_lock) == 0) return -EINVAL;
/* NOTE(review): mlx5_esw_try_lock() is truncated here — the rest of its
 * body is not in this chunk.
 */
/** * mlx5_eswitch_get_total_vports - Get total vports of the eswitch * * @dev: Pointer to core device * * mlx5_eswitch_get_total_vports returns total number of eswitch vports.
*/
/* FIXME: "conststruct" is a fused-keyword typo for "const struct"; the
 * function body is also truncated after the local declaration below.
 */
u16 mlx5_eswitch_get_total_vports(conststruct mlx5_core_dev *dev)
{ struct mlx5_eswitch *esw;
/* NOTE(review): the German text that followed here is website-extraction
 * residue, not source code.  Commented out (and translated) so it cannot be
 * mistaken for program text:
 * "The information on this website was compiled carefully to the best of
 * our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the color syntax highlighting and the measurement are still
 * experimental."
 */