/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004, 2020 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 */
/* Reserved, old SEND_W_INV = 1 << 16,*/
	IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages. Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec. iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
	IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
IB_DEVICE_MANAGED_FLOW_STEERING =
		IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING =
		IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
	/* Placement type attributes */
IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
};
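/*
 * Usage sketch (illustrative, not part of this header): consumers test
 * these bits against ib_device_attr.device_cap_flags, which the core
 * caches in ib_device.attrs; "dev" is a hypothetical struct ib_device
 * pointer.
 *
 *	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
 *		use_fast_registration();	(hypothetical helper)
 */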
enum ib_kernel_cap_flags {
	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory. Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IBK_LOCAL_DMA_LKEY = 1 << 0,
	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
	IBK_INTEGRITY_HANDOVER = 1 << 1,
	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
	IBK_ON_DEMAND_PAGING = 1 << 2,
	/* IB_MR_TYPE_SG_GAPS is supported */
	IBK_SG_GAPS_REG = 1 << 3,
	/* Driver supports RDMA_NLDEV_CMD_DELLINK */
	IBK_ALLOW_USER_UNREG = 1 << 4,
/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
	/* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
	IBK_UD_TSO = 1 << 6,
	/* ipoib will use the device ops:
	 *   get_vf_config
	 *   get_vf_guid
	 *   get_vf_stats
	 *   set_vf_guid
	 *   set_vf_link_state
	 */
	IBK_VIRTUAL_FUNCTION = 1 << 7,
	/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
IBK_RDMA_NETDEV_OPA = 1 << 8,
};
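/*
 * Usage sketch (illustrative only): kernel ULPs check these bits in
 * ib_device_attr.kernel_cap_flags, e.g. before relying on on-demand
 * paging during user MR registration ("dev" is a hypothetical
 * struct ib_device pointer):
 *
 *	if (!(dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
 *		return -EOPNOTSUPP;
 */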
struct ib_rss_caps {
	/*
	 * Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
u32 supported_qpts;
u32 max_rwq_indirection_tables;
u32 max_rwq_indirection_table_size;
};
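/*
 * Usage sketch (illustrative only): testing whether RSS is available for
 * UD QPs, mirroring the supported_qpts encoding documented above; "attr"
 * is a hypothetical struct ib_device_attr pointer.
 *
 *	if (attr->rss_caps.supported_qpts & (1 << IB_QPT_UD))
 *		max_tbl = attr->rss_caps.max_rwq_indirection_table_size;
 */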
enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
IB_TM_CAP_RNDV_RC = 1 << 0,
};
struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
u32 max_sge;
};
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
u64 max_mr_size;
u64 page_size_cap;
u32 vendor_id;
u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
u64 device_cap_flags;
	u64 kernel_cap_flags;
	int max_send_sge;
	int max_recv_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	unsigned int max_pi_fast_reg_page_list_len;
u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
uint64_t timestamp_mask;
	uint64_t hca_core_clock; /* in kHz */
	struct ib_rss_caps rss_caps;
u32 max_wq_type_rq;
	u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps tm_caps;
	struct ib_cq_caps cq_caps;
	u64 max_dm_size;
	/* Max entries for sgl for optimized performance per READ */
	u32 max_sgl_rd;
};
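/*
 * Usage sketch (illustrative only): the core obtains this structure from
 * the driver at registration time and caches it in ib_device.attrs, so
 * consumers can size resources without a fresh query; "dev" and
 * "wanted_sge" are hypothetical.
 *
 *	u32 sge = min_t(u32, wanted_sge, dev->attrs.max_send_sge);
 */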
/**
 * struct rdma_stat_desc
 * @name - The name of the counter
 * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
 * @priv - Driver private information; Core code should not use
 */
struct rdma_stat_desc {
	const char *name;
	unsigned int flags;
	const void *priv;
};
/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *    of counters, which are 64bits and not guaranteed to be written
 *    atomically on 32bits systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *    should be before being updated again.  Stored in jiffies, defaults
 *    to 10 milliseconds, drivers can override the default by specifying
 *    their own value during their allocation routine.
 * @descs - Array of pointers to static descriptors used for the counters
 *    in directory.
 * @is_disabled - A bitmap to indicate each counter is currently disabled
 *    or not.
 * @num_counters - How many hardware counters there are.  If name is
 *    shorter than this number, a kernel oops will result.  Driver authors
 *    are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
 *    in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *    filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex lock; /* Protect lifespan and values[] */
	unsigned long timestamp;
	unsigned long lifespan;
	const struct rdma_stat_desc *descs;
	unsigned long *is_disabled;
	int num_counters;
u64 value[] __counted_by(num_counters);
};
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const struct rdma_stat_desc *descs, int num_counters,
		unsigned long lifespan);
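/*
 * Usage sketch (illustrative only): a driver allocating its per-port
 * stats from a static descriptor table; the counter names here are made
 * up for the example.
 *
 *	static const struct rdma_stat_desc hw_descs[] = {
 *		{ .name = "tx_pkts" },
 *		{ .name = "rx_pkts" },
 *	};
 *
 *	stats = rdma_alloc_hw_stats_struct(hw_descs, ARRAY_SIZE(hw_descs),
 *					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */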
/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management				0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020
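/*
 * Usage sketch (illustrative only): drivers report these bits per port in
 * ib_port_immutable.core_cap_flags, and the core tests them, e.g. to
 * decide whether MAD services can run on a port; "immutable" is a
 * hypothetical struct ib_port_immutable.
 *
 *	if (immutable.core_cap_flags & RDMA_CORE_CAP_IB_MAD)
 *		register_mad_agent_on_port();	(hypothetical helper)
 */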
struct ib_grh {
__be32 version_tclass_flow;
__be16 paylen;
u8 next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};
union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
};
};
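/*
 * Usage sketch (illustrative only): for a RoCEv2/IPv4 packet the 20-byte
 * IPv4 header occupies the tail of the 40-byte GRH area, so the source
 * address can be read directly ("hdr" is a hypothetical
 * union rdma_network_hdr):
 *
 *	__be32 saddr = hdr.roce4grh.saddr;
 */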
/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
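/*
 * Illustrative values, following directly from the conversions described
 * above:
 *
 *	ib_rate_to_mult(IB_RATE_10_GBPS)   returns 4 (10 / 2.5)
 *	ib_rate_to_mbps(IB_RATE_2_5_GBPS)  returns 2500
 */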
/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
 *                            registering any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for a user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
enum ib_mr_type {
IB_MR_TYPE_MEM_REG,
IB_MR_TYPE_SG_GAPS,
IB_MR_TYPE_DM,
IB_MR_TYPE_USER,
IB_MR_TYPE_DMA,
IB_MR_TYPE_INTEGRITY,
};
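/*
 * Usage sketch (illustrative only): kernel consumers pass one of these
 * types to ib_alloc_mr(), e.g. a fast-registration MR able to map up to
 * "max_sg" scatter/gather entries ("pd" and "max_sg" are hypothetical):
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sg);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */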
/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err:     Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		  fail_status;
	struct ib_sig_err sig_err;
};
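/*
 * Usage sketch (illustrative only): a signature-enabled consumer reads
 * this container through ib_check_mr_status() after I/O completes; "mr"
 * is a hypothetical integrity MR and handle_sig_err() a hypothetical
 * helper.
 *
 *	struct ib_mr_status status;
 *
 *	if (!ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &status) &&
 *	    (status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		handle_sig_err(&status.sig_err);
 */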
/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
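/*
 * Illustrative value: mult_to_ib_rate(4) returns IB_RATE_10_GBPS, the
 * inverse of ib_rate_to_mult() above.
 */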
	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
u32 max_rdma_ctxs;
};
enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
IB_QPT_SMI,
IB_QPT_GSI,
IB_QPT_RC = IB_UVERBS_QPT_RC,
IB_QPT_UC = IB_UVERBS_QPT_UC,
IB_QPT_UD = IB_UVERBS_QPT_UD,
IB_QPT_RAW_IPV6,
IB_QPT_RAW_ETHERTYPE,
IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
IB_QPT_MAX,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
IB_QPT_RESERVED1 = 0x1000,
IB_QPT_RESERVED2,
IB_QPT_RESERVED3,
IB_QPT_RESERVED4,
IB_QPT_RESERVED5,
IB_QPT_RESERVED6,
IB_QPT_RESERVED7,
IB_QPT_RESERVED8,
IB_QPT_RESERVED9,
IB_QPT_RESERVED10,
};
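/*
 * Usage sketch (illustrative only): the QP type is selected through
 * ib_qp_init_attr at creation time ("pd", "send_cq" and "recv_cq" are
 * hypothetical):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type = IB_QPT_RC,
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *	};
 *
 *	qp = ib_create_qp(pd, &init_attr);
 */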
/* These are kernel only and can not be issued by userspace */
IB_WR_REG_MR = 0x20,
IB_WR_REG_MR_INTEGRITY,
/* reserve values for low level drivers' internal use. * These values will not be used at all in the ib core layer.
*/
IB_WR_RESERVED1 = 0xf0,
IB_WR_RESERVED2,
IB_WR_RESERVED3,
IB_WR_RESERVED4,
IB_WR_RESERVED5,
IB_WR_RESERVED6,
IB_WR_RESERVED7,
IB_WR_RESERVED8,
IB_WR_RESERVED9,
IB_WR_RESERVED10,
};
struct ib_ud_wr {
	struct ib_send_wr wr;
	struct ib_ah *ah;
	void *header;
	int hlen;
	int mss;
u32 remote_qpn;
u32 remote_qkey;
u16 pkey_index; /* valid for GSI only */
u32 port_num; /* valid for DR SMPs on switch only */
};
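/*
 * Usage sketch (illustrative only): building a UD send; the embedded
 * ib_send_wr is what gets posted, and drivers recover the container with
 * the ud_wr() helper ("ah", "qpn" and "qkey" are hypothetical):
 *
 *	struct ib_ud_wr wr = {
 *		.wr          = { .opcode = IB_WR_SEND },
 *		.ah          = ah,
 *		.remote_qpn  = qpn,
 *		.remote_qkey = qkey,
 *	};
 *
 *	ret = ib_post_send(qp, &wr.wr, NULL);
 */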
/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
IB_MR_REREG_TRANS = 1,
IB_MR_REREG_PD = (1<<1),
IB_MR_REREG_ACCESS = (1<<2),
IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};
struct ib_umem;
enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
	/* The driver failed to destroy the uobject and is being disconnected */
RDMA_REMOVE_DRIVER_FAILURE,
};
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
enum ib_raw_packet_caps {
	/*
	 * Stripping the cvlan from an incoming packet and reporting it in the
	 * matching work completion is supported.
	 */
IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
		IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
	/*
	 * Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
	/*
	 * When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
};
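/*
 * Usage sketch (illustrative only): raw packet capabilities are reported
 * in ib_device_attr.raw_packet_caps ("dev" is a hypothetical
 * struct ib_device pointer):
 *
 *	if (dev->attrs.raw_packet_caps & IB_RAW_PACKET_CAP_IP_CSUM)
 *		csum_offload_supported = true;
 */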
struct ib_qp_security {
	struct ib_qp *qp;
	struct ib_device *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex mutex;
	struct ib_ports_pkeys *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head shared_qp_list;
	void *security;
	bool destroying;
	atomic_t error_list_count;
	struct completion error_complete;
	int error_comps_pending;
};
/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct completion srq_completion;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct list_head xrcd_list;
/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL = 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT = 0x2,
	/* sniffer rule - receive all port traffic */
IB_FLOW_ATTR_SNIFFER = 0x3
};
/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
				    last have this flag set */
};
/* ib_flow_spec_tunnel describes the VXLAN tunnel;
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32				type;
	u16				size;
	struct ib_flow_tunnel_filter	val;
	struct ib_flow_tunnel_filter	mask;
};
enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
	 * This is done in order to share the same flags between user-space and
	 * kernel and spare an unnecessary translation.
	 */

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
RDMA_NETDEV_OPA_VNIC,
RDMA_NETDEV_IPOIB,
};
/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void *clnt_priv;
	struct ib_device *hca;
	u32 port_num;
	int mtu;
	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * removed too.
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
	/* timeout */
	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
};
/* Return the offset (in bytes) the user should pass to libc's mmap() */
static inline u64
rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
{
	return (u64)entry->start_pgoff << PAGE_SHIFT;
}
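/*
 * Usage sketch (illustrative only): a driver hands this offset back to
 * userspace (e.g. in a command response), and the application maps the
 * entry with libc's mmap(); "fd" is the open uverbs device file, "len"
 * and "resp" are hypothetical:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, resp.mmap_offset);
 */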
/**
 * struct ib_device_ops - InfiniBand device operations
 * This structure defines all the InfiniBand device operations. Providers
 * must define the operations they support; the rest are left set to NULL.
 */
struct ib_device_ops {
	struct module *owner;
	enum rdma_driver_id driver_id;
	u32 uverbs_abi_ver;
	unsigned int uverbs_no_driver_id_binding:1;

	/*
	 * NOTE: New drivers should not make use of device_group; instead new
	 * device parameter should be exposed via netlink command. This
	 * mechanism exists only for existing drivers.
	 */
	const struct attribute_group *device_group;
	const struct attribute_group **port_groups;
	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
			 const struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
	int (*post_srq_recv)(struct ib_srq *srq,
			     const struct ib_recv_wr *recv_wr,
			     const struct ib_recv_wr **bad_recv_wr);
	int (*process_mad)(struct ib_device *device, int process_mad_flags,
			   u32 port_num, const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
			   size_t *out_mad_size, u16 *out_mad_pkey_index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*modify_device)(struct ib_device *device, int device_modify_mask,
			     struct ib_device_modify *device_modify);
	void (*get_dev_fw_str)(struct ib_device *device, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);
	int (*query_port)(struct ib_device *device, u32 port_num,
			  struct ib_port_attr *port_attr);
	int (*modify_port)(struct ib_device *device, u32 port_num,
			   int port_modify_mask,
			   struct ib_port_modify *port_modify);
	/**
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *device, u32 port_num,
				  struct ib_port_immutable *immutable);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u32 port_num);
	/**
	 * When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device,
					 u32 port_num);
	/**
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
	 * must return -EOPNOTSUPP if it doesn't support the specified type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
		struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
		const char *name, unsigned char name_assign_type,
		void (*setup)(struct net_device *));
	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params);
	/**
	 * query_gid should return the GID value for @device when the link
	 * layer of @port_num is either IB or iWarp. It is a no-op if the
	 * @port_num port is RoCE link layer.
	 */
	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
			 union ib_gid *gid);
	/**
	 * When calling add_gid, the HW vendor's driver should add the GID
	 * of the device's port at the gid index available at @attr. Meta-info
	 * of that gid (for example, the network device related to this gid)
	 * is available at @attr. @context allows the HW vendor driver to
	 * store extra information together with a GID entry. The HW vendor
	 * driver may allocate memory to contain this information and store
	 * it in @context when a new GID entry is written to. Params are
	 * consistent until the next call of add_gid or delete_gid. The
	 * function should return 0 on success or error otherwise. The
	 * function could be called concurrently for different ports. This
	 * function is only called when roce_gid_table is used.
	 */
	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
	/**
	 * When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */