struct nvme_fc_fcp_op { struct nvme_request nreq; /* * nvme/host/core.c * requires this to be * the 1st element in the * private structure * associated with the * request.
*/ struct nvmefc_fcp_req fcp_req;
/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;
/** * nvme_fc_register_localport - transport entry point called by an * LLDD to register the existence of a NVME * host FC port. * @pinfo: pointer to information about the port to be registered * @template: LLDD entrypoints and operational parameters for the port * @dev: physical hardware device node port corresponds to. Will be * used for DMA mappings * @portptr: pointer to a local port pointer. Upon success, the routine * will allocate a nvme_fc_local_port structure and place its * address in the local port pointer. Upon failure, local port * pointer will be set to 0. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure.
*/ int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, struct nvme_fc_port_template *template, struct device *dev, struct nvme_fc_local_port **portptr)
{ struct nvme_fc_lport *newrec; unsignedlong flags; int ret, idx;
/* * look to see if there is already a localport that had been * deregistered and in the process of waiting for all the * references to fully be removed. If the references haven't * expired, we can simply re-enable the localport. Remoteports * and controller reconnections should resume naturally.
*/
newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
/* found an lport, but something about its state is bad */ if (IS_ERR(newrec)) {
ret = PTR_ERR(newrec); goto out_reghost_failed;
/* found existing lport, which was resumed */
} elseif (newrec) {
*portptr = &newrec->localport; return 0;
}
/* nothing found - allocate a new localport struct */
newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
GFP_KERNEL); if (!newrec) {
ret = -ENOMEM; goto out_reghost_failed;
}
idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL); if (idx < 0) {
ret = -ENOSPC; goto out_fail_kfree;
}
if (!get_device(dev) && dev) {
ret = -ENODEV; goto out_ida_put;
}
/** * nvme_fc_unregister_localport - transport entry point called by an * LLDD to deregister/remove a previously * registered a NVME host FC port. * @portptr: pointer to the (registered) local port that is to be deregistered. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure.
*/ int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{ struct nvme_fc_lport *lport = localport_to_lport(portptr); unsignedlong flags;
/* * TRADDR strings, per FC-NVME are fixed format: * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters * udev event will only differ by prefix of what field is * being specified: * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters * 19 + 43 + null_fudge = 64 characters
*/ #define FCNVME_TRADDR_LENGTH 64
case NVME_CTRL_RESETTING: /* * Controller is already in the process of terminating the * association. No need to do anything further. The reconnect * step will naturally occur after the reset completes.
*/ break;
default: /* no action to take - let it delete */ break;
}
}
if (!nvme_fc_rport_get(rport)) {
rport = ERR_PTR(-ENOLCK); goto out_done;
}
spin_unlock_irqrestore(&nvme_fc_lock, flags);
spin_lock_irqsave(&rport->lock, flags);
/* has it been unregistered */ if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { /* means lldd called us twice */
spin_unlock_irqrestore(&rport->lock, flags);
nvme_fc_rport_put(rport); return ERR_PTR(-ESTALE);
}
/* * kick off a reconnect attempt on all associations to the * remote port. A successful reconnects will resume i/o.
*/
list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
nvme_fc_resume_controller(ctrl);
/** * nvme_fc_register_remoteport - transport entry point called by an * LLDD to register the existence of a NVME * subsystem FC port on its fabric. * @localport: pointer to the (registered) local port that the remote * subsystem port is connected to. * @pinfo: pointer to information about the port to be registered * @portptr: pointer to a remote port pointer. Upon success, the routine * will allocate a nvme_fc_remote_port structure and place its * address in the remote port pointer. Upon failure, remote port * pointer will be set to 0. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure.
*/ int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, struct nvme_fc_port_info *pinfo, struct nvme_fc_remote_port **portptr)
{ struct nvme_fc_lport *lport = localport_to_lport(localport); struct nvme_fc_rport *newrec; unsignedlong flags; int ret, idx;
if (!nvme_fc_lport_get(lport)) {
ret = -ESHUTDOWN; goto out_reghost_failed;
}
/* * look to see if there is already a remoteport that is waiting * for a reconnect (within dev_loss_tmo) with the same WWN's. * If so, transition to it and reconnect.
*/
newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
/* found an rport, but something about its state is bad */ if (IS_ERR(newrec)) {
ret = PTR_ERR(newrec); goto out_lport_put;
/* found existing rport, which was resumed */
} elseif (newrec) {
nvme_fc_lport_put(lport);
__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
nvme_fc_signal_discovery_scan(lport, newrec);
*portptr = &newrec->remoteport; return 0;
}
/* nothing found - allocate a new remoteport struct */
newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
GFP_KERNEL); if (!newrec) {
ret = -ENOMEM; goto out_lport_put;
}
idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL); if (idx < 0) {
ret = -ENOSPC; goto out_kfree_rport;
}
/** * nvme_fc_unregister_remoteport - transport entry point called by an * LLDD to deregister/remove a previously * registered a NVME subsystem FC port. * @portptr: pointer to the (registered) remote port that is to be * deregistered. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure.
*/ int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{ struct nvme_fc_rport *rport = remoteport_to_rport(portptr); struct nvme_fc_ctrl *ctrl; unsignedlong flags;
list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { /* if dev_loss_tmo==0, dev loss is immediate */ if (!portptr->dev_loss_tmo) {
dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: controller connectivity lost.\n",
ctrl->cnum);
nvme_delete_ctrl(&ctrl->ctrl);
} else
nvme_fc_ctrl_connectivity_loss(ctrl);
}
spin_unlock_irqrestore(&rport->lock, flags);
nvme_fc_abort_lsops(rport);
if (atomic_read(&rport->act_ctrl_cnt) == 0)
rport->lport->ops->remoteport_delete(portptr);
/* * release the reference, which will allow, if all controllers * go away, which should only occur after dev_loss_tmo occurs, * for the rport to be torn down.
*/
nvme_fc_rport_put(rport);
/** * nvme_fc_rescan_remoteport - transport entry point called by an * LLDD to request a nvme device rescan. * @remoteport: pointer to the (registered) remote port that is to be * rescanned. * * Returns: N/A
*/ void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{ struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
/* * The fcloop device passes in a NULL device pointer. Real LLD's will * pass in a valid device pointer. If NULL is passed to the dma mapping * routines, depending on the platform, it may or may not succeed, and * may crash. * * As such: * Wrap all the dma routines and check the dev pointer. * * If simple mappings (return just a dma address, we'll noop them, * returning a dma address of 0. * * On more complex mappings (dma_map_sg), a pseudo routine fills * in the scatter list, setting all dma addresses to 0.
*/
ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
if (!ret) { /* * No timeout/not interruptible as we need the struct * to exist until the lldd calls us back. Thus mandate * wait until driver calls back. lldd responsible for * the timeout action
*/
wait_for_completion(&lsop->ls_done);
__nvme_fc_finish_ls_req(lsop);
ret = lsop->ls_error;
}
if (ret) return ret;
/* ACC or RJT payload ? */ if (rjt->w0.ls_cmd == FCNVME_LS_RJT) return -ENXIO;
return 0;
}
/*
 * Fire-and-forget variant of sending an FC-NVME LS request.
 *
 * NOTE(review): this text appears truncated/merged during extraction:
 * the function is declared to return int but contains no return
 * statement, and the kfree(lsop) body reads like it belongs to a
 * separate completion ("done") callback rather than to the async send
 * routine itself. Confirm against the original source before relying
 * on this body. Also note the fused keyword "staticint" ("static int")
 * is an extraction artifact and will not compile as-is.
 */
staticint
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop, void (*done)(struct nvmefc_ls_req *req, int status))
{ /* don't wait for completion */
/* fc-nvme initiator doesn't care about success or failure of cmd */
/* NOTE(review): freeing lsop here suggests this is the LS completion path */
kfree(lsop);
}
/* * This routine sends a FC-NVME LS to disconnect (aka terminate) * the FC-NVME Association. Terminating the association also * terminates the FC-NVME connections (per queue, both admin and io * queues) that are part of the association. E.g. things are torn * down, and the related FC-NVME Association ID and Connection IDs * become invalid. * * The behavior of the fc-nvme initiator is such that its * understanding of the association and connections will implicitly * be torn down. The action is implicit as it may be due to a loss of * connectivity with the fc-nvme target, so you may never get a * response even if you tried. As such, the action of this routine * is to asynchronously send the LS, ignore any results of the LS, and * continue on with terminating the association. If the fc-nvme target * is present and receives the LS, it too can tear down.
*/ staticvoid
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; struct fcnvme_ls_disconnect_assoc_acc *discon_acc; struct nvmefc_ls_req_op *lsop; struct nvmefc_ls_req *lsreq; int ret;
ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
lsop->lsrsp); if (ret) {
dev_warn(lport->dev, "LLDD rejected LS RSP xmt: LS %d status %d\n",
w0->ls_cmd, ret);
nvme_fc_xmt_ls_rsp_free(lsop); return;
}
}
list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { if (!nvme_fc_ctrl_get(ctrl)) continue;
spin_lock(&ctrl->lock); if (association_id == ctrl->association_id) {
oldls = ctrl->rcv_disconn;
ctrl->rcv_disconn = lsop;
ret = ctrl;
}
spin_unlock(&ctrl->lock); if (ret) /* leave the ctrl get reference */ break;
nvme_fc_ctrl_put(ctrl);
}
spin_unlock_irqrestore(&rport->lock, flags);
/* transmit a response for anything that was pending */ if (oldls) {
dev_info(rport->lport->dev, "NVME-FC{%d}: Multiple Disconnect Association " "LS's received\n", ctrl->cnum); /* overwrite good response with bogus failure */
oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, sizeof(*oldls->rspbuf),
rqst->w0.ls_cmd,
FCNVME_RJT_RC_UNAB,
FCNVME_RJT_EXP_NONE, 0);
nvme_fc_xmt_ls_rsp(oldls);
}
return ret;
}
/* * returns true to mean LS handled and ls_rsp can be sent * returns false to defer ls_rsp xmt (will be done as part of * association termination)
*/ staticbool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{ struct nvme_fc_rport *rport = lsop->rport; struct fcnvme_ls_disconnect_assoc_rqst *rqst =
&lsop->rqstbuf->rq_dis_assoc; struct fcnvme_ls_disconnect_assoc_acc *acc =
&lsop->rspbuf->rsp_dis_assoc; struct nvme_fc_ctrl *ctrl = NULL; int ret = 0;
/* zero the ACC response buffer before any formatting */
memset(acc, 0, sizeof(*acc));
/* validate the received Disconnect Association request payload */
ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); if (!ret) { /* match an active association */
ctrl = nvme_fc_match_disconn_ls(rport, lsop); if (!ctrl)
ret = VERR_NO_ASSOC;
}
/*
 * NOTE(review): this body appears truncated by extraction — the error
 * path that formats an LS_RJT when ret != 0 (and returns true) and the
 * formatting of the ACC response are missing here; confirm against the
 * original source. The fused keywords "staticbool"/"returnfalse" are
 * also extraction artifacts and will not compile as-is.
 */
/* * the transmit of the response will occur after the exchanges * for the association have been ABTS'd by * nvme_fc_delete_association().
*/
/* fail the association */
nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
/* release the reference taken by nvme_fc_match_disconn_ls() */
nvme_fc_ctrl_put(ctrl);
/* false: the ls_rsp transmit is deferred to association teardown */
returnfalse;
}
/* * Actual Processing routine for received FC-NVME LS Requests from the LLD * returns true if a response should be sent afterward, false if rsp will * be sent asynchronously.
*/ staticbool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; bool ret = true;
lsop->lsrsp->nvme_fc_private = lsop;
lsop->lsrsp->rspbuf = lsop->rspbuf;
lsop->lsrsp->rspdma = lsop->rspdma;
lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; /* Be preventative. handlers will later set to valid length */
lsop->lsrsp->rsplen = 0;
/* * handlers: * parse request input, execute the request, and format the * LS response
*/ switch (w0->ls_cmd) { case FCNVME_LS_DISCONNECT_ASSOC:
ret = nvme_fc_ls_disconnect_assoc(lsop); break; case FCNVME_LS_DISCONNECT_CONN:
lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, sizeof(*lsop->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0); break; case FCNVME_LS_CREATE_ASSOCIATION: case FCNVME_LS_CREATE_CONNECTION:
lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, sizeof(*lsop->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); break; default:
lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, sizeof(*lsop->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); break;
}
/** * nvme_fc_rcv_ls_req - transport entry point called by an LLDD * upon the reception of a NVME LS request. * * The nvme-fc layer will copy payload to an internal structure for * processing. As such, upon completion of the routine, the LLDD may * immediately free/reuse the LS request buffer passed in the call. * * If this routine returns error, the LLDD should abort the exchange. * * @portptr: pointer to the (registered) remote port that the LS * was received from. The remoteport is associated with * a specific localport. * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be * used to reference the exchange corresponding to the LS * when issuing an ls response. * @lsreqbuf: pointer to the buffer containing the LS Request * @lsreqbuf_len: length, in bytes, of the received LS request
*/ int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr, struct nvmefc_ls_rsp *lsrsp, void *lsreqbuf, u32 lsreqbuf_len)
{ struct nvme_fc_rport *rport = remoteport_to_rport(portptr); struct nvme_fc_lport *lport = rport->lport; struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; struct nvmefc_ls_rcv_op *lsop; unsignedlong flags; int ret;
nvme_fc_rport_get(rport);
/* validate there's a routine to transmit a response */ if (!lport->ops->xmt_ls_rsp) {
dev_info(lport->dev, "RCV %s LS failed: no LLDD xmt_ls_rsp\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
ret = -EINVAL; goto out_put;
}
if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
dev_info(lport->dev, "RCV %s LS failed: payload too large\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
ret = -E2BIG; goto out_put;
}
lsop = kzalloc(sizeof(*lsop), GFP_KERNEL); if (!lsop) {
nvme_fc_rcv_ls_req_err_msg(lport, w0);
ret = -ENOMEM; goto out_put;
}
/* * nvme_fc_io_getuuid - Routine called to get the appid field * associated with request by the lldd * @req:IO request from nvme fc to driver * Returns: UUID if there is an appid associated with VM or * NULL if the user/libvirt has not set the appid to VM
*/ char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
{ struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); struct request *rq = op->rq;
/* * WARNING: * The current linux implementation of a nvme controller * allocates a single tag set for all io queues and sizes * the io queues to fully hold all possible tags. Thus, the * implementation does not reference or care about the sqhd * value as it never needs to use the sqhd/sqtail pointers * for submission pacing. * * This affects the FC-NVME implementation in two ways: * 1) As the value doesn't matter, we don't need to waste * cycles extracting it from ERSPs and stamping it in the * cases where the transport fabricates CQEs on successful * completions. * 2) The FC-NVME implementation requires that delivery of * ERSP completions are to go back to the nvme layer in order * relative to the rsn, such that the sqhd value will always * be "in order" for the nvme layer. As the nvme layer in * linux doesn't care about sqhd, there's no need to return * them in order. * * Additionally: * As the core nvme layer in linux currently does not look at * every field in the cqe - in cases where the FC transport must * fabricate a CQE, the following fields will not be set as they * are not referenced: * cqe.sqid, cqe.sqhd, cqe.command_id * * Failure or error of an individual i/o, in a transport * detected fashion unrelated to the nvme completion status, * potentially cause the initiator and target sides to get out * of sync on SQ head/tail (aka outstanding io count allowed). * Per FC-NVME spec, failure of an individual command requires * the connection to be terminated, which in turn requires the * association to be terminated.
*/
if (opstate == FCPOP_STATE_ABORTED)
status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); elseif (freq->status) {
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device, "NVME-FC{%d}: io failed due to lldd error %d\n",
ctrl->cnum, freq->status);
}
/* * For the linux implementation, if we have an unsuccessful * status, the blk-mq layer can typically be called with the * non-zero status and the content of the cqe isn't important.
*/ if (status) goto done;
/* * command completed successfully relative to the wire * protocol. However, validate anything received and * extract the status and result from the cqe (create it * where necessary).
*/
switch (freq->rcv_rsplen) {
case 0: case NVME_FC_SIZEOF_ZEROS_RSP: /* * No response payload or 12 bytes of payload (which * should all be zeros) are considered successful and * no payload in the CQE by the transport.
*/ if (freq->transferred_length !=
be32_to_cpu(op->cmd_iu.data_len)) {
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device, "NVME-FC{%d}: io failed due to bad transfer " "length: %d vs expected %d\n",
ctrl->cnum, freq->transferred_length,
be32_to_cpu(op->cmd_iu.data_len)); goto done;
}
result.u64 = 0; break;
casesizeof(struct nvme_fc_ersp_iu): /* * The ERSP IU contains a full completion with CQE. * Validate ERSP IU and look at cqe.
*/ if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
(freq->rcv_rsplen / 4) ||
be32_to_cpu(op->rsp_iu.xfrd_len) !=
freq->transferred_length ||
op->rsp_iu.ersp_result ||
sqe->common.command_id != cqe->command_id)) {
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device, "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " "iu len %d, xfr len %d vs %d, status code " "%d, cmdid %d vs %d\n",
ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
be32_to_cpu(op->rsp_iu.xfrd_len),
freq->transferred_length,
op->rsp_iu.ersp_result,
sqe->common.command_id,
cqe->command_id); goto done;
}
result = cqe->result;
status = cqe->status; break;
default:
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device, "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " "len %d\n",
ctrl->cnum, freq->rcv_rsplen); goto done;
}
aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { if (ctrl->lport->ops->fcprqst_priv_sz) { private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
GFP_KERNEL); if (!private) return -ENOMEM;
}
/* * Considered whether we should allocate buffers for all SQEs * and CQEs and dma map them - mapping their respective entries * into the request structures (kernel vm addr and dma address) * thus the driver could use the buffers/mappings directly. * It only makes sense if the LLDD would use them for its * messaging api. It's very unlikely most adapter api's would use * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload * structures were used instead.
*/
}
/* * This routine terminates a queue at the transport level. * The transport has already ensured that all outstanding ios on * the queue have been terminated. * The transport will send a Disconnect LS request to terminate * the queue's connection. Termination of the admin queue will also * terminate the association at the target.
*/ staticvoid
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{ if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) return;
clear_bit(NVME_FC_Q_LIVE, &queue->flags); /* * Current implementation never disconnects a single queue. * It always terminates a whole association. So there is never * a disconnect(queue) LS sent to the target.
*/
/*
 * NOTE(review): the loop below references "ctrl" and "i", which are not
 * declared in this function — it looks like it was spliced in from a
 * separate delete-hw-io-queues helper during extraction. Confirm
 * against the original source; "staticvoid" above is likewise an
 * extraction artifact ("static void").
 */
for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
__nvme_fc_delete_hw_queue(ctrl, queue, i);
}
/*
 * Create the LLDD hardware queues for all I/O queues.
 *
 * Queues are created starting at index 1 (index 0 — presumably the admin
 * queue — is handled elsewhere; confirm against the full source).
 *
 * Returns: 0 on success, or a negative errno from the failed queue
 * creation. On failure, every queue created so far is deleted before
 * returning.
 */
static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	/* unwind: tear down queues in reverse order of creation */
	for (; i > 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}
staticint
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{ int i, ret = 0;
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
(qsize / 5)); if (ret) break;
ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) break;
/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	/* sanity: nctrl must be the embedded core controller of this fc ctrl */
	WARN_ON(nctrl != &ctrl->ctrl);

	/* drop the transport reference; the final put releases the structure */
	nvme_fc_ctrl_put(ctrl);
}
/* * This routine is used by the transport when it needs to find active * io on a queue that is to be terminated. The transport uses * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke * this routine to kill them on a 1 by 1 basis. * * As FC allocates FC exchange for each io, the transport must contact
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.