// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */
/*
 * fabric_sm Node State Machine: Fabric States
 * ns_sm Node State Machine: Name/Directory Services States
 * p2p_sm Node State Machine: Point-to-Point Node States
 */
/*
 * Issue HW node free; don't care if it succeeds right away
 * or sometime later; will check node->attached later in the
 * shutdown process.
 */
rc = efc_cmd_node_detach(efc, &node->rnode); if (rc < 0) {
node_printf(node, "Failed freeing HW node, rc=%d\n",
rc);
}
} /* * node has either been detached or is in the process of being detached, * call common node's initiate cleanup function
*/
efc_node_initiate_cleanup(node);
}
/*
 * Now loop through the nodes in the nport
 * and send a topology notification to each.
 */
xa_for_each(&node->nport->lookup, index, tmp_node) { if (tmp_node != node) {
efc_node_post_event(tmp_node,
EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
&node->nport->topology);
}
}
}
/* Check to see if the fabric is an F_PORT or and N_PORT */ if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) { /* sm: if not nport / efc_domain_attach */ /* ext_status has the fc_id, attach domain */
efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
efc_fabric_notify_topology(node);
WARN_ON(node->nport->domain->attached);
efc_domain_attach(node->nport->domain,
cbdata->ext_status);
efc_node_transition(node,
__efc_fabric_wait_domain_attach,
NULL); break;
}
/* sm: if nport and p2p_winner / efc_domain_attach */
efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P); if (efc_p2p_setup(node->nport)) {
node_printf(node, "p2p setup failed, shutting down node\n");
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
efc_fabric_initiate_shutdown(node); break;
}
if (node->nport->p2p_winner) {
efc_node_transition(node,
__efc_p2p_wait_domain_attach,
NULL); if (node->nport->domain->attached &&
!node->nport->domain->domain_notify_pend) { /* * already attached, * just send ATTACH_OK
*/
node_printf(node, "p2p winner, domain already attached\n");
efc_node_post_event(node,
EFC_EVT_DOMAIN_ATTACH_OK,
NULL);
}
} else { /* * peer is p2p winner; * PLOGI will be received on the * remote SID=1 node; * this node has served its purpose
*/
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
efc_fabric_initiate_shutdown(node);
}
break;
}
case EFC_EVT_ELS_REQ_ABORTED: case EFC_EVT_SRRS_ELS_REQ_RJT: case EFC_EVT_SRRS_ELS_REQ_FAIL: { struct efc_nport *nport = node->nport; /* * with these errors, we have no recovery, * so shutdown the nport, leave the link * up and the domain ready
*/ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
__efc_fabric_common, __func__)) { return;
}
node_printf(node, "FLOGI failed evt=%s, shutting down nport [%s]\n",
efc_sm_event_name(evt), nport->display_name);
WARN_ON(!node->els_req_cnt);
node->els_req_cnt--;
efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); break;
}
/* Instantiate a name services node */
ns = efc_node_find(nport, FC_FID_DIR_SERV); if (!ns) {
ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false); if (!ns) return -EIO;
} /* * for found ns, should we be transitioning from here? * breaks transition only * 1. from within state machine or * 2. if after alloc
*/ if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
efc_node_pause(ns, __efc_ns_init); else
efc_node_transition(ns, __efc_ns_init, NULL); return 0;
}
fabctl = efc_node_find(nport, FC_FID_FCTRL); if (!fabctl) {
fabctl = efc_node_alloc(nport, FC_FID_FCTRL, false, false); if (!fabctl) return -EIO;
} /* * for found ns, should we be transitioning from here? * breaks transition only * 1. from within state machine or * 2. if after alloc
*/
efc_node_transition(fabctl, __efc_fabctl_init, NULL); return 0;
}
switch (evt) { case EFC_EVT_ENTER:
efc_node_hold_frames(node); break;
case EFC_EVT_EXIT:
efc_node_accept_frames(node); break;
/* wait for any of these attach events and then shutdown */ case EFC_EVT_NODE_ATTACH_OK:
node->attached = true;
node_printf(node, "Attach evt=%s, proceed to shutdown\n",
efc_sm_event_name(evt));
efc_fabric_initiate_shutdown(node); break;
case EFC_EVT_NODE_ATTACH_FAIL:
node->attached = false;
node_printf(node, "Attach evt=%s, proceed to shutdown\n",
efc_sm_event_name(evt));
efc_fabric_initiate_shutdown(node); break;
/* ignore shutdown event as we're already in shutdown path */ case EFC_EVT_SHUTDOWN:
node_printf(node, "Shutdown event received\n"); break;
/* * Waits for an RFFID response event; * if rscn enabled, a GIDPT name services request is issued.
*/ switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: { if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
__efc_fabric_common, __func__)) { return;
}
WARN_ON(!node->els_req_cnt);
node->els_req_cnt--; if (node->nport->enable_rscn) { /* sm: if enable_rscn / send GIDPT */
efc_ns_send_gidpt(node);
efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
NULL);
} else { /* if 'T' only, we're done, go to idle */
efc_node_transition(node, __efc_ns_idle, NULL);
} break;
} /* * if receive RSCN just ignore, * we haven't sent GID_PT yet (ACC sent by fabctl node)
*/ case EFC_EVT_RSCN_RCVD: break;
/* Count the number of nodes */
port_count = 0;
xa_for_each(&nport->lookup, index, n) {
port_count++;
}
/* Allocate a buffer for all nodes */
active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC); if (!active_nodes) {
node_printf(node, "efc_malloc failed\n"); return -EIO;
}
/* Fill buffer with fc_id of active nodes */
i = 0;
xa_for_each(&nport->lookup, index, n) {
port_id = n->rnode.fc_id; switch (port_id) { case FC_FID_FLOGI: case FC_FID_FCTRL: case FC_FID_DIR_SERV: break; default: if (port_id != FC_FID_DOM_MGR)
active_nodes[i++] = n; break;
}
}
/* update the active nodes buffer */ for (i = 0; i < plist_count; i++) {
hton24(gidpt[i].fp_fid, port_id);
/* Those remaining in the active_nodes[] are now gone ! */ for (i = 0; i < port_count; i++) { /* * if we're an initiator and the remote node * is a target, then post the node missing event. * if we're target and we have enabled * target RSCN, then post the node missing event.
*/ if (!active_nodes[i]) continue;
node_sm_trace(); /* * Wait for a GIDPT response from the name server. Process the FC_IDs * that are reported by creating new remote ports, as needed.
*/
case EFC_EVT_SRRS_ELS_REQ_FAIL: { /* not much we can do; will retry with the next RSCN */
node_printf(node, "GID_PT failed to complete\n");
WARN_ON(!node->els_req_cnt);
node->els_req_cnt--;
efc_node_transition(node, __efc_ns_idle, NULL); break;
}
/* if receive RSCN here, queue up another discovery processing */ case EFC_EVT_RSCN_RCVD: {
node_printf(node, "RSCN received during GID_PT processing\n");
node->rscn_pending = true; break;
}
case EFC_EVT_RSCN_RCVD: { /* sm: / send GIDPT */ /* * If target RSCN processing is enabled, * and this is target only (not initiator), * and tgt_rscn_delay is non-zero, * then we delay issuing the GID_PT
*/ if (efc->tgt_rscn_delay_msec != 0 &&
!node->nport->enable_ini && node->nport->enable_tgt &&
enable_target_rscn(efc)) {
efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
} else {
efc_ns_send_gidpt(node);
efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
NULL);
} break;
}
switch (evt) { case EFC_EVT_ENTER: {
u64 delay_msec, tmp;
/*
 * Compute the delay time.
 * Start from tgt_rscn_delay; if the time since the last GIDPT
 * is less than tgt_rscn_period, use tgt_rscn_period instead.
 */
delay_msec = efc->tgt_rscn_delay_msec;
tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec; if (tmp < efc->tgt_rscn_period_msec)
delay_msec = efc->tgt_rscn_period_msec;
switch (evt) { case EFC_EVT_ENTER: /* no need to login to fabric controller, just send SCR */
efc_send_scr(node);
efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL); break;
case EFC_EVT_NODE_ATTACH_OK:
node->attached = true; break;
/* Forward this event to the name-services node */
ns = efc_node_find(nport, FC_FID_DIR_SERV); if (ns)
efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata); else
efc_log_warn(efc, "can't find name server node\n");
}
/* * Fabric controller node state machine: Ready. * In this state, the fabric controller sends a RSCN, which is received * by this node and is forwarded to the name services node object; and * the RSCN LS_ACC is sent.
*/ switch (evt) { case EFC_EVT_RSCN_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt;
/*
 * sm: / process RSCN (forward to name services node),
 * send LS_ACC
 */
efc_process_rscn(node, cbdata);
efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
NULL); break;
}
/*
 * This transient node (SID=0 (received FLOGI)
 * or DID=fabric (sent FLOGI))
 * is the p2p winner; a separate node will be used
 * to send the PLOGI to the peer.
 */
WARN_ON(!node->nport->p2p_winner);
rnode = efc_node_find(nport, node->nport->p2p_remote_port_id); if (rnode) { /* * the "other" transient p2p node has * already kicked off the * new node from which PLOGI is sent
*/
node_printf(node, "Node with fc_id x%x already exists\n",
rnode->rnode.fc_id);
} else { /* * create new node (SID=1, DID=2) * from which to send PLOGI
*/
rnode = efc_node_alloc(nport,
nport->p2p_remote_port_id, false, false); if (!rnode) {
efc_log_err(efc, "node alloc failed\n"); return;
}
/* * the transient node (SID=0 or DID=fabric) * has served its purpose
*/ if (node->rnode.fc_id == 0) { /* * if this is the SID=0 node, * move to the init state in case peer * has restarted FLOGI discovery and FLOGI is pending
*/ /* don't send PLOGI on efc_d_init entry */
efc_node_init_device(node, false);
} else { /* * if this is the DID=fabric node * (we initiated FLOGI), shut it down
*/
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
efc_fabric_initiate_shutdown(node);
} break;
}
switch (evt) { case EFC_EVT_ENTER:
efc_node_hold_frames(node); break;
case EFC_EVT_EXIT:
efc_node_accept_frames(node); break;
case EFC_EVT_SRRS_ELS_CMPL_OK:
WARN_ON(!node->els_cmpl_cnt);
node->els_cmpl_cnt--;
/* sm: if p2p_winner / domain_attach */ if (node->nport->p2p_winner) {
efc_node_transition(node,
__efc_p2p_wait_domain_attach,
NULL); if (!node->nport->domain->attached) {
node_printf(node, "Domain not attached\n");
efc_domain_attach(node->nport->domain,
node->nport->p2p_port_id);
} else {
node_printf(node, "Domain already attached\n");
efc_node_post_event(node,
EFC_EVT_DOMAIN_ATTACH_OK,
NULL);
}
} else { /* this node has served its purpose; * we'll expect a PLOGI on a separate * node (remote SID=0x1); return this node * to init state in case peer * restarts discovery -- it may already * have (pending frames may exist).
*/ /* don't send PLOGI on efc_d_init entry */
efc_node_init_device(node, false);
} break;
case EFC_EVT_SRRS_ELS_CMPL_FAIL: /* * LS_ACC failed, possibly due to link down; * shutdown node and wait * for FLOGI discovery to restart
*/
node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
WARN_ON(!node->els_cmpl_cnt);
node->els_cmpl_cnt--;
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
efc_fabric_initiate_shutdown(node); break;
case EFC_EVT_PLOGI_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; /* if we're in external loopback mode, just send LS_ACC */ if (node->efc->external_loopback) {
efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
} else { /* * if this isn't external loopback, * pass to default handler
*/
__efc_fabric_common(__func__, ctx, evt, arg);
} break;
} case EFC_EVT_PRLI_RCVD: /* I, or I+T */ /* sent PLOGI and before completion was seen, received the * PRLI from the remote node (WCQEs and RCQEs come in on * different queues and order of processing cannot be assumed) * Save OXID so PRLI can be sent after the attach and continue * to wait for PLOGI response
*/
efc_process_prli_payload(node, cbdata->payload->dma.virt);
efc_send_ls_acc_after_attach(node,
cbdata->header->dma.virt,
EFC_NODE_SEND_LS_ACC_PRLI);
efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
NULL); break; default:
__efc_fabric_common(__func__, ctx, evt, arg);
}
}
switch (evt) { case EFC_EVT_ENTER: /* * Since we've received a PRLI, we have a port login and will * just need to wait for the PLOGI response to do the node * attach and then we can send the LS_ACC for the PRLI. If, * during this time, we receive FCP_CMNDs (which is possible * since we've already sent a PRLI and our peer may have * accepted). * At this time, we are not waiting on any other unsolicited * frames to continue with the login process. Thus, it will not * hurt to hold frames here.
*/
efc_node_hold_frames(node); break;
case EFC_EVT_EXIT:
efc_node_accept_frames(node); break;
case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */ int rc;
/* Completion from PLOGI sent */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
__efc_fabric_common, __func__)) { return;
}
WARN_ON(!node->els_req_cnt);
node->els_req_cnt--; /* sm: / save sparams, efc_node_attach */
efc_node_save_sparms(node, cbdata->els_rsp.virt);
rc = efc_node_attach(node);
efc_node_transition(node, __efc_p2p_wait_node_attach, NULL); if (rc < 0)
efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
NULL); break;
} case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */ case EFC_EVT_SRRS_ELS_REQ_RJT: /* PLOGI failed, shutdown the node */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
__efc_fabric_common, __func__)) { return;
}
WARN_ON(!node->els_req_cnt);
node->els_req_cnt--;
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
efc_fabric_initiate_shutdown(node); break;
switch (evt) { case EFC_EVT_ENTER:
efc_node_hold_frames(node); break;
case EFC_EVT_EXIT:
efc_node_accept_frames(node); break;
case EFC_EVT_NODE_ATTACH_OK:
node->attached = true; switch (node->send_ls_acc) { case EFC_NODE_SEND_LS_ACC_PRLI: {
efc_d_send_prli_rsp(node->ls_acc_io,
node->ls_acc_oxid);
node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
node->ls_acc_io = NULL; break;
} case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */ case EFC_NODE_SEND_LS_ACC_NONE: default: /* Normal case for I */ /* sm: send_plogi_acc is not set / send PLOGI acc */
efc_node_transition(node, __efc_d_port_logged_in,
NULL); break;
} break;
/*
 * NOTE(review): the following German website disclaimer is extraction
 * residue, not part of the driver source; it should be removed once
 * confirmed. English translation kept for reference:
 * "The information on this web page was compiled carefully and to the
 * best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */