// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */
/*
 * domain_sm Domain State Machine: States
 */
#include"efc.h"
/*
 * efc_domain_cb() - dispatch a domain event from the HW/user driver into
 * the domain state machine.
 * @arg:   the struct efc instance (opaque callback argument)
 * @event: EFC_HW_DOMAIN_* event code
 * @data:  event payload; a struct efc_domain_record for DOMAIN_FOUND,
 *         otherwise presumably the struct efc_domain itself (per the
 *         assignment below) -- TODO confirm against callers
 *
 * NOTE(review): this chunk is garbled -- the body is cut off right after
 * the fcf_wwn extraction; the rest of the function is not visible here.
 */
int
efc_domain_cb(void *arg, int event, void *data)
{ struct efc *efc = arg; struct efc_domain *domain = NULL; int rc = 0; unsignedlong flags = 0;
/* NOTE(review): "unsignedlong" above is a paste artifact; it must read
 * "unsigned long" for this to compile.
 */
if (event != EFC_HW_DOMAIN_FOUND)
domain = data;
/* Accept domain callback events from the user driver */
spin_lock_irqsave(&efc->lock, flags); switch (event) { case EFC_HW_DOMAIN_FOUND: {
u64 fcf_wwn = 0; struct efc_domain_record *drec = data;
/* extract the fcf_wwn */
fcf_wwn = be64_to_cpu(*((__be64 *)drec->wwn));
/**
 * efc_register_domain_free_cb() - register a callback to be invoked when
 * the domain is freed.
 * @efc:      the efc instance to attach the callback to
 * @callback: function to call once the domain is gone (or NULL to clear)
 * @arg:      opaque argument handed back to @callback
 *
 * If no domain currently exists, a non-NULL @callback is invoked
 * immediately, since there is nothing left to wait for.
 */
void
efc_register_domain_free_cb(struct efc *efc,
			    void (*callback)(struct efc *efc, void *arg),
			    void *arg)
{
	efc->domain_free_cb = callback;
	efc->domain_free_cb_arg = arg;

	/* Nothing allocated: report "freed" right away. */
	if (!efc->domain && callback)
		callback(efc, arg);
}
/*
 * NOTE(review): orphaned fragment -- the tail of a common shutdown event
 * handler (a switch on 'evt'); its function header, which supplies
 * 'evt', 'funcname' and 'domain', is not visible in this chunk.
 */
switch (evt) { case EFC_EVT_ENTER: case EFC_EVT_REENTER: case EFC_EVT_EXIT: case EFC_EVT_ALL_CHILD_NODES_FREE: /* * this can arise if an FLOGI fails on the NPORT, * and the NPORT is shutdown
 */ break; default:
efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
funcname, efc_sm_event_name(evt));
}
}
/*
 * NOTE(review): orphaned fragment -- the tail of a common event handler
 * that latches DOMAIN_FOUND/DOMAIN_LOST into domain->domain_found_pending;
 * the function header supplying 'evt', 'arg', 'funcname' and 'domain' is
 * not visible in this chunk.
 */
switch (evt) { case EFC_EVT_ENTER: case EFC_EVT_REENTER: case EFC_EVT_EXIT: break; case EFC_EVT_DOMAIN_FOUND: /* save drec, mark domain_found_pending */
memcpy(&domain->pending_drec, arg, sizeof(domain->pending_drec));
domain->domain_found_pending = true; break; case EFC_EVT_DOMAIN_LOST: /* unmark domain_found_pending */
domain->domain_found_pending = false; break;
default:
efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
funcname, efc_sm_event_name(evt));
}
}
/*
 * NOTE(review): orphaned fragment -- part of the DOMAIN_FOUND handling in
 * the domain-init state. 'drec', 'nport', 'ctx', 'sp' and 'bewwpn' are
 * declared in enclosing scope that is not visible in this chunk, and the
 * surrounding switch statement has been cut away.
 */
/* allocate struct efc_nport object for local port * Note: drec->fc_id is ALPA from read_topology only if loop
*/ if (efc_cmd_nport_alloc(efc, nport, NULL, (uint8_t *)&bewwpn)) {
efc_log_err(efc, "Can't allocate port\n");
efc_nport_free(nport); break;
}
domain->is_loop = drec->is_loop;
/* * If the loop position map includes ALPA == 0, * then we are in a public loop (NL_PORT) * Note that the first element of the loopmap[] * contains the count of elements, and if * ALPA == 0 is present, it will occupy the first * location after the count.
*/
domain->is_nlport = drec->map.loop[1] == 0x00;
if (!domain->is_loop) { /* Initiate HW domain alloc */ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
efc_log_err(efc, "Failed to initiate HW domain allocation\n"); break;
}
efc_sm_transition(ctx, __efc_domain_wait_alloc, arg); break;
}
/* Save the domain service parameters */
memcpy(domain->service_params + 4, domain->dma.virt, sizeof(struct fc_els_flogi) - 4);
memcpy(nport->service_params + 4, domain->dma.virt, sizeof(struct fc_els_flogi) - 4);
/* * Update the nport's service parameters, * user might have specified non-default names
*/
sp->fl_wwpn = cpu_to_be64(nport->wwpn);
sp->fl_wwnn = cpu_to_be64(nport->wwnn);
/* * Take the loop topology path, * unless we are an NL_PORT (public loop)
*/ if (domain->is_loop && !domain->is_nlport) { /* * For loop, we already have our FC ID * and don't need fabric login. * Transition to the allocated state and * post an event to attach to * the domain. Note that this breaks the * normal action/transition * pattern here to avoid a race with the * domain attach callback.
*/ /* sm: is_loop / domain_attach */
efc_sm_transition(ctx, __efc_domain_allocated, NULL);
__efc_domain_attach_internal(domain, nport->fc_id); break;
}
/*
 * NOTE(review): orphaned fragment -- case arms from a state that waits for
 * HW domain allocation to complete. The duplicate
 * 'case EFC_EVT_DOMAIN_FOUND' labels (here and a few lines below) show
 * that case arms from two DIFFERENT switch statements were spliced
 * together by the paste; as written this cannot compile.
 */
{ struct efc_node *node;
case EFC_EVT_DOMAIN_ALLOC_FAIL:
efc_log_err(efc, "%s recv'd waiting for DOMAIN_ALLOC_OK;",
efc_sm_event_name(evt));
efc_log_err(efc, "shutting down domain\n");
domain->req_domain_free = true; break;
case EFC_EVT_DOMAIN_FOUND: /* Should not happen */ break;
case EFC_EVT_DOMAIN_LOST:
efc_log_debug(efc, "%s received while waiting for hw_domain_alloc()\n",
efc_sm_event_name(evt));
efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL); break;
case EFC_EVT_DOMAIN_FOUND: /* Should not happen */
efc_log_err(efc, "%s: evt: %d should not happen\n",
__func__, evt); break;
/*
 * NOTE(review): orphaned fragment -- case arms from the domain
 * allocated/attach-wait states. 'nport_next' is declared below but
 * 'next_nport' is used in the list_for_each_entry_safe() iteration, and
 * 'index'/'node' for xa_for_each() have no visible declaration -- all
 * symptoms of case arms from different functions being spliced together.
 */
case EFC_EVT_DOMAIN_LOST: {
efc_log_debug(efc, "%s received while in EFC_EVT_DOMAIN_REQ_ATTACH\n",
efc_sm_event_name(evt)); if (!list_empty(&domain->nport_list)) { /* * if there are nports, transition to * wait state and send shutdown to each * nport
*/ struct efc_nport *nport = NULL, *nport_next = NULL;
/* * Set domain notify pending state to avoid * duplicate domain event post
*/
domain->domain_notify_pend = true;
/* Mark as attached */
domain->attached = true;
/* Transition to ready */ /* sm: / forward event to all nports and nodes */
efc_sm_transition(ctx, __efc_domain_ready, NULL);
/* We have an FCFI, so we can accept frames */
domain->req_accept_frames = true;
/* * Notify all nodes that the domain attach request * has completed * Note: nport will have already received notification * of nport attached as a result of the HW's port attach.
*/
list_for_each_entry_safe(nport, next_nport,
&domain->nport_list, list_entry) {
xa_for_each(&nport->lookup, index, node) {
efc_node_post_event(node,
EFC_EVT_DOMAIN_ATTACH_OK,
NULL);
}
}
domain->domain_notify_pend = false; break;
}
case EFC_EVT_DOMAIN_ATTACH_FAIL:
efc_log_debug(efc, "%s received while waiting for hw attach\n",
efc_sm_event_name(evt)); break;
case EFC_EVT_DOMAIN_FOUND: /* Should not happen */
efc_log_err(efc, "%s: evt: %d should not happen\n",
__func__, evt); break;
case EFC_EVT_DOMAIN_LOST: /* * Domain lost while waiting for an attach to complete, * go to a state that waits for the domain attach to * complete, then handle domain lost
*/
efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL); break;
case EFC_EVT_DOMAIN_REQ_ATTACH: /* * In P2P we can get an attach request from * the other FLOGI path, so drop this one
*/ break;
/*
 * NOTE(review): orphaned fragments -- the ready-state ENTER/DOMAIN_LOST
 * handling followed by a DOMAIN_FREE_OK handler from a different state.
 * In the free-OK path, the statement that reallocates the domain
 * (presumably 'domain = efc_domain_alloc(efc, fcf_wwn);') appears to have
 * been dropped by the paste: 'fcf_wwn'/'drec' are computed, then
 * 'if (!domain)' tests a result that is never assigned -- TODO confirm
 * against the upstream source.
 */
switch (evt) { case EFC_EVT_ENTER: { /* start any pending vports */ if (efc_vport_start(domain)) {
efc_log_debug(domain->efc, "efc_vport_start didn't start vports\n");
} break;
} case EFC_EVT_DOMAIN_LOST: { if (!list_empty(&domain->nport_list)) { /* * if there are nports, transition to wait state * and send shutdown to each nport
*/ struct efc_nport *nport = NULL, *nport_next = NULL;
switch (evt) { case EFC_EVT_DOMAIN_FREE_OK: /* sm: / domain_free */ if (domain->domain_found_pending) { /* * save fcf_wwn and drec from this domain, * free current domain and allocate * a new one with the same fcf_wwn * could use a SLI-4 "re-register VPI" * operation here?
*/
u64 fcf_wwn = domain->fcf_wwn; struct efc_domain_record drec = domain->pending_drec;
if (!domain) {
efc_log_err(efc, "efc_domain_alloc() failed\n"); return;
} /* * got a new domain; at this point, * there are at least two domains * once the req_domain_free flag is processed, * the associated domain will be removed.
*/
efc_sm_transition(&domain->drvsm, __efc_domain_init,
NULL);
efc_sm_post_event(&domain->drvsm,
EFC_EVT_DOMAIN_FOUND, &drec);
} else {
domain->req_domain_free = true;
} break; default:
__efc_domain_common_shutdown(__func__, ctx, evt, arg);
}
}
/*
 * NOTE(review): orphaned fragments -- the head of the wait-domain-lost
 * state handler, spliced onto a pending-frame processing loop from a
 * different function ('seq' has no visible declaration here).
 */
/* * Wait for the domain alloc/attach completion * after receiving a domain lost.
*/ switch (evt) { case EFC_EVT_DOMAIN_ALLOC_OK: case EFC_EVT_DOMAIN_ATTACH_OK: { if (!list_empty(&domain->nport_list)) { /* * if there are nports, transition to * wait state and send shutdown to each nport
*/ struct efc_nport *nport = NULL, *nport_next = NULL;
for (;;) { /* need to check for hold frames condition after each frame * processed because any given frame could cause a transition * to a state that holds frames
*/ if (efc->hold_frames) break;
/* Get next frame/sequence */
spin_lock_irqsave(&efc->pend_frames_lock, flags);
/* * If we are holding frames or the domain is not yet registered or * there's already frames on the pending list, * then add the new frame to pending list
*/ if (!domain || efc->hold_frames || !list_empty(&efc->pend_frames)) { unsignedlong flags = 0;
/* NOTE(review): "unsignedlong" above is a paste artifact; it must read
 * "unsigned long" for this to compile (and it shadows the outer 'flags').
 */
if (domain) { /* immediately process pending frames */
efct_domain_process_pending(domain);
}
} else { /* * We are not holding frames and pending list is empty, * just process frame. A non-zero return means the frame * was not handled - so cleanup
*/ if (efc_domain_dispatch_frame(domain, seq))
efc->tt.hw_seq_free(efc, seq);
}
}
/*
 * NOTE(review): orphaned fragment -- the body of a frame-dispatch routine
 * (validate the sequence, extract s_id/d_id from the FC header, look up
 * the nport then the node). 'seq', 'hdr', 's_id', 'd_id', 'nport',
 * 'node', 'rc' and the 'out'/'out_release' labels it jumps to are all
 * declared outside the visible chunk.
 */
if (!seq->header || !seq->header->dma.virt || !seq->payload->dma.virt) {
efc_log_err(efc, "Sequence header or payload is null\n"); return rc;
}
hdr = seq->header->dma.virt;
/* extract the s_id and d_id */
s_id = ntoh24(hdr->fh_s_id);
d_id = ntoh24(hdr->fh_d_id);
spin_lock_irqsave(&efc->lock, flags);
nport = efc_nport_find(domain, d_id); if (!nport) { if (hdr->fh_type == FC_TYPE_FCP) { /* Drop frame */
efc_log_warn(efc, "FCP frame with invalid d_id x%x\n",
d_id); goto out;
}
/* p2p will use this case */
nport = domain->nport; if (!nport || !kref_get_unless_zero(&nport->ref)) {
efc_log_err(efc, "Physical nport is NULL\n"); goto out;
}
}
/* Lookup the node given the remote s_id */
node = efc_node_find(nport, s_id);
/* If not found, then create a new node */ if (!node) { /* * If this is solicited data or control based on R_CTL and * there is no node context, then we can drop the frame
*/ if ((hdr->fh_r_ctl == FC_RCTL_DD_SOL_DATA) ||
(hdr->fh_r_ctl == FC_RCTL_DD_SOL_CTL)) {
efc_log_debug(efc, "sol data/ctrl frame without node\n"); goto out_release;
}
/*
 * NOTE(review): the following German disclaimer is web-scrape residue and
 * not part of the driver source; it is fenced inside this comment so it
 * cannot be parsed as code.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch
 * Richtigkeit noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */