/* lpfc_nportdisc.c - remote NPort discovery state machine for the Emulex LPFC FC driver */
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. *
*******************************************************************/
/* Called to clear RSCN discovery flags when driver is unloading. */ staticbool
lpfc_check_unload_and_clr_rscn(unsignedlong *fc_flag)
{ /* If unloading, then clear the FC_RSCN_DEFERRED flag */ if (test_bit(FC_UNLOADING, fc_flag)) {
clear_bit(FC_RSCN_DEFERRED, fc_flag); returnfalse;
} return test_bit(FC_RSCN_DEFERRED, fc_flag);
}
/*
 * lpfc_check_adisc - Verify a received ADISC was intended for this node.
 * @vport: pointer to the virtual N_Port.
 * @ndlp:  node list entry the ADISC resolved to.
 * @nn:    node name (WWNN) from the ADISC payload.
 * @pn:    port name (WWPN) from the ADISC payload.
 *
 * Returns 1 when both the WWNN and WWPN match our internal node table
 * entry, 0 otherwise.
 *
 * NOTE(review): the extraction truncated this function after the second
 * memcmp; the trailing "return 1;" and closing brace are restored here.
 */
static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct lpfc_name *nn, struct lpfc_name *pn)
{
	/* Compare the ADISC rsp WWNN / WWPN matches our internal node
	 * table entry for that node.
	 */
	if (memcmp(nn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)))
		return 0;
	if (memcmp(pn, &ndlp->nlp_portname, sizeof(struct lpfc_name)))
		return 0;

	return 1;
}
/*
 * NOTE(review): orphaned fragment.  This is the interior of a service-
 * parameter validation routine; the enclosing function header (which
 * declares sp, hsp, flogi, class, hsp_value, ssp_value) and the
 * trailing bad_service_param label were lost in extraction.  Code is
 * left byte-identical; only comments are added.
 */
/* * The receive data field size and buffer-to-buffer receive data field * size entries are 16 bits but are represented as two 8-bit fields in * the driver data structure to account for rsvd bits and other control * bits. Reconstruct and compare the fields as a 16-bit values before * correcting the byte values.
*/ if (sp->cls1.classValid) { if (!flogi) {
/* Rebuild the 16-bit class-1 receive data field sizes for compare. */
hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
hsp->cls1.rcvDataSizeLsb);
ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
sp->cls1.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) {
/* Clamp the remote's value down to our local limit. */
sp->cls1.rcvDataSizeLsb =
hsp->cls1.rcvDataSizeLsb;
sp->cls1.rcvDataSizeMsb =
hsp->cls1.rcvDataSizeMsb;
}
}
/* NOTE(review): "elseif" is extraction token-fusion; original reads "} else if". */
} elseif (class == CLASS1) goto bad_service_param; if (sp->cls2.classValid) { if (!flogi) {
/* Same 16-bit reconstruct/compare/clamp for class 2. */
hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
hsp->cls2.rcvDataSizeLsb);
ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
sp->cls2.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) {
sp->cls2.rcvDataSizeLsb =
hsp->cls2.rcvDataSizeLsb;
sp->cls2.rcvDataSizeMsb =
hsp->cls2.rcvDataSizeMsb;
}
}
} elseif (class == CLASS2) goto bad_service_param; if (sp->cls3.classValid) { if (!flogi) {
/* Same 16-bit reconstruct/compare/clamp for class 3. */
hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
hsp->cls3.rcvDataSizeLsb);
ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
sp->cls3.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) {
sp->cls3.rcvDataSizeLsb =
hsp->cls3.rcvDataSizeLsb;
sp->cls3.rcvDataSizeMsb =
hsp->cls3.rcvDataSizeMsb;
}
}
} elseif (class == CLASS3) goto bad_service_param;
/* * Preserve the upper four bits of the MSB from the PLOGI response. * These bits contain the Buffer-to-Buffer State Change Number * from the target and need to be passed to the FW.
*/
hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb; if (ssp_value > hsp_value) {
sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
/* Keep the remote's BB-SC-N nibble (upper 4 bits of the MSB). */
sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
(hsp->cmn.bbRcvSizeMsb & 0x0F);
}
/*
 * NOTE(review): orphaned fragment -- tail of an ELS-completion helper
 * (appears to be lpfc_check_elscmpl_iocb).  It returns a pointer just
 * past the ELS command word of the first response buffer, or NULL while
 * forcing an error status.  pcmd, prsp, lp, ptr, ulp_status, phba and
 * rspiocb are declared in the missing function header; code left
 * byte-identical.
 */
/* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay * freeing associated memory till after ABTS completes.
*/ if (pcmd) {
/* First entry on pcmd->list is the response dmabuf. */
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
list); if (prsp) {
lp = (uint32_t *) prsp->virt;
/* Skip the leading ELS command word; point at the payload. */
ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
}
} else { /* Force ulp_status error since we are returning NULL ptr */ if (!(ulp_status)) { if (phba->sli_rev == LPFC_SLI_REV4) {
bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl,
IOSTAT_LOCAL_REJECT);
rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
} else {
rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
}
}
ptr = NULL;
} return ptr;
}
/* * Free resources / clean up outstanding I/Os * associated with a LPFC_NODELIST entry. This * routine effectively results in a "software abort".
*/ void
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(abort_list);
LIST_HEAD(drv_cmpl_list); struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; int retval = 0;
pring = lpfc_phba_elsring(phba);
/* In case of error recovery path, we might have a NULL pring here */ if (unlikely(!pring)) return;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, "2819 Abort outstanding I/O on NPort x%x " "Data: x%lx x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi); /* Clean up all fabric IOs first.*/
lpfc_fabric_abort_nport(ndlp);
/* * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list * of all ELS IOs that need an ABTS. The IOs need to stay on the * txcmplq so that the abort operation completes them successfully.
*/
spin_lock_irq(&phba->hbalock); if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { /* Add to abort_list on on NDLP match. */ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
list_add_tail(&iocb->dlist, &abort_list);
} if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
/* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&iocb->dlist);
retval = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
INIT_LIST_HEAD(&abort_list);
/* Now process the txq */
spin_lock_irq(&phba->hbalock); if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { /* Check to see if iocb matches the nport we are looking for */ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
list_del_init(&iocb->list);
list_add_tail(&iocb->list, &abort_list);
}
}
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
/*
 * NOTE(review): truncated function.  The statements that initialize
 * save_iocb, ndlp and rc (normally pulled from the mailbox context
 * fields) and the function's closing brace were lost in extraction;
 * "staticvoid" is extraction token-fusion for "static void".  Code is
 * left byte-identical; only comments are added.
 */
/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes * @phba: pointer to lpfc hba data structure. * @login_mbox: pointer to REG_RPI mailbox object * * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
*/ staticvoid
lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
{ struct lpfc_iocbq *save_iocb; struct lpfc_nodelist *ndlp;
MAILBOX_t *mb = &login_mbox->u.mb;
if (mb->mbxStatus == MBX_SUCCESS) { /* Now that REG_RPI completed successfully, * we can now proceed with sending the PLOGI ACC.
*/ if (test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) {
/* Pt2Pt: hand login_mbox to the ACC so reg_rpi completes later. */
rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
save_iocb, ndlp, login_mbox);
} else {
rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
save_iocb, ndlp, NULL);
}
/* If this is a fabric topology, complete the reg_rpi and prli now. * For Pt2Pt, the reg_rpi and PRLI are deferred until after the LS_ACC * completes. This ensures, in Pt2Pt, that the PLOGI LS_ACC is sent * before the PRLI.
*/ if (!test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) { /* Now process the REG_RPI cmpl */
lpfc_mbx_cmpl_reg_login(phba, login_mbox);
clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
}
kfree(save_iocb);
}
/*
 * NOTE(review): orphaned fragment -- interior of the PLOGI-receive
 * handler (appears to be lpfc_rcv_plogi).  Locals such as sp, cmdiocb,
 * icmd, nlp_portwwn, ed_tov, save_iocb and login_mbox, plus the
 * surrounding braces, are in portions lost by extraction.  Code is
 * left byte-identical; only comments are added.
 */
/* Record which classes of service the remote port advertised. */
ndlp->nlp_class_sup = 0; if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1; if (sp->cls2.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS2; if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3; if (sp->cls4.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS4;
/* Max frame: low nibble of BB rcv size MSB joined with the LSB. */
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) break;
fallthrough; case NLP_STE_REG_LOGIN_ISSUE: case NLP_STE_PRLI_ISSUE: case NLP_STE_UNMAPPED_NODE: case NLP_STE_MAPPED_NODE: /* For initiators, lpfc_plogi_confirm_nport skips fabric did. * For target mode, execute implicit logo. * Fabric nodes go into NPR.
*/ if (!(ndlp->nlp_type & NLP_FABRIC) &&
!(phba->nvmet_support)) { /* Clear ndlp info, since follow up PRLI may have * updated ndlp information
*/
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
ndlp, NULL); return 1;
} if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
/* NOTE(review): "(unsignedlonglong)" below is fused "(unsigned long long)". */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0143 PLOGI recv'd from DID: x%x " "WWPN changed: old %llx new %llx\n",
ndlp->nlp_DID,
(unsignedlonglong)nlp_portwwn,
(unsignedlonglong)
wwn_to_u64(sp->portName.u.wwn));
/* Notify transport of connectivity loss to trigger cleanup. */ if (phba->nvmet_support &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
lpfc_nvmet_invalidate_host(phba, ndlp);
ndlp->nlp_prev_state = ndlp->nlp_state; /* rport needs to be unregistered first */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); break;
}
/* Check for Nport to NPort pt2pt protocol */ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
!test_bit(FC_PT2PT_PLOGI, &vport->fc_flag)) { /* rcv'ed PLOGI decides what our NPortId will be */ if (phba->sli_rev == LPFC_SLI_REV4) {
vport->fc_myDID = bf_get(els_rsp64_sid,
&cmdiocb->wqe.xmit_els_rsp);
} else {
vport->fc_myDID = icmd->un.rcvels.parmRo;
}
/* If there is an outstanding FLOGI, abort it now. * The remote NPort is not going to ACC our FLOGI * if its already issuing a PLOGI for pt2pt mode. * This indicates our FLOGI was dropped; however, we * must have ACCed the remote NPorts FLOGI to us * to make it here.
*/ if (test_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag))
lpfc_els_abort_flogi(phba);
ed_tov = be32_to_cpu(sp->cmn.e_d_tov); if (sp->cmn.edtovResolution) { /* E_D_TOV ticks are in nanoseconds */
ed_tov = (phba->fc_edtov + 999999) / 1000000;
}
/* * For pt-to-pt, use the larger EDTOV * RATOV = 2 * EDTOV
*/ if (ed_tov > phba->fc_edtov)
phba->fc_edtov = ed_tov;
phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
/* * If there is an outstanding PLOGI issued, abort it before * sending ACC rsp for received PLOGI. If pending plogi * is not canceled here, the plogi will be rejected by * remote port and will be retried. On a configuration with * single discovery thread, this will cause a huge delay in * discovery. Also this will cause multiple state machines * running in parallel for this node. * This only applies to a fabric environment.
*/ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
test_bit(FC_FABRIC, &vport->fc_flag)) { /* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
if ((vport->port_type == LPFC_NPIV_PORT &&
vport->cfg_restrict_login)) {
/* no deferred ACC */
kfree(save_iocb);
/* This is an NPIV SLI4 instance that does not need to register * a default RPI.
*/ if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_mbox_rsrc_cleanup(phba, login_mbox,
MBOX_THD_UNLOCKED);
login_mbox = NULL;
} else { /* In order to preserve RPIs, we want to cleanup * the default RPI the firmware created to rcv * this ELS request. The only way to do this is * to register, then unregister the RPI.
*/
set_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag);
set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
}
/*
 * NOTE(review): extraction splice.  The kernel-doc and signature below
 * belong to lpfc_mbx_cmpl_resume_rpi, but the body that follows is the
 * resume-RPI / ACC logic from the ADISC/PDISC receive path (it uses
 * cmdiocb and rc, which this signature never declares, and jumps to an
 * "out:" label).  "staticvoid" is fused "static void".  Code is left
 * byte-identical; only comments are added.
 */
/** * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object * * This routine is invoked to issue a completion to a rcv'ed * ADISC or PDISC after the paused RPI has been resumed.
**/ staticvoid
lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{ struct lpfc_vport *vport; struct lpfc_iocbq *elsiocb; struct lpfc_nodelist *ndlp;
uint32_t cmd;
/* * As soon as we send ACC, the remote NPort can * start sending us data. Thus, for SLI4 we must * resume the RPI before the ACC goes out.
*/ if (vport->phba->sli_rev == LPFC_SLI_REV4) { /* Don't resume an unregistered RPI - unnecessary * mailbox. Just send the ACC when the RPI is not * registered.
*/ if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
elsiocb = kmalloc(sizeof(*elsiocb), GFP_KERNEL); if (elsiocb) { /* Save info from cmd IOCB used in * rsp
*/
memcpy(elsiocb, cmdiocb, sizeof(*elsiocb));
elsiocb->drvrTimeout = cmd;
/* ACC is sent from the resume-RPI completion instead. */
rc = lpfc_sli4_resume_rpi(ndlp,
lpfc_mbx_cmpl_resume_rpi,
elsiocb); if (rc)
kfree(elsiocb);
goto out;
}
}
}
/* No resume needed (or it failed): send the ACC directly. */
if (cmd == ELS_CMD_ADISC) {
lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
} else {
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
ndlp, NULL);
}
out: /* If we are authenticated, move to the proper state. * It is possible an ADISC arrived and the remote nport * is already in MAPPED or UNMAPPED state. Catch this * condition and don't set the nlp_state again because * it causes an unnecessary transport unregister/register. * * Nodes marked for ADISC will move MAPPED or UNMAPPED state * after issuing ADISC
*/ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag))
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_MAPPED_NODE);
}
/*
 * NOTE(review): orphaned fragment -- interior of the LOGO-receive
 * handler (appears to be lpfc_rcv_logo).  els_cmd, vports, i and
 * active_vlink_present are declared in the missing function header,
 * and this fragment's "out:" label duplicates one in the preceding
 * spliced fragment.  Code is left byte-identical; only comments added.
 */
/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device.
*/
set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); if (els_cmd == ELS_CMD_PRLO)
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
/* This clause allows the initiator to ACC the LOGO back to the * Fabric Domain Controller. It does deliberately skip all other * steps because some fabrics send RDP requests after logging out * from the initiator.
*/ if (ndlp->nlp_type & NLP_FABRIC &&
((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) return 0;
/* Notify transport of connectivity loss to trigger cleanup. */ if (phba->nvmet_support &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
lpfc_nvmet_invalidate_host(phba, ndlp);
/* Fabric LOGO: take the port link down and decide on re-instantiation. */
if (ndlp->nlp_DID == Fabric_DID) { if (vport->port_state <= LPFC_FDISC ||
test_bit(FC_PT2PT, &vport->fc_flag)) goto out;
lpfc_linkdown_port(vport);
set_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
/* Scan sibling vports for one that still has an active VLink. */
vports = lpfc_create_vport_work_array(phba); if (vports) { for (i = 0; i <= phba->max_vports && vports[i] != NULL;
i++) { if (!test_bit(FC_VPORT_LOGO_RCVD,
&vports[i]->fc_flag) &&
vports[i]->port_state > LPFC_FDISC) {
active_vlink_present = 1; break;
}
}
lpfc_destroy_vport_work_array(phba, vports);
}
/* * Don't re-instantiate if vport is marked for deletion. * If we are here first then vport_delete is going to wait * for discovery to complete.
*/ if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
active_vlink_present) { /* * If there are other active VLinks present, * re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
clear_bit(FC_LOGO_RCVD_DID_CHNG, &phba->pport->fc_flag);
lpfc_retry_pport_discovery(phba);
}
} else {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3203 LOGO recover nport x%06x state x%x " "ntype x%x fc_flag x%lx\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_type, vport->fc_flag);
/* Special cases for rports that recover post LOGO. */ if ((!(ndlp->nlp_type == NLP_FABRIC) &&
(ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) ||
test_bit(FC_PT2PT, &vport->fc_flag))) ||
(ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
/* Schedule a delayed PLOGI to recover the session. */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3204 Start nlpdelay on DID x%06x " "nflag x%lx lastels x%x ref cnt %u",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_last_elscmd,
kref_read(&ndlp->kref));
}
}
out: /* Unregister from backend, could have been skipped due to ADISC */
lpfc_nlp_unreg_node(vport, ndlp);
clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); /* The driver has to wait until the ACC completes before it continues * processing the LOGO. The action will resume in * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an * unreg_login, the driver waits so the ACC does not get aborted.
*/ return 0;
}
/*
 * NOTE(review): orphaned fragment -- PRLI-completion / rport-role
 * update code.  npr, rport and roles are declared in portions lost by
 * extraction; the stray "}" before "if (rport)" closed a scope that is
 * not visible here.  Code is left byte-identical; only comments added.
 */
/* If this driver is in nvme target mode, set the ndlp's fc4 * type to NVME provided the PRLI response claims NVME FC4 * type. Target mode does not issue gft_id so doesn't get * the fc4 type set until now.
*/ if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
/* Fabric Controllers send FCP PRLI as an initiator but should * not get recognized as FCP type and registered with transport.
*/ if (npr->prliType == PRLI_FCP_TYPE &&
!(ndlp->nlp_type & NLP_FABRIC))
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
} if (rport) { /* We need to update the rport role values */
roles = FC_RPORT_ROLE_UNKNOWN; if (ndlp->nlp_type & NLP_FCP_INITIATOR)
roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (ndlp->nlp_type & NLP_FCP_TARGET)
roles |= FC_RPORT_ROLE_FCP_TARGET;
/*
 * NOTE(review): truncated function.  Only the early-return guard of
 * lpfc_release_rpi survives; the mailbox allocation and unreg_login
 * issue that use pmb/rc were lost in extraction.  "staticvoid" is
 * fused "static void".  Code is left byte-identical; comments added.
 */
/** * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd. * @phba : Pointer to lpfc_hba structure. * @vport: Pointer to lpfc_vport structure. * @ndlp: Pointer to lpfc_nodelist structure. * @rpi : rpi to be release. * * This function will send a unreg_login mailbox command to the firmware * to release a rpi.
**/ staticvoid
lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint16_t rpi)
{
LPFC_MBOXQ_t *pmb; int rc;
/* If there is already an UNREG in progress for this ndlp, * no need to queue up another one.
*/ if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1435 release_rpi SKIP UNREG x%x on " "NPort x%x deferred x%x flg x%lx " "Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp); return;
}
/*
 * lpfc_cmpl_plogi_illegal - Handle an illegal PLOGI completion transition.
 * @vport: pointer to the virtual N_Port.
 * @ndlp:  node the event was delivered for.
 * @arg:   state-machine event argument (unused here).
 * @evt:   the discovery state-machine event code.
 *
 * Logs the illegal transition unless a PLOGI was previously received on
 * this node (in which case another discovery thread owns the NPortID and
 * this one silently stands down).  Returns the node's current state
 * unchanged.
 */
static uint32_t
lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	/* This transition is only legal if we previously
	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
	 * working on the same NPortID, do nothing for this thread
	 * to stop it.
	 */
	if (!test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0272 Illegal State Transition: node x%x "
				 "event x%x, state x%x Data: x%x x%lx\n",
				 ndlp->nlp_DID, evt, ndlp->nlp_state,
				 ndlp->nlp_rpi, ndlp->nlp_flag);
	return ndlp->nlp_state;
}
/*
 * NOTE(review): orphaned fragment -- PLOGI collision resolution from
 * the PLOGI_ISSUE state handler, followed by a spliced LOGO-receive
 * snippet (the RPI retrieval / lpfc_els_abort at the end).  port_cmp,
 * sp, stat and cmdiocb are declared in the missing function header.
 * Code is left byte-identical; only comments are added.
 */
/* For a PLOGI, we only accept if our portname is less * than the remote portname.
*/
phba->fc_stat.elsLogiCol++;
port_cmp = memcmp(&vport->fc_portname, &sp->portName, sizeof(struct lpfc_name));
if (port_cmp >= 0) { /* Reject this request because the remote node will accept
ours */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
NULL);
} else { if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) &&
vport->num_disc_nodes) {
clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) {
/* Discovery round complete: stop timer and finish RSCN. */
clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
}
} /* If our portname was less */
/* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */ if (vport->phba->sli_rev == LPFC_SLI_REV3)
ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag; /* software abort outstanding PLOGI */
lpfc_els_abort(vport->phba, ndlp);
/*
 * NOTE(review): orphaned fragment -- interior of the REG_LOGIN
 * completion handler in the REG_LOGIN_ISSUE state.  mb, did, vport,
 * ndlp and phba are declared in the missing function header; "elseif"
 * is extraction token-fusion for "} else if".  Code is left
 * byte-identical; only comments are added.
 */
if (mb->mbxStatus) { /* RegLogin failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0246 RegLogin failed Data: x%x x%x x%x x%x " "x%x\n",
did, mb->mbxStatus, vport->port_state,
mb->un.varRegLogin.vpi,
mb->un.varRegLogin.rpi); /* * If RegLogin failed due to lack of HBA resources do not * retry discovery.
*/ if (mb->mbxStatus == MBXERR_RPI_FULL) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state;
}
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
/* SLI4 ports have preallocated logical rpis. */ if (phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
/* Only if we are not a fabric nport do we issue PRLI */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "3066 RegLogin Complete on x%x x%x x%x\n",
did, ndlp->nlp_type, ndlp->nlp_fc4_type); if (!(ndlp->nlp_type & NLP_FABRIC) &&
(phba->nvmet_support == 0)) { /* The driver supports FCP and NVME concurrently. If the * ndlp's nlp_fc4_type is still zero, the driver doesn't * know what PRLI to send yet. Figure that out now and * call PRLI depending on the outcome.
*/ if (test_bit(FC_PT2PT, &vport->fc_flag)) { /* If we are pt2pt, there is no Fabric to determine * the FC4 type of the remote nport. So if NVME * is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP; if ((!test_bit(FC_PT2PT_NO_NVME, &vport->fc_flag)) &&
(vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME; /* We need to update the localport also */
lpfc_nvme_update_localport(vport);
}
} elseif (ndlp->nlp_fc4_type == 0) { /* If we are only configured for FCP, the driver * should just issue PRLI for FCP. Otherwise issue * GFT_ID to determine if remote port supports NVME.
*/ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
/* Query the name server for the remote port's FC4 type. */
lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0,
ndlp->nlp_DID); return ndlp->nlp_state;
}
ndlp->nlp_fc4_type = NLP_FC4_FCP;
}
/* If we are a target we won't immediately transition into PRLI, * so if REG_LOGIN already completed we don't need to ignore it.
*/ if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) ||
!vport->phba->nvmet_support)
set_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
/*
 * NOTE(review): extraction splice.  The comment and signature below
 * belong to lpfc_rcv_prlo_prli_issue, but after the first declaration
 * the body jumps into PRLI-completion handling from a different routine
 * (it uses npr, nvpr, temp_ptr, rspiocb, phba and ulp_status, none of
 * which this signature declares, and the fragment is cut off
 * mid-comment at the end).  Code is left byte-identical; comments added.
 */
/* This routine is envoked when we rcv a PRLO request from a nport * we are logged into. We should send back a PRLO rsp setting the * appropriate bits. * NEXT STATE = PRLI_ISSUE
*/ static uint32_t
lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
{ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp * format is different so NULL the two PRLI types so that the * driver correctly gets the correct context.
*/
npr = NULL;
nvpr = NULL;
temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ)
npr = (PRLI *) temp_ptr; elseif (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ)
nvpr = (struct lpfc_nvme_prli *) temp_ptr;
if (ulp_status) { if ((vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) { goto out;
}
/* Adjust the nlp_type accordingly if the PRLI failed */ if (npr)
ndlp->nlp_fc4_type &= ~NLP_FC4_FCP; if (nvpr)
ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
/* We can't set the DSM state till BOTH PRLIs complete */ goto out_err;
}
/* Target driver cannot solicit NVME FB. */ if (bf_get_be32(prli_tgt, nvpr)) { /* Complete the nvme target roles. The transport * needs to know if the rport is capable of
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Diese beiden folgenden Angebotsgruppen bietet das Unternehmen0.68Angebot
Wie Sie bei der Firma Beratungs- und Dienstleistungen beauftragen können
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.