/* lpfc_hbadisc.c - Fibre Channel host bus adapter discovery handling */
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. *
*******************************************************************/
/*
 * lpfc_valid_xpt_node - Decide whether an ndlp is eligible for
 * transport (xpt) registration.
 *
 * Returns 1 when the node has any FC4 type recorded or is a fabric
 * node, 0 otherwise.
 */
static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
	return (ndlp->nlp_fc4_type || (ndlp->nlp_type & NLP_FABRIC)) ? 1 : 0;
}

/* The source of a terminate rport I/O is either a dev_loss_tmo
 * event or a call to fc_remove_host. While the rport should be
 * valid during these downcalls, the transport can call twice
 * in a single event. This routine provides some protection
 * as the NDLP isn't really free, just released to the pool.
*/ staticint
lpfc_rport_invalid(struct fc_rport *rport)
{ struct lpfc_rport_data *rdata; struct lpfc_nodelist *ndlp;
/* NOTE(review): this body appears to interleave code from two routines
 * (lpfc_rport_invalid and a dev_loss/terminate callback). Several
 * identifiers (vport, phba, drop_initial_node_ref, evtp, iflags) are
 * used without visible declarations, and rdata is declared but never
 * used - confirm against the upstream lpfc_hbadisc.c before changing.
 */
/* Don't schedule a worker thread event if the vport is going down. */ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
(phba->sli_rev == LPFC_SLI_REV4 &&
!test_bit(HBA_SETUP, &phba->hba_flag))) {
/* Only 1 thread can drop the initial node reference. * If not registered for NVME and NLP_DROPPED flag is * clear, remove the initial reference.
*/ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
drop_initial_node_ref = true;
/* The scsi_transport is done with the rport so lpfc cannot * call to unregister.
*/ if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node, * unregister calls were made to the scsi and nvme * transports and refcnt was already decremented. Clear * the NLP_XPT_REGD flag only if the NVME nrport is * confirmed unregistered.
*/ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
spin_unlock_irqrestore(&ndlp->lock, iflags);
if (drop_initial_node_ref)
lpfc_nlp_put(ndlp); return;
}
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) return;
/* Ignore callback for a mismatched (stale) rport */ if (ndlp->rport != rport) {
lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE, "6788 fc rport mismatch: d_id x%06x ndlp x%px " "fc rport x%px node rport x%px state x%x " "refcnt %u\n",
ndlp->nlp_DID, ndlp, rport, ndlp->rport,
ndlp->nlp_state, kref_read(&ndlp->kref)); return;
}
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6789 rport name %llx != node port name %llx",
rport->port_name,
wwn_to_u64(ndlp->nlp_portname.u.wwn));
/* Refuse to queue another dev_loss event while one is pending. */
evtp = &ndlp->dev_loss_evt;
if (!list_empty(&evtp->evt_listp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6790 rport name %llx dev_loss_evt pending\n",
rport->port_name); return;
}
/* Mark the node as in dev-loss processing before detaching the rport. */
set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
spin_lock_irqsave(&ndlp->lock, iflags); /* If there is a PLOGI in progress, and we are in a * NLP_NPR_2B_DISC state, don't turn off the flag.
*/ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
/* * The backend does not expect any more calls associated with this * rport. Remove the association between rport and ndlp.
*/
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
ndlp->rport = NULL;
spin_unlock_irqrestore(&ndlp->lock, iflags);
if (phba->worker_thread) { /* We need to hold the node by incrementing the reference * count until this queued work is done
*/
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
spin_lock_irqsave(&phba->hbalock, iflags); if (evtp->evt_arg1) {
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_worker_wake_up(phba); return;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
} else {
/* No worker thread: finish dev-loss handling inline. */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3188 worker thread is stopped %s x%06x, " " rport x%px flg x%lx load_flag x%lx refcnt " "%d\n", __func__, ndlp->nlp_DID,
ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref)); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { /* Node is in dev loss. No further transaction. */
clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
}
}
/** * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport * @vport: Pointer to vport context object. * * This function checks for idle VMID entries related to a particular vport. If * found unused/idle, free them accordingly.
**/ staticvoid lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
{
u16 keep;
u32 difftime = 0, r, bucket;
u64 *lta; int cpu; struct lpfc_vmid *vmp;
/* NOTE(review): this function is truncated in this chunk - it ends
 * mid-body (inside the "if (!r)" branch) and the "out:" label targeted
 * by the goto below is not visible. Confirm the remainder against the
 * upstream lpfc_hbadisc.c.
 */
/* Entire scan runs under the vport's VMID write lock; it is dropped
 * temporarily around the deregistration calls below.
 */
write_lock(&vport->vmid_lock);
if (!vport->cur_vmid_cnt) goto out;
/* iterate through the table */
hash_for_each(vport->hash_table, bucket, vmp, hnode) {
keep = 0; if (vmp->flag & LPFC_VMID_REGISTERED) { /* check if the particular VMID is in use */ /* for all available per cpu variable */
for_each_possible_cpu(cpu) { /* if last access time is less than timeout */
lta = per_cpu_ptr(vmp->last_io_time, cpu); if (!lta) continue;
difftime = (jiffies) - (*lta); if ((vport->vmid_inactivity_timeout *
JIFFIES_PER_HR) > difftime) {
keep = 1; break;
}
}
/* if none of the cpus have been used by the vm, */ /* remove the entry if already registered */ if (!keep) { /* mark the entry for deregistration */
vmp->flag = LPFC_VMID_DE_REGISTER;
write_unlock(&vport->vmid_lock); if (vport->vmid_priority_tagging)
r = lpfc_vmid_uvem(vport, vmp, false); else
r = lpfc_vmid_cmd(vport,
SLI_CTAS_DAPP_IDENT,
vmp);
/* decrement number of active vms and mark */ /* entry in slot as free */
write_lock(&vport->vmid_lock); if (!r) { struct lpfc_vmid *ht = vmp;
/** * lpfc_check_inactive_vmid - VMID inactivity checker * @phba: Pointer to hba context object. * * This function is called from the worker thread to determine if an entry in * the VMID table can be released since there was no I/O activity seen from that * particular VM for the specified time. When this happens, the entry in the * table is released and also the resources on the switch cleared.
**/
/** * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss * @vport: Pointer to vport object. * @ndlp: Pointer to remote node object. * * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of * node during dev_loss_tmo processing, then this function restores the nlp_put * kref decrement from lpfc_dev_loss_tmo_handler.
**/ void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp)
{
	/* Nothing to restore unless the recovery flag was set. */
	if (!test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags))
		return;

	/* Reverse the kref decrement that dev_loss_tmo handling applied. */
	lpfc_nlp_get(ndlp);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
			 "8438 Devloss timeout reversed on DID x%x "
			 "refcnt %d ndlp %p flag x%lx "
			 "port_state = x%x\n",
			 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
			 ndlp->nlp_flag, vport->port_state);
}
/** * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler * @ndlp: Pointer to remote node object. * * This function is called from the worker thread when devloss timeout timer * expires. For SLI4 host, this routine shall return 1 when at lease one * remote node, including this @ndlp, is still in use of FCF; otherwise, this * routine shall return 0 when there is no remote node is still in use of FCF * when devloss timeout happened to this @ndlp.
**/ staticint
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{ struct lpfc_vport *vport; struct lpfc_hba *phba;
uint8_t *name; int warn_on = 0; int fcf_inuse = 0; bool recovering = false; struct fc_vport *fc_vport = NULL; unsignedlong iflags;
/* NOTE(review): this body appears garbled - "name" and "warn_on" are
 * set but never used in the visible code, the tail (from the
 * lpfc_create_vport_work_array call onward) uses undeclared "vports"
 * and "i" and issues a bare "return;" in an int-returning function,
 * suggesting it belongs to a different (VMID QFPA) routine. Confirm
 * against upstream lpfc_hbadisc.c.
 */
vport = ndlp->vport;
name = (uint8_t *)&ndlp->nlp_portname;
phba = vport->phba;
if (phba->sli_rev == LPFC_SLI_REV4)
fcf_inuse = lpfc_fcf_inuse(phba);
/* Fabric nodes are done. */ if (ndlp->nlp_type & NLP_FABRIC) {
spin_lock_irqsave(&ndlp->lock, iflags);
/* The driver has to account for a race between any fabric * node that's in recovery when dev_loss_tmo expires. When this * happens, the driver has to allow node recovery.
*/ switch (ndlp->nlp_DID) { case Fabric_DID:
fc_vport = vport->fc_vport; if (fc_vport) { /* NPIV path. */ if (fc_vport->vport_state ==
FC_VPORT_INITIALIZING)
recovering = true;
} else { /* Physical port path. */ if (test_bit(HBA_FLOGI_OUTSTANDING,
&phba->hba_flag))
recovering = true;
} break; case Fabric_Cntl_DID: if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
recovering = true; break; case FDMI_DID:
fallthrough; case NameServer_DID: if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
recovering = true; break; default: /* Ensure the nlp_DID at least has the correct prefix. * The fabric domain controller's last three nibbles * vary so we handle it in the default case.
*/ if (ndlp->nlp_DID & Fabric_DID_MASK) { if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
recovering = true;
} break;
}
spin_unlock_irqrestore(&ndlp->lock, iflags);
/* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing * the following lpfc_nlp_put is necessary after fabric node is * recovered.
*/
clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); if (recovering) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY | LOG_NODE, "8436 Devloss timeout marked on " "DID x%x refcnt %d ndlp %p " "flag x%lx port_state = x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp, ndlp->nlp_flag,
vport->port_state);
set_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags); return fcf_inuse;
} elseif (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Fabric node fully recovered before this dev_loss_tmo * queue work is processed. Thus, ignore the * dev_loss_tmo event.
*/
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY | LOG_NODE, "8437 Devloss timeout ignored on " "DID x%x refcnt %d ndlp %p " "flag x%lx port_state = x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp, ndlp->nlp_flag,
vport->port_state); return fcf_inuse;
}
/* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
*/ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) { return fcf_inuse;
}
if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
vports = lpfc_create_vport_work_array(phba); if (!vports) return;
for (i = 0; i <= phba->max_vports; i++) { if ((!vports[i]) && (i == 0))
vport = phba->pport; else
vport = vports[i]; if (!vport) break;
if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) { if (!lpfc_issue_els_qfpa(vport))
vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
}
}
lpfc_destroy_vport_work_array(phba, vports);
}
/** * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler * @phba: Pointer to hba context object. * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. * @nlp_did: remote node identifer with devloss timeout. * * This function is called from the worker thread after invoking devloss * timeout handler and releasing the reference count for the ndlp with * which the devloss timeout was handled for SLI4 host. For the devloss * timeout of the last remote node which had been in use of FCF, when this * routine is invoked, it shall be guaranteed that none of the remote are * in-use of FCF. When devloss timeout to the last remote using the FCF, * if the FIP engine is neither in FCF table scan process nor roundrobin * failover process, the in-use FCF shall be unregistered. If the FIP * engine is in FCF discovery process, the devloss timeout state shall * be set for either the FCF table scan process or roundrobin failover * process to unregister the in-use FCF.
**/ staticvoid
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
uint32_t nlp_did)
{ /* If devloss timeout happened to a remote node when FCF had no * longer been in-use, do nothing.
*/ if (!fcf_inuse) return;
if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
!lpfc_fcf_inuse(phba)) {
/* hbalock guards fcf_flag; note the early-return paths below
 * each drop the lock before returning.
 */
spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_DISCOVERY) { if (test_and_set_bit(HBA_DEVLOSS_TMO,
&phba->hba_flag)) {
spin_unlock_irq(&phba->hbalock); return;
}
lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2847 Last remote node (x%x) using " "FCF devloss tmo\n", nlp_did);
} if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2868 Devloss tmo to FCF rediscovery " "in progress\n"); return;
}
spin_unlock_irq(&phba->hbalock); if (!test_bit(FCF_TS_INPROG, &phba->hba_flag) &&
!test_bit(FCF_RR_INPROG, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2869 Devloss tmo to idle FIP engine, " "unreg in-use FCF and rescan.\n"); /* Unregister in-use FCF and rescan */
lpfc_unregister_fcf_rescan(phba); return;
} if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2870 FCF table scan in progress\n"); if (test_bit(FCF_RR_INPROG, &phba->hba_flag))
lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2871 FLOGI roundrobin FCF failover " "in progress\n");
}
/* Non-FIP (or FCF still in use): just drop any unused FCF. */
lpfc_unregister_unused_fcf(phba);
}
/** * lpfc_alloc_fast_evt - Allocates data structure for posting event * @phba: Pointer to hba context object. * * This function is called from the functions which need to post * events from interrupt context. This function allocates data * structure required for posting event. It also keeps track of * number of events pending and prevent event storm when there are * too many events. * * Return: pointer to the zeroed event structure, or NULL when the * outstanding-event cap is exceeded or the allocation fails.
**/ struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba)
{
	struct lpfc_fast_path_event *ret;

	/* If there are lot of fast event do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	/* GFP_ATOMIC: callers may be in interrupt context.
	 * sizeof(*ret) is the kernel-preferred form (tracks the variable's
	 * type automatically), replacing sizeof(struct lpfc_fast_path_event).
	 */
	ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
/** * lpfc_free_fast_evt - Frees event data structure * @phba: Pointer to hba context object. * @evt: Event object which need to be freed. * * This function frees the data structure required for posting * events.
**/ void
lpfc_free_fast_evt(struct lpfc_hba *phba, struct lpfc_fast_path_event *evt) {
/* NOTE(review): the body of this function is missing in this chunk -
 * the opening brace is never closed before the next function's doc
 * comment begins. Confirm the implementation (presumably decrementing
 * fast_event_count and freeing evt) against upstream lpfc_hbadisc.c.
 */
/** * lpfc_send_fastpath_evt - Posts events generated from fast path * @phba: Pointer to hba context object. * @evtp: Event data structure. * * This function is called from worker thread, when the interrupt * context need to post an event. This function posts the event * to fc transport netlink interface.
**/ staticvoid
lpfc_send_fastpath_evt(struct lpfc_hba *phba, struct lpfc_work_evt *evtp)
{ unsignedlong evt_category, evt_sub_category; struct lpfc_fast_path_event *fast_evt_data; char *evt_data;
uint32_t evt_data_size; struct Scsi_Host *shost;
/* NOTE(review): the visible body does not match this function's doc
 * comment - it appears to be the body of the driver's worker-thread
 * dispatch routine (lpfc_work_done): it uses undeclared identifiers
 * (hba_pci_err, ha_copy, vports, i, vport, work_port_events, pring,
 * status, control) and is truncated mid-statement at the end. Confirm
 * against upstream lpfc_hbadisc.c before modifying.
 */
/* First, try to post the next mailbox command to SLI4 device */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
lpfc_sli4_post_async_mbox(phba);
if (ha_copy & HA_ERATT) { /* Handle the error attention event */
lpfc_handle_eratt(phba);
if (phba->fw_dump_cmpl) {
complete(phba->fw_dump_cmpl);
phba->fw_dump_cmpl = NULL;
}
}
if (ha_copy & HA_MBATT)
lpfc_sli_handle_mb_event(phba);
/* Process SLI4 events */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { if (test_bit(HBA_RRQ_ACTIVE, &phba->hba_flag))
lpfc_handle_rrq_active(phba); if (test_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag))
lpfc_sli4_els_xri_abort_event_proc(phba); if (test_bit(ASYNC_EVENT, &phba->hba_flag))
lpfc_sli4_async_event_proc(phba); if (test_and_clear_bit(HBA_POST_RECEIVE_BUFFER,
&phba->hba_flag))
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
lpfc_sli4_fcf_redisc_event_proc(phba);
}
/* Per-vport timeout/event dispatch loop. */
vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports; i++) { /* * We could have no vports in array if unloading, so if * this happens then just use the pport
*/ if (vports[i] == NULL && i == 0)
vport = phba->pport; else
vport = vports[i]; if (vport == NULL) break;
spin_lock_irq(&vport->work_port_lock);
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock); if (hba_pci_err) continue; if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport); if (work_port_events & WORKER_ELS_TMO)
lpfc_els_timeout_handler(vport); if (work_port_events & WORKER_HB_TMO)
lpfc_hb_timeout_handler(phba); if (work_port_events & WORKER_MBOX_TMO)
lpfc_mbox_timeout_handler(phba); if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
lpfc_unblock_fabric_iocbs(phba); if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
lpfc_ramp_down_queue_handler(phba); if (work_port_events & WORKER_DELAYED_DISC_TMO)
lpfc_delayed_disc_timeout_handler(vport);
}
lpfc_destroy_vport_work_array(phba, vports);
/* Slow-path (ELS) ring servicing. */
pring = lpfc_phba_elsring(phba);
status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING); if (pring && (status & HA_RXMASK ||
pring->flag & LPFC_DEFERRED_RING_EVENT ||
test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))) { if (pring->flag & LPFC_STOP_IOCB_EVENT) {
pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Preserve legacy behavior. */ if (!test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else { /* Driver could have abort request completed in queue * when link goes down. Allow for this transition.
*/ if (phba->link_state >= LPFC_LINK_DOWN ||
phba->link_flag & LS_MDS_LOOPBACK) {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
(status &
HA_RXMASK));
}
} if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_drain_txq(phba); /* * Turn on Ring interrupts
*/ if (phba->sli_rev <= LPFC_SLI_REV3) {
spin_lock_irq(&phba->hbalock);
control = readl(phba->HCregaddr); if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
lpfc_debugfs_slow_ring_trc(phba, "WRK Enable ring: cntl:x%x hacopy:x%x",
control, ha_copy, 0);
/* * This is only called to handle FC worker events. Since this a rare * occurrence, we allocate a struct lpfc_work_evt structure here instead of * embedding it in the IOCB.
*/ int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
uint32_t evt)
{ struct lpfc_work_evt *evtp; unsignedlong flags;
/* NOTE(review): after the kmalloc below this body diverges from
 * lpfc_workq_post_event - the remainder (continue statements, ndlp/
 * vport/vports/shost/i usage with no visible declarations, unmatched
 * closing braces) appears to come from the linkdown/linkup processing
 * routines. The allocated evtp is also never queued or freed in the
 * visible code. Confirm against upstream lpfc_hbadisc.c.
 */
/* * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will * be queued to worker thread for processing
*/
evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); if (!evtp) return 0;
/* Leave Fabric nodes alone on link down */ if ((phba->sli_rev < LPFC_SLI_REV4) &&
(!remove && ndlp->nlp_type & NLP_FABRIC)) continue;
/* Notify transport of connectivity loss to trigger cleanup. */ if (phba->nvmet_support &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
lpfc_nvmet_invalidate_host(phba, ndlp);
/* Decrement the held ndlp if there is a deferred flogi acc */ if (phba->defer_flogi_acc.flag) { if (phba->defer_flogi_acc.ndlp) {
lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
phba->defer_flogi_acc.ndlp = NULL;
}
}
phba->defer_flogi_acc.flag = false;
/* reinitialize initial HBA flag */
clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
clear_bit(HBA_RHBA_CMPL, &phba->hba_flag);
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { /* On Linkup its safe to clean up the ndlp * from Fabric connections.
*/ if (ndlp->nlp_DID != Fabric_DID)
lpfc_unreg_rpi(vport, ndlp);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
} elseif (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device is * marked for PLOGI.
*/
lpfc_unreg_rpi(vport, ndlp);
}
}
}
/* Unblock fabric iocbs if they are blocked */
clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
timer_delete_sync(&phba->fabric_block_timer);
vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
lpfc_linkup_port(vports[i]);
lpfc_destroy_vport_work_array(phba, vports);
/* Clear the pport flogi counter in case the link down was * absorbed without an ACQE. No lock here - in worker thread * and discovery is synchronized.
*/
spin_lock_irq(shost->host_lock);
phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);
return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	/* Re-enable link-attention interrupts now that CLEAR_LA completed. */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Error path. This re-enable sequence was previously unreachable
	 * because the "out" label targeted by the goto above was missing;
	 * the label is restored here. The mailbox is also freed so pmb is
	 * not leaked on error. NOTE(review): upstream additionally logs
	 * discovery completion and restarts discovery here - confirm
	 * against the original lpfc_hbadisc.c.
	 */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{ struct lpfc_vport *vport = pmb->vport;
LPFC_MBOXQ_t *sparam_mb;
u16 status = pmb->u.mb.mbxStatus; int rc;
/* NOTE(review): this body is truncated/garbled - "goto sparam_out" and
 * "goto out" target labels that are not visible, "rc" is declared but
 * never used, and the code jumps from the READ_SPARAM mailbox
 * allocation straight to the CONFIG_LINK error print with no
 * intervening logic. Confirm against upstream lpfc_hbadisc.c.
 */
/* Completion routine owns the mailbox; release it up front. */
mempool_free(pmb, phba->mbox_mem_pool);
if (status) goto out;
/* don't perform discovery for SLI4 loopback diagnostic test */ if ((phba->sli_rev == LPFC_SLI_REV4) &&
!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
(phba->link_flag & LS_LOOPBACK_MODE)) return;
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
test_bit(FC_PUBLIC_LOOP, &vport->fc_flag) &&
!test_bit(FC_LBIT, &vport->fc_flag)) { /* Need to wait for FAN - use discovery timer * for timeout. port_state is identically * LPFC_LOCAL_CFG_LINK while waiting for FAN
*/
lpfc_set_disctmo(vport); return;
}
/* Start discovery by sending a FLOGI. port_state is identically * LPFC_FLOGI while waiting for FLOGI cmpl.
*/ if (vport->port_state != LPFC_FLOGI) { /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if * bb-credit recovery is in place.
*/ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
!(phba->link_flag & LS_LOOPBACK_MODE)) {
sparam_mb = mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL); if (!sparam_mb) goto sparam_out;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0200 CONFIG_LINK bad hba state x%x\n",
vport->port_state);
lpfc_issue_clear_la(phba, vport); return;
}
/** * lpfc_sli4_clear_fcf_rr_bmask * @phba: pointer to the struct lpfc_hba for this port. * This fucnction resets the round robin bit mask and clears the * fcf priority list. The list deletions are done while holding the * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared * from the lpfc_fcf_pri record.
**/ void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{ struct lpfc_fcf_pri *fcf_pri; struct lpfc_fcf_pri *next_fcf_pri;
/* Zero the roundrobin bitmask, then empty the priority list under
 * hbalock (safe iteration since entries are unlinked as we go).
 */
memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(fcf_pri, next_fcf_pri,
&phba->fcf.fcf_pri_list, list) {
list_del_init(&fcf_pri->list);
/* Clears ON_LIST / FLOGI_FAILED state for this record. */
fcf_pri->fcf_rec.flag = 0;
}
spin_unlock_irq(&phba->hbalock);
} staticvoid
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{ struct lpfc_vport *vport = mboxq->vport;
/* NOTE(review): this body is truncated - the "fail_out" and "out"
 * labels targeted by the gotos below are not visible, and the function
 * ends with a garbled "goto out." token instead of a proper tail
 * (upstream frees mboxq there). Confirm against upstream
 * lpfc_hbadisc.c.
 */
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2017 REG_FCFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus,
vport->port_state); goto fail_out;
}
/* Start FCoE discovery by sending a FLOGI. */
phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); /* Set the FCFI registered flag */
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REGISTERED;
spin_unlock_irq(&phba->hbalock);
/* If there is a pending FCoE event, restart FCF table scan. */ if (!test_bit(FCF_RR_INPROG, &phba->hba_flag) &&
lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) goto fail_out;
/* Mark successful completion of FCF table scan */
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
spin_unlock_irq(&phba->hbalock);
clear_bit(FCF_TS_INPROG, &phba->hba_flag); if (vport->port_state != LPFC_FLOGI) {
set_bit(FCF_RR_INPROG, &phba->hba_flag);
lpfc_issue_init_vfi(vport);
} goto out.
/** * lpfc_fab_name_match - Check if the fcf fabric name match. * @fab_name: pointer to fabric name. * @new_fcf_record: pointer to fcf record. * * Compares all eight bytes of the supplied fabric name with the * corresponding fields of the FCF record. Returns 1 on a full match, * 0 otherwise.
**/ static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	return (fab_name[0] == bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record) &&
		fab_name[1] == bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record) &&
		fab_name[2] == bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record) &&
		fab_name[3] == bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record) &&
		fab_name[4] == bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record) &&
		fab_name[5] == bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record) &&
		fab_name[6] == bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record) &&
		fab_name[7] == bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)) ? 1 : 0;
}
/** * lpfc_sw_name_match - Check if the fcf switch name match. * @sw_name: pointer to switch name. * @new_fcf_record: pointer to fcf record. * * Compares all eight bytes of the supplied switch name with the * corresponding fields of the FCF record. Returns 1 on a full match, * 0 otherwise.
**/ static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	return (sw_name[0] == bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record) &&
		sw_name[1] == bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record) &&
		sw_name[2] == bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record) &&
		sw_name[3] == bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record) &&
		sw_name[4] == bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record) &&
		sw_name[5] == bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record) &&
		sw_name[6] == bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record) &&
		sw_name[7] == bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)) ? 1 : 0;
}
/** * lpfc_mac_addr_match - Check if the fcf mac address match. * @mac_addr: pointer to mac address. * @new_fcf_record: pointer to fcf record. * * Compares all six bytes of the supplied MAC address with the * corresponding fields of the FCF record. Returns 1 on a full match, * 0 otherwise.
**/ static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	return (mac_addr[0] == bf_get(lpfc_fcf_record_mac_0, new_fcf_record) &&
		mac_addr[1] == bf_get(lpfc_fcf_record_mac_1, new_fcf_record) &&
		mac_addr[2] == bf_get(lpfc_fcf_record_mac_2, new_fcf_record) &&
		mac_addr[3] == bf_get(lpfc_fcf_record_mac_3, new_fcf_record) &&
		mac_addr[4] == bf_get(lpfc_fcf_record_mac_4, new_fcf_record) &&
		mac_addr[5] == bf_get(lpfc_fcf_record_mac_5, new_fcf_record)) ? 1 : 0;
}
/** * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record. * @phba: pointer to lpfc hba data structure. * @fcf_index: Index for the lpfc_fcf_record. * @new_fcf_record: pointer to hba fcf record. * * This routine updates the driver FCF priority record from the new HBA FCF * record. The hbalock is asserted held in the code path calling this * routine.
**/ staticvoid
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index, struct fcf_record *new_fcf_record
)
{ struct lpfc_fcf_pri *fcf_pri;
/* NOTE(review): the body of this function is missing in this chunk -
 * only the local declaration is visible before the next function's doc
 * comment begins. Confirm the implementation against upstream
 * lpfc_hbadisc.c.
 */
/** * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. * @fcf_rec: pointer to driver fcf record. * @new_fcf_record: pointer to fcf record. * * Copies the fabric name, MAC address, record index, FIP priority and * switch name from the hardware FCF record into the driver's record.
**/ staticvoid
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec, struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] = bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] = bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] = bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] = bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] = bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] = bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] = bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] = bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);

	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);

	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;

	/* Switch name */
	fcf_rec->switch_name[0] = bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] = bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] = bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] = bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] = bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] = bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] = bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] = bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/** * __lpfc_update_fcf_record - Update driver fcf record * @phba: pointer to lpfc hba data structure. * @fcf_rec: pointer to driver fcf record. * @new_fcf_record: pointer to hba fcf record. * @addr_mode: address mode to be set to the driver fcf record. * @vlan_id: vlan tag to be set to the driver fcf record. * @flag: flag bits to be set to the driver fcf record. * * This routine updates the driver FCF record from the new HBA FCF record * together with the address mode, vlan_id, and other informations. This * routine is called with the hbalock held.
**/ staticvoid
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, struct fcf_record *new_fcf_record, uint32_t addr_mode,
uint16_t vlan_id, uint32_t flag)
{
	uint16_t fcf_indx;

	/* Caller must hold hbalock across the whole update. */
	lockdep_assert_held(&phba->hbalock);

	/* Pull the identity fields (names, MAC, index, priority) over. */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);

	/* Then apply the caller-supplied discovery attributes. */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);

	/* Keep the priority record in sync with this FCF index. */
	fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	__lpfc_update_fcf_record_pri(phba, fcf_indx, new_fcf_record);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 *
 * NOTE(review): this chunk appears truncated - the mailbox allocation and
 * issue path that would use @fcf_mbxq/@rc (and the function's closing brace)
 * is not visible here; confirm against the full source file.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;	/* presumably used by the mailbox path below - not visible in this chunk */
	int rc;

	spin_lock_irq(&phba->hbalock);

	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		spin_unlock_irq(&phba->hbalock);
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
		clear_bit(FCF_RR_INPROG, &phba->hba_flag);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		spin_unlock_irq(&phba->hbalock);
		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
		/* Kick off the initial FLOGI only on a fabric topology when
		 * one is not already in progress.
		 */
		if (phba->pport->port_state != LPFC_FLOGI &&
		    test_bit(FC_FABRIC, &phba->pport->fc_flag)) {
			set_bit(FCF_RR_INPROG, &phba->hba_flag);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		return;
	}
	spin_unlock_irq(&phba->hbalock);
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by the boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery else returns zero.
 * If this FCF record can be used for SAN discovery, boot_flag will indicate
 * whether this FCF is used by the boot bios and addr_mode will indicate the
 * addressing mode to be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in *vlan_id on return. If no VLAN tagging needs to be used with the
 * FCF, *vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba, struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/*
	 * Find the lowest VLAN id in the FCF record.  vlan_bitmap is a
	 * 512-byte map (4096 VLAN ids, one bit each); the lowest id is the
	 * lowest-numbered set bit in the first nonzero byte.
	 */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	/*
	 * If there are no FCF connection table entries, the driver connects
	 * to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use the driver's
		 * default addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If the FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/* First connect-table entry that matches this FCF record wins. */
	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		/* Fabric name filter, when the entry specifies one */
		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		/* Switch name filter, when the entry specifies one */
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If the connection record does not support any addressing
		 * mode, skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, skip this FCF.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, skip this FCF.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches the filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If the user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by the user is not
		 * supported by the FCF, allow the fabric to pick the
		 * addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use it
		 * only if the FCF supports that addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If the matching connect list entry has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in the connect list, use the
		 * vlan id in the FCF record.
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
/** * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event. * @phba: pointer to lpfc hba data structure. * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned. * * This function check if there is any fcoe event pending while driver * scan FCF entries. If there is any pending event, it will restart the * FCF saning and return 1 else return 0.
*/ int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{ /* * If the Link is up and no FCoE events while in the * FCF discovery, no need to restart FCF discovery.
*/ if ((phba->link_state >= LPFC_LINK_UP) &&
(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) return 0;
lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2768 Pending link or FCF event during current " "handling of the previous event: link_state:x%x, " "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
phba->fcoe_eventtag);
/*
 * NOTE(review): the trailing text that appeared here was German web-page
 * boilerplate from a code-listing site ("information provided without
 * warranty; syntax coloring is experimental"), not part of lpfc_hbadisc.c.
 * It has been removed because bare prose is not valid C.
 */