static uint qedf_fipvlan_retries = 60;
module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt " "before giving up (default 60)");
static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails " "(default 1002).");
staticint qedf_default_prio = -1;
module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE" " traffic (value between 0 and 7, default 3).");
uint qedf_dump_frames;
module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames " "(default off)");
static uint qedf_queue_depth;
module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered " "by the qedf driver. Default is 0 (use OS default).");
static uint qedf_max_lun = MAX_FIBRE_LUNS;
module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver " "supports. (default 0xffffffff)");
uint qedf_link_down_tmo;
module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the " "link is down by N seconds.");
staticbool qedf_dcbx_no_wait;
module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start " "sending FIP VLAN requests on link up (Default: off).");
static uint qedf_dp_module;
module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed " "qed module during probe.");
static uint qedf_dp_level = QED_LEVEL_NOTICE;
module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " "during probe (0-3: 0 more verbose).");
/*
 * NOTE(review): this body appears to be two functions fused by a bad
 * extraction.  The header says qedf_initiate_fipvlan_req (returns bool), but
 * the body uses an undeclared `rc`, calls qedf_initiate_fipvlan_req
 * recursively, and issues valueless `return;` statements — all of which look
 * like link-update worker code.  Also "staticbool", "returnfalse" and
 * "elseif" are missing spaces.  Restore from the canonical source rather
 * than patching in place.
 */
/* Returns true if we have a valid vlan, false otherwise */ staticbool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
{
while (qedf->fipvlan_retries--) { /* This is to catch if link goes down during fipvlan retries */ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n"); returnfalse;
}
if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); returnfalse;
}
if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
/* NOTE(review): `rc` is never declared in this scope, and the recursive
 * call plus valueless `return;` suggest this belongs to a void worker
 * function, not to this bool function — confirm against upstream.
 */
rc = qedf_initiate_fipvlan_req(qedf); if (rc) return;
if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Link is down, resetting vlan_id.\n");
qedf->vlan_id = 0; return;
}
/* * If we get here then we never received a repsonse to our * fip vlan request so set the vlan_id to the default and * tell FCoE that the link is up
*/
QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN " "response, falling back to default VLAN %d.\n",
qedf_fallback_vlan);
qedf_set_vlan_id(qedf, qedf_fallback_vlan);
/* * Zero out data_src_addr so we'll update it with the new * lport port_id
*/
eth_zero_addr(qedf->data_src_addr);
fcoe_ctlr_link_up(&qedf->ctlr);
} elseif (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { /* * If we hit here and link_down_tmo_valid is still 1 it means * that link_down_tmo timed out so set it to 0 to make sure any * other readers have accurate state.
*/
atomic_set(&qedf->link_down_tmo_valid, 0);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Calling fcoe_ctlr_link_down().\n");
fcoe_ctlr_link_down(&qedf->ctlr); if (qedf_wait_for_upload(qedf) == false)
QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); /* Reset the number of FIP VLAN retries */
qedf->fipvlan_retries = qedf_fipvlan_retries;
}
}
/*
 * NOTE(review): headerless fragments fused together by extraction damage.
 * L63-81 look like the interior of a "set data source MAC" helper (selects
 * the FCoE source MAC from the FIP-granted MAC, the FCF fc_map, or the
 * default FCF-MAC scheme).  L82-95 look like a FLOGI response handler
 * (stats + completion).  L97-105 look like a FLOGI transmit path.  None of
 * the enclosing function headers are visible here; the `elseif` tokens are
 * missing spaces.  Do not edit logic in place — restore from the canonical
 * source.
 */
/* Get granted MAC address from FIP FLOGI payload */
granted_mac = fr_cb(fp)->granted_mac;
/* * We set the source MAC for FCoE traffic based on the Granted MAC * address from the switch. * * If granted_mac is non-zero, we used that. * If the granted_mac is zeroed out, created the FCoE MAC based on * the sel_fcf->fc_map and the d_id fo the FLOGI frame. * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the * d_id of the FLOGI frame.
*/ if (!is_zero_ether_addr(granted_mac)) {
ether_addr_copy(qedf->data_src_addr, granted_mac);
method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC;
} elseif (qedf->ctlr.sel_fcf->fc_map != 0) {
hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
qedf->data_src_addr[0] = fc_map[0];
qedf->data_src_addr[1] = fc_map[1];
qedf->data_src_addr[2] = fc_map[2];
qedf->data_src_addr[3] = fh->fh_d_id[0];
qedf->data_src_addr[4] = fh->fh_d_id[1];
qedf->data_src_addr[5] = fh->fh_d_id[2];
method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
} else {
fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
}
/* NOTE(review): from here on this is a different function's body. */
if (!qedf) {
QEDF_ERR(NULL, "qedf is NULL.\n"); return;
}
/* * If ERR_PTR is set then don't try to stat anything as it will cause * a crash when we access fp.
*/ if (IS_ERR(fp)) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "fp has IS_ERR() set.\n"); goto skip_stat;
}
/* Log stats for FLOGI reject */ if (fc_frame_payload_op(fp) == ELS_LS_RJT)
qedf->flogi_failed++; elseif (fc_frame_payload_op(fp) == ELS_LS_ACC) { /* Set the source MAC we will use for FCoE traffic */
qedf_set_data_src_addr(qedf, fp);
qedf->flogi_pending = 0;
}
/* Complete flogi_compl so we can proceed to sending ADISCs */
complete(&qedf->flogi_compl);
/* * Intercept FLOGI for statistic purposes. Note we use the resp * callback to tell if this is really a flogi.
*/ if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++;
qedf->flogi_pending++;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending FLOGI to reestablish session with switch.\n");
lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);
init_completion(&qedf->flogi_compl);
return 0;
}
/* * This function is called if link_down_tmo is in use. If we get a link up and * link_down_tmo has not expired then use just FLOGI/ADISC to recover our * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
*/ staticvoid qedf_link_recovery(struct work_struct *work)
{ struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, link_recovery.work); struct fc_lport *lport = qedf->lport; struct fc_rport_priv *rdata; bool rc; int retries = 30; int rval, i; struct list_head rdata_login_list;
INIT_LIST_HEAD(&rdata_login_list);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Link down tmo did not expire.\n");
/* * Essentially reset the fcoe_ctlr here without affecting the state * of the libfc structs.
*/
qedf->ctlr.state = FIP_ST_LINK_WAIT;
fcoe_ctlr_link_down(&qedf->ctlr);
/* * Bring the link up before we send the fipvlan request so libfcoe * can select a new fcf in parallel
*/
fcoe_ctlr_link_up(&qedf->ctlr);
/* Since the link when down and up to verify which vlan we're on */
qedf->fipvlan_retries = qedf_fipvlan_retries;
rc = qedf_initiate_fipvlan_req(qedf); /* If getting the VLAN fails, set the VLAN to the fallback one */ if (!rc)
qedf_set_vlan_id(qedf, qedf_fallback_vlan);
/* * We need to wait for an FCF to be selected due to the * fcoe_ctlr_link_up other the FLOGI will be rejected.
*/ while (retries > 0) { if (qedf->ctlr.sel_fcf) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "FCF reselected, proceeding with FLOGI.\n"); break;
}
msleep(500);
retries--;
}
if (retries < 1) {
QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for " "FCF selection.\n"); return;
}
rval = qedf_send_flogi(qedf); if (rval) return;
/* Wait for FLOGI completion before proceeding with sending ADISCs */
i = wait_for_completion_timeout(&qedf->flogi_compl,
qedf->lport->r_a_tov); if (i == 0) {
QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n"); return;
}
/* * Call lport->tt.rport_login which will cause libfc to send an * ADISC since the rport is in state ready.
*/
mutex_lock(&lport->disc.disc_mutex);
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { if (kref_get_unless_zero(&rdata->kref)) {
fc_rport_login(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
mutex_unlock(&lport->disc.disc_mutex);
}
/*
 * NOTE(review): headerless tail fragment — looks like a bandwidth/link
 * status update callback (queries qed for link state, bails if unloading,
 * updates speed only when the link is already up).  The enclosing function
 * header is not visible in this copy.
 */
/* Get the latest status of the link */
qed_ops->common->get_link(qedf->cdev, &link);
if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
QEDF_ERR(&qedf->dbg_ctx, "Ignore link update, driver getting unload.\n"); return;
}
if (link.link_up) { if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
qedf_update_link_speed(qedf, &link); else
QEDF_ERR(&qedf->dbg_ctx, "Ignore bw update, link is down.\n");
} else {
QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
}
}
/*
 * NOTE(review): headerless fragment of a link-update callback.  The link-up
 * and link-down branches appear fused together by extraction damage: inside
 * the `link->link_up` branch the code sets QEDF_LINK_DOWN, QEDF_DCBX_PENDING,
 * clears vlan_id and arms link_down_tmo — all of which read like the
 * link-DOWN path.  Confirm against the canonical source before touching.
 */
/* * Prevent race where we're removing the module and we get link update * for qed.
*/ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
QEDF_ERR(&qedf->dbg_ctx, "Ignore link update, driver getting unload.\n"); return;
}
if (link->link_up) { if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC, "Ignoring link up event as link is already up.\n"); return;
}
QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
link->speed / 1000);
/* Cancel any pending link down work */
cancel_delayed_work(&qedf->link_update);
/* NOTE(review): setting QEDF_LINK_DOWN in the link-up branch looks wrong
 * — presumably this statement belongs to the link-down path.  Verify.
 */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); /* * Flag that we're waiting for the link to come back up before * informing the fcoe layer of the event.
*/ if (qedf_link_down_tmo > 0) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Starting link down tmo.\n");
atomic_set(&qedf->link_down_tmo_valid, 1);
}
qedf->vlan_id = 0;
qedf_update_link_speed(qedf, link);
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
qedf_link_down_tmo * HZ);
}
}
/*
 * NOTE(review): headerless fragment of a DCBX notification handler.  Marks
 * DCBX done (once) and chooses the 802.1q priority: modparam override first,
 * then clamp out-of-range DCBX values to the default, else use the DCBX app
 * priority.  "elseif" is a fused token from extraction damage.
 */
if (get->operational.enabled && get->operational.valid) { /* If DCBX was already negotiated on link up then just exit */ if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "DCBX already set on link up.\n"); return;
}
atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
/* * Set the 8021q priority in the following manner: * * 1. If a modparam is set use that * 2. If the value is not between 0..7 use the default * 3. Use the priority we get from the DCBX app tag
*/
tmp_prio = get->operational.app_prio.fcoe; if (qedf_default_prio > -1)
qedf->prio = qedf_default_prio; elseif (tmp_prio > 7) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "FIP/FCoE prio %d out of range, setting to %d.\n",
tmp_prio, QEDF_DEFAULT_PRIO);
qedf->prio = QEDF_DEFAULT_PRIO;
} else
qedf->prio = tmp_prio;
/*
 * NOTE(review): headerless fragment of a SCSI abort error handler.  Issues
 * an ABTS for the io_req, waits for completion, and maps the outcome to
 * SUCCESS/FAILED.  The drop_rdata_kref label and rc/rval declarations live
 * outside this fragment.
 */
/* If we got a valid io_req, confirm it belongs to this sc_cmd. */ if (!rval || io_req->sc_cmd != sc_cmd) {
QEDF_ERR(&qedf->dbg_ctx, "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
/* NOTE(review): the body of this `if` and the guard above appear to have
 * been separated from intervening code by extraction — compare upstream.
 */
if (qedf->stop_io_on_error) {
qedf_stop_all_io(qedf);
rc = SUCCESS; goto drop_rdata_kref;
}
init_completion(&io_req->abts_done);
rval = qedf_initiate_abts(io_req, true); if (rval) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); /* * If we fail to queue the ABTS then return this command to * the SCSI layer as it will own and free the xid
*/
rc = SUCCESS;
qedf_scsi_done(qedf, io_req, DID_ERROR); goto drop_rdata_kref;
}
wait_for_completion(&io_req->abts_done);
if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { /* * If we get a reponse to the abort this is success from * the perspective that all references to the command have * been removed from the driver and firmware
*/
rc = SUCCESS;
} else { /* If the abort and cleanup failed then return a failure */
rc = FAILED;
}
/*
 * NOTE(review): headerless fragment — looks like the body of
 * qedf_wait_for_upload (polls num_offloads, then dumps the fcports still
 * pending and returns false on timeout).  "returntrue"/"returnfalse" are
 * fused tokens from extraction damage; wait_cnt and fcport are declared
 * outside this fragment.
 */
while (wait_cnt--) { if (atomic_read(&qedf->num_offloads))
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Waiting for all uploads to complete num_offloads = 0x%x.\n",
atomic_read(&qedf->num_offloads)); else returntrue;
msleep(500);
}
rcu_read_lock();
list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { if (test_bit(QEDF_RPORT_SESSION_READY,
&fcport->flags)) { if (fcport->rdata)
QEDF_ERR(&qedf->dbg_ctx, "Waiting for fcport %p portid=%06x.\n",
fcport, fcport->rdata->ids.port_id);
} else {
QEDF_ERR(&qedf->dbg_ctx, "Waiting for fcport %p.\n", fcport);
}
}
rcu_read_unlock(); returnfalse;
}
/* Performs soft reset of qedf_ctx by simulating a link down/up */ void qedf_ctx_soft_reset(struct fc_lport *lport)
{ struct qedf_ctx *qedf; struct qed_link_output if_link;
qedf = lport_priv(lport);
if (lport->vport) {
clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
printk_ratelimited("Cannot issue host reset on NPIV port.\n"); return;
}
qedf->flogi_pending = 0; /* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Queuing link down work.\n");
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
if (qedf_wait_for_upload(qedf) == false) {
QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
WARN_ON(atomic_read(&qedf->num_offloads));
}
/* Before setting link up query physical link state */
qed_ops->common->get_link(qedf->cdev, &if_link); /* Bail if the physical link is not up */ if (!if_link.link_up) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Physical link is not up.\n");
clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); return;
} /* Flush and wait to make sure link down is processed */
flush_delayed_work(&qedf->link_update);
msleep(500);
atomic_set(&qedf->link_state, QEDF_LINK_UP);
qedf->vlan_id = 0;
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Queue link up work.\n");
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
}
/*
 * NOTE(review): broken by extraction — the body returns NULL (with a comment
 * about "fcport was not found") from a function declared to return int, so
 * this is presumably the declaration of qedf_eh_host_reset fused with the
 * tail of a lookup helper.  "staticint" is a fused token.  Restore from the
 * canonical source.
 */
/* Reset the host by gracefully logging out and then logging back in */ staticint qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
{ struct fc_lport *lport; struct qedf_ctx *qedf;
/* Return NULL to caller to let them know fcport was not found */ return NULL;
}
/*
 * NOTE(review): the header declares qedf_xmit_l2_frame, but the body (NPIV
 * destination filtering, LL2/link guards, VLAN tagging, MAC and FCoE header
 * fill) reads like the main frame-transmit path of the driver — presumably
 * two functions fused by extraction.  Most locals used here (lport,
 * base_lport, vn_port, skb, qedf, eh, hp, sof) are not declared in this
 * fragment.  "staticint" is a fused token.
 */
/* Transmits an ELS frame over an offloaded session */ staticint qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
{ struct fc_frame_header *fh; int rc = 0;
/* Filter out traffic to other NPIV ports on the same host */ if (lport->vport)
base_lport = shost_priv(vport_to_shost(lport->vport)); else
base_lport = lport;
/* Flag if the destination is the base port */ if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
vn_port = base_lport;
} else { /* Got through the list of vports attached to the base_lport * and see if we have a match with the destination address.
*/
list_for_each_entry(tmp_lport, &base_lport->vports, list) { if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
vn_port = tmp_lport; break;
}
}
} if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) { struct fc_rport_priv *rdata = NULL;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
kfree_skb(skb);
rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); if (rdata) {
rdata->retries = lport->max_rport_retry_count;
kref_put(&rdata->kref, fc_rport_destroy);
} return -EINVAL;
} /* End NPIV filtering */
/* Drop silently if no FCF has been selected yet. */
if (!qedf->ctlr.sel_fcf) {
kfree_skb(skb); return 0;
}
if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
kfree_skb(skb); return 0;
}
if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
kfree_skb(skb); return 0;
}
/* Let libfcoe intercept outgoing ELS requests (e.g. for FIP). */
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) return 0;
}
/* Check to see if this needs to be sent on an offloaded session */
fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
rc = qedf_xmit_l2_frame(fcport, fp); /* * If the frame was successfully sent over the middle path * then do not try to also send it over the LL2 path
*/ if (rc) return 0;
}
/* * Add VLAN tag to non-offload FCoE frame based on current stored VLAN * for FIP/FCoE traffic.
*/
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE); if (qedf->ctlr.map_dest)
fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); else /* insert GW address */
ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
/* Set the source MAC address */
ether_addr_copy(eh->h_source, qedf->data_src_addr);
hp = (struct fcoe_hdr *)(eh + 1);
memset(hp, 0, sizeof(*hp)); if (FC_FCOE_VER)
FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
hp->fcoe_sof = sof;
/*
 * NOTE(review): headerless fragments from at least two functions: a
 * connection-offload ramrod setup (conn_info fields: payload sizes, timers,
 * VLAN tag/priority), a session-termination DMA buffer allocation, and a
 * max-sessions guard.  conn_info, term_params, rdata etc. are declared
 * outside this fragment.
 */
conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
/* Set VLAN data */
conn_info.vlan_tag = qedf->vlan_id <<
FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
conn_info.vlan_tag |=
qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
/* Term params needs to be a DMA coherent buffer as qed shared the * physical DMA address with the firmware. The buffer may be used in * the receive path so we may eventually have to move this.
*/
term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
&term_params_dma, GFP_KERNEL); if (!term_params) return;
if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
QEDF_ERR(&(qedf->dbg_ctx), "Not offloading " "portid=0x%x as max number of offloaded sessions " "reached.\n", rdata->ids.port_id); return;
}
/*
 * NOTE(review): headerless fragment of an rport event handler.  The bare
 * `case RPORT_EV_LOGO/FAILED/STOP:` labels mid-fragment show this is the
 * interior of a switch whose opening (and the RPORT_EV_READY offload path's
 * start) is not visible here.  First half: offload a new session (guards for
 * directory server, non-FCP, non-target; takes an rdata kref; publishes the
 * fcport and sets SESSION_READY).  Second half: session upload on
 * LOGO/FAILED/STOP (guards, then UPLOADING_CONNECTION flag, cleanup, and
 * removal from the fcports list).
 */
/* * Don't try to offload the session again. Can happen when we * get an ADISC
*/ if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_WARN(&(qedf->dbg_ctx), "Session already " "offloaded, portid=0x%x.\n",
rdata->ids.port_id); return;
}
if (rport->port_id == FC_FID_DIR_SERV) { /* * qedf_rport structure doesn't exist for * directory server. * We should not come here, as lport will * take care of fabric login
*/
QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not " "exist for dir server port_id=%x\n",
rdata->ids.port_id); break;
}
if (rdata->spp_type != FC_TYPE_FCP) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Not offloading since spp type isn't FCP\n"); break;
} if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Not FCP target so not offloading\n"); break;
}
/* Initial reference held on entry, so this can't fail */
kref_get(&rdata->kref);
fcport->rdata = rdata;
fcport->rport = rport;
/* Add fcport to list of qedf_ctx list of offloaded ports */
spin_lock_irqsave(&qedf->hba_lock, flags);
list_add_rcu(&fcport->peers, &qedf->fcports);
spin_unlock_irqrestore(&qedf->hba_lock, flags);
/* * Set the session ready bit to let everyone know that this * connection is ready for I/O
*/
set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
atomic_inc(&qedf->num_offloads);
break; case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP:
port_id = rdata->ids.port_id; if (port_id == FC_FID_DIR_SERV) break;
if (rdata->spp_type != FC_TYPE_FCP) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "No action since spp type isn't FCP\n"); break;
} if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Not FCP target so no action\n"); break;
}
if (!rport) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "port_id=%x - rport notcreated Yet!!\n", port_id); break;
}
rp = rport->dd_data; /* * Perform session upload. Note that rdata->peers is already * removed from disc->rports list before we get this event.
*/
fcport = (struct qedf_rport *)&rp[1];
spin_lock_irqsave(&fcport->rport_lock, flags); /* Only free this fcport if it is offloaded already */ if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
!test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
&fcport->flags)) {
set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
&fcport->flags);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
qedf_cleanup_fcport(qedf, fcport); /* * Remove fcport to list of qedf_ctx list of offloaded * ports
*/
spin_lock_irqsave(&qedf->hba_lock, flags);
list_del_rcu(&fcport->peers);
spin_unlock_irqrestore(&qedf->hba_lock, flags);
/*
 * Libfc template hook.  Deliberately empty — a callback must be provided
 * even though the driver has nothing to do here.  Fixed: "staticvoid" was a
 * fused token in this copy.
 */
static void qedf_abort_io(struct fc_lport *lport)
{
	/* NO-OP but need to fill in the template */
}
/*
 * Libfc template hook.  Fixed: "staticvoid" was a fused token in this copy.
 */
static void qedf_fcp_cleanup(struct fc_lport *lport)
{
	/*
	 * NO-OP but need to fill in template to prevent a NULL
	 * function pointer dereference during link down. I/Os
	 * will be flushed when port is uploaded.
	 */
}
/*
 * NOTE(review): headerless fragment of FDMI setup — enables FDMI in libfc
 * and reads the 8-byte PCIe Device Serial Number extended capability into
 * buf[].  `pos`, `i`, `buf` and the rest of the function are not visible in
 * this copy; the for-loop body ends mid-statement here.
 */
/* * fdmi_enabled needs to be set for libfc * to execute FDMI registration
*/
lport->fdmi_enabled = 1;
/* * Setup the necessary fc_host attributes to that will be used to fill * in the FDMI information.
*/
/* Get the PCI-e Device Serial Number Capability */
pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN); if (pos) {
pos += 4; for (i = 0; i < 8; i++)
pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
/*
 * NOTE(review): headerless fragments from NPIV vport lifecycle code:
 * L409-428 look like vport creation (link-up guard, libfc_vport_create,
 * cloning fields from the base qedf context into the vport's private data);
 * L429-436 look like vport destruction (detach from scsi-ml, free exchange
 * manager and stats).  `goto err` targets a label outside this fragment.
 */
if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " "because link is not up.\n"); return -EIO;
}
vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx)); if (!vn_port) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " "for vport.\n"); return -ENOMEM;
}
/* Copy some fields from base_qedf */
vport_qedf = lport_priv(vn_port);
memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
/* Set qedf data specific to this vport */
vport_qedf->lport = vn_port; /* Use same hba_lock as base_qedf */
vport_qedf->hba_lock = base_qedf->hba_lock;
vport_qedf->pdev = base_qedf->pdev;
vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
init_completion(&vport_qedf->flogi_compl);
INIT_LIST_HEAD(&vport_qedf->fcports);
INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
rc = qedf_vport_libfc_config(vport, vn_port); if (rc) {
QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " "for lport stats.\n"); goto err;
}
/* NOTE(review): from here on this is vport-destroy code, not create. */
/* Detach from scsi-ml */
fc_remove_host(vn_port->host);
scsi_remove_host(vn_port->host);
/* * Only try to release the exchange manager if the vn_port * configuration is complete.
*/ if (vn_port->state == LPORT_ST_READY)
fc_exch_mgr_free(vn_port);
/* Free memory used by statistical counters */
fc_lport_free_stats(vn_port);
/*
 * During removal we need to wait for all the vports associated with a port
 * to be destroyed so we avoid a race condition where libfc is still trying
 * to reap vports while the driver remove function has already reaped the
 * driver contexts associated with the physical port.
 *
 * Fixed: "staticvoid" was a fused token; formatting de-garbled.  Logic is
 * unchanged: poll npiv_vports_inuse once a second until it reaches zero.
 */
static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
{
	struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, "Entered.\n");
	while (fc_host->npiv_vports_inuse > 0) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
		    "Waiting for all vports to be reaped.\n");
		msleep(1000);
	}
}
/*
 * NOTE(review): the kernel-doc header declares qedf_fcoe_reset, but the body
 * is offload-statistics gathering (kmalloc of qed_fcoe_stats, qed get_stats
 * query, accumulation into libfc host stats) — presumably fused from a
 * get-host-stats callback.  fw_fcoe_stats, qedf, qedf_stats and the `out`
 * label are declared outside this fragment.  "staticint" is a fused token.
 */
/** * qedf_fcoe_reset - Resets the fcoe * * @shost: shost the reset is from * * Returns: always 0
*/ staticint qedf_fcoe_reset(struct Scsi_Host *shost)
{ struct fc_lport *lport = shost_priv(shost);
/* We don't collect offload stats for specific NPIV ports */ if (lport->vport) goto out;
fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); if (!fw_fcoe_stats) {
QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " "fw_fcoe_stats.\n"); goto out;
}
mutex_lock(&qedf->stats_mutex);
/* Query firmware for offload stats */
qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
/* * The expectation is that we add our offload stats to the stats * being maintained by libfc each time the fc_get_host_status callback * is invoked. The additions are not carried over for each call to * the fc_get_host_stats callback.
*/
qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
/* NOTE(review): do_div() divides its first arg in place and returns the
 * remainder — the megabyte accumulation below relies on that; verify the
 * intent against upstream before changing.
 */
qedf_stats->fcp_input_megabytes +=
do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
qedf_stats->fcp_output_megabytes +=
do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
qedf_stats->invalid_crc_count +=
fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
qedf_stats->dumped_frames =
fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
qedf_stats->error_frames +=
fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
qedf_stats->fcp_input_requests += qedf->input_requests;
qedf_stats->fcp_output_requests += qedf->output_requests;
qedf_stats->fcp_control_requests += qedf->control_requests;
qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
/*
 * NOTE(review): headerless tail fragment — compares the firmware producer
 * index from the status block against the driver's cached CQ producer index
 * to decide whether the fastpath has pending work.  `que`, `qedf`, `fp`,
 * `sb` and `prod_idx` are declared outside this fragment.
 */
/* Get the pointer to the global CQ this completion is on */
que = qedf->global_queues[fp->sb_id];
/* Be sure all responses have been written to PI */
rmb();
/* Get the current firmware producer index */
prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
return (que->cq_prod_idx != prod_idx);
}
/*
 * NOTE(review): qedf_process_completions is truncated in this copy — the
 * while-loop never closes (the source is cut off by unrelated trailing
 * text), `comp_type` is read before any visible assignment, and the
 * `inc_idx` label is missing.  "staticbool"/"unsignedint" are fused tokens.
 * Restore the full function from the canonical source.
 */
/* * Interrupt handler code.
*/
/* Process completion queue and copy CQE contents for deferred processesing * * Return true if we should wake the I/O thread, false if not.
*/ staticbool qedf_process_completions(struct qedf_fastpath *fp)
{ struct qedf_ctx *qedf = fp->qedf; struct qed_sb_info *sb_info = fp->sb_info; struct status_block *sb = sb_info->sb_virt; struct global_queue *que;
u16 prod_idx; struct fcoe_cqe *cqe; struct qedf_io_work *io_work; unsignedint cpu; struct qedf_ioreq *io_req = NULL;
u16 xid;
u16 new_cqes;
u32 comp_type;
/* Get the current firmware producer index */
prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
/* Get the pointer to the global CQ this completion is on */
que = qedf->global_queues[fp->sb_id];
/* Calculate the amount of new elements since last processing */
new_cqes = (prod_idx >= que->cq_prod_idx) ?
(prod_idx - que->cq_prod_idx) :
0x10000 - que->cq_prod_idx + prod_idx;
/* Save producer index */
que->cq_prod_idx = prod_idx;
while (new_cqes) {
fp->completions++;
cqe = &que->cq[que->cq_cons_idx];
/* NOTE(review): comp_type is used below but never assigned in the visible
 * code — the line extracting it from the CQE header appears to be missing.
 */
/* * Process unsolicited CQEs directly in the interrupt handler * sine we need the fastpath ID
*/ if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, "Unsolicated CQE.\n");
qedf_process_unsol_compl(qedf, fp->sb_id, cqe); /* * Don't add a work list item. Increment consumer * consumer index and move on.
*/ goto inc_idx;
}
/* * Figure out which percpu thread we should queue this I/O * on.
*/ if (!io_req) /* If there is not io_req associated with this CQE * just queue it on CPU 0
*/
cpu = 0; else {
cpu = io_req->cpu;
io_req->int_cpu = smp_processor_id();
}
io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); if (!io_work) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " "work for I/O completion.\n"); continue;
}
memset(io_work, 0, sizeof(struct qedf_io_work));
INIT_WORK(&io_work->work, qedf_fp_io_handler);
/* Copy contents of CQE for deferred processing */
/*
 * Extraction residue from the hosting webpage (translated from German):
 * "The information on this website was carefully compiled to the best of
 * our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 * This text is not part of the driver source.
 */