/* * if delete_sa_index is valid then no one has serviced this * delayed delete
*/
spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
/* * delete_sa_index is invalidated when we find the new sa_index in * the incoming data stream. If it is not invalidated then we are * still looking for the new sa_index because there is no I/O and we * need to just force the rx delete and move on. Otherwise * we could get another rekey which will result in an error 66.
*/ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
uint16_t delete_sa_index = edif_entry->delete_sa_index;
} else {
ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: sa_ctl not found for delete_sa_index: %d\n",
__func__, edif_entry->delete_sa_index);
}
} else {
spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
}
}
/* * create a new list entry for this nport handle and * add an sa_update index to the list - called for sa_update
*/ staticint qla_edif_list_add_sa_update_index(fc_port_t *fcport,
uint16_t sa_index, uint16_t handle)
{ struct edif_list_entry *entry; unsignedlong flags = 0;
/* if the entry exists, then just update the sa_index */
entry = qla_edif_list_find_sa_index(fcport, handle); if (entry) {
entry->update_sa_index = sa_index;
entry->count = 0; return 0;
}
/* * This is the normal path - there should be no existing entry * when update is called. The exception is at startup * when update is called for the first two sa_indexes * followed by a delete of the first sa_index
*/
entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC); if (!entry) return -ENOMEM;
/* remove an entry from the list */ staticvoid qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
{ unsignedlong flags = 0;
/** * qla_delete_n2n_sess_and_wait: search for N2N session, tear it down and * wait for tear down to complete. In N2N topology, there is only one * session being active in tracking the remote device. * @vha: host adapter pointer * return code: 0 - found the session and completed the tear down. * 1 - timeout occurred. Caller to use link bounce to reset.
*/ staticint qla_delete_n2n_sess_and_wait(scsi_qla_host_t *vha)
{ struct fc_port *fcport; int rc = -EIO;
ulong expire = jiffies + 23 * HZ;
if (!N2N_TOPO(vha->hw)) return 0;
fcport = NULL;
list_for_each_entry(fcport, &vha->vp_fcports, list) { if (!fcport->n2n_flag) continue;
if (DBELL_INACTIVE(vha)) { /* mark doorbell as active since an app is now present */
vha->e_dbell.db_flags |= EDB_ACTIVE;
} else { goto out;
}
if (N2N_TOPO(vha->hw)) {
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
fcport->n2n_link_reset_cnt = 0;
if (vha->hw->flags.n2n_fw_acc_sec) { bool link_bounce = false; /* * While authentication app was not running, remote device * could still try to login with this local port. Let's * reset the session, reconnect and re-authenticate.
*/ if (qla_delete_n2n_sess_and_wait(vha))
link_bounce = true;
/* bounce the link to start login */ if (!vha->hw->flags.n2n_bigger || link_bounce) {
set_bit(N2N_LINK_RESET, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
} else {
qla2x00_wait_for_hba_online(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_hba_online(vha);
}
} else {
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_edif, vha, 0x2058, "FCSP - nn %8phN pn %8phN portid=%06x.\n",
fcport->node_name, fcport->port_name,
fcport->d_id.b24);
ql_dbg(ql_dbg_edif, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC " "loop_id %#04x s_id %06x logout %d " "keep %d els_logo %d disc state %d auth state %d" "stop state %d\n",
__func__, fcport->se_sess, fcport,
fcport->port_name, fcport->loop_id,
fcport->d_id.b24, fcport->logout_on_delete,
fcport->keep_nport_handle, fcport->send_els_logo,
fcport->disc_state, fcport->edif.auth_state,
fcport->edif.app_stop);
if (atomic_read(&vha->loop_state) == LOOP_DOWN) break;
if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { /* mark as active since an app is now present */
vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
} else {
ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
__func__);
}
if (!fcport) {
SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto errstate_exit;
}
/* * if port is online then this is a REKEY operation * Only do sa update checking
*/ if (atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Skipping PRLI complete based on rekey\n", __func__);
appplogireply.prli_status = 1;
SET_DID_STATUS(bsg_reply->result, DID_OK);
qla_edif_app_chk_sa_update(vha, fcport, &appplogireply); goto errstate_exit;
}
/* make sure in AUTH_PENDING or else reject */ if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
ql_dbg(ql_dbg_edif, vha, 0x911e, "%s wwpn %8phC is not in auth pending state (%x)\n",
__func__, fcport->port_name, fcport->disc_state);
SET_DID_STATUS(bsg_reply->result, DID_OK);
appplogireply.prli_status = 0; goto errstate_exit;
}
SET_DID_STATUS(bsg_reply->result, DID_OK);
appplogireply.prli_status = 1;
fcport->edif.authok = 1; if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
ql_dbg(ql_dbg_edif, vha, 0x911e, "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
__func__, fcport->port_name, fcport->edif.tx_sa_set,
fcport->edif.rx_sa_set);
SET_DID_STATUS(bsg_reply->result, DID_OK);
appplogireply.prli_status = 0; goto errstate_exit;
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, &app_req, sizeof(struct app_sinfo_req)); if (app_req.num_ports == 0) {
ql_dbg(ql_dbg_async, vha, 0x911d, "%s app did not indicate number of ports to return\n",
__func__);
SET_DID_STATUS(bsg_reply->result, DID_ERROR);
rval = -1;
}
staticstruct edif_sa_ctl *
qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame, int dir)
{ struct edif_sa_ctl *sa_ctl; struct qla_sa_update_frame *sap; int index = sa_frame->fast_sa_index; unsignedlong flags = 0;
sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL); if (!sa_ctl) { /* couldn't get space */
ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, "unable to allocate SA CTL\n"); return NULL;
}
/* * need to allocate sa_index here and save it * in both sa_ctl->index and sa_frame->fast_sa_index; * If alloc fails then delete sa_ctl and return NULL
*/
INIT_LIST_HEAD(&sa_ctl->next);
sap = &sa_ctl->sa_frame;
*sap = *sa_frame;
sa_ctl->index = index;
sa_ctl->fcport = fcport;
sa_ctl->flags = 0;
sa_ctl->state = 0L;
ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
__func__, sa_ctl, sa_ctl->index, sa_ctl->state);
spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); if (dir == SAU_FLG_TX)
list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list); else
list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
/**
 * qla24xx_check_sadb_avail_slot - map a SA update frame to an sa_index and
 * attach a new sa_ctl to the fcport for it.
 * @bsg_job: bsg request that carried the SA update (kept for interface
 *           compatibility with the caller; not referenced here)
 * @fcport: remote port the SA update is for
 * @sa_frame: SA update request; fast_sa_index is filled in on success
 *
 * Returns 0 on success, RX_DELETE_NO_EDIF_SA_INDEX when an rx delete had no
 * matching entry (a completion event is generated and the caller treats it
 * as good bsg status), INVALID_EDIF_SA_INDEX when no sa_index could be
 * mapped, or -1 when the sa_ctl allocation fails.
 */
static int
qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
	struct qla_sa_update_frame *sa_frame)
{
	struct edif_sa_ctl *sa_ctl = NULL;
	int dir;
	uint16_t sa_index;

	dir = (sa_frame->flags & SAU_FLG_TX);

	/* map the spi to an sa_index */
	sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
	if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
		/* process rx delete */
		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
		    "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
		    __func__, fcport->loop_id, sa_frame->spi);

		/* build and send the aen */
		fcport->edif.rx_sa_set = 1;
		fcport->edif.rx_sa_pending = 0;
		qla_edb_eventcreate(fcport->vha,
		    VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
		    QL_VND_SA_STAT_SUCCESS,
		    QL_VND_RX_SA_KEY, fcport);

		/* force a return of good bsg status; */
		return RX_DELETE_NO_EDIF_SA_INDEX;
	} else if (sa_index == INVALID_EDIF_SA_INDEX) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
		    __func__, sa_frame->spi, dir);
		return INVALID_EDIF_SA_INDEX;
	}

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
	    __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);

	/* This is a local copy of sa_frame. */
	sa_frame->fast_sa_index = sa_index;

	/* create the sa_ctl */
	sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
	if (!sa_ctl) {
		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
		    "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
		    __func__, sa_frame->spi, dir, sa_index);
		return -1;
	}

	set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);

	if (dir == SAU_FLG_TX)
		fcport->edif.tx_rekey_cnt++;
	else
		fcport->edif.rx_rekey_cnt++;

	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
	    "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
	    __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
	    fcport->edif.tx_rekey_cnt,
	    fcport->edif.rx_rekey_cnt, fcport->loop_id);

	/* NOTE(review): 0 is the success path return — confirm against caller */
	return 0;
}
/* Check if host is online */ if (!vha->flags.online) {
ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
rval = -EIO;
SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto done;
}
if (DBELL_INACTIVE(vha)) {
ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
rval = -EIO;
SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto done;
}
/* looking for rx index and delete */ if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
(sa_frame.flags & SAU_FLG_INV)) {
uint16_t nport_handle = fcport->loop_id;
uint16_t sa_index = sa_frame.fast_sa_index;
/* * make sure we have an existing rx key, otherwise just process * this as a straight delete just like TX * This is NOT a normal case, it indicates an error recovery or key cleanup * by the ipsec code above us.
*/
edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id); if (!edif_entry) {
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
__func__, fcport->loop_id, sa_index); goto force_rx_delete;
}
/* * if we have a forced delete for rx, remove the sa_index from the edif list * and proceed with normal delete. The rx delay timer should not be running
*/ if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
qla_edif_list_delete_sa_index(fcport, edif_entry);
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
__func__, fcport->loop_id, sa_index);
kfree(edif_entry); goto force_rx_delete;
}
/* * delayed rx delete * * if delete_sa_index is not invalid then there is already * a delayed index in progress, return bsg bad status
*/ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { struct edif_sa_ctl *sa_ctl;
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
__func__, edif_entry->handle, edif_entry->delete_sa_index);
/* free up the sa_ctl that was allocated with the sa_index */
sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
(sa_frame.flags & SAU_FLG_TX)); if (sa_ctl) {
ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: freeing sa_ctl for index %d\n",
__func__, sa_ctl->index);
qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
}
/* configure and start the rx delay timer */
edif_entry->fcport = fcport;
edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
__func__, edif_entry, sa_index, nport_handle);
/* * Start the timer when we queue the delayed rx delete. * This is an activity timer that goes off if we have not * received packets with the new sa_index
*/
add_timer(&edif_entry->timer);
/* * sa_delete for rx key with an active rx key including this one * add the delete rx sa index to the hash so we can look for it * in the rsp queue. Do this after making any changes to the * edif_entry as part of the rx delete.
*/
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
__func__, sa_index, nport_handle, bsg_job);
/* * rx index and update * add the index to the list and continue with normal update
*/
} elseif (((sa_frame.flags & SAU_FLG_TX) == 0) &&
((sa_frame.flags & SAU_FLG_INV) == 0)) { /* sa_update for rx key */
uint32_t nport_handle = fcport->loop_id;
uint16_t sa_index = sa_frame.fast_sa_index; int result;
/* * add the update rx sa index to the hash so we can look for it * in the rsp queue and continue normally
*/
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: adding update sa_index %d, lid 0x%x to edif_list\n",
__func__, sa_index, nport_handle);
result = qla_edif_list_add_sa_update_index(fcport, sa_index,
nport_handle); if (result) {
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
__func__, sa_index, nport_handle);
}
} if (sa_frame.flags & SAU_FLG_GMAC_MODE)
fcport->edif.aes_gmac = 1; else
fcport->edif.aes_gmac = 0;
force_rx_delete: /* * sa_update for both rx and tx keys, sa_delete for tx key * immediately process the request
*/
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) {
rval = -ENOMEM;
SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); goto done;
}
/** * qla_enode_init - initialize enode structs & lock * @vha: host adapter pointer * * should only be called when driver attaching
*/ void
qla_enode_init(scsi_qla_host_t *vha)
{ struct qla_hw_data *ha = vha->hw; char name[32];
if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) { /* list still active - error */
ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
__func__); return;
}
/* initialize lock which protects pur_core & init list */
spin_lock_init(&vha->pur_cinfo.pur_lock);
INIT_LIST_HEAD(&vha->pur_cinfo.head);
/** * qla_enode_stop - stop and clear and enode data * @vha: host adapter pointer * * called when app notified it is exiting
*/ void
qla_enode_stop(scsi_qla_host_t *vha)
{ unsignedlong flags; struct enode *node, *q;
if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { /* doorbell list not enabled */
ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode not active\n", __func__); return;
}
/* grab lock so list doesn't move */
spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
/* hopefully this is a null list at this point */
list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
ql_dbg(ql_dbg_edif, vha, 0x910f, "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
node->dinfo.nodecnt);
list_del_init(&node->list);
qla_enode_free(vha, node);
}
spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
}
if (DBELL_INACTIVE(vha)) { /* doorbell list not enabled */
ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled\n", __func__); return;
}
/* grab lock so list doesn't move */
spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */ /* hopefully this is a null list at this point */
list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
ql_dbg(ql_dbg_edif, vha, 0x910f, "%s freeing edb_node type=%x\n",
__func__, node->ntype);
qla_edb_node_free(vha, node);
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
node = kzalloc(sizeof(*node), GFP_ATOMIC); if (!node) { /* couldn't get space */
ql_dbg(ql_dbg_edif, vha, 0x9100, "edb node unable to be allocated\n"); return NULL;
}
/*
 * NOTE(review): the following text is website-extraction residue, not driver
 * code. English translation kept for reference:
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge. However, neither completeness nor correctness nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */