// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/workqueue.h>
#include "fnic.h"
#include "fdls_fc.h"
#include "fnic_fdls.h"
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_transport_fc.h>
#include <linux/utsname.h>
#define FC_FC4_TYPE_SCSI 0x08
#define PORT_SPEED_BIT_8 8
#define PORT_SPEED_BIT_9 9
#define PORT_SPEED_BIT_14 14
#define PORT_SPEED_BIT_15 15
/* FNIC FDMI Register HBA Macros */
#define FNIC_FDMI_NUM_PORTS 1
#define FNIC_FDMI_NUM_HBA_ATTRS 9
#define FNIC_FDMI_TYPE_NODE_NAME 0X1
#define FNIC_FDMI_TYPE_MANUFACTURER 0X2
#define FNIC_FDMI_MANUFACTURER "Cisco Systems"
#define FNIC_FDMI_TYPE_SERIAL_NUMBER 0X3
#define FNIC_FDMI_TYPE_MODEL 0X4
#define FNIC_FDMI_TYPE_MODEL_DES 0X5
#define FNIC_FDMI_MODEL_DESCRIPTION "Cisco Virtual Interface Card"
#define FNIC_FDMI_TYPE_HARDWARE_VERSION 0X6
#define FNIC_FDMI_TYPE_DRIVER_VERSION 0X7
#define FNIC_FDMI_TYPE_ROM_VERSION 0X8
#define FNIC_FDMI_TYPE_FIRMWARE_VERSION 0X9
#define FNIC_FDMI_NN_LEN 8
#define FNIC_FDMI_MANU_LEN 20
#define FNIC_FDMI_SERIAL_LEN 16
#define FNIC_FDMI_MODEL_LEN 12
#define FNIC_FDMI_MODEL_DES_LEN 56
#define FNIC_FDMI_HW_VER_LEN 16
#define FNIC_FDMI_DR_VER_LEN 28
#define FNIC_FDMI_ROM_VER_LEN 8
#define FNIC_FDMI_FW_VER_LEN 16
/* FNIC FDMI Register PA Macros */
#define FNIC_FDMI_TYPE_FC4_TYPES 0X1
#define FNIC_FDMI_TYPE_SUPPORTED_SPEEDS 0X2
#define FNIC_FDMI_TYPE_CURRENT_SPEED 0X3
#define FNIC_FDMI_TYPE_MAX_FRAME_SIZE 0X4
#define FNIC_FDMI_TYPE_OS_NAME 0X5
#define FNIC_FDMI_TYPE_HOST_NAME 0X6
#define FNIC_FDMI_NUM_PORT_ATTRS 6
#define FNIC_FDMI_FC4_LEN 32
#define FNIC_FDMI_SUPP_SPEED_LEN 4
#define FNIC_FDMI_CUR_SPEED_LEN 4
#define FNIC_FDMI_MFS_LEN 4
#define FNIC_FDMI_MFS 0x800
#define FNIC_FDMI_OS_NAME_LEN 16
#define FNIC_FDMI_HN_LEN 24
#define FDLS_FDMI_PLOGI_PENDING 0x1
#define FDLS_FDMI_REG_HBA_PENDING 0x2
#define FDLS_FDMI_RPA_PENDING 0x4
#define FDLS_FDMI_ABORT_PENDING 0x8
#define FDLS_FDMI_MAX_RETRY 3
#define RETRIES_EXHAUSTED(iport) \
(iport->fabric.retry_counter == FABRIC_LOGO_MAX_RETRY)
#define FNIC_TPORT_MAX_NEXUS_RESTART (8)
#define SCHEDULE_OXID_FREE_RETRY_TIME (300)
/* Private Functions */
static void fdls_fdmi_register_hba(struct fnic_iport_s *iport);
static void fdls_fdmi_register_pa(struct fnic_iport_s *iport);
static void fdls_send_rpn_id(struct fnic_iport_s *iport);
static void fdls_process_flogi_rsp(struct fnic_iport_s *iport,
struct fc_frame_header *fchdr,
void *rx_frame);
static void fnic_fdls_start_plogi(struct fnic_iport_s *iport);
static void fnic_fdls_start_flogi(struct fnic_iport_s *iport);
static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
uint32_t fcid,
uint64_t wwpn);
static void fdls_target_restart_nexus(struct fnic_tport_s *tport);
static void fdls_start_tport_timer(struct fnic_iport_s *iport,
struct fnic_tport_s *tport, int timeout);
static void fdls_tport_timer_callback(struct timer_list *t);
static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport);
static void fdls_start_fabric_timer(struct fnic_iport_s *iport,
int timeout);
static void fdls_init_plogi_frame(uint8_t *frame, struct fnic_iport_s *iport);
static void fdls_init_els_acc_frame(uint8_t *frame, struct fnic_iport_s *iport);
static void fdls_init_els_rjt_frame(uint8_t *frame, struct fnic_iport_s *iport);
static void fdls_init_logo_frame(uint8_t *frame, struct fnic_iport_s *iport);
static void fdls_init_fabric_abts_frame(uint8_t *frame,
struct fnic_iport_s *iport);
/*
 * fdls_alloc_frame - Allocate a zeroed FCoE frame buffer from the
 * iport's frame mempool.
 * @iport: Handle to iport instance
 *
 * Uses GFP_ATOMIC since callers may hold spinlocks.
 * Return: zeroed buffer of FNIC_FCOE_FRAME_MAXSZ bytes, or NULL.
 */
uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport)
{
	struct fnic *fnic = iport->fnic;
	uint8_t *frame;

	frame = mempool_alloc(fnic->frame_pool, GFP_ATOMIC);
	if (frame != NULL) {
		memset(frame, 0, FNIC_FCOE_FRAME_MAXSZ);
		return frame;
	}

	FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
		"Failed to allocate frame");
	return NULL;
}
/**
 * fdls_alloc_oxid - Allocate an oxid from the bitmap based oxid pool
 * @iport: Handle to iport instance
 * @oxid_frame_type: Type of frame to allocate
 * @active_oxid: the oxid which is in use
 *
 * The search starts at @next_idx and, if the tail of the bitmap is
 * fully occupied, wraps around to scan [0, next_idx) as well, so slots
 * freed behind the cursor remain allocatable.
 *
 * Called with fnic lock held
 */
uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
	uint16_t *active_oxid)
{
	struct fnic *fnic = iport->fnic;
	struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
	int idx;
	uint16_t oxid;

	lockdep_assert_held(&fnic->fnic_lock);

	/*
	 * Allocate next available oxid from bitmap
	 */
	idx = find_next_zero_bit(oxid_pool->bitmap, FNIC_OXID_POOL_SZ,
				 oxid_pool->next_idx);
	if (idx == FNIC_OXID_POOL_SZ) {
		/* Tail exhausted: wrap and scan the range skipped above */
		idx = find_next_zero_bit(oxid_pool->bitmap,
					 oxid_pool->next_idx, 0);
		if (idx == oxid_pool->next_idx) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				"Alloc oxid: all oxid slots are busy iport state:%d\n",
				iport->state);
			return FNIC_UNASSIGNED_OXID;
		}
	}

	/* The bit must have been clear; a set bit here is a pool bug */
	WARN_ON(test_and_set_bit(idx, oxid_pool->bitmap));
	oxid_pool->next_idx = (idx + 1) % FNIC_OXID_POOL_SZ; /* cycle through the bitmap */

	oxid = FNIC_OXID_ENCODE(idx, oxid_frame_type);
	*active_oxid = oxid;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"alloc oxid: 0x%x, iport state: %d\n",
		oxid, iport->state);
	return oxid;
}
/**
 * fdls_free_oxid_idx - Free the oxid using the idx
 * @iport: Handle to iport instance
 * @oxid_idx: The index to free
 *
 * Free the oxid immediately and make it available for new requests
 * Called with fnic lock held
 */
static void fdls_free_oxid_idx(struct fnic_iport_s *iport, uint16_t oxid_idx)
{
	struct fnic *fnic = iport->fnic;
	struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;

	lockdep_assert_held(&fnic->fnic_lock);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"free oxid idx: 0x%x\n", oxid_idx);

	/* A clear bit here means a double free or a bogus index */
	WARN_ON(!test_and_clear_bit(oxid_idx, oxid_pool->bitmap));
}
/**
 * fdls_reclaim_oxid_handler - Callback handler for delayed_oxid_work
 * @work: Handle to work_struct
 *
 * Scheduled when an oxid is to be freed later
 * After freeing expired oxid(s), the handler schedules
 * another callback with the remaining time
 * of next unexpired entry in the reclaim list.
 */
void fdls_reclaim_oxid_handler(struct work_struct *work)
{
	struct fnic_oxid_pool_s *oxid_pool = container_of(work,
		struct fnic_oxid_pool_s, oxid_reclaim_work.work);
	struct fnic_iport_s *iport = container_of(oxid_pool,
		struct fnic_iport_s, oxid_pool);
	struct fnic *fnic = iport->fnic;
	struct reclaim_entry_s *reclaim_entry, *next;
	unsigned long delay_j, cur_jiffies;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"Reclaim oxid callback\n");

	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

	/* Though the work was scheduled for one entry,
	 * walk through and free the expired entries which might have been scheduled
	 * at around the same time as the first entry
	 */
	list_for_each_entry_safe(reclaim_entry, next,
		&(oxid_pool->oxid_reclaim_list), links) {

		/* The list is always maintained in the order of expiry time */
		cur_jiffies = jiffies;
		if (time_before(cur_jiffies, reclaim_entry->expires))
			break;

		/* Expired: return the slot to the bitmap and drop the entry */
		list_del(&reclaim_entry->links);
		fdls_free_oxid_idx(iport, reclaim_entry->oxid_idx);
		kfree(reclaim_entry);
	}

	/* schedule to free up the next entry */
	if (!list_empty(&oxid_pool->oxid_reclaim_list)) {
		reclaim_entry = list_first_entry(&oxid_pool->oxid_reclaim_list,
			struct reclaim_entry_s, links);

		/* cur_jiffies was set in the loop above; the head entry is
		 * known to be unexpired at this point
		 */
		delay_j = reclaim_entry->expires - cur_jiffies;
		schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"Scheduling next callback at:%ld jiffies\n", delay_j);
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
/**
 * fdls_free_oxid - Helper function to free the oxid
 * @iport: Handle to iport instance
 * @oxid: oxid to free
 * @active_oxid: the oxid which is in use
 *
 * Called with fnic lock held
 */
void fdls_free_oxid(struct fnic_iport_s *iport,
	uint16_t oxid, uint16_t *active_oxid)
{
	/* Return the slot to the bitmap and clear the caller's tracker */
	fdls_free_oxid_idx(iport, FNIC_OXID_IDX(oxid));
	*active_oxid = FNIC_UNASSIGNED_OXID;
}
/**
 * fdls_schedule_oxid_free - Schedule oxid to be freed later
 * @iport: Handle to iport instance
 * @active_oxid: the oxid which is in use
 *
 * Gets called in a rare case scenario when both a command
 * (fdls or target discovery) timed out and the following ABTS
 * timed out as well, without a link change.
 *
 * Called with fnic lock held
 */
void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid)
{
	struct fnic *fnic = iport->fnic;
	struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
	struct reclaim_entry_s *reclaim_entry;
	unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
	int oxid_idx = FNIC_OXID_IDX(*active_oxid);

	lockdep_assert_held(&fnic->fnic_lock);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"Schedule oxid free. oxid: 0x%x\n", *active_oxid);

	*active_oxid = FNIC_UNASSIGNED_OXID;

	/* kernel kzalloc idiom: no cast, sizeof on the object — matches
	 * fdls_schedule_oxid_free_retry_work()
	 */
	reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_ATOMIC);
	if (!reclaim_entry) {
		FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
			"Failed to allocate memory for reclaim struct for oxid idx: %d\n",
			oxid_idx);

		/* Retry the scheduling */
		WARN_ON(test_and_set_bit(oxid_idx, oxid_pool->pending_schedule_free));
		schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, 0);
		return;
	}

	reclaim_entry->oxid_idx = oxid_idx;
	reclaim_entry->expires = round_jiffies(jiffies + delay_j);

	/* Appended in expiry order; the reclaim handler relies on this */
	list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);

	schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
}
/**
 * fdls_schedule_oxid_free_retry_work - Thread to schedule the
 * oxid to be freed later
 *
 * @work: Handle to the work struct
 */
void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
{
	struct fnic_oxid_pool_s *oxid_pool = container_of(work,
		struct fnic_oxid_pool_s, schedule_oxid_free_retry.work);
	struct fnic_iport_s *iport = container_of(oxid_pool,
		struct fnic_iport_s, oxid_pool);
	struct fnic *fnic = iport->fnic;
	struct reclaim_entry_s *reclaim_entry;
	unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
	unsigned long flags;
	int idx;

	/* Each set bit is an oxid slot whose reclaim-entry allocation
	 * failed earlier in fdls_schedule_oxid_free()
	 */
	for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) {

		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"Schedule oxid free. oxid idx: %d\n", idx);

		/* Work runs in process context, so GFP_KERNEL is safe here */
		reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL);
		if (!reclaim_entry) {
			/* Give up for now; bits stay set, so the rescheduled
			 * work will retry the remaining slots
			 */
			schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry,
				msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME));
			return;
		}

		clear_bit(idx, oxid_pool->pending_schedule_free);
		reclaim_entry->oxid_idx = idx;
		reclaim_entry->expires = round_jiffies(jiffies + delay_j);

		/* Only the list insert needs the fnic lock */
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
	}
}
/*
 * fdls_is_oxid_fabric_req - Is this oxid part of a fabric login or
 * name-server exchange (FLOGI/PLOGI/RPN/RFT/RFF/GPN_FT/LOGO)?
 */
static bool fdls_is_oxid_fabric_req(uint16_t oxid)
{
	switch (FNIC_FRAME_TYPE(oxid)) {
	case FNIC_FRAME_TYPE_FABRIC_FLOGI:
	case FNIC_FRAME_TYPE_FABRIC_PLOGI:
	case FNIC_FRAME_TYPE_FABRIC_RPN:
	case FNIC_FRAME_TYPE_FABRIC_RFT:
	case FNIC_FRAME_TYPE_FABRIC_RFF:
	case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
	case FNIC_FRAME_TYPE_FABRIC_LOGO:
		return true;
	default:
		return false;
	}
}
/*
 * fdls_is_oxid_fdmi_req - Is this oxid part of an FDMI exchange
 * (management-server PLOGI, RHBA or RPA)?
 */
static bool fdls_is_oxid_fdmi_req(uint16_t oxid)
{
	switch (FNIC_FRAME_TYPE(oxid)) {
	case FNIC_FRAME_TYPE_FDMI_PLOGI:
	case FNIC_FRAME_TYPE_FDMI_RHBA:
	case FNIC_FRAME_TYPE_FDMI_RPA:
		return true;
	default:
		return false;
	}
}
/*
 * fdls_is_oxid_tgt_req - Is this oxid part of a target-port exchange
 * (PLOGI, PRLI, ADISC or LOGO to a tport)?
 */
static bool fdls_is_oxid_tgt_req(uint16_t oxid)
{
	switch (FNIC_FRAME_TYPE(oxid)) {
	case FNIC_FRAME_TYPE_TGT_PLOGI:
	case FNIC_FRAME_TYPE_TGT_PRLI:
	case FNIC_FRAME_TYPE_TGT_ADISC:
	case FNIC_FRAME_TYPE_TGT_LOGO:
		return true;
	default:
		return false;
	}
}
/* Restart round-robin oxid allocation from the first slot */
static void fdls_reset_oxid_pool(struct fnic_iport_s *iport)
{
	iport->oxid_pool.next_idx = 0;
}
/*
 * fnic_del_fabric_timer_sync - Synchronously cancel the fabric retry timer.
 * @fnic: Handle to fnic instance
 *
 * Called with fnic_lock held. The lock is dropped around
 * timer_delete_sync() since that call can wait for a running callback
 * (NOTE(review): presumably the callback takes fnic_lock — verify).
 * del_timer_inprogress flags the teardown to other contexts while the
 * lock is released.
 */
void fnic_del_fabric_timer_sync(struct fnic *fnic)
{
	fnic->iport.fabric.del_timer_inprogress = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
	timer_delete_sync(&fnic->iport.fabric.retry_timer);
	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	fnic->iport.fabric.del_timer_inprogress = 0;
}
/*
 * fnic_del_tport_timer_sync - Synchronously cancel a tport's retry timer.
 * @fnic: Handle to fnic instance
 * @tport: Target port whose timer is being cancelled
 *
 * Same lock-drop pattern as fnic_del_fabric_timer_sync():
 * timer_delete_sync() may wait for a running callback, so fnic_lock is
 * released across it, with del_timer_inprogress marking the window.
 */
void fnic_del_tport_timer_sync(struct fnic *fnic,
			struct fnic_tport_s *tport)
{
	tport->del_timer_inprogress = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
	timer_delete_sync(&tport->retry_timer);
	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
	tport->del_timer_inprogress = 0;
}
/*
 * fdls_start_fabric_timer - (Re)arm the fabric discovery retry timer.
 * @iport: Handle to iport instance
 * @timeout: timeout in milliseconds
 *
 * Any pending timer is cancelled first. The retry counter is bumped
 * only when no fabric ABTS is outstanding, so an abort sequence does
 * not consume extra retries.
 */
static void
fdls_start_fabric_timer(struct fnic_iport_s *iport, int timeout)
{
	u64 fabric_tov;
	struct fnic *fnic = iport->fnic;

	if (iport->fabric.timer_pending) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"iport fcid: 0x%x: Canceling fabric disc timer\n",
			iport->fcid);
		fnic_del_fabric_timer_sync(fnic);
		iport->fabric.timer_pending = 0;
	}

	if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
		iport->fabric.retry_counter++;

	fabric_tov = jiffies + msecs_to_jiffies(timeout);
	mod_timer(&iport->fabric.retry_timer, round_jiffies(fabric_tov));
	iport->fabric.timer_pending = 1;
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"fabric timer is %d ", timeout);
}
/*
 * fdls_start_tport_timer - (Re)arm a target port's retry timer.
 * @iport: Handle to iport instance
 * @tport: Target port to arm the timer for
 * @timeout: timeout in milliseconds
 *
 * Mirrors fdls_start_fabric_timer(): cancel any pending timer, bump
 * the retry counter only when no tport ABTS is outstanding, then arm.
 */
static void
fdls_start_tport_timer(struct fnic_iport_s *iport,
			struct fnic_tport_s *tport, int timeout)
{
	u64 fabric_tov;
	struct fnic *fnic = iport->fnic;

	if (tport->timer_pending) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"tport fcid 0x%x: Canceling disc timer\n",
			tport->fcid);
		fnic_del_tport_timer_sync(fnic, tport);
		tport->timer_pending = 0;
	}

	if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED))
		tport->retry_counter++;

	fabric_tov = jiffies + msecs_to_jiffies(timeout);
	mod_timer(&tport->retry_timer, round_jiffies(fabric_tov));
	tport->timer_pending = 1;
}
/*
 * fdls_init_plogi_frame - Initialize a template PLOGI request in @frame.
 * @frame: Start of the FCoE frame buffer
 * @iport: Handle to iport instance
 *
 * Fills the FC header (D_ID preset to the directory server, 0xFFFFFC)
 * and the common/class-3 service parameters, then stamps in this
 * iport's WWPN/WWNN, RDF size and S_ID. Callers override D_ID/OX_ID as
 * needed (e.g. fdls_send_fdmi_plogi() retargets the management server).
 *
 * Marked static to match the forward declaration at the top of the
 * file (linkage was already internal).
 */
static void fdls_init_plogi_frame(uint8_t *frame,
		struct fnic_iport_s *iport)
{
	struct fc_std_flogi *pplogi;
	uint8_t s_id[3];

	pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*pplogi = (struct fc_std_flogi) {
		.fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFC},
			  .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			  .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
		.els = {
			.fl_cmd = ELS_PLOGI,
			.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI,
				   .sp_lo_ver = FNIC_FC_PH_VER_LO,
				   .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT),
				   .sp_features = cpu_to_be16(FC_SP_FT_CIRO),
				   .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ),
				   .sp_tot_seq = cpu_to_be16(FNIC_FC_CONCUR_SEQS),
				   .sp_rel_off = cpu_to_be16(FNIC_FC_RO_INFO),
				   .sp_e_d_tov = cpu_to_be32(FC_DEF_E_D_TOV)},
			.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ),
			.fl_cssp[2].cp_rdfs = cpu_to_be16(0x800),
			.fl_cssp[2].cp_con_seq = cpu_to_be16(0xFF),
			.fl_cssp[2].cp_open_seq = 1}
	};

	FNIC_STD_SET_NPORT_NAME(&pplogi->els.fl_wwpn, iport->wwpn);
	FNIC_STD_SET_NODE_NAME(&pplogi->els.fl_wwnn, iport->wwnn);
	FNIC_LOGI_SET_RDF_SIZE(pplogi->els, iport->max_payload_size);

	hton24(s_id, iport->fcid);
	FNIC_STD_SET_S_ID(pplogi->fchdr, s_id);
}
/*
 * fdls_init_els_acc_frame - Initialize a template ELS LS_ACC response.
 * @frame: Start of the FCoE frame buffer
 * @iport: Handle to iport instance
 *
 * S_ID is set to this iport's FC_ID and RX_ID left unassigned; callers
 * fill in D_ID and OX_ID from the request being accepted.
 */
static void fdls_init_els_acc_frame(uint8_t *frame,
		struct fnic_iport_s *iport)
{
	struct fc_std_els_acc_rsp *pels_acc;
	uint8_t s_id[3];

	pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*pels_acc = (struct fc_std_els_acc_rsp) {
		.fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP,
			  .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}},
		.acc.la_cmd = ELS_LS_ACC,
	};

	hton24(s_id, iport->fcid);
	FNIC_STD_SET_S_ID(pels_acc->fchdr, s_id);
	FNIC_STD_SET_RX_ID(pels_acc->fchdr, FNIC_UNASSIGNED_RXID);
}
/*
 * fdls_init_els_rjt_frame - Initialize a template ELS LS_RJT response.
 * @frame: Start of the FCoE frame buffer
 * @iport: unused here; kept so the frame-init helpers share a signature
 *
 * Only the FC header basics and RX_ID are preset; addressing fields
 * and the reject reason codes are filled in by the caller.
 */
static void fdls_init_els_rjt_frame(uint8_t *frame,
		struct fnic_iport_s *iport)
{
	struct fc_std_els_rjt_rsp *pels_rjt;

	pels_rjt = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*pels_rjt = (struct fc_std_els_rjt_rsp) {
		.fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, .fh_type = FC_TYPE_ELS,
			  .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}},
		.rej.er_cmd = ELS_LS_RJT,
	};

	FNIC_STD_SET_RX_ID(pels_rjt->fchdr, FNIC_UNASSIGNED_RXID);
}
/*
 * fdls_init_logo_frame - Initialize a LOGO request for this iport.
 * @frame: Start of the FCoE frame buffer
 * @iport: Handle to iport instance
 *
 * The header S_ID, the payload N_Port ID and the port WWN all identify
 * this iport; D_ID and OX_ID are filled in by the caller.
 */
static void fdls_init_logo_frame(uint8_t *frame,
		struct fnic_iport_s *iport)
{
	struct fc_std_logo *plogo;
	uint8_t s_id[3];

	plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*plogo = (struct fc_std_logo) {
		.fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS,
			  .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}},
		.els.fl_cmd = ELS_LOGO,
	};

	hton24(s_id, iport->fcid);
	FNIC_STD_SET_S_ID(plogo->fchdr, s_id);
	/* LOGO payload repeats the N_Port ID being logged out */
	memcpy(plogo->els.fl_n_port_id, s_id, 3);

	FNIC_STD_SET_NPORT_NAME(&plogo->els.fl_n_port_wwn,
			iport->wwpn);
}
/*
 * fdls_init_fabric_abts_frame - Initialize a BLS ABTS frame template.
 * @frame: Start of the FCoE frame buffer
 * @iport: unused here; kept so the frame-init helpers share a signature
 *
 * S_ID is zeroed and D_ID left unset; callers fill in the addressing
 * and the OX_ID of the exchange being aborted.
 */
static void fdls_init_fabric_abts_frame(uint8_t *frame,
		struct fnic_iport_s *iport)
{
	struct fc_frame_header *pfabric_abts;

	pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*pfabric_abts = (struct fc_frame_header) {
		.fh_r_ctl = FC_RCTL_BA_ABTS,	/* ABTS */
		.fh_s_id = {0x00, 0x00, 0x00},
		.fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS,
		.fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00,
		.fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000,
		.fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID),
		.fh_parm_offset = 0x00000000,	/* bit:0 = 0 Abort a exchange */
	};
}
/*
 * fdls_send_rscn_resp - Accept an RSCN by sending an LS_ACC back to
 * its originator, echoing the request's OX_ID.
 * @iport: Handle to iport instance
 * @rscn_fchdr: FC header of the received RSCN
 */
static void
fdls_send_rscn_resp(struct fnic_iport_s *iport,
		struct fc_frame_header *rscn_fchdr)
{
	struct fc_std_els_acc_rsp *acc;
	struct fnic *fnic = iport->fnic;
	uint8_t *frame;
	uint16_t rsp_oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_std_els_acc_rsp);

	frame = fdls_alloc_frame(iport);
	if (!frame) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send RSCN response");
		return;
	}

	/* Build a generic LS_ACC, then address it to the RSCN sender
	 * and echo the originator's exchange id
	 */
	fdls_init_els_acc_frame(frame, iport);
	acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	FNIC_STD_SET_D_ID(acc->fchdr, rscn_fchdr->fh_s_id);

	rsp_oxid = FNIC_STD_GET_OX_ID(rscn_fchdr);
	FNIC_STD_SET_OX_ID(acc->fchdr, rsp_oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: FDLS send RSCN response with oxid: 0x%x",
		iport->fcid, rsp_oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);
}
/*
 * fdls_send_logo_resp - Accept a received LOGO by sending an LS_ACC
 * back to its originator, echoing the request's OX_ID.
 * @iport: Handle to iport instance
 * @req_fchdr: FC header of the received LOGO
 */
static void
fdls_send_logo_resp(struct fnic_iport_s *iport,
		struct fc_frame_header *req_fchdr)
{
	struct fc_std_els_acc_rsp *acc;
	struct fnic *fnic = iport->fnic;
	uint8_t *frame;
	uint16_t rsp_oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_std_els_acc_rsp);

	frame = fdls_alloc_frame(iport);
	if (!frame) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send LOGO response");
		return;
	}

	/* Generic LS_ACC, addressed back to the LOGO sender with the
	 * originator's exchange id
	 */
	fdls_init_els_acc_frame(frame, iport);
	acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	FNIC_STD_SET_D_ID(acc->fchdr, req_fchdr->fh_s_id);

	rsp_oxid = FNIC_STD_GET_OX_ID(req_fchdr);
	FNIC_STD_SET_OX_ID(acc->fchdr, rsp_oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: FDLS send LOGO response with oxid: 0x%x",
		iport->fcid, rsp_oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);
}
/*
 * fdls_send_tport_abts - Abort the target port's active exchange.
 * @iport: Handle to iport instance
 * @tport: Target port whose active_oxid is being aborted
 *
 * Sends a BLS ABTS for tport->active_oxid, marks the abort as issued,
 * and arms the tport retry timer regardless of send success.
 */
void
fdls_send_tport_abts(struct fnic_iport_s *iport,
			struct fnic_tport_s *tport)
{
	uint8_t *frame;
	uint8_t s_id[3];
	uint8_t d_id[3];
	struct fnic *fnic = iport->fnic;
	struct fc_frame_header *ptport_abts;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_frame_header);

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send tport ABTS");
		return;
	}

	ptport_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*ptport_abts = (struct fc_frame_header) {
		.fh_r_ctl = FC_RCTL_BA_ABTS,	/* ABTS */
		.fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS,
		.fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00,
		.fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000,
		.fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID),
		.fh_parm_offset = 0x00000000,	/* bit:0 = 0 Abort a exchange */
	};

	hton24(s_id, iport->fcid);
	hton24(d_id, tport->fcid);
	FNIC_STD_SET_S_ID(*ptport_abts, s_id);
	FNIC_STD_SET_D_ID(*ptport_abts, d_id);
	tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED;

	/* Abort the exchange currently outstanding on this tport */
	FNIC_STD_SET_OX_ID(*ptport_abts, tport->active_oxid);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: FDLS send tport abts: tport->state: %d ",
		iport->fcid, tport->state);

	fnic_send_fcoe_frame(iport, frame, frame_size);

	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov);
}
static void fdls_send_fabric_abts(struct fnic_iport_s *iport)
{
uint8_t *frame;
uint8_t s_id[3];
uint8_t d_id[3];
struct fnic *fnic = iport->fnic;
struct fc_frame_header *pfabric_abts;
uint16_t oxid;
uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
sizeof (struct fc_frame_header);
frame = fdls_alloc_frame(iport);
if (frame == NULL) {
FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Failed to allocate frame to send fabric ABTS" );
return ;
}
pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
fdls_init_fabric_abts_frame(frame, iport);
hton24(s_id, iport->fcid);
switch (iport->fabric.state) {
case FDLS_STATE_FABRIC_LOGO:
hton24(d_id, FC_FID_FLOGI);
FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
break ;
case FDLS_STATE_FABRIC_FLOGI:
hton24(d_id, FC_FID_FLOGI);
FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
break ;
case FDLS_STATE_FABRIC_PLOGI:
FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
hton24(d_id, FC_FID_DIR_SERV);
FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
break ;
case FDLS_STATE_RPN_ID:
FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
hton24(d_id, FC_FID_DIR_SERV);
FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
break ;
case FDLS_STATE_SCR:
FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
hton24(d_id, FC_FID_FCTRL);
FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
break ;
case FDLS_STATE_REGISTER_FC4_TYPES:
FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
hton24(d_id, FC_FID_DIR_SERV);
FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
break ;
case FDLS_STATE_REGISTER_FC4_FEATURES:
FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
hton24(d_id, FC_FID_DIR_SERV);
FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
break ;
case FDLS_STATE_GPN_FT:
FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
hton24(d_id, FC_FID_DIR_SERV);
FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
break ;
default :
return ;
}
oxid = iport->active_oxid_fabric_req;
FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"0x%x: FDLS send fabric abts. iport->fabric.state: %d oxid: 0x%x" ,
iport->fcid, iport->fabric.state, oxid);
iport->fabric.flags |= FNIC_FDLS_FABRIC_ABORT_ISSUED;
fnic_send_fcoe_frame(iport, frame, frame_size);
/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
iport->fabric.timer_pending = 1;
}
/*
 * fdls_alloc_init_fdmi_abts_frame - Allocate and initialize an ABTS
 * frame addressed to the management server for an FDMI exchange.
 * @iport: Handle to iport instance
 * @oxid: OX_ID of the FDMI exchange being aborted
 *
 * Return: frame pointer ready to send, or NULL on allocation failure.
 */
static uint8_t *fdls_alloc_init_fdmi_abts_frame(struct fnic_iport_s *iport,
		uint16_t oxid)
{
	struct fc_frame_header *pfdmi_abts;
	uint8_t d_id[3];
	uint8_t *frame;
	struct fnic *fnic = iport->fnic;

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send FDMI ABTS");
		return NULL;
	}

	pfdmi_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	fdls_init_fabric_abts_frame(frame, iport);

	/* FDMI exchanges run against the management server */
	hton24(d_id, FC_FID_MGMT_SERV);
	FNIC_STD_SET_D_ID(*pfdmi_abts, d_id);
	FNIC_STD_SET_OX_ID(*pfdmi_abts, oxid);

	return frame;
}
/*
 * fdls_send_fdmi_abts - Abort whichever FDMI exchange(s) are pending.
 * @iport: Handle to iport instance
 *
 * A pending FDMI PLOGI is aborted alone; otherwise RHBA and/or RPA
 * aborts are sent for whichever registrations are outstanding. On all
 * paths that issued (or attempted) an abort, the FDMI timer is re-armed
 * and FDLS_FDMI_ABORT_PENDING is set. Frame-allocation failures return
 * without touching the timer, except when an RHBA abort already went
 * out and only the RPA frame failed (goto arm_timer).
 */
static void fdls_send_fdmi_abts(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fnic *fnic = iport->fnic;
	unsigned long fdmi_tov;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_frame_header);

	if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
		frame = fdls_alloc_init_fdmi_abts_frame(iport,
				iport->active_oxid_fdmi_plogi);
		if (frame == NULL)
			return;

		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"0x%x: FDLS send FDMI PLOGI abts. iport->fabric.state: %d oxid: 0x%x",
			iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_plogi);
		fnic_send_fcoe_frame(iport, frame, frame_size);
	} else {
		if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) {
			frame = fdls_alloc_init_fdmi_abts_frame(iport,
					iport->active_oxid_fdmi_rhba);
			if (frame == NULL)
				return;

			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				"0x%x: FDLS send FDMI RHBA abts. iport->fabric.state: %d oxid: 0x%x",
				iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_rhba);
			fnic_send_fcoe_frame(iport, frame, frame_size);
		}
		if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) {
			frame = fdls_alloc_init_fdmi_abts_frame(iport,
					iport->active_oxid_fdmi_rpa);
			if (frame == NULL) {
				/* If the RHBA abort was already sent above,
				 * still arm the timer to cover it
				 */
				if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)
					goto arm_timer;
				else
					return;
			}

			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				"0x%x: FDLS send FDMI RPA abts. iport->fabric.state: %d oxid: 0x%x",
				iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_rpa);
			fnic_send_fcoe_frame(iport, frame, frame_size);
		}
	}

arm_timer:
	fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
	mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
	iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: iport->fabric.fdmi_pending: 0x%x",
		iport->fcid, iport->fabric.fdmi_pending);
}
/*
 * fdls_send_fabric_flogi - Send FLOGI to the F_Port controller
 * (D_ID 0xFFFFFE) and arm the fabric retry timer.
 * @iport: Handle to iport instance
 *
 * On frame or OXID allocation failure, FNIC_FDLS_RETRY_FRAME is set
 * and the timer is still armed so the attempt is retried on expiry.
 */
static void fdls_send_fabric_flogi(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_flogi *pflogi;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_std_flogi);

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send FLOGI");
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	pflogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*pflogi = (struct fc_std_flogi) {
		.fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFE},
			  .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			  .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
		.els.fl_cmd = ELS_FLOGI,
		.els.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI,
			       .sp_lo_ver = FNIC_FC_PH_VER_LO,
			       .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT),
			       .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ)},
		.els.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ)
	};

	/* Identify this port and advertise its limits/timeouts */
	FNIC_STD_SET_NPORT_NAME(&pflogi->els.fl_wwpn, iport->wwpn);
	FNIC_STD_SET_NODE_NAME(&pflogi->els.fl_wwnn, iport->wwnn);
	FNIC_LOGI_SET_RDF_SIZE(pflogi->els, iport->max_payload_size);
	FNIC_LOGI_SET_R_A_TOV(pflogi->els, iport->r_a_tov);
	FNIC_LOGI_SET_E_D_TOV(pflogi->els, iport->e_d_tov);

	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI,
			&iport->active_oxid_fabric_req);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"0x%x: Failed to allocate OXID to send FLOGI",
			iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}
	FNIC_STD_SET_OX_ID(pflogi->fchdr, oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: FDLS send fabric FLOGI with oxid: 0x%x", iport->fcid,
		oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);
	atomic64_inc(&iport->iport_stats.fabric_flogi_sent);
err_out:
	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
}
/*
 * fdls_send_fabric_plogi - Send a PLOGI to the directory server and
 * arm the fabric retry timer.
 * @iport: Handle to iport instance
 *
 * On frame or OXID allocation failure, FNIC_FDLS_RETRY_FRAME is set
 * and the timer is still armed so the attempt is retried on expiry.
 */
static void fdls_send_fabric_plogi(struct fnic_iport_s *iport)
{
	struct fc_std_flogi *plogi;
	struct fnic *fnic = iport->fnic;
	uint8_t *frame;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_std_flogi);

	frame = fdls_alloc_frame(iport);
	if (!frame) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send PLOGI");
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	/* The PLOGI template already targets the directory server */
	fdls_init_plogi_frame(frame, iport);
	plogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);

	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_PLOGI,
			&iport->active_oxid_fabric_req);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"0x%x: Failed to allocate OXID to send fabric PLOGI",
			iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}
	FNIC_STD_SET_OX_ID(plogi->fchdr, oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: FDLS send fabric PLOGI with oxid: 0x%x", iport->fcid,
		oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);
	atomic64_inc(&iport->iport_stats.fabric_plogi_sent);

err_out:
	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
}
/*
 * fdls_send_fdmi_plogi - Send a PLOGI to the management server (FDMI).
 * @iport: Handle to iport instance
 *
 * Reuses the PLOGI template but retargets D_ID to the management
 * server. Whether or not the send succeeds, the FDMI timer is armed
 * and fdmi_pending is set to PLOGI-pending so the exchange is driven
 * (retried/aborted) from the timer.
 */
static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_flogi *pplogi;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_std_flogi);
	uint8_t d_id[3];
	u64 fdmi_tov;

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send FDMI PLOGI");
		goto err_out;
	}

	pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	fdls_init_plogi_frame(frame, iport);

	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_PLOGI,
			&iport->active_oxid_fdmi_plogi);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"0x%x: Failed to allocate OXID to send FDMI PLOGI",
			iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		goto err_out;
	}
	FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);

	/* Retarget the template from the directory server to FDMI */
	hton24(d_id, FC_FID_MGMT_SERV);
	FNIC_STD_SET_D_ID(pplogi->fchdr, d_id);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: FDLS send FDMI PLOGI with oxid: 0x%x",
		iport->fcid, oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);

err_out:
	fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
	mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
	iport->fabric.fdmi_pending = FDLS_FDMI_PLOGI_PENDING;
}
/*
 * fdls_send_rpn_id - Register this port's name (RPN_ID) with the name
 * server and arm the fabric retry timer.
 * @iport: Handle to iport instance
 *
 * Builds a CT request to the directory server (D_ID 0xFFFFFC) carrying
 * this iport's FC_ID and WWPN. On frame or OXID allocation failure,
 * FNIC_FDLS_RETRY_FRAME is set and the timer is still armed so the
 * attempt is retried on expiry.
 */
static void fdls_send_rpn_id(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_rpn_id *prpn_id;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_std_rpn_id);
	uint8_t fcid[3];

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send RPN_ID");
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	prpn_id = (struct fc_std_rpn_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*prpn_id = (struct fc_std_rpn_id) {
		.fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
			  .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
			  .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			  .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
		.fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
				  .ct_fs_subtype = FC_NS_SUBTYPE,
				  .ct_cmd = cpu_to_be16(FC_NS_RPN_ID)}
	};

	hton24(fcid, iport->fcid);
	FNIC_STD_SET_S_ID(prpn_id->fchdr, fcid);
	/* CT payload: bind this FC_ID to this port's WWPN */
	FNIC_STD_SET_PORT_ID(prpn_id->rpn_id, fcid);
	FNIC_STD_SET_PORT_NAME(prpn_id->rpn_id, iport->wwpn);

	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RPN,
			&iport->active_oxid_fabric_req);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"0x%x: Failed to allocate OXID to send RPN_ID",
			iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}
	FNIC_STD_SET_OX_ID(prpn_id->fchdr, oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: FDLS send RPN ID with oxid: 0x%x", iport->fcid,
		oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);

err_out:
	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
}
/*
 * fdls_send_scr - Send a State Change Registration (full registration)
 * to the fabric controller (D_ID 0xFFFFFD) and arm the retry timer.
 * @iport: Handle to iport instance
 *
 * On frame or OXID allocation failure, FNIC_FDLS_RETRY_FRAME is set
 * and the timer is still armed so the attempt is retried on expiry.
 */
static void fdls_send_scr(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_scr *pscr;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof(struct fc_std_scr);
	uint8_t fcid[3];

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			"Failed to allocate frame to send SCR");
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	pscr = (struct fc_std_scr *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*pscr = (struct fc_std_scr) {
		.fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ,
			  .fh_d_id = {0xFF, 0xFF, 0xFD}, .fh_type = FC_TYPE_ELS,
			  .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			  .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
		.scr = {.scr_cmd = ELS_SCR,
			.scr_reg_func = ELS_SCRF_FULL}
	};

	hton24(fcid, iport->fcid);
	FNIC_STD_SET_S_ID(pscr->fchdr, fcid);

	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_SCR,
			&iport->active_oxid_fabric_req);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			"0x%x: Failed to allocate OXID to send SCR",
			iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}
	FNIC_STD_SET_OX_ID(pscr->fchdr, oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		"0x%x: FDLS send SCR with oxid: 0x%x", iport->fcid,
		oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);
	atomic64_inc(&iport->iport_stats.fabric_scr_sent);

err_out:
	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
}
/**
 * fdls_send_gpn_ft - Send GPN_FT (Get Port Names by FC-4 type) to the name server
 * @iport: Handle to fnic iport
 * @fdls_state: Fabric state to transition to after the send attempt
 *
 * Builds a CT frame addressed to the directory/name server (d_id
 * 0xFFFFFC) requesting all ports registered with FC-4 type 0x08
 * (FCP/SCSI). The fabric retry timer is armed and the state is set
 * unconditionally at err_out, so failures are retried after
 * 2 * e_d_tov.
 */
static void fdls_send_gpn_ft(struct fnic_iport_s *iport, int fdls_state)
{
	uint8_t *frame;
	struct fc_std_gpn_ft *pgpn_ft;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof (struct fc_std_gpn_ft);
	uint8_t fcid[3];

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send GPN FT");
		/* Flag a retry; the timer armed at err_out drives the resend */
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	/* CT payload starts past the Ethernet/FCoE headers */
	pgpn_ft = (struct fc_std_gpn_ft *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*pgpn_ft = (struct fc_std_gpn_ft) {
		.fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
			  .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
			  .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			  .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
		.fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
				  .ct_fs_subtype = FC_NS_SUBTYPE,
				  .ct_cmd = cpu_to_be16(FC_NS_GPN_FT)},
		/* 0x08 = FCP (SCSI) FC-4 type */
		.gpn_ft.fn_fc4_type = 0x08
	};

	hton24(fcid, iport->fcid);
	FNIC_STD_SET_S_ID(pgpn_ft->fchdr, fcid);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_GPN_FT,
		&iport->active_oxid_fabric_req);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send GPN FT",
			     iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}
	FNIC_STD_SET_OX_ID(pgpn_ft->fchdr, oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send GPN FT with oxid: 0x%x", iport->fcid,
		     oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);

err_out:
	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
	fdls_set_state((&iport->fabric), fdls_state);
}
/**
 * fdls_send_tgt_adisc - Send an ADISC ELS to a remote (target) port
 * @iport: Handle to fnic iport
 * @tport: Handle to the remote port being re-validated
 *
 * Builds an Address Discovery request carrying this iport's N_Port ID,
 * WWPN and WWNN so the target can confirm the existing login is still
 * valid. The tport retry timer is armed unconditionally at err_out, so
 * failures are retried after 2 * e_d_tov.
 */
static void
fdls_send_tgt_adisc(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
{
	uint8_t *frame;
	struct fc_std_els_adisc *padisc;
	uint8_t s_id[3];
	uint8_t d_id[3];
	uint16_t oxid;
	struct fnic *fnic = iport->fnic;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof (struct fc_std_els_adisc);

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send TGT ADISC");
		/* Flag a retry; the timer armed at err_out drives the resend */
		tport->flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	padisc = (struct fc_std_els_adisc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);

	hton24(s_id, iport->fcid);
	hton24(d_id, tport->fcid);
	/* ADISC payload advertises our own N_Port ID */
	memcpy(padisc->els.adisc_port_id, s_id, 3);
	FNIC_STD_SET_S_ID(padisc->fchdr, s_id);
	FNIC_STD_SET_D_ID(padisc->fchdr, d_id);

	FNIC_STD_SET_F_CTL(padisc->fchdr, FNIC_ELS_REQ_FCTL << 16);
	FNIC_STD_SET_R_CTL(padisc->fchdr, FC_RCTL_ELS_REQ);
	FNIC_STD_SET_TYPE(padisc->fchdr, FC_TYPE_ELS);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_ADISC, &tport->active_oxid);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send TGT ADISC",
			     iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		tport->flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}
	FNIC_STD_SET_OX_ID(padisc->fchdr, oxid);
	FNIC_STD_SET_RX_ID(padisc->fchdr, FNIC_UNASSIGNED_RXID);

	/* Fresh exchange: clear any stale abort state for this tport */
	tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;

	FNIC_STD_SET_NPORT_NAME(&padisc->els.adisc_wwpn,
				iport->wwpn);
	FNIC_STD_SET_NODE_NAME(&padisc->els.adisc_wwnn,
				iport->wwnn);
	padisc->els.adisc_cmd = ELS_ADISC;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send ADISC to tgt fcid: 0x%x",
		     iport->fcid, tport->fcid);

	atomic64_inc(&iport->iport_stats.tport_adisc_sent);

	fnic_send_fcoe_frame(iport, frame, frame_size);

err_out:
	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov);
}
/**
 * fdls_delete_tport - Take a remote port offline and tear it down
 * @iport: Handle to fnic iport
 * @tport: Handle to the remote port to delete
 *
 * Moves the tport to OFFLINING, cancels its pending discovery timer,
 * resets outstanding exchanges with the remote port, then either queues
 * a TGT_EV_RPORT_DEL event for the tport work handler (if the port was
 * registered with the SCSI transport) or unlinks and frees the tport
 * directly.
 *
 * Must be called with fnic->fnic_lock held; the lock is dropped and
 * re-acquired around fnic_rport_exch_reset().
 *
 * Return: false if the tport is already offlining/offline or the event
 * allocation fails, true otherwise.
 */
bool fdls_delete_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
{
	struct fnic_tport_event_s *tport_del_evt;
	struct fnic *fnic = iport->fnic;

	if ((tport->state == FDLS_TGT_STATE_OFFLINING)
	    || (tport->state == FDLS_TGT_STATE_OFFLINE)) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "tport fcid 0x%x: tport state is offlining/offline\n",
			     tport->fcid);
		return false;
	}

	fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
	/*
	 * By setting this flag, the tport will not be seen in a look-up
	 * in an RSCN. Even if we move to multithreaded model, this tport
	 * will be destroyed and a new RSCN will have to create a new one
	 */
	tport->flags |= FNIC_FDLS_TPORT_TERMINATING;

	if (tport->timer_pending) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "tport fcid 0x%x: Canceling disc timer\n",
			     tport->fcid);
		fnic_del_tport_timer_sync(fnic, tport);
		tport->timer_pending = 0;
	}

	/* Drop the fnic lock around the exchange reset */
	spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
	fnic_rport_exch_reset(iport->fnic, tport->fcid);
	spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);

	if (tport->flags & FNIC_FDLS_SCSI_REGISTERED) {
		/* Defer rport removal to the tport work handler */
		tport_del_evt =
			kzalloc(sizeof (struct fnic_tport_event_s), GFP_ATOMIC);
		if (!tport_del_evt) {
			FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
				     "Failed to allocate memory for tport fcid: 0x%0x\n",
				     tport->fcid);
			return false;
		}
		tport_del_evt->event = TGT_EV_RPORT_DEL;
		tport_del_evt->arg1 = (void *) tport;
		list_add_tail(&tport_del_evt->links, &fnic->tport_event_list);
		queue_work(fnic_event_queue, &fnic->tport_work);
	} else {
		/* Never made it to the SCSI transport: free it here */
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "tport 0x%x not reg with scsi_transport. Freeing locally",
			     tport->fcid);
		list_del(&tport->links);
		kfree(tport);
	}

	return true;
}
/**
 * fdls_send_tgt_plogi - Send a PLOGI ELS to a remote (target) port
 * @iport: Handle to fnic iport
 * @tport: Handle to the remote port to log into
 *
 * Initializes a port-login frame via fdls_init_plogi_frame() and
 * directs it at the tport's FC_ID. The tport retry timer is armed
 * unconditionally at err_out with max(2 * e_d_tov, plogi_timeout), so
 * failures are retried after the timeout.
 */
static void
fdls_send_tgt_plogi(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
{
	uint8_t *frame;
	struct fc_std_flogi *pplogi;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof (struct fc_std_flogi);
	uint8_t d_id[3];
	uint32_t timeout;

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send TGT PLOGI");
		/* Flag a retry; the timer armed at err_out drives the resend */
		tport->flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	/* Template fills header and login payload; only D_ID/OXID vary */
	pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	fdls_init_plogi_frame(frame, iport);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PLOGI, &tport->active_oxid);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate oxid to send PLOGI to fcid: 0x%x",
			     iport->fcid, tport->fcid);
		mempool_free(frame, fnic->frame_pool);
		tport->flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}
	FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);

	/* Fresh exchange: clear any stale abort state for this tport */
	tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;

	hton24(d_id, tport->fcid);
	FNIC_STD_SET_D_ID(pplogi->fchdr, d_id);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send tgt PLOGI to tgt: 0x%x with oxid: 0x%x",
		     iport->fcid, tport->fcid, oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);
	atomic64_inc(&iport->iport_stats.tport_plogi_sent);

err_out:
	timeout = max(2 * iport->e_d_tov, iport->plogi_timeout);
	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_tport_timer(iport, tport, timeout);
}
/**
 * fnic_fc_plogi_rsp_rdf - Extract the usable receive data field size
 * @iport: Handle to fnic iport (used for debug logging)
 * @plogi_rsp: PLOGI accept payload from the remote port
 *
 * Reads the common (buffer-to-buffer) RDF size and the class-3 service
 * parameter RDF size from the PLOGI response and returns the smaller of
 * the two, which bounds the frame payload size for the session.
 */
static uint16_t
fnic_fc_plogi_rsp_rdf(struct fnic_iport_s *iport,
		      struct fc_std_flogi *plogi_rsp)
{
	struct fnic *fnic = iport->fnic;
	uint16_t common_rdfs;
	uint16_t class3_rdfs;

	common_rdfs = be16_to_cpu(FNIC_LOGI_RDF_SIZE(plogi_rsp->els));
	class3_rdfs = be16_to_cpu(plogi_rsp->els.fl_cssp[2].cp_rdfs)
			& FNIC_FC_C3_RDF;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "MFS: b2b_rdf_size: 0x%x spc3_rdf_size: 0x%x",
		     common_rdfs, class3_rdfs);

	return (common_rdfs < class3_rdfs) ? common_rdfs : class3_rdfs;
}
/**
 * fdls_send_register_fc4_types - Send RFT_ID to the name server
 * @iport: Handle to fnic iport
 *
 * Registers this port's supported FC-4 types (FCP and CT) with the
 * directory/name server (d_id 0xFFFFFC). On allocation failure the
 * function simply returns; otherwise the fabric retry timer is armed
 * after the send attempt so the request is retried after 2 * e_d_tov.
 */
static void fdls_send_register_fc4_types(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_rft_id *prft_id;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof (struct fc_std_rft_id);
	uint8_t fcid[3];

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send RFT");
		return;
	}

	prft_id = (struct fc_std_rft_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*prft_id = (struct fc_std_rft_id) {
		.fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
			  .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
			  .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			  .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
		.fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
				  .ct_fs_subtype = FC_NS_SUBTYPE,
				  .ct_cmd = cpu_to_be16(FC_NS_RFT_ID)}
	};

	hton24(fcid, iport->fcid);
	FNIC_STD_SET_S_ID(prft_id->fchdr, fcid);
	FNIC_STD_SET_PORT_ID(prft_id->rft_id, fcid);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFT,
		&iport->active_oxid_fabric_req);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send RFT",
			     iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		return;
	}
	FNIC_STD_SET_OX_ID(prft_id->fchdr, oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send RFT with oxid: 0x%x", iport->fcid,
		     oxid);

	/*
	 * FC-4 type bitmap: word 0 covers types 0-31 (bit FC_TYPE_FCP),
	 * word 1 covers types 32-63 (bit FC_TYPE_CT modulo the bits-per-word)
	 */
	prft_id->rft_id.fr_fts.ff_type_map[0] =
		cpu_to_be32(1 << FC_TYPE_FCP);

	prft_id->rft_id.fr_fts.ff_type_map[1] =
		cpu_to_be32(1 << (FC_TYPE_CT % FC_NS_BPW));

	fnic_send_fcoe_frame(iport, frame, frame_size);

	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
}
/**
 * fdls_send_register_fc4_features - Send RFF_ID to the name server
 * @iport: Handle to fnic iport
 *
 * Registers this port's FC-4 features with the directory/name server
 * (d_id 0xFFFFFC): feature bits 0x2 for FC-4 type FCP. On allocation
 * failure the function simply returns; otherwise the fabric retry
 * timer is armed after the send attempt so the request is retried
 * after 2 * e_d_tov.
 *
 * NOTE(review): fr_feat = 0x2 presumably marks the port as an FCP
 * initiator -- confirm against the FC-GS RFF_ID feature bit encoding.
 *
 * Fix vs previous version: dropped the redundant re-assignment of
 * rff_id.fr_type just before the send; it is already set to
 * FC_TYPE_FCP by the designated initializer.
 */
static void fdls_send_register_fc4_features(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_rff_id *prff_id;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof (struct fc_std_rff_id);
	uint8_t fcid[3];

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send RFF");
		return;
	}

	prff_id = (struct fc_std_rff_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*prff_id = (struct fc_std_rff_id) {
		.fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
			  .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
			  .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			  .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
		.fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
				  .ct_fs_subtype = FC_NS_SUBTYPE,
				  .ct_cmd = cpu_to_be16(FC_NS_RFF_ID)},
		.rff_id.fr_feat = 0x2,
		.rff_id.fr_type = FC_TYPE_FCP
	};

	hton24(fcid, iport->fcid);
	FNIC_STD_SET_S_ID(prff_id->fchdr, fcid);
	FNIC_STD_SET_PORT_ID(prff_id->rff_id, fcid);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFF,
		&iport->active_oxid_fabric_req);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send RFF",
			     iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		return;
	}
	FNIC_STD_SET_OX_ID(prff_id->fchdr, oxid);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send RFF with oxid: 0x%x", iport->fcid,
		     oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);

	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
}
/**
 * fdls_send_tgt_prli - Send a PRLI ELS to a remote (target) port
 * @iport: Handle to fnic iport
 * @tport: Handle to the remote port to establish the FCP process login with
 *
 * Builds a Process Login request for FC-4 type 0x08 (FCP); the payload
 * advertises a 16-byte service parameter page and total length 0x14.
 * NOTE(review): spp_params 0xA2 presumably encodes the initiator
 * function bits -- confirm against the FC-LS PRLI service parameter
 * definitions. The tport retry timer is armed unconditionally at
 * err_out with max(2 * e_d_tov, plogi_timeout).
 */
static void
fdls_send_tgt_prli(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
{
	uint8_t *frame;
	struct fc_std_els_prli *pprli;
	struct fnic *fnic = iport->fnic;
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof (struct fc_std_els_prli);
	uint8_t s_id[3];
	uint8_t d_id[3];
	uint32_t timeout;

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send TGT PRLI");
		/* Flag a retry; the timer armed at err_out drives the resend */
		tport->flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	pprli = (struct fc_std_els_prli *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*pprli = (struct fc_std_els_prli) {
		.fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS,
			  .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			  .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
		.els_prli = {.prli_cmd = ELS_PRLI,
			     .prli_spp_len = 16,
			     .prli_len = cpu_to_be16(0x14)},
		.sp = {.spp_type = 0x08, .spp_flags = 0x0020,
		       .spp_params = cpu_to_be32(0xA2)}
	};

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PRLI, &tport->active_oxid);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send TGT PRLI to 0x%x",
			     iport->fcid, tport->fcid);
		mempool_free(frame, fnic->frame_pool);
		tport->flags |= FNIC_FDLS_RETRY_FRAME;
		goto err_out;
	}

	/* Fresh exchange: clear any stale abort state for this tport */
	tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;

	hton24(s_id, iport->fcid);
	hton24(d_id, tport->fcid);

	FNIC_STD_SET_OX_ID(pprli->fchdr, oxid);
	FNIC_STD_SET_S_ID(pprli->fchdr, s_id);
	FNIC_STD_SET_D_ID(pprli->fchdr, d_id);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send PRLI to tgt: 0x%x with oxid: 0x%x",
		     iport->fcid, tport->fcid, oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);
	atomic64_inc(&iport->iport_stats.tport_prli_sent);

err_out:
	timeout = max(2 * iport->e_d_tov, iport->plogi_timeout);
	/* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
	fdls_start_tport_timer(iport, tport, timeout);
}
/**
* fdls_send_fabric_logo - Send flogo to the fcf
* @iport: Handle to fnic iport
*
* This function does not change or check the fabric state.
* It is the caller's responsibility to set the appropriate iport fabric
* state when this is called. Normally it is FDLS_STATE_FABRIC_LOGO.
* Currently this assumes to be called with fnic lock held.
*/
void fdls_send_fabric_logo(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_logo *plogo;
	struct fnic *fnic = iport->fnic;
	uint8_t d_id[3];
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof (struct fc_std_logo);

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send fabric LOGO");
		return;
	}

	/* Template fills header and LOGO payload; only D_ID/OXID vary */
	plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	fdls_init_logo_frame(frame, iport);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_LOGO,
		&iport->active_oxid_fabric_req);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send fabric LOGO",
			     iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		return;
	}
	FNIC_STD_SET_OX_ID(plogo->fchdr, oxid);

	/* The LOGO is addressed to the fabric login server (FC_FID_FLOGI) */
	hton24(d_id, FC_FID_FLOGI);
	FNIC_STD_SET_D_ID(plogo->fchdr, d_id);

	/* Fresh exchange: clear any stale fabric abort state */
	iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
		     iport->fcid, oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);

	fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
}
/**
* fdls_tgt_logout - Send plogo to the remote port
* @iport: Handle to fnic iport
* @tport: Handle to remote port
*
* This function does not change or check the fabric/tport state.
* It is the caller's responsibility to set the appropriate tport/fabric
* state when this is called. Normally that is fdls_tgt_state_plogo.
* This could be used to send plogo to nameserver process
* also not just target processes
*/
/*
 * Fix vs previous version: the frame-allocation failure message said
 * "fabric LOGO" (copy-paste from fdls_send_fabric_logo()); this
 * function sends a LOGO to a remote port, so the message now says
 * "tgt LOGO" to match the rest of this function's logging.
 */
void fdls_tgt_logout(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
{
	uint8_t *frame;
	struct fc_std_logo *plogo;
	struct fnic *fnic = iport->fnic;
	uint8_t d_id[3];
	uint16_t oxid;
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
			sizeof (struct fc_std_logo);

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send tgt LOGO");
		return;
	}

	/* Template fills header and LOGO payload; only D_ID/OXID vary */
	plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	fdls_init_logo_frame(frame, iport);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_LOGO, &tport->active_oxid);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send tgt LOGO",
			     iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		return;
	}
	FNIC_STD_SET_OX_ID(plogo->fchdr, oxid);

	hton24(d_id, tport->fcid);
	FNIC_STD_SET_D_ID(plogo->fchdr, d_id);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send tgt LOGO with oxid: 0x%x",
		     iport->fcid, oxid);

	fnic_send_fcoe_frame(iport, frame, frame_size);

	atomic64_inc(&iport->iport_stats.tport_logo_sent);
}
/**
 * fdls_tgt_discovery_start - Kick off per-target login/validation
 * @iport: Handle to fnic iport
 *
 * Walks the iport's tport list: tports flagged by an RSCN get an ADISC
 * to revalidate the existing session, brand-new tports (INIT state)
 * get a PLOGI; offlining/offline tports are skipped. The walk stops
 * early if the link bounces (link_down_cnt changes) or the iport
 * leaves the READY state. Finally the fabric state moves to
 * FDLS_STATE_TGT_DISCOVERY.
 */
static void fdls_tgt_discovery_start(struct fnic_iport_s *iport)
{
	struct fnic_tport_s *tport, *next;
	u32 old_link_down_cnt = iport->fnic->link_down_cnt;
	struct fnic *fnic = iport->fnic;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: Starting FDLS target discovery", iport->fcid);

	list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
		/* Abandon the walk if the link bounced or iport left READY */
		if ((old_link_down_cnt != iport->fnic->link_down_cnt)
		    || (iport->state != FNIC_IPORT_STATE_READY)) {
			break;
		}
		/* if we marked the tport as deleted due to GPN_FT
		 * We should not send ADISC anymore
		 */
		if ((tport->state == FDLS_TGT_STATE_OFFLINING) ||
		    (tport->state == FDLS_TGT_STATE_OFFLINE))
			continue;

		/* For tports which have received RSCN */
		if (tport->flags & FNIC_FDLS_TPORT_SEND_ADISC) {
			tport->retry_counter = 0;
			fdls_set_tport_state(tport, FDLS_TGT_STATE_ADISC);
			tport->flags &= ~FNIC_FDLS_TPORT_SEND_ADISC;
			fdls_send_tgt_adisc(iport, tport);
			continue;
		}
		if (fdls_get_tport_state(tport) != FDLS_TGT_STATE_INIT) {
			/* Not a new port, skip */
			continue;
		}
		tport->retry_counter = 0;
		fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
		fdls_send_tgt_plogi(iport, tport);
	}
	fdls_set_state((&iport->fabric), FDLS_STATE_TGT_DISCOVERY);
}
/*
* Function to restart the IT nexus if we received any out of
* sequence PLOGI/PRLI response from the target.
* The memory for the new tport structure is allocated
* inside fdls_create_tport and added to the iport's tport list.
* This will get freed later during tport_offline/linkdown
* or module unload. The new_tport pointer will go out of scope
* safely since the memory it is
* pointing to will be freed later
*/
static void fdls_target_restart_nexus(struct fnic_tport_s *tport)
{
	struct fnic_iport_s *iport = tport->iport;
	struct fnic_tport_s *new_tport = NULL;
	uint32_t fcid;
	uint64_t wwpn;
	int nexus_restart_count;
	struct fnic *fnic = iport->fnic;
	bool retval = true;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "tport fcid: 0x%x state: %d restart_count: %d",
		     tport->fcid, tport->state, tport->nexus_restart_count);

	/* Capture identity before deletion invalidates/frees the tport */
	fcid = tport->fcid;
	wwpn = tport->wwpn;
	nexus_restart_count = tport->nexus_restart_count;

	retval = fdls_delete_tport(iport, tport);
	if (retval != true) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Error deleting tport: 0x%x", fcid);
		return;
	}

	/* Give up once the restart budget for this nexus is exhausted */
	if (nexus_restart_count >= FNIC_TPORT_MAX_NEXUS_RESTART) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Exceeded nexus restart retries tport: 0x%x",
			     fcid);
		return;
	}

	/*
	 * Allocate memory for the new tport and add it to
	 * iport's tport list.
	 * This memory will be freed during tport_offline/linkdown
	 * or module unload. The pointer new_tport is safe to go
	 * out of scope when this function returns, since the memory
	 * it is pointing to is guaranteed to be freed later
	 * as mentioned above.
	 */
	new_tport = fdls_create_tport(iport, fcid, wwpn);
	if (!new_tport) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Error creating new tport: 0x%x", fcid);
		return;
	}

	new_tport->nexus_restart_count = nexus_restart_count + 1;
	/* Re-login to the target and record the state transition */
	fdls_send_tgt_plogi(iport, new_tport);
	fdls_set_tport_state(new_tport, FDLS_TGT_STATE_PLOGI);
}
/**
 * fnic_find_tport_by_fcid - Look up an active tport by FC_ID
 * @iport: Handle to fnic iport
 * @fcid: 24-bit FC address of the remote port
 *
 * Returns the matching tport that is not being torn down
 * (FNIC_FDLS_TPORT_TERMINATING), or NULL if none is found.
 *
 * Fix vs previous version: the loop never removes entries, so the
 * plain list_for_each_entry() iterator replaces the _safe variant and
 * its unused lookahead pointer.
 */
struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
					     uint32_t fcid)
{
	struct fnic_tport_s *tport;

	list_for_each_entry(tport, &(iport->tport_list), links) {
		if ((tport->fcid == fcid)
		    && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
			return tport;
	}
	return NULL;
}
/**
 * fdls_create_tport - Allocate and initialize a new remote-port record
 * @iport: Handle to fnic iport
 * @fcid: 24-bit FC address of the remote port
 * @wwpn: World-wide port name of the remote port
 *
 * Allocates a tport with GFP_ATOMIC, fills in protocol defaults
 * (max payload size, R_A_TOV, E_D_TOV), sets up the per-tport retry
 * timer, places the tport in INIT state and appends it to the iport's
 * tport list. Returns the new tport, or NULL on allocation failure.
 * The tport is freed later via the offline/teardown paths.
 */
static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
					      uint32_t fcid, uint64_t wwpn)
{
	struct fnic_tport_s *tport;
	struct fnic *fnic = iport->fnic;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "FDLS create tport: fcid: 0x%x wwpn: 0x%llx", fcid, wwpn);

	/* GFP_ATOMIC: may be called in a context that cannot sleep */
	tport = kzalloc(sizeof (struct fnic_tport_s), GFP_ATOMIC);
	if (!tport) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "Memory allocation failure while creating tport: 0x%x\n",
			     fcid);
		return NULL;
	}

	/* Protocol defaults until the login exchange negotiates real values */
	tport->max_payload_size = FNIC_FCOE_MAX_FRAME_SZ;
	tport->r_a_tov = FC_DEF_R_A_TOV;
	tport->e_d_tov = FC_DEF_E_D_TOV;
	tport->fcid = fcid;
	tport->wwpn = wwpn;
	tport->iport = iport;

	FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		     "Need to setup tport timer callback");

	timer_setup(&tport->retry_timer, fdls_tport_timer_callback, 0);

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Added tport 0x%x", tport->fcid);

	fdls_set_tport_state(tport, FDLS_TGT_STATE_INIT);
	list_add_tail(&tport->links, &iport->tport_list);
	atomic_set(&tport->in_flight, 0);
	return tport;
}
/**
 * fnic_find_tport_by_wwpn - Look up an active tport by WWPN
 * @iport: Handle to fnic iport
 * @wwpn: World-wide port name of the remote port
 *
 * Returns the matching tport that is not being torn down
 * (FNIC_FDLS_TPORT_TERMINATING), or NULL if none is found.
 *
 * Fix vs previous version: the loop never removes entries, so the
 * plain list_for_each_entry() iterator replaces the _safe variant and
 * its unused lookahead pointer.
 */
struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
					     uint64_t wwpn)
{
	struct fnic_tport_s *tport;

	list_for_each_entry(tport, &(iport->tport_list), links) {
		if ((tport->wwpn == wwpn)
		    && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
			return tport;
	}
	return NULL;
}
/**
 * fnic_fdmi_attr_set - Append one FDMI attribute entry to an attribute block
 * @attr_start: Start of the attribute block
 * @type: Attribute type code (big-endian on the wire)
 * @len: Length of @data in bytes (excluding the entry header)
 * @data: Attribute value to copy into the entry
 * @off: In/out running offset into the block; advanced past this entry
 */
static void
fnic_fdmi_attr_set(void *attr_start, u16 type, u16 len,
		   void *data, u32 *off)
{
	u8 *base = attr_start;
	struct fc_fdmi_attr_entry *entry;
	u16 entry_size = len + FC_FDMI_ATTR_ENTRY_HEADER_LEN;

	/* Entries are packed back-to-back; *off locates the next free slot */
	entry = (struct fc_fdmi_attr_entry *)(base + *off);
	put_unaligned_be16(type, &entry->type);
	put_unaligned_be16(entry_size, &entry->len);
	memcpy(entry->value, data, len);
	*off += entry_size;
}
/**
 * fdls_fdmi_register_hba - Send an FDMI RHBA (Register HBA) request
 * @iport: Handle to fnic iport
 *
 * Builds a CT frame addressed to the management server (d_id 0xFFFFFA)
 * registering this HBA and its attribute block: node name,
 * manufacturer, serial number, model, model description and
 * hardware/driver/ROM/firmware versions (FNIC_FDMI_NUM_HBA_ATTRS
 * entries). Attributes sourced from firmware are skipped when the
 * firmware info query fails. After the send, FDLS_FDMI_REG_HBA_PENDING
 * is set to mark the outstanding request.
 */
static void fdls_fdmi_register_hba(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_fdmi_rhba *prhba;
	struct fc_fdmi_attr_entry *fdmi_attr;
	uint8_t fcid[3];
	int err;
	struct fnic *fnic = iport->fnic;
	struct vnic_devcmd_fw_info *fw_info = NULL;
	uint16_t oxid;
	u32 attr_off_bytes, len;
	u8 data[64];
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send FDMI RHBA");
		return;
	}

	prhba = (struct fc_std_fdmi_rhba *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*prhba = (struct fc_std_fdmi_rhba) {
		.fchdr = {
			.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
			.fh_d_id = {0xFF, 0XFF, 0XFA},
			.fh_type = FC_TYPE_CT,
			.fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			.fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)
		},
		.fc_std_ct_hdr = {
			.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT,
			.ct_fs_subtype = FC_FDMI_SUBTYPE,
			.ct_cmd = cpu_to_be16(FC_FDMI_RHBA)
		},
	};

	hton24(fcid, iport->fcid);
	FNIC_STD_SET_S_ID(prhba->fchdr, fcid);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RHBA,
		&iport->active_oxid_fdmi_rhba);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send FDMI RHBA",
			     iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		return;
	}
	FNIC_STD_SET_OX_ID(prhba->fchdr, oxid);

	/* HBA identifier and registered-port list (one port: our WWPN) */
	put_unaligned_be64(iport->wwpn, &prhba->rhba.hbaid.id);
	put_unaligned_be32(FNIC_FDMI_NUM_PORTS, &prhba->rhba.port.numport);
	put_unaligned_be64(iport->wwpn, &prhba->rhba.port.port[0].portname);
	put_unaligned_be32(FNIC_FDMI_NUM_HBA_ATTRS,
			   &prhba->rhba.hba_attrs.numattrs);

	/* Append attribute entries; attr_off_bytes tracks payload growth */
	fdmi_attr = prhba->rhba.hba_attrs.attr;
	attr_off_bytes = 0;

	put_unaligned_be64(iport->wwnn, data);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_NODE_NAME,
			   FNIC_FDMI_NN_LEN, data, &attr_off_bytes);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "NN set, off=%d", attr_off_bytes);

	strscpy_pad(data, FNIC_FDMI_MANUFACTURER, FNIC_FDMI_MANU_LEN);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MANUFACTURER,
			   FNIC_FDMI_MANU_LEN, data, &attr_off_bytes);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "MFG set <%s>, off=%d", data, attr_off_bytes);

	/* Serial/HW/FW version attributes come from firmware info */
	err = vnic_dev_fw_info(fnic->vdev, &fw_info);
	if (!err) {
		strscpy_pad(data, fw_info->hw_serial_number,
			    FNIC_FDMI_SERIAL_LEN);
		fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SERIAL_NUMBER,
				   FNIC_FDMI_SERIAL_LEN, data, &attr_off_bytes);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "SERIAL set <%s>, off=%d", data, attr_off_bytes);
	}

	/* Clamp the subsystem description so it fits the model attribute */
	if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN)
		fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1;
	strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN,
			   data, &attr_off_bytes);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "MODEL set <%s>, off=%d", data, attr_off_bytes);

	strscpy_pad(data, FNIC_FDMI_MODEL_DESCRIPTION, FNIC_FDMI_MODEL_DES_LEN);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL_DES,
			   FNIC_FDMI_MODEL_DES_LEN, data, &attr_off_bytes);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "MODEL_DESC set <%s>, off=%d", data, attr_off_bytes);

	if (!err) {
		strscpy_pad(data, fw_info->hw_version, FNIC_FDMI_HW_VER_LEN);
		fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HARDWARE_VERSION,
				   FNIC_FDMI_HW_VER_LEN, data, &attr_off_bytes);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "HW_VER set <%s>, off=%d", data, attr_off_bytes);
	}

	strscpy_pad(data, DRV_VERSION, FNIC_FDMI_DR_VER_LEN);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_DRIVER_VERSION,
			   FNIC_FDMI_DR_VER_LEN, data, &attr_off_bytes);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "DRV_VER set <%s>, off=%d", data, attr_off_bytes);

	/* No option ROM version available; register a placeholder */
	strscpy_pad(data, "N/A", FNIC_FDMI_ROM_VER_LEN);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_ROM_VERSION,
			   FNIC_FDMI_ROM_VER_LEN, data, &attr_off_bytes);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "ROM_VER set <%s>, off=%d", data, attr_off_bytes);

	if (!err) {
		strscpy_pad(data, fw_info->fw_version, FNIC_FDMI_FW_VER_LEN);
		fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FIRMWARE_VERSION,
				   FNIC_FDMI_FW_VER_LEN, data, &attr_off_bytes);
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "FW_VER set <%s>, off=%d", data, attr_off_bytes);
	}

	/* Final frame length = fixed RHBA payload + appended attributes */
	len = sizeof (struct fc_std_fdmi_rhba) + attr_off_bytes;
	frame_size += len;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send FDMI RHBA with oxid: 0x%x fs: %d", iport->fcid,
		     oxid, frame_size);

	fnic_send_fcoe_frame(iport, frame, frame_size);
	iport->fabric.fdmi_pending |= FDLS_FDMI_REG_HBA_PENDING;
}
/**
 * fdls_fdmi_register_pa - Send an FDMI RPA (Register Port Attributes) request
 * @iport: Handle to fnic iport
 *
 * Builds a CT frame addressed to the management server (d_id 0xFFFFFA)
 * registering this port's attributes: supported FC-4 types, supported
 * and current speed, max frame size, OS device name and host name
 * (FNIC_FDMI_NUM_PORT_ATTRS entries). After the send,
 * FDLS_FDMI_RPA_PENDING is set to mark the outstanding request.
 *
 * Fixes vs previous version:
 *  - removed a dead store of the WWNN into the scratch buffer that was
 *    immediately overwritten by memset()
 *  - bounded the copy of the nodename into fc_host_system_hostname()
 *    (sprintf -> snprintf with FC_SYMBOLIC_NAME_SIZE, the size of the
 *    fc_host system_hostname buffer in scsi_transport_fc.h)
 */
static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
{
	uint8_t *frame;
	struct fc_std_fdmi_rpa *prpa;
	struct fc_fdmi_attr_entry *fdmi_attr;
	uint8_t fcid[3];
	struct fnic *fnic = iport->fnic;
	u32 port_speed_bm;
	u32 port_speed = vnic_dev_port_speed(fnic->vdev);
	uint16_t oxid;
	u32 attr_off_bytes, len;
	u8 tmp_data[16], data[64];
	uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;

	frame = fdls_alloc_frame(iport);
	if (frame == NULL) {
		FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
			     "Failed to allocate frame to send FDMI RPA");
		return;
	}

	prpa = (struct fc_std_fdmi_rpa *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
	*prpa = (struct fc_std_fdmi_rpa) {
		.fchdr = {
			.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
			.fh_d_id = {0xFF, 0xFF, 0xFA},
			.fh_type = FC_TYPE_CT,
			.fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
			.fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)
		},
		.fc_std_ct_hdr = {
			.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT,
			.ct_fs_subtype = FC_FDMI_SUBTYPE,
			.ct_cmd = cpu_to_be16(FC_FDMI_RPA)
		},
	};

	hton24(fcid, iport->fcid);
	FNIC_STD_SET_S_ID(prpa->fchdr, fcid);

	/* Track the exchange; the response is matched by this OXID */
	oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RPA,
		&iport->active_oxid_fdmi_rpa);
	if (oxid == FNIC_UNASSIGNED_OXID) {
		FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "0x%x: Failed to allocate OXID to send FDMI RPA",
			     iport->fcid);
		mempool_free(frame, fnic->frame_pool);
		return;
	}
	FNIC_STD_SET_OX_ID(prpa->fchdr, oxid);

	put_unaligned_be64(iport->wwpn, &prpa->rpa.port.portname);
	put_unaligned_be32(FNIC_FDMI_NUM_PORT_ATTRS,
			   &prpa->rpa.hba_attrs.numattrs);

	/* MDS does not support GIGE speed.
	 * Bit shift standard definitions from scsi_transport_fc.h to
	 * match FC spec.
	 */
	switch (port_speed) {
	case DCEM_PORTSPEED_10G:
	case DCEM_PORTSPEED_20G:
		/* There is no bit for 20G */
		port_speed_bm = FC_PORTSPEED_10GBIT << PORT_SPEED_BIT_14;
		break;
	case DCEM_PORTSPEED_25G:
		port_speed_bm = FC_PORTSPEED_25GBIT << PORT_SPEED_BIT_8;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		port_speed_bm = FC_PORTSPEED_40GBIT << PORT_SPEED_BIT_9;
		break;
	case DCEM_PORTSPEED_100G:
		port_speed_bm = FC_PORTSPEED_100GBIT << PORT_SPEED_BIT_8;
		break;
	default:
		port_speed_bm = FC_PORTSPEED_1GBIT << PORT_SPEED_BIT_15;
		break;
	}

	/* Append attribute entries; attr_off_bytes tracks payload growth */
	attr_off_bytes = 0;
	fdmi_attr = prpa->rpa.hba_attrs.attr;

	/* Supported FC-4 types bitmap: byte 2 bit 0 = type 0x08 (FCP) */
	memset(data, 0, FNIC_FDMI_FC4_LEN);
	data[2] = 1;
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FC4_TYPES,
			   FNIC_FDMI_FC4_LEN, data, &attr_off_bytes);

	put_unaligned_be32(port_speed_bm, data);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SUPPORTED_SPEEDS,
			   FNIC_FDMI_SUPP_SPEED_LEN, data, &attr_off_bytes);

	put_unaligned_be32(port_speed_bm, data);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_CURRENT_SPEED,
			   FNIC_FDMI_CUR_SPEED_LEN, data, &attr_off_bytes);

	put_unaligned_be32(FNIC_FDMI_MFS, data);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MAX_FRAME_SIZE,
			   FNIC_FDMI_MFS_LEN, data, &attr_off_bytes);

	snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d",
		 fnic->host->host_no);
	strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME,
			   FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "OS name set <%s>, off=%d", data, attr_off_bytes);

	/* Refresh the FC host's system hostname from the kernel nodename */
	snprintf(fc_host_system_hostname(fnic->host), FC_SYMBOLIC_NAME_SIZE,
		 "%s", utsname()->nodename);
	strscpy_pad(data, fc_host_system_hostname(fnic->host),
		    FNIC_FDMI_HN_LEN);
	fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME,
			   FNIC_FDMI_HN_LEN, data, &attr_off_bytes);
	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "Host name set <%s>, off=%d", data, attr_off_bytes);

	/* Final frame length = fixed RPA payload + appended attributes */
	len = sizeof (struct fc_std_fdmi_rpa) + attr_off_bytes;
	frame_size += len;

	FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "0x%x: FDLS send FDMI RPA with oxid: 0x%x fs: %d", iport->fcid,
		     oxid, frame_size);

	fnic_send_fcoe_frame(iport, frame, frame_size);
	iport->fabric.fdmi_pending |= FDLS_FDMI_RPA_PENDING;
}
void fdls_fabric_timer_callback(struct timer_list *t)
{
struct fnic_fdls_fabric_s *fabric = timer_container_of(fabric, t,
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=98 H=94 G=95
¤ Dauer der Verarbeitung: 0.10 Sekunden
(vorverarbeitet)
¤
*© Formatika GbR, Deutschland