Source-code library statistics page — products/Sources/formal languages/C/Linux/drivers/scsi/ibmvscsi/ (open-source operating system, version 6.17.9). File dated 2025-10-24, size 184 kB.

Source file: ibmvfc.c — Language: C

 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
 *
 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) IBM Corporation, 2008
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "ibmvfc.h"

/* Tunable defaults; initial values come from ibmvfc.h */
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static u16 max_sectors = IBMVFC_MAX_SECTORS;
static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
/* Multiqueue / sub-CRQ channel tunables */
static unsigned int mq_enabled = IBMVFC_MQ;
static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;

/* Driver-wide adapter list; presumably guarded by ibmvfc_driver_lock — usage not visible in this chunk */
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
/* Author email restored; the angle-bracketed address (see the copyright
 * header above) was stripped when this file was scraped through HTML. */
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

/* Multiqueue / channel parameters (read-only once the module is loaded) */
module_param_named(mq, mq_enabled, uint, S_IRUGO);
MODULE_PARM_DESC(mq, "Enable multiqueue support. "
   "[Default=" __stringify(IBMVFC_MQ) "]");
module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
   "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
   "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
   "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. "
   "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");

/* General adapter tunables; S_IWUSR ones are writable at runtime via sysfs */
module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
   "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
   "Default timeout in seconds for initialization and EH commands. "
   "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
   "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_sectors, max_sectors, ushort, S_IRUGO);
MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. "
   "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]");
module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
   "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
   "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
   "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
   "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
   "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
   "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
   "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");

/*
 * Translation table from a (status, error) pair returned by the VIOS into
 * a SCSI midlayer result, a retry decision, and a descriptive name.  The
 * status field is matched as a bit class by ibmvfc_get_err_index(), not
 * by exact equality.
 */
static const struct {
 u16 status;  /* error class (fabric mapped / VIOS / FC / FC SCSI) */
 u16 error;  /* specific error within the class */
 u8 result;  /* SCSI host byte (DID_*) to report */
 u8 retry;  /* non-zero if the command may be retried */
 int log;  /* non-zero if this error should be logged */
 char *name;  /* human readable description */
} cmd_status [] = {
 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
 { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
 { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
 { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
 { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
 { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
 { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};

/* Forward declarations for host/target state machine steps used before their definitions */
static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

/* Sub-CRQ (de)registration helpers, defined later in the file */
static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);

/* Fallback string for status lookups that find no match */
static const char *unknown_error = "unknown error";

/**
 * h_reg_sub_crq - Register a sub-CRQ with the hypervisor
 * @unit_address:	VIO device unit address
 * @ioba:	I/O bus address of the queue page
 * @length:	length of the queue in bytes
 * @cookie:	returned handle identifying the new sub-CRQ
 * @irq:	returned interrupt number for the new sub-CRQ
 *
 * Return value:
 *	hcall status code
 **/
static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
			  unsigned long length, unsigned long *cookie,
			  unsigned long *irq)
{
	unsigned long outputs[PLPAR_HCALL_BUFSIZE];
	long rc = plpar_hcall(H_REG_SUB_CRQ, outputs, unit_address, ioba, length);

	*cookie = outputs[0];
	*irq = outputs[1];
	return rc;
}

/*
 * ibmvfc_check_caps - Test capability bits from the NPIV login response.
 * Returns 1 if the VIOS advertised any bit in @cap_flags, 0 otherwise.
 */
static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
{
	const u64 advertised = be64_to_cpu(vhost->login_buf->resp.capabilities);

	return (advertised & cap_flags) != 0;
}

/*
 * ibmvfc_get_fcp_iu - Locate the FCP command IU within a vfc command.
 * The v2 layout applies when the IBMVFC_HANDLE_VF_WWPN capability is set;
 * otherwise the legacy v1 layout is used.
 */
static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
						   struct ibmvfc_cmd *vfc_cmd)
{
	return ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN) ?
		&vfc_cmd->v2.iu : &vfc_cmd->v1.iu;
}

/*
 * ibmvfc_get_fcp_rsp - Locate the FCP response within a vfc command.
 * Layout selection mirrors ibmvfc_get_fcp_iu(): v2 when the
 * IBMVFC_HANDLE_VF_WWPN capability is set, v1 otherwise.
 */
static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
						 struct ibmvfc_cmd *vfc_cmd)
{
	return ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN) ?
		&vfc_cmd->v2.rsp : &vfc_cmd->v1.rsp;
}

#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt: ibmvfc event struct
 *
 **/

static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
 struct ibmvfc_host *vhost = evt->vhost;
 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
 struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
 struct ibmvfc_trace_entry *entry;
 /* The trace buffer is a ring; masking wraps the incremented index */
 int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

 entry = &vhost->trace[index];
 entry->evt = evt;
 entry->time = jiffies;
 entry->fmt = evt->crq.format;
 entry->type = IBMVFC_TRC_START;

 switch (entry->fmt) {
 case IBMVFC_CMD_FORMAT:
  /* SCSI command: record CDB opcode, target, LUN and transfer length */
  entry->op_code = iu->cdb[0];
  entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
  entry->lun = scsilun_to_int(&iu->lun);
  entry->tmf_flags = iu->tmf_flags;
  entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
  break;
 case IBMVFC_MAD_FORMAT:
  /* Management datagram: only the opcode is recorded at start */
  entry->op_code = be32_to_cpu(mad->opcode);
  break;
 default:
  break;
 }
}

/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt: ibmvfc event struct
 *
 **/

static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
 struct ibmvfc_host *vhost = evt->vhost;
 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
 struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
 struct ibmvfc_trace_entry *entry;
 /* The trace buffer is a ring; masking wraps the incremented index */
 int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

 entry = &vhost->trace[index];
 entry->evt = evt;
 entry->time = jiffies;
 entry->fmt = evt->crq.format;
 entry->type = IBMVFC_TRC_END;

 switch (entry->fmt) {
 case IBMVFC_CMD_FORMAT:
  /* SCSI command completion: capture status/error plus FCP response */
  entry->op_code = iu->cdb[0];
  entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
  entry->lun = scsilun_to_int(&iu->lun);
  entry->tmf_flags = iu->tmf_flags;
  entry->u.end.status = be16_to_cpu(vfc_cmd->status);
  entry->u.end.error = be16_to_cpu(vfc_cmd->error);
  entry->u.end.fcp_rsp_flags = rsp->flags;
  entry->u.end.rsp_code = rsp->data.info.rsp_code;
  entry->u.end.scsi_status = rsp->scsi_status;
  break;
 case IBMVFC_MAD_FORMAT:
  /* MAD completion: opcode plus returned MAD status */
  entry->op_code = be32_to_cpu(mad->opcode);
  entry->u.end.status = be16_to_cpu(mad->status);
  break;
 default:
  break;

 }
}

#else
/* Tracing compiled out: the trace hooks become no-ops */
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
#endif

/**
 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
 * @status: status / error class
 * @error: error
 *
 * Return value:
 * index into cmd_status / -EINVAL on failure
 **/

static int ibmvfc_get_err_index(u16 status, u16 error)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(cmd_status); idx++) {
		if (cmd_status[idx].error != error)
			continue;
		/* The status field is matched as a bit class, not exact equality */
		if ((cmd_status[idx].status & status) == cmd_status[idx].status)
			return idx;
	}

	return -EINVAL;
}

/**
 * ibmvfc_get_cmd_error - Find the error description for the fcp response
 * @status: status / error class
 * @error: error
 *
 * Return value:
 * error description string
 **/

static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
{
	int idx = ibmvfc_get_err_index(status, error);

	/* Fall back to a generic string for unrecognized pairs */
	return (idx < 0) ? unknown_error : cmd_status[idx].name;
}

/**
 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
 * @vhost:      ibmvfc host struct
 * @vfc_cmd: ibmvfc command struct
 *
 * Return value:
 * SCSI result value to return for completed command
 **/

static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
 int err;
 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
 int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);

 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
     ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
      rsp->data.info.rsp_code))
  return DID_ERROR << 16;

 err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
 if (err >= 0)
  return rsp->scsi_status | (cmd_status[err].result << 16);
 return rsp->scsi_status | (DID_ERROR << 16);
}

/**
 * ibmvfc_retry_cmd - Determine if error status is retryable
 * @status: status / error class
 * @error: error
 *
 * Return value:
 * 1 if error should be retried / 0 if it should not
 **/

static int ibmvfc_retry_cmd(u16 status, u16 error)
{
	int idx = ibmvfc_get_err_index(status, error);

	/* Unrecognized errors default to retryable */
	return (idx < 0) ? 1 : cmd_status[idx].retry;
}

/* Fallback string for explain-code lookups that find no match */
static const char *unknown_fc_explain = "unknown fc explain";

/* FC ELS reject "explanation" codes mapped to description strings */
static const struct {
 u16 fc_explain;
 char *name;
} ls_explain [] = {
 { 0x00, "no additional explanation" },
 { 0x01, "service parameter error - options" },
 { 0x03, "service parameter error - initiator control" },
 { 0x05, "service parameter error - recipient control" },
 { 0x07, "service parameter error - received data field size" },
 { 0x09, "service parameter error - concurrent seq" },
 { 0x0B, "service parameter error - credit" },
 { 0x0D, "invalid N_Port/F_Port_Name" },
 { 0x0E, "invalid node/Fabric Name" },
 { 0x0F, "invalid common service parameters" },
 { 0x11, "invalid association header" },
 { 0x13, "association header required" },
 { 0x15, "invalid originator S_ID" },
 { 0x17, "invalid OX_ID-RX-ID combination" },
 { 0x19, "command (request) already in progress" },
 { 0x1E, "N_Port Login requested" },
 { 0x1F, "Invalid N_Port_ID" },
};

/* FC generic services reject "explanation" codes mapped to description strings */
static const struct {
 u16 fc_explain;
 char *name;
} gs_explain [] = {
 { 0x00, "no additional explanation" },
 { 0x01, "port identifier not registered" },
 { 0x02, "port name not registered" },
 { 0x03, "node name not registered" },
 { 0x04, "class of service not registered" },
 { 0x06, "initial process associator not registered" },
 { 0x07, "FC-4 TYPEs not registered" },
 { 0x08, "symbolic port name not registered" },
 { 0x09, "symbolic node name not registered" },
 { 0x0A, "port type not registered" },
 { 0xF0, "authorization exception" },
 { 0xF1, "authentication exception" },
 { 0xF2, "data base full" },
 { 0xF3, "data base empty" },
 { 0xF4, "processing request" },
 { 0xF5, "unable to verify connection" },
 { 0xF6, "devices not in a common zone" },
};

/**
 * ibmvfc_get_ls_explain - Return the FC Explain description text
 * @status: FC Explain status
 *
 * Returns:
 * error string
 **/

static const char *ibmvfc_get_ls_explain(u16 status)
{
	size_t idx;

	/* Linear scan is fine: the table is tiny and lookups are rare */
	for (idx = 0; idx < ARRAY_SIZE(ls_explain); idx++) {
		if (ls_explain[idx].fc_explain == status)
			return ls_explain[idx].name;
	}

	return unknown_fc_explain;
}

/**
 * ibmvfc_get_gs_explain - Return the FC Explain description text
 * @status: FC Explain status
 *
 * Returns:
 * error string
 **/

static const char *ibmvfc_get_gs_explain(u16 status)
{
	size_t idx;

	/* Linear scan is fine: the table is tiny and lookups are rare */
	for (idx = 0; idx < ARRAY_SIZE(gs_explain); idx++) {
		if (gs_explain[idx].fc_explain == status)
			return gs_explain[idx].name;
	}

	return unknown_fc_explain;
}

/* FC failure type codes mapped to description strings */
static const struct {
 enum ibmvfc_fc_type fc_type;
 char *name;
} fc_type [] = {
 { IBMVFC_FABRIC_REJECT, "fabric reject" },
 { IBMVFC_PORT_REJECT, "port reject" },
 { IBMVFC_LS_REJECT, "ELS reject" },
 { IBMVFC_FABRIC_BUSY, "fabric busy" },
 { IBMVFC_PORT_BUSY, "port busy" },
 { IBMVFC_BASIC_REJECT, "basic reject" },
};

/* Fallback string when the type is not in the table above */
static const char *unknown_fc_type = "unknown fc type";

/**
 * ibmvfc_get_fc_type - Return the FC Type description text
 * @status: FC Type error status
 *
 * Returns:
 * error string
 **/

static const char *ibmvfc_get_fc_type(u16 status)
{
	size_t idx;

	/* Linear scan is fine: the table is tiny and lookups are rare */
	for (idx = 0; idx < ARRAY_SIZE(fc_type); idx++) {
		if (fc_type[idx].fc_type == status)
			return fc_type[idx].name;
	}

	return unknown_fc_type;
}

/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt: ibmvfc target struct
 * @action: action to perform
 *
 * Returns:
 * 0 if action changed / non-zero if not changed
 **/

static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
      enum ibmvfc_target_action action)
{
 int rc = -EINVAL;

 /*
  * Target teardown is a one-way state machine: only the transitions
  * listed below are permitted; anything else is silently ignored and
  * rc stays -EINVAL.
  */
 switch (tgt->action) {
 case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
  if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
      action == IBMVFC_TGT_ACTION_DEL_RPORT) {
   tgt->action = action;
   rc = 0;
  }
  break;
 case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
  if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
      action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
   tgt->action = action;
   rc = 0;
  }
  break;
 case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
  if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
   tgt->action = action;
   rc = 0;
  }
  break;
 case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
  if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
   tgt->action = action;
   rc = 0;
  }
  break;
 case IBMVFC_TGT_ACTION_DEL_RPORT:
  if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
   tgt->action = action;
   rc = 0;
  }
  break;
 case IBMVFC_TGT_ACTION_DELETED_RPORT:
  /* Terminal state: no further transitions allowed */
  break;
 default:
  /* Non-teardown states accept any requested action */
  tgt->action = action;
  rc = 0;
  break;
 }

 /* Once teardown has been requested, the rport must not be (re-)added */
 if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
  tgt->add_rport = 0;

 return rc;
}

/**
 * ibmvfc_set_host_state - Set the state for the host
 * @vhost: ibmvfc host struct
 * @state: state to set host to
 *
 * Returns:
 * 0 if state changed / non-zero if not changed
 **/

static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
				 enum ibmvfc_host_state state)
{
	/* An offline host stays offline; refuse any state change */
	if (vhost->state == IBMVFC_HOST_OFFLINE)
		return -EINVAL;

	vhost->state = state;
	return 0;
}

/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost: ibmvfc host struct
 * @action: action to perform
 *
 **/

static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
       enum ibmvfc_host_action action)
{
 /*
  * Host actions are gated: most transitions are only legal from a
  * specific predecessor action, and a pending RESET/REENABLE may not
  * be overwritten by any ordinary action.
  */
 switch (action) {
 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
  if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
   vhost->action = action;
  break;
 case IBMVFC_HOST_ACTION_LOGO_WAIT:
  if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
   vhost->action = action;
  break;
 case IBMVFC_HOST_ACTION_INIT_WAIT:
  if (vhost->action == IBMVFC_HOST_ACTION_INIT)
   vhost->action = action;
  break;
 case IBMVFC_HOST_ACTION_QUERY:
  /* Queries may start only from idle-ish states */
  switch (vhost->action) {
  case IBMVFC_HOST_ACTION_INIT_WAIT:
  case IBMVFC_HOST_ACTION_NONE:
  case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
   vhost->action = action;
   break;
  default:
   break;
  }
  break;
 case IBMVFC_HOST_ACTION_TGT_INIT:
  if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
   vhost->action = action;
  break;
 case IBMVFC_HOST_ACTION_REENABLE:
 case IBMVFC_HOST_ACTION_RESET:
  /* Reset/reenable requests are always accepted */
  vhost->action = action;
  break;
 case IBMVFC_HOST_ACTION_INIT:
 case IBMVFC_HOST_ACTION_TGT_DEL:
 case IBMVFC_HOST_ACTION_LOGO:
 case IBMVFC_HOST_ACTION_QUERY_TGTS:
 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 case IBMVFC_HOST_ACTION_NONE:
 default:
  /* Never clobber a pending RESET/REENABLE with a lesser action */
  switch (vhost->action) {
  case IBMVFC_HOST_ACTION_RESET:
  case IBMVFC_HOST_ACTION_REENABLE:
   break;
  default:
   vhost->action = action;
   break;
  }
  break;
 }
}

/**
 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
 * @vhost: ibmvfc host struct
 *
 * Return value:
 * nothing
 **/

static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
	int idle = (vhost->action == IBMVFC_HOST_ACTION_NONE &&
		    vhost->state == IBMVFC_ACTIVE);

	if (!idle) {
		/* Busy: remember that a reinit was requested */
		vhost->reinit = 1;
	} else if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
	}

	wake_up(&vhost->work_wait_q);
}

/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt: ibmvfc target struct
 **/

static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	int rc = ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT);

	if (rc == 0) {
		/* Transition accepted: start the logout/delete job step */
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
		tgt->init_retries = 0;
	}

	wake_up(&tgt->vhost->work_wait_q);
}

/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost: ibmvfc host struct
 * @state: ibmvfc host state to enter
 *
 **/

static void ibmvfc_link_down(struct ibmvfc_host *vhost,
        enum ibmvfc_host_state state)
{
 struct ibmvfc_target *tgt;

 ENTER;
 /* Stop new commands from the midlayer while the link is down */
 scsi_block_requests(vhost->host);
 /* Schedule every known target for logout/removal */
 list_for_each_entry(tgt, &vhost->targets, queue)
  ibmvfc_del_tgt(tgt);
 ibmvfc_set_host_state(vhost, state);
 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
 vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
 /* Kick the worker thread to process the deletions */
 wake_up(&vhost->work_wait_q);
 LEAVE;
}

/**
 * ibmvfc_init_host - Start host initialization
 * @vhost: ibmvfc host struct
 *
 * Return value:
 * nothing
 **/

static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
 struct ibmvfc_target *tgt;

 /* Give up and take the adapter offline after too many failed attempts */
 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
  if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
   dev_err(vhost->dev,
    "Host initialization retries exceeded. Taking adapter offline\n");
   ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
   return;
  }
 }

 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
  /* Reset the async event queue to a clean state */
  memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
  vhost->async_crq.cur = 0;

  /*
   * After a client migration targets only need a fresh login;
   * otherwise tear them down and rediscover from scratch.
   */
  list_for_each_entry(tgt, &vhost->targets, queue) {
   if (vhost->client_migrated)
    tgt->need_login = 1;
   else
    ibmvfc_del_tgt(tgt);
  }

  scsi_block_requests(vhost->host);
  ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
  /* The worker thread will run NPIV login as the next job step */
  vhost->job_step = ibmvfc_npiv_login;
  wake_up(&vhost->work_wait_q);
 }
}

/**
 * ibmvfc_send_crq - Send a CRQ
 * @vhost: ibmvfc host struct
 * @word1: the first 64 bits of the data
 * @word2: the second 64 bits of the data
 *
 * Return value:
 * 0 on success / other on failure
 **/

static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
{
	struct vio_dev *vio = to_vio_dev(vhost->dev);

	/* Hand the two-word CRQ message to the hypervisor */
	return plpar_hcall_norets(H_SEND_CRQ, vio->unit_address, word1, word2);
}

/**
 * ibmvfc_send_sub_crq - Send a four-word message on a sub-CRQ
 * @vhost:	ibmvfc host struct
 * @cookie:	handle of the target sub-CRQ
 * @word1:	first 64 bits of the message
 * @word2:	second 64 bits of the message
 * @word3:	third 64 bits of the message
 * @word4:	fourth 64 bits of the message
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
			       u64 word2, u64 word3, u64 word4)
{
	struct vio_dev *vio = to_vio_dev(vhost->dev);

	return plpar_hcall_norets(H_SEND_SUB_CRQ, vio->unit_address, cookie,
				  word1, word2, word3, word4);
}

/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost: ibmvfc host struct
 *
 * Return value:
 * 0 on success / other on failure
 **/

static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
 ibmvfc_dbg(vhost, "Sending CRQ init\n");
 return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}

/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost: ibmvfc host struct
 *
 * Return value:
 * 0 on success / other on failure
 **/

static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
 ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
 return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}

/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost: ibmvfc host who owns the event pool
 * @queue:      ibmvfc queue struct
 *
 * Returns zero on success.
 **/

static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
      struct ibmvfc_queue *queue)
{
 int i;
 struct ibmvfc_event_pool *pool = &queue->evt_pool;

 ENTER;
 /* A queue with no depth needs no event pool */
 if (!queue->total_depth)
  return 0;

 pool->size = queue->total_depth;
 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
 if (!pool->events)
  return -ENOMEM;

 /* One DMA-coherent IU slot per event, addressed via pool->iu_token */
 pool->iu_storage = dma_alloc_coherent(vhost->dev,
           pool->size * sizeof(*pool->iu_storage),
           &pool->iu_token, 0);

 if (!pool->iu_storage) {
  kfree(pool->events);
  return -ENOMEM;
 }

 INIT_LIST_HEAD(&queue->sent);
 INIT_LIST_HEAD(&queue->free);
 queue->evt_free = queue->evt_depth;
 queue->reserved_free = queue->reserved_depth;
 spin_lock_init(&queue->l_lock);

 for (i = 0; i < pool->size; ++i) {
  struct ibmvfc_event *evt = &pool->events[i];

  /*
   * evt->active states
   *  1 = in flight
   *  0 = being completed
   * -1 = free/freed
   */

  atomic_set(&evt->active, -1);
  atomic_set(&evt->free, 1);
  evt->crq.valid = 0x80;
  evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
  evt->xfer_iu = pool->iu_storage + i;
  evt->vhost = vhost;
  evt->queue = queue;
  evt->ext_list = NULL;
  list_add_tail(&evt->queue_list, &queue->free);
 }

 LEAVE;
 return 0;
}

/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost: ibmvfc host who owns the event pool
 * @queue:      ibmvfc queue struct
 *
 **/

static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
       struct ibmvfc_queue *queue)
{
 int i;
 struct ibmvfc_event_pool *pool = &queue->evt_pool;

 ENTER;
 for (i = 0; i < pool->size; ++i) {
  list_del(&pool->events[i].queue_list);
  /* An event still marked in-use at teardown indicates a driver bug */
  BUG_ON(atomic_read(&pool->events[i].free) != 1);
  if (pool->events[i].ext_list)
   dma_pool_free(vhost->sg_pool,
          pool->events[i].ext_list,
          pool->events[i].ext_list_token);
 }

 kfree(pool->events);
 /* Release the shared IU storage allocated in ibmvfc_init_event_pool() */
 dma_free_coherent(vhost->dev,
     pool->size * sizeof(*pool->iu_storage),
     pool->iu_storage, pool->iu_token);
 LEAVE;
}

/**
 * ibmvfc_free_queue - Deallocate queue
 * @vhost: ibmvfc host struct
 * @queue: ibmvfc queue struct
 *
 * Unmaps dma and deallocates page for messages
 **/

static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
			      struct ibmvfc_queue *queue)
{
	/* Tear down the DMA-mapped message page first, then the event pool */
	dma_unmap_single(vhost->dev, queue->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs.handle);
	queue->msgs.handle = NULL;

	ibmvfc_free_event_pool(vhost, queue);
}

/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost: ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 **/

static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
 long rc = 0;
 struct vio_dev *vdev = to_vio_dev(vhost->dev);
 struct ibmvfc_queue *crq = &vhost->crq;

 ibmvfc_dbg(vhost, "Releasing CRQ\n");
 free_irq(vdev->irq, vhost);
 tasklet_kill(&vhost->tasklet);
 /* Retry H_FREE_CRQ while the hypervisor reports busy */
 do {
  if (rc)
   msleep(100);
  rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

 vhost->state = IBMVFC_NO_CRQ;
 vhost->logged_in = 0;

 ibmvfc_free_queue(vhost, crq);
}

/**
 * ibmvfc_reenable_crq_queue - reenables the CRQ
 * @vhost: ibmvfc host struct
 *
 * Return value:
 * 0 on success / other on failure
 **/

static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
 int rc = 0;
 struct vio_dev *vdev = to_vio_dev(vhost->dev);
 unsigned long flags;

 /* Sub-CRQs must be deregistered before toggling the main CRQ */
 ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);

 /* Re-enable the CRQ */
 do {
  if (rc)
   msleep(100);
  rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

 if (rc)
  dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);

 /* Reset login/channel bookkeeping under host lock then queue lock */
 spin_lock_irqsave(vhost->host->host_lock, flags);
 spin_lock(vhost->crq.q_lock);
 vhost->do_enquiry = 1;
 vhost->using_channels = 0;
 spin_unlock(vhost->crq.q_lock);
 spin_unlock_irqrestore(vhost->host->host_lock, flags);

 ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);

 return rc;
}

/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost: ibmvfc host struct
 *
 * Return value:
 * 0 on success / other on failure
 **/

static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
 int rc = 0;
 unsigned long flags;
 struct vio_dev *vdev = to_vio_dev(vhost->dev);
 struct ibmvfc_queue *crq = &vhost->crq;

 /* Sub-CRQs must be torn down before the main CRQ is closed */
 ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);

 /* Close the CRQ */
 do {
  if (rc)
   msleep(100);
  rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

 /* Reset adapter state under host lock then queue lock */
 spin_lock_irqsave(vhost->host->host_lock, flags);
 spin_lock(vhost->crq.q_lock);
 vhost->state = IBMVFC_NO_CRQ;
 vhost->logged_in = 0;
 vhost->do_enquiry = 1;
 vhost->using_channels = 0;

 /* Clean out the queue */
 memset(crq->msgs.crq, 0, PAGE_SIZE);
 crq->cur = 0;

 /* And re-open it again */
 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
    crq->msg_token, PAGE_SIZE);

 if (rc == H_CLOSED)
  /* Adapter is good, but other end is not ready */
  dev_warn(vhost->dev, "Partner adapter not ready\n");
 else if (rc != 0)
  dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);

 spin_unlock(vhost->crq.q_lock);
 spin_unlock_irqrestore(vhost->host->host_lock, flags);

 ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);

 return rc;
}

/**
 * ibmvfc_valid_event - Determines if event is valid.
 * @pool: event_pool that contains the event
 * @evt: ibmvfc event to be checked for validity
 *
 * Return value:
 * 1 if event is valid / 0 if event is not valid
 **/

static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
			      struct ibmvfc_event *evt)
{
	ptrdiff_t idx = evt - pool->events;

	/* Reject pointers outside the pool or not on an element boundary */
	return idx >= 0 && idx < pool->size && evt == &pool->events[idx];
}

/**
 * ibmvfc_free_event - Free the specified event
 * @evt: ibmvfc_event to be freed
 *
 **/

static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
 struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
 unsigned long flags;

 /*
  * Lifecycle sanity checks: the event must belong to the pool,
  * free must go 0 -> 1 (else double free), and active must be 0
  * (being completed) before dropping to -1 (free).
  */
 BUG_ON(!ibmvfc_valid_event(pool, evt));
 BUG_ON(atomic_inc_return(&evt->free) != 1);
 BUG_ON(atomic_dec_and_test(&evt->active));

 spin_lock_irqsave(&evt->queue->l_lock, flags);
 list_add_tail(&evt->queue_list, &evt->queue->free);
 /* Return the slot to the correct accounting bucket */
 if (evt->reserved) {
  evt->reserved = 0;
  evt->queue->reserved_free++;
 } else {
  evt->queue->evt_free++;
 }
 /* Wake any error handler waiting on this event's completion */
 if (evt->eh_comp)
  complete(evt->eh_comp);
 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}

/**
 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
 * @evt: ibmvfc event struct
 *
 * This function does not setup any error status, that must be done
 * before this function gets called.
 **/

static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
{
	struct scsi_cmnd *sc = evt->cmnd;

	/* The result field must already have been set by the caller */
	if (sc) {
		scsi_dma_unmap(sc);
		scsi_done(sc);
	}

	ibmvfc_free_event(evt);
}

/**
 * ibmvfc_complete_purge - Complete failed command list
 * @purge_list: list head of failed commands
 *
 * This function runs completions on commands to fail as a result of a
 * host reset or platform migration.
 **/

static void ibmvfc_complete_purge(struct list_head *purge_list)
{
 struct ibmvfc_event *evt, *pos;

 list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
  list_del(&evt->queue_list);
  ibmvfc_trc_end(evt);
  evt->done(evt);
 }
}

/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt: ibmvfc event struct
 * @error_code: error code to fail request with
 *
 * Return value:
 * none
 **/

static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
 /*
 * Anything we are failing should still be active. Otherwise, it
 * implies we already got a response for the command and are doing
 * something bad like double completing it.
 */

 BUG_ON(!atomic_dec_and_test(&evt->active));
 if (evt->cmnd) {
  /* SCSI command: set the host byte and route completion through
   * the EH done path which unmaps and completes the command. */
  evt->cmnd->result = (error_code << 16);
  evt->done = ibmvfc_scsi_eh_done;
 } else
  /* MAD: flag failure in the transfer IU for the existing handler */
  evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);

 timer_delete(&evt->timer);
}

/**
 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
 * @vhost: ibmvfc host struct
 * @error_code: error code to fail requests with
 *
 * Return value:
 * none
 **/

static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
 struct ibmvfc_event *evt, *pos;
 struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
 unsigned long flags;
 int hwqs = 0;
 int i;

 /* Sub-CRQs are only walked when channelized I/O is in use */
 if (vhost->using_channels)
  hwqs = vhost->scsi_scrqs.active_queues;

 ibmvfc_dbg(vhost, "Purging all requests\n");
 spin_lock_irqsave(&vhost->crq.l_lock, flags);
 /* Fail everything outstanding on the main CRQ and move it to the
  * purge list; completions run later via ibmvfc_complete_purge(). */
 list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
  ibmvfc_fail_request(evt, error_code);
 list_splice_init(&vhost->crq.sent, &vhost->purge);
 spin_unlock_irqrestore(&vhost->crq.l_lock, flags);

 for (i = 0; i < hwqs; i++) {
  /* Lock order: q_lock (irq-safe) before the inner l_lock */
  spin_lock_irqsave(queues[i].q_lock, flags);
  spin_lock(&queues[i].l_lock);
  list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
   ibmvfc_fail_request(evt, error_code);
  list_splice_init(&queues[i].sent, &vhost->purge);
  spin_unlock(&queues[i].l_lock);
  spin_unlock_irqrestore(queues[i].q_lock, flags);
 }
}

/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost: struct ibmvfc host to reset
 **/

static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
 /* Fail all outstanding requests, mark the link down, and request the
  * RESET host action so the connection is torn down and re-registered. */
 ibmvfc_purge_requests(vhost, DID_ERROR);
 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}

/**
 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
 * @vhost: struct ibmvfc host to reset
 **/

static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	/* If we cannot perform a graceful NPIV logout first (not logged in,
	 * already waiting on a logout, or the state transition to
	 * INITIALIZING is refused), fall back to a hard CRQ reset.
	 * The condition is evaluated left to right, preserving the
	 * side effect of ibmvfc_set_host_state(). */
	if (!vhost->logged_in ||
	    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT ||
	    ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		ibmvfc_hard_reset_host(vhost);
		return;
	}

	/* Graceful path: block new requests and kick off an NPIV logout */
	scsi_block_requests(vhost->host);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
	vhost->job_step = ibmvfc_npiv_logout;
	wake_up(&vhost->work_wait_q);
}

/**
 * ibmvfc_reset_host - Reset the connection to the server
 * @vhost: ibmvfc host struct
 **/

static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
 unsigned long flags;

 /* Locked wrapper around __ibmvfc_reset_host() */
 spin_lock_irqsave(vhost->host->host_lock, flags);
 __ibmvfc_reset_host(vhost);
 spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_retry_host_init - Retry host initialization if allowed
 * @vhost: ibmvfc host struct
 *
 * Returns: 1 if init will be retried / 0 if not
 *
 **/

static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
 int retry = 0;

 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
  vhost->delay_init = 1;
  if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
   dev_err(vhost->dev,
    "Host initialization retries exceeded. Taking adapter offline\n");
   ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
  } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
   __ibmvfc_reset_host(vhost);
  else {
   ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
   retry = 1;
  }
 }

 wake_up(&vhost->work_wait_q);
 return retry;
}

/**
 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
 * @starget: scsi target struct
 *
 * Return value:
 * ibmvfc_target struct / NULL if not found
 **/

static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 struct ibmvfc_host *vhost = shost_priv(shost);
 struct ibmvfc_target *tgt;

 list_for_each_entry(tgt, &vhost->targets, queue)
  if (tgt->target_id == starget->id) {
   kref_get(&tgt->kref);
   return tgt;
  }
 return NULL;
}

/**
 * ibmvfc_get_target - Find the specified scsi_target
 * @starget: scsi target struct
 *
 * Return value:
 * ibmvfc_target struct / NULL if not found
 **/

static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 struct ibmvfc_target *tgt;
 unsigned long flags;

 /* Locked wrapper around __ibmvfc_get_target(); the returned target
  * (if any) holds a reference the caller must kref_put(). */
 spin_lock_irqsave(shost->host_lock, flags);
 tgt = __ibmvfc_get_target(starget);
 spin_unlock_irqrestore(shost->host_lock, flags);
 return tgt;
}

/**
 * ibmvfc_get_host_speed - Get host port speed
 * @shost: scsi host struct
 *
 * Return value:
 *  none
 **/

static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
{
 struct ibmvfc_host *vhost = shost_priv(shost);
 unsigned long flags;

 spin_lock_irqsave(shost->host_lock, flags);
 if (vhost->state == IBMVFC_ACTIVE) {
  switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
  case 1:
   fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
   break;
  case 2:
   fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
   break;
  case 4:
   fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
   break;
  case 8:
   fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
   break;
  case 10:
   fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
   break;
  case 16:
   fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
   break;
  default:
   ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
       be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
   fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
   break;
  }
 } else
  fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_get_host_port_state - Get host port state
 * @shost: scsi host struct
 *
 * Return value:
 *  none
 **/

static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
{
 struct ibmvfc_host *vhost = shost_priv(shost);
 unsigned long flags;

 spin_lock_irqsave(shost->host_lock, flags);
 switch (vhost->state) {
 case IBMVFC_INITIALIZING:
 case IBMVFC_ACTIVE:
  fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
  break;
 case IBMVFC_LINK_DOWN:
  fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
  break;
 case IBMVFC_LINK_DEAD:
 case IBMVFC_HOST_OFFLINE:
  fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
  break;
 case IBMVFC_HALTED:
  fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
  break;
 case IBMVFC_NO_CRQ:
  fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
  break;
 default:
  ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
  fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
  break;
 }
 spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
 * @rport: rport struct
 * @timeout: timeout value
 *
 * Return value:
 *  none
 **/

static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	/* Zero is not an acceptable dev_loss_tmo; clamp it to 1. */
	rport->dev_loss_tmo = timeout ? timeout : 1;
}

/**
 * ibmvfc_release_tgt - Free memory allocated for a target
 * @kref: kref struct
 *
 **/

static void ibmvfc_release_tgt(struct kref *kref)
{
 /* kref release callback: frees the target when the last ref drops */
 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
 kfree(tgt);
}

/**
 * ibmvfc_get_starget_node_name - Get SCSI target's node name
 * @starget: scsi target struct
 *
 * Return value:
 *  none
 **/

static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);

	/* Store the node name in the node-name attribute. The previous code
	 * wrote it into fc_starget_port_name(), clobbering the port-name
	 * attribute (compare ibmvfc_get_starget_port_name() below).
	 * Reports 0 when the target is not known to the driver. */
	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_get_starget_port_name - Get SCSI target's port name
 * @starget: scsi target struct
 *
 * Return value:
 *  none
 **/

static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
{
 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
 /* Report 0 when the target is not (or no longer) known to the driver */
 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
 if (tgt)
  kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
 * @starget: scsi target struct
 *
 * Return value:
 *  none
 **/

static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
{
 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
 /* Report -1 when the target is not (or no longer) known to the driver */
 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
 if (tgt)
  kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_wait_while_resetting - Wait while the host resets
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *  0 on success / other on failure
 **/

static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
{
 /* Sleep (up to init_timeout seconds) until the host settles into a
  * terminal state with no host action pending. */
 long timeout = wait_event_timeout(vhost->init_wait_q,
       ((vhost->state == IBMVFC_ACTIVE ||
         vhost->state == IBMVFC_HOST_OFFLINE ||
         vhost->state == IBMVFC_LINK_DEAD) &&
        vhost->action == IBMVFC_HOST_ACTION_NONE),
       (init_timeout * HZ));

 /* wait_event_timeout() returns 0 only on timeout */
 return timeout ? 0 : -EIO;
}

/**
 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
 * @shost: scsi host struct
 *
 * Return value:
 *  0 on success / other on failure
 **/

static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
{
 struct ibmvfc_host *vhost = shost_priv(shost);

 /* A LIP is implemented as a full connection reset; wait for the
  * reset to complete before returning to the FC transport. */
 dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
 ibmvfc_reset_host(vhost);
 return ibmvfc_wait_while_resetting(vhost);
}

/**
 * ibmvfc_gather_partition_info - Gather info about the LPAR
 * @vhost:      ibmvfc host struct
 *
 * Return value:
 * none
 **/

static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
{
	struct device_node *root;
	const unsigned int *lpar_num;
	const char *lpar_name;

	/* Read the LPAR name and number from the device-tree root node;
	 * silently leave the fields untouched if properties are absent. */
	root = of_find_node_by_path("/");
	if (!root)
		return;

	lpar_name = of_get_property(root, "ibm,partition-name", NULL);
	if (lpar_name)
		strscpy(vhost->partition_name, lpar_name,
			sizeof(vhost->partition_name));

	lpar_num = of_get_property(root, "ibm,partition-no", NULL);
	if (lpar_num)
		vhost->partition_number = *lpar_num;

	of_node_put(root);
}

/**
 * ibmvfc_set_login_info - Setup info for NPIV login
 * @vhost: ibmvfc host struct
 *
 * Return value:
 * none
 **/

static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
 struct ibmvfc_npiv_login *login_info = &vhost->login_info;
 struct ibmvfc_queue *async_crq = &vhost->async_crq;
 struct device_node *of_node = vhost->dev->of_node;
 const char *location;
 u16 max_cmds;

 /* Queue depth plus internal requests; with MQ enabled, each desired
  * sub-queue contributes its own allotment. */
 max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
 if (mq_enabled)
  max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
   vhost->scsi_scrqs.desired_queues;

 memset(login_info, 0, sizeof(*login_info));

 login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
 /* max_sectors converted from 512-byte sectors to bytes */
 login_info->max_dma_len = cpu_to_be64(max_sectors << 9);
 login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
 login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
 login_info->partition_num = cpu_to_be32(vhost->partition_number);
 login_info->vfc_frame_version = cpu_to_be32(1);
 login_info->fcp_version = cpu_to_be16(3);
 login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
 if (vhost->client_migrated)
  login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);

 login_info->max_cmds = cpu_to_be32(max_cmds);
 login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);

 if (vhost->mq_enabled || vhost->using_channels)
  login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);

 /* Describe the async event queue to the server */
 login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
 login_info->async.len = cpu_to_be32(async_crq->size *
         sizeof(*async_crq->msgs.async));
 strscpy(login_info->partition_name, vhost->partition_name,
  sizeof(login_info->partition_name));

 strscpy(login_info->device_name,
  dev_name(&vhost->host->shost_gendev), sizeof(login_info->device_name));

 /* Fall back to the device name if there is no location code property */
 location = of_get_property(of_node, "ibm,loc-code", NULL);
 location = location ? location : dev_name(vhost->dev);
 strscpy(login_info->drc_name, location, sizeof(login_info->drc_name));
}

/**
 * __ibmvfc_get_event - Gets the next free event in pool
 * @queue:      ibmvfc queue struct
 * @reserved: event is for a reserved management command
 *
 * Returns a free event from the pool.
 **/

static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
{
 struct ibmvfc_event *evt = NULL;
 unsigned long flags;

 spin_lock_irqsave(&queue->l_lock, flags);
 /* Both reserved and normal events come off the head of the shared
  * free list; only the accounting counters differ. Returns NULL when
  * the corresponding allotment is exhausted. */
 if (reserved && queue->reserved_free) {
  evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
  evt->reserved = 1;
  queue->reserved_free--;
 } else if (queue->evt_free) {
  evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
  queue->evt_free--;
 } else {
  goto out;
 }

 /* Mark the event in-use and unlink it from the free list */
 atomic_set(&evt->free, 0);
 list_del(&evt->queue_list);
out:
 spin_unlock_irqrestore(&queue->l_lock, flags);
 return evt;
}

/* Convenience wrappers selecting the normal vs reserved allotment */
#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)

/**
 * ibmvfc_locked_done - Calls evt completion with host_lock held
 * @evt: ibmvfc evt to complete
 *
 * All non-scsi command completion callbacks have the expectation that the
 * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
 * MAD evt with the host_lock.
 **/

static void ibmvfc_locked_done(struct ibmvfc_event *evt)
{
 unsigned long flags;

 /* Invoke the real completion handler (stashed in _done) with the
  * host_lock held, as MAD completion callbacks expect. */
 spin_lock_irqsave(evt->vhost->host->host_lock, flags);
 evt->_done(evt);
 spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
}

/**
 * ibmvfc_init_event - Initialize fields in an event struct that are always
 * required.
 * @evt: The event
 * @done: Routine to call when the event is responded to
 * @format: SRP or MAD format
 **/

static void ibmvfc_init_event(struct ibmvfc_event *evt,
			      void (*done) (struct ibmvfc_event *), u8 format)
{
	/* Reset the per-use fields of the event */
	evt->cmnd = NULL;
	evt->sync_iu = NULL;
	evt->eh_comp = NULL;
	evt->hwq = 0;
	evt->crq.format = format;

	if (format != IBMVFC_CMD_FORMAT) {
		/* MAD completions expect host_lock held: route through
		 * ibmvfc_locked_done, stashing the real handler in _done. */
		evt->_done = done;
		evt->done = ibmvfc_locked_done;
	} else {
		evt->done = done;
	}
}

/**
 * ibmvfc_map_sg_list - Initialize scatterlist
 * @scmd: scsi command struct
 * @nseg: number of scatterlist segments
 * @md: memory descriptor list to initialize
 **/

static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
			       struct srp_direct_buf *md)
{
	struct scatterlist *sg;
	int idx;

	/* Fill one SRP direct descriptor per mapped scatterlist segment */
	scsi_for_each_sg(scmd, sg, nseg, idx) {
		struct srp_direct_buf *desc = &md[idx];

		desc->va = cpu_to_be64(sg_dma_address(sg));
		desc->len = cpu_to_be32(sg_dma_len(sg));
		desc->key = 0;
	}
}

/**
 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
 * @scmd: struct scsi_cmnd with the scatterlist
 * @evt: ibmvfc event struct
 * @vfc_cmd: vfc_cmd that contains the memory descriptor
 * @dev: device for which to map dma memory
 *
 * Returns:
 * 0 on success / non-zero on failure
 **/

static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
         struct ibmvfc_event *evt,
         struct ibmvfc_cmd *vfc_cmd, struct device *dev)
{

 int sg_mapped;
 struct srp_direct_buf *data = &vfc_cmd->ioba;
 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);

 if (cls3_error)
  vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);

 sg_mapped = scsi_dma_map(scmd);
 if (!sg_mapped) {
  /* No data to transfer for this command */
  vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
  return 0;
 } else if (unlikely(sg_mapped < 0)) {
  /* DMA mapping failed; propagate the negative error code */
  if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
   scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
  return sg_mapped;
 }

 /* Record the transfer direction in both the flags and the FCP IU */
 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
  vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
  iu->add_cdb_len |= IBMVFC_WRDATA;
 } else {
  vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
  iu->add_cdb_len |= IBMVFC_RDDATA;
 }

 if (sg_mapped == 1) {
  /* Single segment: describe it inline in the command's ioba */
  ibmvfc_map_sg_list(scmd, sg_mapped, data);
  return 0;
 }

 vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);

 /* Multiple segments need an external descriptor list; allocated on
  * first use and kept on the event (checked via evt->ext_list). */
 if (!evt->ext_list) {
  evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
            &evt->ext_list_token);

  if (!evt->ext_list) {
   /* Undo the DMA mapping before failing */
   scsi_dma_unmap(scmd);
   if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
    scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
   return -ENOMEM;
  }
 }

 ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);

 /* Point the command's ioba at the external descriptor list */
 data->va = cpu_to_be64(evt->ext_list_token);
 data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
 data->key = 0;
 return 0;
}

/**
 * ibmvfc_timeout - Internal command timeout handler
 * @t: struct ibmvfc_event that timed out
 *
 * Called when an internally generated command times out
 **/

static void ibmvfc_timeout(struct timer_list *t)
{
 struct ibmvfc_event *evt = timer_container_of(evt, t, timer);
 struct ibmvfc_host *vhost = evt->vhost;
 /* An internal command timing out is treated as fatal to the
  * connection: reset the whole host rather than just the command. */
 dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
 ibmvfc_reset_host(vhost);
}

/**
 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
 * @evt: event to be sent
 * @vhost: ibmvfc host struct
 * @timeout: timeout in seconds - 0 means do not time command
 *
 * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
 **/

static int ibmvfc_send_event(struct ibmvfc_event *evt,
        struct ibmvfc_host *vhost, unsigned long timeout)
{
 __be64 *crq_as_u64 = (__be64 *) &evt->crq;
 unsigned long flags;
 int rc;

 /* Copy the IU into the transfer area */
 *evt->xfer_iu = evt->iu;
 /* Tag the request with the event pointer so the response can be
  * matched back to this event on completion */
 if (evt->crq.format == IBMVFC_CMD_FORMAT)
  evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
 else if (evt->crq.format == IBMVFC_MAD_FORMAT)
  evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
 else
  BUG();

 timer_setup(&evt->timer, ibmvfc_timeout, 0);

 /* timeout == 0 means the command is not timed */
 if (timeout) {
  evt->timer.expires = jiffies + (timeout * HZ);
  add_timer(&evt->timer);
 }

 spin_lock_irqsave(&evt->queue->l_lock, flags);
 list_add_tail(&evt->queue_list, &evt->queue->sent);
 atomic_set(&evt->active, 1);

 /* Make the sent/active bookkeeping visible before the request is
  * handed to firmware */
 mb();

 if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
  rc = ibmvfc_send_sub_crq(vhost,
      evt->queue->vios_cookie,
      be64_to_cpu(crq_as_u64[0]),
      be64_to_cpu(crq_as_u64[1]),
      0, 0);
 else
  rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
         be64_to_cpu(crq_as_u64[1]));

 if (rc) {
  /* Send failed: undo the bookkeeping done above */
  atomic_set(&evt->active, 0);
  list_del(&evt->queue_list);
  spin_unlock_irqrestore(&evt->queue->l_lock, flags);
  timer_delete(&evt->timer);

  /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
 * Firmware will send a CRQ with a transport event (0xFF) to
 * tell this client what has happened to the transport. This
 * will be handled in ibmvfc_handle_crq()
 */

  if (rc == H_CLOSED) {
   if (printk_ratelimit())
    dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
   if (evt->cmnd)
    scsi_dma_unmap(evt->cmnd);
   ibmvfc_free_event(evt);
   return SCSI_MLQUEUE_HOST_BUSY;
  }

  /* Any other error: fail the request through its done handler */
  dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
  if (evt->cmnd) {
   evt->cmnd->result = DID_ERROR << 16;
   evt->done = ibmvfc_scsi_eh_done;
  } else
   evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);

  evt->done(evt);
 } else {
  spin_unlock_irqrestore(&evt->queue->l_lock, flags);
  ibmvfc_trc_start(evt);
 }

 return 0;
}

/**
 * ibmvfc_log_error - Log an error for the failed command if appropriate
 * @evt: ibmvfc event to log
 *
 **/

static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
 struct ibmvfc_host *vhost = evt->vhost;
 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
 struct scsi_cmnd *cmnd = evt->cmnd;
 const char *err = unknown_error;
 int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
 int logerr = 0;
 int rsp_code = 0;

 /* Translate the status/error pair into a name and a log policy */
 if (index >= 0) {
  logerr = cmd_status[index].log;
  err = cmd_status[index].name;
 }

 /* Skip errors the table marks as not log-worthy unless verbose
  * logging is enabled */
 if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
  return;

 if (rsp->flags & FCP_RSP_LEN_VALID)
  rsp_code = rsp->data.info.rsp_code;

 scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
      "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
      cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
      rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}

/**
 * ibmvfc_relogin - Log back into the specified device
 * @sdev: scsi device struct
 *
 **/

static void ibmvfc_relogin(struct scsi_device *sdev)
{
 struct ibmvfc_host *vhost = shost_priv(sdev->host);
 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
 struct ibmvfc_target *tgt;
 unsigned long flags;

 spin_lock_irqsave(vhost->host->host_lock, flags);
 /* Mark the matching target deleted so rediscovery logs back in */
 list_for_each_entry(tgt, &vhost->targets, queue) {
  if (rport == tgt->rport) {
   ibmvfc_del_tgt(tgt);
   break;
  }
 }

 ibmvfc_reinit_host(vhost);
 spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_scsi_done - Handle responses from commands
 * @evt: ibmvfc event to be handled
 *
 * Used as a callback when sending scsi cmds.
 **/

static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
 struct scsi_cmnd *cmnd = evt->cmnd;
 u32 rsp_len = 0;
 u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);

 if (cmnd) {
  /* Prefer the adapter's residual count over the FCP response's */
  if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
   scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
  else if (rsp->flags & FCP_RESID_UNDER)
   scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
  else
   scsi_set_resid(cmnd, 0);

  if (vfc_cmd->status) {
   cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);

   /* Sense data follows the FCP response data; clamp the copy
    * to the SCSI sense buffer. NOTE(review): rsp_len > 8 is
    * treated as invalid and the sense copy is skipped. */
   if (rsp->flags & FCP_RSP_LEN_VALID)
    rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
   if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
    sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
   if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
    memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
   /* VIOS asking for a PLOGI means the login was lost */
   if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
       (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
    ibmvfc_relogin(cmnd->device);

   if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
    cmnd->result = (DID_ERROR << 16);

   ibmvfc_log_error(evt);
  }

  /* Underflow below the command's expectation is an error */
  if (!cmnd->result &&
      (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
   cmnd->result = (DID_ERROR << 16);

  scsi_dma_unmap(cmnd);
  scsi_done(cmnd);
 }

 ibmvfc_free_event(evt);
}

/**
 * ibmvfc_host_chkready - Check if the host can accept commands
 * @vhost:  struct ibmvfc host
 *
 * Returns:
 * 1 if host can accept command / 0 if not
 **/

static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
{
	/* Map the host state onto a SCSI midlayer disposition:
	 * dead/offline -> no connect, transient states -> requeue,
	 * active -> accept (0). */
	switch (vhost->state) {
	case IBMVFC_LINK_DEAD:
	case IBMVFC_HOST_OFFLINE:
		return DID_NO_CONNECT << 16;
	case IBMVFC_NO_CRQ:
	case IBMVFC_INITIALIZING:
	case IBMVFC_HALTED:
	case IBMVFC_LINK_DOWN:
		return DID_REQUEUE << 16;
	case IBMVFC_ACTIVE:
		return 0;
	}

	return 0;
}

static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
{
 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
 struct ibmvfc_host *vhost = evt->vhost;
 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
 size_t offset;

 memset(vfc_cmd, 0, sizeof(*vfc_cmd));
 /* The response buffer offset depends on the command layout version:
  * v2 when the VF WWPN capability is negotiated, v1 otherwise. */
 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
  offset = offsetof(struct ibmvfc_cmd, v2.rsp);
  vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
 } else
  offset = offsetof(struct ibmvfc_cmd, v1.rsp);
 /* Response area lives inside this command's own DMA buffer */
 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
 vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
 vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
 vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
 vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
 vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
 vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
 int_to_scsilun(sdev->lun, &iu->lun);

 return vfc_cmd;
}

/**
 * ibmvfc_queuecommand - The queuecommand function of the scsi template
 * @shost: scsi host struct
 * @cmnd: struct scsi_cmnd to be executed
 *
 * Returns:
 * 0 on success / other on failure
 **/

static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
 struct ibmvfc_host *vhost = shost_priv(shost);
 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 struct ibmvfc_cmd *vfc_cmd;
 struct ibmvfc_fcp_cmd_iu *iu;
 struct ibmvfc_event *evt;
 u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
 u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
 u16 scsi_channel;
 int rc;

 if (unlikely((rc = fc_remote_port_chkready(rport))) ||
     unlikely((rc = ibmvfc_host_chkready(vhost)))) {
  cmnd->result = rc;
  scsi_done(cmnd);
  return 0;
 }

 cmnd->result = (DID_OK << 16);
 if (vhost->using_channels) {
  scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
  evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
  if (!evt)
   return SCSI_MLQUEUE_HOST_BUSY;

  evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
 } else {
  evt = ibmvfc_get_event(&vhost->crq);
  if (!evt)
   return SCSI_MLQUEUE_HOST_BUSY;
 }

 ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
 evt->cmnd = cmnd;

 vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
 iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);

 iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);

 if (cmnd->flags & SCMD_TAGGED) {
  vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
  iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
 }

 vfc_cmd->correlation = cpu_to_be64((u64)evt);

 if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
  return ibmvfc_send_event(evt, vhost, 0);

 ibmvfc_free_event(evt);
 if (rc == -ENOMEM)
  return SCSI_MLQUEUE_HOST_BUSY;

 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
  scmd_printk(KERN_ERR, cmnd,
       "Failed to map DMA buffer for command. rc=%d\n", rc);

 cmnd->result = DID_ERROR << 16;
 scsi_done(cmnd);
 return 0;
}

/**
 * ibmvfc_sync_completion - Signal that a synchronous command has completed
 * @evt: ibmvfc event struct
 *
 **/

static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
{
 /* copy the response back */
 if (evt->sync_iu)
  *evt->sync_iu = *evt->xfer_iu;

 complete(&evt->comp);
}

/**
 * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
 * @evt: struct ibmvfc_event
 *
 **/

static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
{
 struct ibmvfc_host *vhost = evt->vhost;

 /* Cancel MAD completed: free the event and clear the flag that
  * serializes passthru aborts. */
 ibmvfc_free_event(evt);
 vhost->aborting_passthru = 0;
 dev_info(vhost->dev, "Passthru command cancelled\n");
}

/**
 * ibmvfc_bsg_timeout - Handle a BSG timeout
 * @job: struct bsg_job that timed out
 *
 * Returns:
 * 0 on success / other on failure
 **/

static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
 struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
 unsigned long port_id = (unsigned long)job->dd_data;
 struct ibmvfc_event *evt;
 struct ibmvfc_tmf *tmf;
 unsigned long flags;
 int rc;

 ENTER;
 spin_lock_irqsave(vhost->host->host_lock, flags);
 /* If an abort is already in flight or the host is not active, fall
  * back to resetting the whole connection. */
 if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
  __ibmvfc_reset_host(vhost);
  spin_unlock_irqrestore(vhost->host->host_lock, flags);
  return 0;
 }

 vhost->aborting_passthru = 1;
 evt = ibmvfc_get_reserved_event(&vhost->crq);
 if (!evt) {
  spin_unlock_irqrestore(vhost->host->host_lock, flags);
  return -ENOMEM;
 }

 ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);

 /* Build a TMF MAD cancelling by the passthru cancel key */
 tmf = &evt->iu.tmf;
 memset(tmf, 0, sizeof(*tmf));
 tmf->common.version = cpu_to_be32(1);
 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
 tmf->common.length = cpu_to_be16(sizeof(*tmf));
 tmf->scsi_id = cpu_to_be64(port_id);
 tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
 tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
 rc = ibmvfc_send_event(evt, vhost, default_timeout);

 if (rc != 0) {
  vhost->aborting_passthru = 0;
  dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
  rc = -EIO;
 } else
  dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
    port_id);

 spin_unlock_irqrestore(vhost->host->host_lock, flags);

 LEAVE;
 return rc;
}

/**
 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
 * @vhost: struct ibmvfc_host to send command
 * @port_id: port ID to send command
 *
 * Returns:
 * 0 on success / other on failure
 **/

static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
{
 struct ibmvfc_port_login *plogi;
 struct ibmvfc_target *tgt;
 struct ibmvfc_event *evt;
 union ibmvfc_iu rsp_iu;
 unsigned long flags;
 int rc = 0, issue_login = 1;

 ENTER;
 spin_lock_irqsave(vhost->host->host_lock, flags);
 /* Skip the login if this port is already a known target */
 list_for_each_entry(tgt, &vhost->targets, queue) {
  if (tgt->scsi_id == port_id) {
   issue_login = 0;
   break;
  }
 }

 if (!issue_login)
  goto unlock_out;
 if (unlikely((rc = ibmvfc_host_chkready(vhost))))
  goto unlock_out;

 evt = ibmvfc_get_reserved_event(&vhost->crq);
 if (!evt) {
  rc = -ENOMEM;
  goto unlock_out;
 }
 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 /* Build the port login MAD for the requested port id */
 plogi = &evt->iu.plogi;
 memset(plogi, 0, sizeof(*plogi));
 plogi->common.version = cpu_to_be32(1);
 plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
 plogi->common.length = cpu_to_be16(sizeof(*plogi));
 plogi->scsi_id = cpu_to_be64(port_id);
 /* Synchronous completion: response is copied into rsp_iu */
 evt->sync_iu = &rsp_iu;
 init_completion(&evt->comp);

 rc = ibmvfc_send_event(evt, vhost, default_timeout);
 /* Drop the lock while sleeping on the completion */
 spin_unlock_irqrestore(vhost->host->host_lock, flags);

 if (rc)
  return -EIO;

 wait_for_completion(&evt->comp);

 if (rsp_iu.plogi.common.status)
  rc = -EIO;

 /* Event must be freed with the host_lock held */
 spin_lock_irqsave(vhost->host->host_lock, flags);
 ibmvfc_free_event(evt);
unlock_out:
 spin_unlock_irqrestore(vhost->host->host_lock, flags);
 LEAVE;
 return rc;
}

/**
 * ibmvfc_bsg_request - Handle a BSG request
 * @job: struct bsg_job to be executed
 *
 * Returns:
 * 0 on success / other on failure
 **/

static int ibmvfc_bsg_request(struct bsg_job *job)
{
 struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
 struct fc_rport *rport = fc_bsg_to_rport(job);
 struct ibmvfc_passthru_mad *mad;
 struct ibmvfc_event *evt;
 union ibmvfc_iu rsp_iu;
 unsigned long flags, port_id = -1;
 struct fc_bsg_request *bsg_request = job->request;
 struct fc_bsg_reply *bsg_reply = job->reply;
 unsigned int code = bsg_request->msgcode;
 int rc = 0, req_seg, rsp_seg, issue_login = 0;
 u32 fc_flags, rsp_len;

 ENTER;
 bsg_reply->reply_payload_rcv_len = 0;
 if (rport)
  port_id = rport->port_id;

 switch (code) {
 case FC_BSG_HST_ELS_NOLOGIN:
  port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
   (bsg_request->rqst_data.h_els.port_id[1] << 8) |
   bsg_request->rqst_data.h_els.port_id[2];
  fallthrough;
 case FC_BSG_RPT_ELS:
  fc_flags = IBMVFC_FC_ELS;
  break;
 case FC_BSG_HST_CT:
  issue_login = 1;
  port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
   (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
   bsg_request->rqst_data.h_ct.port_id[2];
  fallthrough;
 case FC_BSG_RPT_CT:
  fc_flags = IBMVFC_FC_CT_IU;
  break;
 default:
  return -ENOTSUPP;
 }

 if (port_id == -1)
  return -EINVAL;
 if (!mutex_trylock(&vhost->passthru_mutex))
  return -EBUSY;

 job->dd_data = (void *)port_id;
 req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
        job->request_payload.sg_cnt, DMA_TO_DEVICE);

 if (!req_seg) {
  mutex_unlock(&vhost->passthru_mutex);
  return -ENOMEM;
 }

 rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
        job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

 if (!rsp_seg) {
  dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
        job->request_payload.sg_cnt, DMA_TO_DEVICE);
  mutex_unlock(&vhost->passthru_mutex);
  return -ENOMEM;
 }

 if (req_seg > 1 || rsp_seg > 1) {
  rc = -EINVAL;
  goto out;
 }

 if (issue_login)
  rc = ibmvfc_bsg_plogi(vhost, port_id);

 spin_lock_irqsave(vhost->host->host_lock, flags);

 if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
     unlikely((rc = ibmvfc_host_chkready(vhost)))) {
  spin_unlock_irqrestore(vhost->host->host_lock, flags);
  goto out;
 }

 evt = ibmvfc_get_reserved_event(&vhost->crq);
 if (!evt) {
  spin_unlock_irqrestore(vhost->host->host_lock, flags);
  rc = -ENOMEM;
  goto out;
 }
 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 mad = &evt->iu.passthru;

 memset(mad, 0, sizeof(*mad));
 mad->common.version = cpu_to_be32(1);
 mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
 mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));

 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
  offsetof(struct ibmvfc_passthru_mad, iu));
 mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));

 mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
 mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
 mad->iu.flags = cpu_to_be32(fc_flags);
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=97 H=94 G=95

¤ Dauer der Verarbeitung: 0.21 Sekunden  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.