// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/crash_dump.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <linux/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION  "2.1.34-035"
#define DRIVER_MAJOR  2
#define DRIVER_MINOR  1
#define DRIVER_RELEASE  34
#define DRIVER_REVISION  35

#define DRIVER_NAME  "Microchip SmartPQI Driver (v" \
    DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT "smartpqi"

#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS   5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10

#define PQI_NO_COMPLETION ((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
 DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

struct pqi_cmd_priv {
 int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
 return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
 struct pqi_queue_group *queue_group, enum pqi_io_path path,
 struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
 struct pqi_iu_header *request, unsigned int flags,
 struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
 unsigned int cdb_length, struct pqi_queue_group *queue_group,
 struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
 struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
 struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
 NONE,
 REBOOT,
 PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
 enum pqi_lockup_action action;
 char   *name;
} pqi_lockup_actions[] = {
 {
  .action = NONE,
  .name = "none",
 },
 {
  .action = REBOOT,
  .name = "reboot",
 },
 {
  .action = PANIC,
  .name = "panic",
 },
};

static unsigned int pqi_supported_event_types[] = {
 PQI_EVENT_TYPE_HOTPLUG,
 PQI_EVENT_TYPE_HARDWARE,
 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
 PQI_EVENT_TYPE_LOGICAL_DEVICE,
 PQI_EVENT_TYPE_OFA,
 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
 pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
 "Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
 pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
 "Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
 pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
 "Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
 pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
 "\t\tSupported: none, reboot, panic\n"
 "\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
 pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
 pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
 pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
 "Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
 pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
 "Timeout in seconds for driver to wait for controller ready.");

static char *raid_levels[] = {
 "RAID-0",
 "RAID-4",
 "RAID-1(1+0)",
 "RAID-5",
 "RAID-5+1",
 "RAID-6",
 "RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
 if (raid_level < ARRAY_SIZE(raid_levels))
  return raid_levels[raid_level];

 return "RAID UNKNOWN";
}
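
/*
 * The SA_RAID_* values below double as indices into raid_levels[] above;
 * pqi_raid_level_to_string() relies on that ordering.
 */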

#define SA_RAID_0  0
#define SA_RAID_4  1
#define SA_RAID_1  2 /* also used for RAID 10 */
#define SA_RAID_5  3 /* also used for RAID 50 */
#define SA_RAID_51  4
#define SA_RAID_6  5 /* also used for RAID 60 */
#define SA_RAID_TRIPLE  6 /* also used for RAID 1+0 Triple */
#define SA_RAID_MAX  SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN  0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
 pqi_prep_for_scsi_done(scmd);
 scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
 sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
 return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
 return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
 return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
 if (ctrl_info->controller_online)
  if (!sis_is_firmware_running(ctrl_info))
   pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
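
/*
 * Bits kept in the SIS driver scratch register. The register lives on the
 * controller, so these settings presumably survive a driver reload, which
 * is why the PQI/SIS mode and the firmware triage capability are recorded
 * here rather than in host memory.
 */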

#define PQI_DRIVER_SCRATCH_PQI_MODE   0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED  0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
 enum pqi_ctrl_mode mode)
{
 u32 driver_scratch;

 driver_scratch = sis_read_driver_scratch(ctrl_info);

 if (mode == PQI_MODE)
  driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
 else
  driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

 sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
 u32 driver_scratch;

 driver_scratch = sis_read_driver_scratch(ctrl_info);

 if (is_supported)
  driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
 else
  driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

 sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
 ctrl_info->scan_blocked = true;
 mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
 ctrl_info->scan_blocked = false;
 mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
 return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
 mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
 mutex_unlock(&ctrl_info->lun_reset_mutex);
}
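
/*
 * Block new requests at the SCSI midlayer, then poll until every command
 * already dispatched to the host has completed. The poll interval starts
 * at 20 ms and backs off to 500 ms after ten iterations.
 */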

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
 struct Scsi_Host *shost;
 unsigned int num_loops;
 int msecs_sleep;

 shost = ctrl_info->scsi_host;

 scsi_block_requests(shost);

 num_loops = 0;
 msecs_sleep = 20;
 while (scsi_host_busy(shost)) {
  num_loops++;
  if (num_loops == 10)
   msecs_sleep = 500;
  msleep(msecs_sleep);
 }
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
 scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
 atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
 atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
 return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
 ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
 ctrl_info->block_requests = false;
 wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
 if (!pqi_ctrl_blocked(ctrl_info))
  return;

 atomic_inc(&ctrl_info->num_blocked_threads);
 wait_event(ctrl_info->block_requests_wait,
  !pqi_ctrl_blocked(ctrl_info));
 atomic_dec(&ctrl_info->num_blocked_threads);
}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS  10
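
/*
 * Wait until the number of busy driver threads no longer exceeds the
 * number of blocked threads, i.e. no thread is still actively issuing
 * requests to the controller. A warning is logged every
 * PQI_QUIESCE_WARNING_TIMEOUT_SECS seconds while waiting.
 */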

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
 unsigned long start_jiffies;
 unsigned long warning_timeout;
 bool displayed_warning;

 displayed_warning = false;
 start_jiffies = jiffies;
 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

 while (atomic_read(&ctrl_info->num_busy_threads) >
  atomic_read(&ctrl_info->num_blocked_threads)) {
  if (time_after(jiffies, warning_timeout)) {
   dev_warn(&ctrl_info->pci_dev->dev,
    "waiting %u seconds for driver activity to quiesce\n",
    jiffies_to_msecs(jiffies - start_jiffies) / 1000);
   displayed_warning = true;
   warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
  }
  usleep_range(1000, 2000);
 }

 if (displayed_warning)
  dev_warn(&ctrl_info->pci_dev->dev,
   "driver activity quiesced after waiting for %u seconds\n",
   jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
 return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
 mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
 mutex_unlock(&ctrl_info->ofa_mutex);
}
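
/*
 * Acquiring and immediately releasing ofa_mutex acts as a barrier: if an
 * Online Firmware Activation (OFA) is in progress, the mutex is held and
 * this call blocks until the OFA completes.
 */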

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
 mutex_lock(&ctrl_info->ofa_mutex);
 mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
 return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
 device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
 return device->in_remove;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
 device->in_reset[lun] = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
 device->in_reset[lun] = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{
 return device->in_reset[lun];
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
 int index;

 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
  if (event_type == pqi_supported_event_types[index])
   return index;

 return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
 return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
 unsigned long delay)
{
 if (pqi_ctrl_offline(ctrl_info))
  return;

 schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY (10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
 cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
 if (!ctrl_info->heartbeat_counter)
  return 0;

 return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
 return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
 u8 status;

 status = pqi_read_soft_reset_status(ctrl_info);
 status &= ~PQI_SOFT_RESET_ABORT;
 writeb(status, ctrl_info->soft_reset_status);
}

static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
 bool io_high_prio;
 int priority_class;

 io_high_prio = false;

 if (device->ncq_prio_enable) {
  priority_class =
   IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
  if (priority_class == IOPRIO_CLASS_RT) {
   /* Set NCQ priority for read/write commands. */
   switch (scmd->cmnd[0]) {
   case WRITE_16:
   case READ_16:
   case WRITE_12:
   case READ_12:
   case WRITE_10:
   case READ_10:
   case WRITE_6:
   case READ_6:
    io_high_prio = true;
    break;
   }
  }
 }

 return io_high_prio;
}
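
/*
 * DMA-map a single contiguous buffer and record its bus address, length,
 * and the CISS_SG_LAST flag in the given SG descriptor. A NULL buffer,
 * zero length, or DMA_NONE direction is treated as a successful no-op.
 */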

static int pqi_map_single(struct pci_dev *pci_dev,
 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
 size_t buffer_length, enum dma_data_direction data_direction)
{
 dma_addr_t bus_address;

 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
  return 0;

 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
  data_direction);
 if (dma_mapping_error(&pci_dev->dev, bus_address))
  return -ENOMEM;

 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
 put_unaligned_le32(buffer_length, &sg_descriptor->length);
 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

 return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
 struct pqi_sg_descriptor *descriptors, int num_descriptors,
 enum dma_data_direction data_direction)
{
 int i;

 if (data_direction == DMA_NONE)
  return;

 for (i = 0; i < num_descriptors; i++)
  dma_unmap_single(&pci_dev->dev,
   (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
   get_unaligned_le32(&descriptors[i].length),
   data_direction);
}
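
/*
 * Build a RAID-path request for the given BMIC/CISS command: set up the
 * IU header, LUN, and CDB, derive the DMA direction from the command
 * type, and map the data buffer into the request's first SG descriptor.
 */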

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 struct pqi_raid_path_request *request, u8 cmd,
 u8 *scsi3addr, void *buffer, size_t buffer_length,
 u16 vpd_page, enum dma_data_direction *dir)
{
 u8 *cdb;
 size_t cdb_length = buffer_length;

 memset(request, 0, sizeof(*request));

 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
  sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
  &request->header.iu_length);
 put_unaligned_le32(buffer_length, &request->buffer_length);
 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

 cdb = request->cdb;

 switch (cmd) {
 case INQUIRY:
  request->data_direction = SOP_READ_FLAG;
  cdb[0] = INQUIRY;
  if (vpd_page & VPD_PAGE) {
   cdb[1] = 0x1;
   cdb[2] = (u8)vpd_page;
  }
  cdb[4] = (u8)cdb_length;
  break;
 case CISS_REPORT_LOG:
 case CISS_REPORT_PHYS:
  request->data_direction = SOP_READ_FLAG;
  cdb[0] = cmd;
  if (cmd == CISS_REPORT_PHYS) {
   if (ctrl_info->rpl_extended_format_4_5_supported)
    cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
   else
    cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
  } else {
   cdb[1] = ctrl_info->ciss_report_log_flags;
  }
  put_unaligned_be32(cdb_length, &cdb[6]);
  break;
 case CISS_GET_RAID_MAP:
  request->data_direction = SOP_READ_FLAG;
  cdb[0] = CISS_READ;
  cdb[1] = CISS_GET_RAID_MAP;
  put_unaligned_be32(cdb_length, &cdb[6]);
  break;
 case SA_FLUSH_CACHE:
  request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
  request->data_direction = SOP_WRITE_FLAG;
  cdb[0] = BMIC_WRITE;
  cdb[6] = BMIC_FLUSH_CACHE;
  put_unaligned_be16(cdb_length, &cdb[7]);
  break;
 case BMIC_SENSE_DIAG_OPTIONS:
  cdb_length = 0;
  fallthrough;
 case BMIC_IDENTIFY_CONTROLLER:
 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
 case BMIC_SENSE_FEATURE:
  request->data_direction = SOP_READ_FLAG;
  cdb[0] = BMIC_READ;
  cdb[6] = cmd;
  put_unaligned_be16(cdb_length, &cdb[7]);
  break;
 case BMIC_SET_DIAG_OPTIONS:
  cdb_length = 0;
  fallthrough;
 case BMIC_WRITE_HOST_WELLNESS:
  request->data_direction = SOP_WRITE_FLAG;
  cdb[0] = BMIC_WRITE;
  cdb[6] = cmd;
  put_unaligned_be16(cdb_length, &cdb[7]);
  break;
 case BMIC_CSMI_PASSTHRU:
  request->data_direction = SOP_BIDIRECTIONAL;
  cdb[0] = BMIC_WRITE;
  cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
  cdb[6] = cmd;
  put_unaligned_be16(cdb_length, &cdb[7]);
  break;
 default:
  dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
  break;
 }

 switch (request->data_direction) {
 case SOP_READ_FLAG:
  *dir = DMA_FROM_DEVICE;
  break;
 case SOP_WRITE_FLAG:
  *dir = DMA_TO_DEVICE;
  break;
 case SOP_NO_DIRECTION_FLAG:
  *dir = DMA_NONE;
  break;
 default:
  *dir = DMA_BIDIRECTIONAL;
  break;
 }

 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
  buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
 io_request->scmd = NULL;
 io_request->status = 0;
 io_request->error_info = NULL;
 io_request->raid_bypass = false;
}
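
/*
 * The I/O request pool has two regions: slots [0, scsi_ml_can_queue) are
 * indexed directly by the block-layer tag of the SCSI command, so SML
 * requests never contend for a slot; the trailing PQI_RESERVED_IO_SLOTS
 * are shared by IOCTLs and driver-internal requests, which poll for a
 * free slot using the per-request refcount.
 */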

static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
 struct pqi_io_request *io_request;
 u16 i;

 if (scmd) { /* SML I/O request */
  u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

  i = blk_mq_unique_tag_to_tag(blk_tag);
  io_request = &ctrl_info->io_request_pool[i];
  if (atomic_inc_return(&io_request->refcount) > 1) {
   atomic_dec(&io_request->refcount);
   return NULL;
  }
 } else { /* IOCTL or driver internal request */
   /*
    * benignly racy - may have to wait for an open slot.
    * command slot range is scsi_ml_can_queue -
    *   [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
    */

  i = 0;
  while (1) {
   io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
   if (atomic_inc_return(&io_request->refcount) == 1)
    break;
   atomic_dec(&io_request->refcount);
   i = (i + 1) % PQI_RESERVED_IO_SLOTS;
  }
 }

 if (io_request)
  pqi_reinit_io_request(io_request);

 return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
 atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
 struct pqi_raid_error_info *error_info)
{
 int rc;
 struct pqi_raid_path_request request;
 enum dma_data_direction dir;

 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
  buffer, buffer_length, vpd_page, &dir);
 if (rc)
  return rc;

 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

 return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
 u8 cmd, void *buffer, size_t buffer_length)
{
 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
  buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
 u8 cmd, void *buffer, size_t buffer_length,
 struct pqi_raid_error_info *error_info)
{
 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
  buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
 struct bmic_identify_controller *buffer)
{
 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
  buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
 struct bmic_sense_subsystem_info *sense_info)
{
 return pqi_send_ctrl_raid_request(ctrl_info,
  BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
  sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
  buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device,
 struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
 int rc;
 enum dma_data_direction dir;
 u16 bmic_device_index;
 struct pqi_raid_path_request request;

 rc = pqi_build_raid_path_request(ctrl_info, &request,
  BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
  buffer_length, 0, &dir);
 if (rc)
  return rc;

 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
 request.cdb[2] = (u8)bmic_device_index;
 request.cdb[9] = (u8)(bmic_device_index >> 8);

 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

 return rc;
}
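
/*
 * The firmware reports AIO transfer limits in units of 1 KiB; a limit of
 * 0 means "unlimited" and is mapped to ~0.
 */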

static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
 u32 bytes;

 bytes = get_unaligned_le16(limit);
 if (bytes == 0)
  bytes = ~0;
 else
  bytes *= 1024;

 return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
 struct bmic_sense_feature_buffer_header header;
 struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()
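
/*
 * Minimum lengths the BMIC sense-feature response must have for the AIO
 * subpage fields used below to be present; offsetofend() keeps these
 * checks in sync with the structure layout.
 */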

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
 offsetofend(struct bmic_sense_feature_buffer, \
  aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH \
 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
  max_write_raid_1_10_3drive) - \
  sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
 int rc;
 enum dma_data_direction dir;
 struct pqi_raid_path_request request;
 struct bmic_sense_feature_buffer *buffer;

 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
 if (!buffer)
  return -ENOMEM;

 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
  buffer, sizeof(*buffer), 0, &dir);
 if (rc)
  goto error;

 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

 if (rc)
  goto error;

 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
  buffer->header.subpage_code !=
   BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
  get_unaligned_le16(&buffer->header.buffer_length) <
   MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
  buffer->aio_subpage.header.page_code !=
   BMIC_SENSE_FEATURE_IO_PAGE ||
  buffer->aio_subpage.header.subpage_code !=
   BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
  get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
   MINIMUM_AIO_SUBPAGE_LENGTH) {
  goto error;
 }

 ctrl_info->max_transfer_encrypted_sas_sata =
  pqi_aio_limit_to_bytes(
   &buffer->aio_subpage.max_transfer_encrypted_sas_sata);

 ctrl_info->max_transfer_encrypted_nvme =
  pqi_aio_limit_to_bytes(
   &buffer->aio_subpage.max_transfer_encrypted_nvme);

 ctrl_info->max_write_raid_5_6 =
  pqi_aio_limit_to_bytes(
   &buffer->aio_subpage.max_write_raid_5_6);

 ctrl_info->max_write_raid_1_10_2drive =
  pqi_aio_limit_to_bytes(
   &buffer->aio_subpage.max_write_raid_1_10_2drive);

 ctrl_info->max_write_raid_1_10_3drive =
  pqi_aio_limit_to_bytes(
   &buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
 kfree(buffer);

 return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
 enum bmic_flush_cache_shutdown_event shutdown_event)
{
 int rc;
 struct bmic_flush_cache *flush_cache;

 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
 if (!flush_cache)
  return -ENOMEM;

 flush_cache->shutdown_event = shutdown_event;

 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
  sizeof(*flush_cache));

 kfree(flush_cache);

 return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
 struct pqi_raid_error_info *error_info)
{
 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
  buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA  (1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
 int rc;
 struct bmic_diag_options *diag;

 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
 if (!diag)
  return -ENOMEM;

 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
  diag, sizeof(*diag));
 if (rc)
  goto out;

 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
  sizeof(*diag));

out:
 kfree(diag);

 return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
 void *buffer, size_t buffer_length)
{
 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
  buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
 u8 start_tag[4];
 u8 driver_version_tag[2];
 __le16 driver_version_length;
 char driver_version[32];
 u8 dont_write_tag[2];
 u8 end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
 struct pqi_ctrl_info *ctrl_info)
{
 int rc;
 struct bmic_host_wellness_driver_version *buffer;
 size_t buffer_length;

 buffer_length = sizeof(*buffer);

 buffer = kmalloc(buffer_length, GFP_KERNEL);
 if (!buffer)
  return -ENOMEM;

 buffer->start_tag[0] = '<';
 buffer->start_tag[1] = 'H';
 buffer->start_tag[2] = 'W';
 buffer->start_tag[3] = '>';
 buffer->driver_version_tag[0] = 'D';
 buffer->driver_version_tag[1] = 'V';
 put_unaligned_le16(sizeof(buffer->driver_version),
  &buffer->driver_version_length);
 strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
  sizeof(buffer->driver_version));
 buffer->dont_write_tag[0] = 'D';
 buffer->dont_write_tag[1] = 'W';
 buffer->end_tag[0] = 'Z';
 buffer->end_tag[1] = 'Z';

 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

 kfree(buffer);

 return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
 u8 start_tag[4];
 u8 time_tag[2];
 __le16 time_length;
 u8 time[8];
 u8 dont_write_tag[2];
 u8 end_tag[2];
};

#pragma pack()
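
/*
 * Encode the current local time as BCD in the BMIC host wellness format:
 * hours, minutes, seconds, a reserved byte, month, day, then century and
 * two-digit year.
 */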

static int pqi_write_current_time_to_host_wellness(
 struct pqi_ctrl_info *ctrl_info)
{
 int rc;
 struct bmic_host_wellness_time *buffer;
 size_t buffer_length;
 time64_t local_time;
 unsigned int year;
 struct tm tm;

 buffer_length = sizeof(*buffer);

 buffer = kmalloc(buffer_length, GFP_KERNEL);
 if (!buffer)
  return -ENOMEM;

 buffer->start_tag[0] = '<';
 buffer->start_tag[1] = 'H';
 buffer->start_tag[2] = 'W';
 buffer->start_tag[3] = '>';
 buffer->time_tag[0] = 'T';
 buffer->time_tag[1] = 'D';
 put_unaligned_le16(sizeof(buffer->time),
  &buffer->time_length);

 local_time = ktime_get_real_seconds();
 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
 year = tm.tm_year + 1900;

 buffer->time[0] = bin2bcd(tm.tm_hour);
 buffer->time[1] = bin2bcd(tm.tm_min);
 buffer->time[2] = bin2bcd(tm.tm_sec);
 buffer->time[3] = 0;
 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
 buffer->time[5] = bin2bcd(tm.tm_mday);
 buffer->time[6] = bin2bcd(year / 100);
 buffer->time[7] = bin2bcd(year % 100);

 buffer->dont_write_tag[0] = 'D';
 buffer->dont_write_tag[1] = 'W';
 buffer->end_tag[0] = 'Z';
 buffer->end_tag[1] = 'Z';

 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

 kfree(buffer);

 return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
 int rc;
 struct pqi_ctrl_info *ctrl_info;

 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
  update_time_work);

 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
 if (rc)
  dev_warn(&ctrl_info->pci_dev->dev,
   "error updating time on controller\n");

 schedule_delayed_work(&ctrl_info->update_time_work,
  PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
 schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
 cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
 size_t buffer_length)
{
 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}
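
/*
 * Issue the given CISS report LUNs command twice: once with a
 * header-sized buffer to learn the list length, then again with a buffer
 * sized to fit the whole list. If the list grew between the two calls,
 * reallocate and retry until the response fits.
 */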

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
 int rc;
 size_t lun_list_length;
 size_t lun_data_length;
 size_t new_lun_list_length;
 void *lun_data = NULL;
 struct report_lun_header *report_lun_header;

 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
 if (!report_lun_header) {
  rc = -ENOMEM;
  goto out;
 }

 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
 if (rc)
  goto out;

 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
 if (!lun_data) {
  rc = -ENOMEM;
  goto out;
 }

 if (lun_list_length == 0) {
  memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
  goto out;
 }

 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
 if (rc)
  goto out;

 new_lun_list_length =
  get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

 if (new_lun_list_length > lun_list_length) {
  lun_list_length = new_lun_list_length;
  kfree(lun_data);
  goto again;
 }

out:
 kfree(report_lun_header);

 if (rc) {
  kfree(lun_data);
  lun_data = NULL;
 }

 *buffer = lun_data;

 return rc;
}
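
/*
 * Fetch the physical LUN list. When the controller responds in the older
 * extended format 2 (8-byte WWIDs), copy the entries into a freshly
 * allocated extended format 4 list (16-byte WWIDs, zero-padded) so that
 * callers only ever see one format.
 */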

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
 int rc;
 unsigned int i;
 u8 rpl_response_format;
 u32 num_physicals;
 void *rpl_list;
 struct report_lun_header *rpl_header;
 struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
 struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
 if (rc)
  return rc;

 if (ctrl_info->rpl_extended_format_4_5_supported) {
  rpl_header = rpl_list;
  rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
  if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
   *buffer = rpl_list;
   return 0;
  } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
   dev_err(&ctrl_info->pci_dev->dev,
    "RPL returned unsupported data format %u\n",
    rpl_response_format);
   return -EINVAL;
  } else {
   dev_warn(&ctrl_info->pci_dev->dev,
    "RPL returned extended format 2 instead of 4\n");
  }
 }

 rpl_8byte_wwid_list = rpl_list;
 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);

 rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
         num_physicals), GFP_KERNEL);
 if (!rpl_16byte_wwid_list)
  return -ENOMEM;

 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
  &rpl_16byte_wwid_list->header.list_length);
 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

 for (i = 0; i < num_physicals; i++) {
  memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
  memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
  memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
  rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
  rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
  rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
  rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
  rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
 }

 kfree(rpl_8byte_wwid_list);
 *buffer = rpl_16byte_wwid_list;

 return 0;
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
 struct report_phys_lun_16byte_wwid_list **physdev_list,
 struct report_log_lun_list **logdev_list)
{
 int rc;
 size_t logdev_list_length;
 size_t logdev_data_length;
 struct report_log_lun_list *internal_logdev_list;
 struct report_log_lun_list *logdev_data;
 struct report_lun_header report_lun_header;

 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
 if (rc)
  dev_err(&ctrl_info->pci_dev->dev,
   "report physical LUNs failed\n");

 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
 if (rc)
  dev_err(&ctrl_info->pci_dev->dev,
   "report logical LUNs failed\n");

 /*
  * Tack the controller itself onto the end of the logical device list
  * by adding a list entry that is all zeros.
  */

 logdev_data = *logdev_list;

 if (logdev_data) {
  logdev_list_length =
   get_unaligned_be32(&logdev_data->header.list_length);
 } else {
  memset(&report_lun_header, 0, sizeof(report_lun_header));
  logdev_data =
   (struct report_log_lun_list *)&report_lun_header;
  logdev_list_length = 0;
 }

 logdev_data_length = sizeof(struct report_lun_header) +
  logdev_list_length;

 internal_logdev_list = kmalloc(logdev_data_length +
  sizeof(struct report_log_lun), GFP_KERNEL);
 if (!internal_logdev_list) {
  kfree(*logdev_list);
  *logdev_list = NULL;
  return -ENOMEM;
 }

 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
  sizeof(struct report_log_lun));
 put_unaligned_be32(logdev_list_length +
  sizeof(struct report_log_lun),
  &internal_logdev_list->header.list_length);

 kfree(*logdev_list);
 *logdev_list = internal_logdev_list;

 return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
 int bus, int target, int lun)
{
 device->bus = bus;
 device->target = target;
 device->lun = lun;
}
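
/*
 * Derive bus/target/lun from the 32-bit LUN ID in the first four bytes of
 * the SCSI3 address; the encoding differs for the controller LUN itself,
 * internal RAID volumes, and external RAID volumes.
 */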

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
 u8 *scsi3addr;
 u32 lunid;
 int bus;
 int target;
 int lun;

 scsi3addr = device->scsi3addr;
 lunid = get_unaligned_le32(scsi3addr);

 if (pqi_is_hba_lunid(scsi3addr)) {
  /* The specified device is the controller. */
  pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
  device->target_lun_valid = true;
  return;
 }

 if (pqi_is_logical_device(device)) {
  if (device->is_external_raid_device) {
   bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
   target = (lunid >> 16) & 0x3fff;
   lun = lunid & 0xff;
  } else {
   bus = PQI_RAID_VOLUME_BUS;
   target = 0;
   lun = lunid & 0x3fff;
  }
  pqi_set_bus_target_lun(device, bus, target, lun);
  device->target_lun_valid = true;
  return;
 }

 /*
  * Defer target and LUN assignment for non-controller physical devices
  * because the SAS transport layer will make these assignments later.
  */

 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 int rc;
 u8 raid_level;
 u8 *buffer;

 raid_level = SA_RAID_UNKNOWN;

 buffer = kmalloc(64, GFP_KERNEL);
 if (buffer) {
  rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
   VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
  if (rc == 0) {
   raid_level = buffer[8];
   if (raid_level > SA_RAID_MAX)
    raid_level = SA_RAID_UNKNOWN;
  }
  kfree(buffer);
 }

 device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
 char *err_msg;
 u32 raid_map_size;
 u32 r5or6_blocks_per_row;

 raid_map_size = get_unaligned_le32(&raid_map->structure_size);

 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
  err_msg = "RAID map too small";
  goto bad_raid_map;
 }

 if (device->raid_level == SA_RAID_1) {
  if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
   err_msg = "invalid RAID-1 map";
   goto bad_raid_map;
  }
 } else if (device->raid_level == SA_RAID_TRIPLE) {
  if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
   err_msg = "invalid RAID-1(Triple) map";
   goto bad_raid_map;
  }
 } else if ((device->raid_level == SA_RAID_5 ||
  device->raid_level == SA_RAID_6) &&
  get_unaligned_le16(&raid_map->layout_map_count) > 1) {
  /* RAID 50/60 */
  r5or6_blocks_per_row =
   get_unaligned_le16(&raid_map->strip_size) *
   get_unaligned_le16(&raid_map->data_disks_per_row);
  if (r5or6_blocks_per_row == 0) {
   err_msg = "invalid RAID-5 or RAID-6 map";
   goto bad_raid_map;
  }
 }

 return 0;

bad_raid_map:
 dev_warn(&ctrl_info->pci_dev->dev,
  "logical device %08x%08x %s\n",
  *((u32 *)&device->scsi3addr),
  *((u32 *)&device->scsi3addr[4]), err_msg);

 return -EINVAL;
}
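
/*
 * Fetch the RAID map in up to two passes: if the firmware reports a
 * structure_size larger than the default buffer, reallocate and fetch
 * again, then validate the map before attaching it to the device.
 */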

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 int rc;
 u32 raid_map_size;
 struct raid_map *raid_map;

 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
 if (!raid_map)
  return -ENOMEM;

 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
  device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
 if (rc)
  goto error;

 raid_map_size = get_unaligned_le32(&raid_map->structure_size);

 if (raid_map_size > sizeof(*raid_map)) {

  kfree(raid_map);

  raid_map = kmalloc(raid_map_size, GFP_KERNEL);
  if (!raid_map)
   return -ENOMEM;

  rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
   device->scsi3addr, raid_map, raid_map_size, 0, NULL);
  if (rc)
   goto error;

  if (get_unaligned_le32(&raid_map->structure_size)
   != raid_map_size) {
   dev_warn(&ctrl_info->pci_dev->dev,
    "requested %u bytes, received %u bytes\n",
    raid_map_size,
    get_unaligned_le32(&raid_map->structure_size));
   rc = -EINVAL;
   goto error;
  }
 }

 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
 if (rc)
  goto error;

 device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
 if (!device->raid_io_stats) {
  rc = -ENOMEM;
  goto error;
 }

 device->raid_map = raid_map;

 return 0;

error:
 kfree(raid_map);

 return rc;
}

static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 if (!ctrl_info->lv_drive_type_mix_valid) {
  device->max_transfer_encrypted = ~0;
  return;
 }

 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
 case LV_DRIVE_TYPE_MIX_SAS_ONLY:
 case LV_DRIVE_TYPE_MIX_SATA_ONLY:
  device->max_transfer_encrypted =
   ctrl_info->max_transfer_encrypted_sas_sata;
  break;
 case LV_DRIVE_TYPE_MIX_NVME_ONLY:
  device->max_transfer_encrypted =
   ctrl_info->max_transfer_encrypted_nvme;
  break;
 case LV_DRIVE_TYPE_MIX_UNKNOWN:
 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
 default:
  device->max_transfer_encrypted =
   min(ctrl_info->max_transfer_encrypted_sas_sata,
    ctrl_info->max_transfer_encrypted_nvme);
  break;
 }
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 int rc;
 u8 *buffer;
 u8 bypass_status;

 buffer = kmalloc(64, GFP_KERNEL);
 if (!buffer)
  return;

 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
  VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
 if (rc)
  goto out;

#define RAID_BYPASS_STATUS  4
#define RAID_BYPASS_CONFIGURED  0x1
#define RAID_BYPASS_ENABLED  0x2

 bypass_status = buffer[RAID_BYPASS_STATUS];
 device->raid_bypass_configured =
  (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
 if (device->raid_bypass_configured &&
  (bypass_status & RAID_BYPASS_ENABLED) &&
  pqi_get_raid_map(ctrl_info, device) == 0) {
  device->raid_bypass_enabled = true;
  if (get_unaligned_le16(&device->raid_map->flags) &
   RAID_MAP_ENCRYPTION_ENABLED)
   pqi_set_max_transfer_encrypted(ctrl_info, device);
 }

out:
 kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 int rc;
 size_t page_length;
 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
 bool volume_offline = true;
 u32 volume_flags;
 struct ciss_vpd_logical_volume_status *vpd;

 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
 if (!vpd)
  goto no_buffer;

 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
  VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
 if (rc)
  goto out;

 if (vpd->page_code != CISS_VPD_LV_STATUS)
  goto out;

 page_length = offsetof(struct ciss_vpd_logical_volume_status,
  volume_status) + vpd->page_length;
 if (page_length < sizeof(*vpd))
  goto out;

 volume_status = vpd->volume_status;
 volume_flags = get_unaligned_be32(&vpd->flags);
 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
 kfree(vpd);
no_buffer:
 device->volume_status = volume_status;
 device->volume_offline = volume_offline;
}

#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device,
 struct bmic_identify_physical_device *id_phys)
{
 int rc;

 memset(id_phys, 0, sizeof(*id_phys));

 rc = pqi_identify_physical_device(ctrl_info, device,
  id_phys, sizeof(*id_phys));
 if (rc) {
  device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
  return rc;
 }

 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
 memcpy(device->model, &id_phys->model[8], sizeof(device->model));

 device->box_index = id_phys->box_index;
 device->phys_box_on_bus = id_phys->phys_box_on_bus;
 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
 device->queue_depth =
  get_unaligned_le16(&id_phys->current_queue_depth_limit);
 device->active_path_index = id_phys->active_path_number;
 device->path_map = id_phys->redundant_path_present_map;
 memcpy(&device->box,
  &id_phys->alternate_paths_phys_box_on_port,
  sizeof(device->box));
 memcpy(&device->phys_connector,
  &id_phys->alternate_paths_phys_connector,
  sizeof(device->phys_connector));
 device->bay = id_phys->phys_bay_in_box;
 device->lun_count = id_phys->multi_lun_device_lun_count;
 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
  id_phys->phy_count)
  device->phy_id =
   id_phys->phy_to_phy_map[device->active_path_index];
 else
  device->phy_id = 0xFF;

 device->ncq_prio_support =
  ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
  PQI_DEVICE_NCQ_PRIO_SUPPORTED);

 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);

 return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 int rc;
 u8 *buffer;

 buffer = kmalloc(64, GFP_KERNEL);
 if (!buffer)
  return -ENOMEM;

 /* Send an inquiry to the device to see what it is. */
 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
 if (rc)
  goto out;

 scsi_sanitize_inquiry_string(&buffer[8], 8);
 scsi_sanitize_inquiry_string(&buffer[16], 16);

 device->devtype = buffer[0] & 0x1f;
 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
 memcpy(device->model, &buffer[16], sizeof(device->model));

 if (device->devtype == TYPE_DISK) {
  if (device->is_external_raid_device) {
   device->raid_level = SA_RAID_UNKNOWN;
   device->volume_status = CISS_LV_OK;
   device->volume_offline = false;
  } else {
   pqi_get_raid_level(ctrl_info, device);
   pqi_get_raid_bypass_status(ctrl_info, device);
   pqi_get_volume_status(ctrl_info, device);
  }
 }

out:
 kfree(buffer);

 return rc;
}

/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize (erase) operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 *       using the management utility.
 */

static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
{
 return device->erase_in_progress;
}

static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device,
 struct bmic_identify_physical_device *id_phys)
{
 int rc;

 if (device->is_expander_smp_device)
  return 0;

 if (pqi_is_logical_device(device))
  rc = pqi_get_logical_device_info(ctrl_info, device);
 else
  rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

 return rc;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device,
 struct bmic_identify_physical_device *id_phys)
{
 int rc;

 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);

 if (rc == 0 && device->lun_count == 0)
  device->lun_count = 1;

 return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 char *status;
 static const char unknown_state_str[] =
  "Volume is in an unknown state (%u)";
 char unknown_state_buffer[sizeof(unknown_state_str) + 10];

 switch (device->volume_status) {
 case CISS_LV_OK:
  status = "Volume online";
  break;
 case CISS_LV_FAILED:
  status = "Volume failed";
  break;
 case CISS_LV_NOT_CONFIGURED:
  status = "Volume not configured";
  break;
 case CISS_LV_DEGRADED:
  status = "Volume degraded";
  break;
 case CISS_LV_READY_FOR_RECOVERY:
  status = "Volume ready for recovery operation";
  break;
 case CISS_LV_UNDERGOING_RECOVERY:
  status = "Volume undergoing recovery";
  break;
 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
  status = "Wrong physical drive was replaced";
  break;
 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
  status = "A physical drive not properly connected";
  break;
 case CISS_LV_HARDWARE_OVERHEATING:
  status = "Hardware is overheating";
  break;
 case CISS_LV_HARDWARE_HAS_OVERHEATED:
  status = "Hardware has overheated";
  break;
 case CISS_LV_UNDERGOING_EXPANSION:
  status = "Volume undergoing expansion";
  break;
 case CISS_LV_NOT_AVAILABLE:
  status = "Volume waiting for transforming volume";
  break;
 case CISS_LV_QUEUED_FOR_EXPANSION:
  status = "Volume queued for expansion";
  break;
 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
  status = "Volume disabled due to SCSI ID conflict";
  break;
 case CISS_LV_EJECTED:
  status = "Volume has been ejected";
  break;
 case CISS_LV_UNDERGOING_ERASE:
  status = "Volume undergoing background erase";
  break;
 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
  status = "Volume ready for predictive spare rebuild";
  break;
 case CISS_LV_UNDERGOING_RPI:
  status = "Volume undergoing rapid parity initialization";
  break;
 case CISS_LV_PENDING_RPI:
  status = "Volume queued for rapid parity initialization";
  break;
 case CISS_LV_ENCRYPTED_NO_KEY:
  status = "Encrypted volume inaccessible - key not present";
  break;
 case CISS_LV_UNDERGOING_ENCRYPTION:
  status = "Volume undergoing encryption process";
  break;
 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
  status = "Volume undergoing encryption re-keying process";
  break;
 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
  status = "Volume encrypted but encryption is disabled";
  break;
 case CISS_LV_PENDING_ENCRYPTION:
  status = "Volume pending migration to encrypted state";
  break;
 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
  status = "Volume pending encryption rekeying";
  break;
 case CISS_LV_NOT_SUPPORTED:
  status = "Volume not supported on this controller";
  break;
 case CISS_LV_STATUS_UNAVAILABLE:
  status = "Volume status not available";
  break;
 default:
  snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
   unknown_state_str, device->volume_status);
  status = unknown_state_buffer;
  break;
 }

 dev_info(&ctrl_info->pci_dev->dev,
  "scsi %d:%d:%d:%d %s\n",
  ctrl_info->scsi_host->host_no,
  device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
 struct pqi_ctrl_info *ctrl_info;

 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
  rescan_work);

 pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 int rc;

 if (pqi_is_logical_device(device))
  rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
   device->target, device->lun);
 else
  rc = pqi_add_sas_device(ctrl_info->sas_host, device);

 return rc;
}

#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
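
/*
 * Wait up to 20 seconds per LUN for outstanding commands to drain; the
 * removal proceeds (with an error logged) even if commands are still
 * pending when the timeout expires.
 */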

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
 int rc;
 int lun;

 for (lun = 0; lun < device->lun_count; lun++) {
  rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
   PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
  if (rc)
   dev_err(&ctrl_info->pci_dev->dev,
    "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
    ctrl_info->scsi_host->host_no, device->bus,
    device->target, lun,
    atomic_read(&device->scsi_cmds_outstanding[lun]));
 }

 if (pqi_is_logical_device(device))
  scsi_remove_device(device->sdev);
 else
  pqi_remove_sas_device(device);

 pqi_device_remove_start(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
 int bus, int target, int lun)
{
 struct pqi_scsi_dev *device;

 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
  if (device->bus == bus && device->target == target && device->lun == lun)
   return device;

 return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
 if (dev1->is_physical_device != dev2->is_physical_device)
  return false;

 if (dev1->is_physical_device)
  return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;

 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
 DEVICE_NOT_FOUND,
 DEVICE_CHANGED,
 DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
 struct pqi_scsi_dev *device;

 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
  if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
   *matching_device = device;
   if (pqi_device_equal(device_to_find, device)) {
    if (device_to_find->volume_offline)
     return DEVICE_CHANGED;
    return DEVICE_SAME;
   }
   return DEVICE_CHANGED;
  }
 }

 return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
 if (device->is_expander_smp_device)
  return "Enclosure SMP ";

 return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH 128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
 char *action, struct pqi_scsi_dev *device)
{
 ssize_t count;
 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
  "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

 if (device->target_lun_valid)
  count += scnprintf(buffer + count,
   PQI_DEV_INFO_BUFFER_LENGTH - count,
   "%d:%d",
   device->target,
   device->lun);
 else
  count += scnprintf(buffer + count,
   PQI_DEV_INFO_BUFFER_LENGTH - count,
   "-:-");

 if (pqi_is_logical_device(device)) {
  count += scnprintf(buffer + count,
   PQI_DEV_INFO_BUFFER_LENGTH - count,
   " %08x%08x",
   *((u32 *)&device->scsi3addr),
   *((u32 *)&device->scsi3addr[4]));
 } else if (ctrl_info->rpl_extended_format_4_5_supported) {
  if (device->device_type == SA_DEVICE_TYPE_NVME)
   count += scnprintf(buffer + count,
     PQI_DEV_INFO_BUFFER_LENGTH - count,
     " %016llx%016llx",
     get_unaligned_be64(&device->wwid[0]),
     get_unaligned_be64(&device->wwid[8]));
  else
   count += scnprintf(buffer + count,
     PQI_DEV_INFO_BUFFER_LENGTH - count,
     " %016llx",
     get_unaligned_be64(&device->wwid[0]));
 } else {
  count += scnprintf(buffer + count,
   PQI_DEV_INFO_BUFFER_LENGTH - count,
   " %016llx",
   get_unaligned_be64(&device->wwid[0]));
 }

 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
  " %s %.8s %.16s ",
  pqi_device_type(device),
  device->vendor,
  device->model);

 if (pqi_is_logical_device(device)) {
  if (device->devtype == TYPE_DISK)
   count += scnprintf(buffer + count,
    PQI_DEV_INFO_BUFFER_LENGTH - count,
    "SSDSmartPathCap%c En%c %-12s",
    device->raid_bypass_configured ? '+' : '-',
    device->raid_bypass_enabled ? '+' : '-',
    pqi_raid_level_to_string(device->raid_level));
 } else {
  count += scnprintf(buffer + count,
   PQI_DEV_INFO_BUFFER_LENGTH - count,
   "AIO%c", device->aio_enabled ? '+' : '-');
  if (device->devtype == TYPE_DISK ||
   device->devtype == TYPE_ZBC)
   count += scnprintf(buffer + count,
    PQI_DEV_INFO_BUFFER_LENGTH - count,
    " qd=%-6d", device->queue_depth);
 }

 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

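/*
 * Two RAID maps are equal if both are NULL or if their structure sizes
 * and contents match byte for byte.
 */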
static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
{
 u32 raid_map1_size;
 u32 raid_map2_size;

 if (raid_map1 == NULL || raid_map2 == NULL)
  return raid_map1 == raid_map2;

 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);

 if (raid_map1_size != raid_map2_size)
  return false;

 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
}

/* Assumes the SCSI device list lock is held. */

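/*
 * Copy the refreshed attributes from new_device into existing_device.
 * Ownership of a changed RAID map (and, when bypass is newly enabled,
 * the raid_io_stats) moves to existing_device; the pointers in
 * new_device are cleared so they are not freed when new_device is
 * released.
 */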
static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
 existing_device->device_type = new_device->device_type;
 existing_device->bus = new_device->bus;
 if (new_device->target_lun_valid) {
  existing_device->target = new_device->target;
  existing_device->lun = new_device->lun;
  existing_device->target_lun_valid = true;
 }

 /* By definition, the scsi3addr and wwid fields are already the same. */

 existing_device->is_physical_device = new_device->is_physical_device;
 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
 existing_device->sas_address = new_device->sas_address;
 existing_device->queue_depth = new_device->queue_depth;
 existing_device->device_offline = false;
 existing_device->lun_count = new_device->lun_count;

 if (pqi_is_logical_device(existing_device)) {
  existing_device->is_external_raid_device = new_device->is_external_raid_device;

  if (existing_device->devtype == TYPE_DISK) {
   existing_device->raid_level = new_device->raid_level;
   existing_device->volume_status = new_device->volume_status;
   memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
   if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
    kfree(existing_device->raid_map);
    existing_device->raid_map = new_device->raid_map;
    /* To prevent this from being freed later. */
    new_device->raid_map = NULL;
   }
   if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
    existing_device->raid_io_stats = new_device->raid_io_stats;
    new_device->raid_io_stats = NULL;
   }
   existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
   existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
  }
 } else {
  existing_device->aio_enabled = new_device->aio_enabled;
  existing_device->aio_handle = new_device->aio_handle;
  existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
  existing_device->active_path_index = new_device->active_path_index;
  existing_device->phy_id = new_device->phy_id;
  existing_device->path_map = new_device->path_map;
  existing_device->bay = new_device->bay;
  existing_device->box_index = new_device->box_index;
  existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
  existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
  memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
  memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
 }
}

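/* Free a device structure along with its per-CPU I/O stats and RAID map. */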
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
 if (device) {
  free_percpu(device->raid_io_stats);
  kfree(device->raid_map);
  kfree(device);
 }
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *device)
{
 unsigned long flags;

 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 list_del(&device->scsi_device_list_entry);
 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

 /* Allow the device structure to be freed later. */
 device->keep_device = false;
}

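/*
 * A device counts as exposed to the OS once it has a SAS port (expander
 * SMP devices) or a SCSI mid-layer device (everything else).
 */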
static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
 if (device->is_expander_smp_device)
  return device->sas_port != NULL;

 return device->sdev != NULL;
}

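/* Initialize one TMF work item per LUN, all dispatched to pqi_tmf_worker. */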
static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
{
 unsigned int lun;
 struct pqi_tmf_work *tmf_work;

 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
  INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
}

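/*
 * A volume needs a rescan only if it is not being removed, is exposed
 * to the SML, is online, and has its rescan flag set.
 */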
static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
{
 if (pqi_device_in_remove(device))
  return false;

 if (device->sdev == NULL)
  return false;

 if (!scsi_device_online(device->sdev))
  return false;

 return device->rescan;
}

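/*
 * Reconcile the controller's internal device list with a freshly
 * scanned device list: mark every existing device gone, re-mark the
 * matches, build add/delete lists under the lock, then perform the
 * actual SML add/remove operations after dropping it.
 */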
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
 int rc;
 unsigned int i;
 unsigned long flags;
 enum pqi_find_result find_result;
 struct pqi_scsi_dev *device;
 struct pqi_scsi_dev *next;
 struct pqi_scsi_dev *matching_device;
 LIST_HEAD(add_list);
 LIST_HEAD(delete_list);

 /*
  * The idea here is to do as little work as possible while holding the
  * spinlock.  That's why we go to great pains to defer anything other
  * than updating the internal device list until after we release the
  * spinlock.
  */

 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

 /* Assume that all devices in the existing list have gone away. */
 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
  device->device_gone = true;

 for (i = 0; i < num_new_devices; i++) {
  device = new_device_list[i];

  find_result = pqi_scsi_find_entry(ctrl_info, device,
   &matching_device);

  switch (find_result) {
  case DEVICE_SAME:
   /*
    * The newly found device is already in the existing
    * device list.
    */
   device->new_device = false;
   matching_device->device_gone = false;
   pqi_scsi_update_device(ctrl_info, matching_device, device);
   break;
  case DEVICE_NOT_FOUND:
   /*
    * The newly found device is NOT in the existing device
    * list.
    */
   device->new_device = true;
   break;
  case DEVICE_CHANGED:
   /*
    * The original device has gone away and we need to add
    * the new device.
    */
   device->new_device = true;
   break;
  }
 }

 /* Process all devices that have gone away. */
 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
  scsi_device_list_entry) {
  if (device->device_gone) {
   list_del(&device->scsi_device_list_entry);
   list_add_tail(&device->delete_list_entry, &delete_list);
  }
 }

 /* Process all new devices. */
 for (i = 0; i < num_new_devices; i++) {
  device = new_device_list[i];
  if (!device->new_device)
   continue;
  if (device->volume_offline)
   continue;
  list_add_tail(&device->scsi_device_list_entry,
   &ctrl_info->scsi_device_list);
  list_add_tail(&device->add_list_entry, &add_list);
  /* To prevent this device structure from being freed later. */
  device->keep_device = true;
  pqi_init_device_tmf_work(device);
 }

 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

 /*
  * If OFA is in progress and there are devices that need to be deleted,
  * allow any pending reset operations to continue and unblock any SCSI
  * requests before removal.
  */
 if (pqi_ofa_in_progress(ctrl_info)) {
  list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
   if (pqi_is_device_added(device))
    pqi_device_remove_start(device);
  pqi_ctrl_unblock_device_reset(ctrl_info);
  pqi_scsi_unblock_requests(ctrl_info);
 }

 /* Remove all devices that have gone away. */
 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
  if (device->volume_offline) {
   pqi_dev_info(ctrl_info, "offline", device);
   pqi_show_volume_status(ctrl_info, device);
  } else {
   pqi_dev_info(ctrl_info, "removed", device);
  }
  if (pqi_is_device_added(device))
   pqi_remove_device(ctrl_info, device);
  list_del(&device->delete_list_entry);
  pqi_free_device(device);
 }

 /*
  * Notify the SML of any existing device changes, such as
  * queue depth and device size.
  */
 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
  /*
   * Check for queue depth change.
   */
  if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
   device->advertised_queue_depth = device->queue_depth;
   scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
  }
  spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  /*
   * Check for changes in the device, such as size.
   */
  if (pqi_volume_rescan_needed(device)) {
   device->rescan = false;
   spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
   scsi_rescan_device(device->sdev);
  } else {
   spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  }
 }

--> maximum size reached (listing truncated)
