/* Source: drivers/scsi/arcmsr/arcmsr_hba.c — Linux kernel SCSI RAID driver for Areca controllers (language: C). */

 
/*
*******************************************************************************
**        O.S   : Linux
**   FILE NAME  : arcmsr_hba.c
**        BY    : Nick Cheng, C.L. Huang
**   Description: SCSI RAID Device Driver for Areca RAID Controller
*******************************************************************************
** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
**
**     Web site: www.areca.com.tw
**       E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
**     Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
*******************************************************************************
*/

#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/circ_buf.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
/* Module identity, exposed via modinfo. */
MODULE_AUTHOR("Nick Cheng, C.L. Huang ");
MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

/* Load-time tunables; all are read-only through sysfs (S_IRUGO). */

/* Prefer MSI-X interrupts when the platform supports them. */
static int msix_enable = 1;
module_param(msix_enable, int, S_IRUGO);
MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");

/* Fall back to (or force) plain MSI interrupts. */
static int msi_enable = 1;
module_param(msi_enable, int, S_IRUGO);
MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");

/* Host adapter queue depth (commands outstanding per adapter). */
static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
module_param(host_can_queue, int, S_IRUGO);
MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");

/* Per-LUN queue depth. */
static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
module_param(cmd_per_lun, int, S_IRUGO);
MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");

/* Use a 64-bit DMA mask instead of the default 32-bit one. */
static int dma_mask_64 = 0;
module_param(dma_mask_64, int, S_IRUGO);
MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");

/* Periodically push host date/time to the IOP firmware. */
static int set_date_time = 0;
module_param(set_date_time, int, S_IRUGO);
MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");

/* SCSI command timeout handed to the midlayer. */
static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT;
module_param(cmd_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90");

/* Sleep interval (seconds) and retry budget used during adapter recovery. */
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12

/*
 * Forward declarations for the PCI / SCSI-midlayer entry points and the
 * per-adapter-type helpers defined later in this file.
 * NOTE(review): wait_q appears unused in this portion of the file — confirm
 * against the remainder before removing.
 */
static wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
     struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
  struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
    const struct pci_device_id *id);
static int __maybe_unused arcmsr_suspend(struct device *dev);
static int __maybe_unused arcmsr_resume(struct device *dev);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
 u32 intmask_org);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);
static int arcmsr_sdev_configure(struct scsi_device *sdev,
     struct queue_limits *lim);
/*
 * ->change_queue_depth hook: apply the requested per-device queue depth,
 * clamped to the driver's per-LUN maximum.
 */
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
 int depth = (queue_depth > ARCMSR_MAX_CMD_PERLUN) ?
   ARCMSR_MAX_CMD_PERLUN : queue_depth;

 return scsi_change_queue_depth(sdev, depth);
}

/* SCSI host template: wires the driver's entry points into the midlayer. */
static const struct scsi_host_template arcmsr_scsi_host_template = {
 .module   = THIS_MODULE,
 .proc_name  = ARCMSR_NAME,
 .name   = "Areca SAS/SATA RAID driver",
 .info   = arcmsr_info,
 .queuecommand  = arcmsr_queue_command,
 .eh_abort_handler = arcmsr_abort,
 .eh_bus_reset_handler = arcmsr_bus_reset,
 .bios_param  = arcmsr_bios_param,
 .sdev_configure  = arcmsr_sdev_configure,
 .change_queue_depth = arcmsr_adjust_disk_queue_depth,
 .can_queue  = ARCMSR_DEFAULT_OUTSTANDING_CMD, /* may be lowered at probe time */
 .this_id  = ARCMSR_SCSI_INITIATOR_ID,
 .sg_tablesize         = ARCMSR_DEFAULT_SG_ENTRIES,
 .max_sectors  = ARCMSR_MAX_XFER_SECTORS_C,
 .cmd_per_lun  = ARCMSR_DEFAULT_CMD_PERLUN,
 .shost_groups  = arcmsr_host_groups,
 .no_write_same  = 1,
};

/*
 * Supported Areca PCI devices.  driver_data selects the adapter register
 * model (ACB_ADAPTER_TYPE_A .. _F) used throughout this driver.
 */
static const struct pci_device_id arcmsr_device_id_table[] = {
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
  .driver_data = ACB_ADAPTER_TYPE_B},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
  .driver_data = ACB_ADAPTER_TYPE_B},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
  .driver_data = ACB_ADAPTER_TYPE_B},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
  .driver_data = ACB_ADAPTER_TYPE_B},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
  .driver_data = ACB_ADAPTER_TYPE_D},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
  .driver_data = ACB_ADAPTER_TYPE_A},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
  .driver_data = ACB_ADAPTER_TYPE_C},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1883),
  .driver_data = ACB_ADAPTER_TYPE_C},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
  .driver_data = ACB_ADAPTER_TYPE_E},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886_0),
  .driver_data = ACB_ADAPTER_TYPE_F},
 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
  .driver_data = ACB_ADAPTER_TYPE_F},
 {0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);

/* System suspend/resume hooks for power management. */
static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume);

/* PCI driver descriptor registered at module init. */
static struct pci_driver arcmsr_pci_driver = {
 .name   = "arcmsr",
 .id_table  = arcmsr_device_id_table,
 .probe   = arcmsr_probe,
 .remove   = arcmsr_remove,
 .driver.pm  = &arcmsr_pm_ops,
 .shutdown  = arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/


static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
{
 switch (acb->adapter_type) {
 case ACB_ADAPTER_TYPE_B:
 case ACB_ADAPTER_TYPE_D:
 case ACB_ADAPTER_TYPE_E:
 case ACB_ADAPTER_TYPE_F:
  dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
   acb->dma_coherent2, acb->dma_coherent_handle2);
  break;
 }
}

/*
 * Map the controller's PCI BAR register space for the adapter type and do
 * the minimal post-map initialization (clearing / synchronizing doorbells
 * where the hardware requires it).  Returns true on success, false if any
 * ioremap() fails (with partial mappings undone for type B).
 */
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
 struct pci_dev *pdev = acb->pdev;
 switch (acb->adapter_type){
 case ACB_ADAPTER_TYPE_A:{
  /* Type A: the whole MessageUnit_A lives in BAR 0. */
  acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
  if (!acb->pmuA) {
   printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
   return false;
  }
  break;
 }
 case ACB_ADAPTER_TYPE_B:{
  /* Type B: doorbells in BAR 0, message buffers in resource 2. */
  void __iomem *mem_base0, *mem_base1;
  mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
  if (!mem_base0) {
   printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
   return false;
  }
  mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
  if (!mem_base1) {
   iounmap(mem_base0); /* undo the first mapping on partial failure */
   printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
   return false;
  }
  acb->mem_base0 = mem_base0;
  acb->mem_base1 = mem_base1;
  break;
 }
 case ACB_ADAPTER_TYPE_C:{
  /* Type C: registers in BAR 1; ack any stale message-done doorbell. */
  acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
  if (!acb->pmuC) {
   printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
   return false;
  }
  if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
   writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
   return true;
  }
  break;
 }
 case ACB_ADAPTER_TYPE_D: {
  /* Type D: single mapping of BAR 0; register pointers assigned later. */
  void __iomem *mem_base0;
  unsigned long addr, range;

  addr = (unsigned long)pci_resource_start(pdev, 0);
  range = pci_resource_len(pdev, 0);
  mem_base0 = ioremap(addr, range);
  if (!mem_base0) {
   pr_notice("arcmsr%d: memory mapping region fail\n",
    acb->host->host_no);
   return false;
  }
  acb->mem_base0 = mem_base0;
  break;
  }
 case ACB_ADAPTER_TYPE_E: {
  /* Type E: BAR 1; reset interrupt status and doorbell bookkeeping. */
  acb->pmuE = ioremap(pci_resource_start(pdev, 1),
   pci_resource_len(pdev, 1));
  if (!acb->pmuE) {
   pr_notice("arcmsr%d: memory mapping region fail \n",
    acb->host->host_no);
   return false;
  }
  writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
  writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
  acb->in_doorbell = 0;
  acb->out_doorbell = 0;
  break;
  }
 case ACB_ADAPTER_TYPE_F: {
  /* Type F: BAR 0; same doorbell-sync sequence as type E. */
  acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
  if (!acb->pmuF) {
   pr_notice("arcmsr%d: memory mapping region fail\n",
    acb->host->host_no);
   return false;
  }
  writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
  writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
  acb->in_doorbell = 0;
  acb->out_doorbell = 0;
  break;
  }
 }
 return true;
}

/*
 * Undo arcmsr_remap_pciregion(): release the ioremap()ed BAR mapping(s)
 * for the given adapter type.
 */
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
 switch (acb->adapter_type) {
 case ACB_ADAPTER_TYPE_A:
  iounmap(acb->pmuA);
  break;
 case ACB_ADAPTER_TYPE_B:
  /* Type B mapped two regions; release both. */
  iounmap(acb->mem_base0);
  iounmap(acb->mem_base1);
  break;
 case ACB_ADAPTER_TYPE_C:
  iounmap(acb->pmuC);
  break;
 case ACB_ADAPTER_TYPE_D:
  iounmap(acb->mem_base0);
  break;
 case ACB_ADAPTER_TYPE_E:
  iounmap(acb->pmuE);
  break;
 case ACB_ADAPTER_TYPE_F:
  iounmap(acb->pmuF);
  break;
 }
}

/*
 * IRQ trampoline registered with request_irq(): recover the adapter
 * control block from dev_id and forward to the common handler.
 */
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
 struct AdapterControlBlock *acb = dev_id;

 return arcmsr_interrupt(acb);
}

/*
 * SCSI ->bios_param hook: report a CHS geometry for the BIOS.
 * If scsi_partsize() can derive a geometry from an existing partition
 * table it fills *geom and we return immediately; otherwise fall back to
 * the classic 64-head/32-sector heuristic, switching to 255/63 when the
 * cylinder count would exceed 1024.
 * NOTE(review): total_capacity is int while capacity is sector_t, so very
 * large devices truncate here — confirm this is acceptable for the
 * BIOS-geometry use case.
 */
static int arcmsr_bios_param(struct scsi_device *sdev,
  struct block_device *bdev, sector_t capacity, int *geom)
{
 int heads, sectors, cylinders, total_capacity;

 if (scsi_partsize(bdev, capacity, geom))
  return 0;

 total_capacity = capacity;
 heads = 64;
 sectors = 32;
 cylinders = total_capacity / (heads * sectors);
 if (cylinders > 1024) {
  heads = 255;
  sectors = 63;
  cylinders = total_capacity / (heads * sectors);
 }
 geom[0] = heads;
 geom[1] = sectors;
 geom[2] = cylinders;
 return 0;
}

/*
 * Wait (up to ~20 s, polling every 10 ms) for a type-A IOP to raise the
 * MESSAGE0 outbound interrupt, then acknowledge it by writing the same
 * bit back to the status register.
 * Returns true when the message completed, false on timeout.
 */
static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
{
 struct MessageUnit_A __iomem *reg = acb->pmuA;
 int i;

 for (i = 0; i < 2000; i++) {
  if (readl(&reg->outbound_intstatus) &
    ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
   writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
    &reg->outbound_intstatus); /* ack: write-1-to-clear */
   return true;
  }
  msleep(10);
 } /* max 20 seconds */

 return false;
}

/*
 * Wait (up to ~20 s, polling every 10 ms) for a type-B IOP to signal
 * "message command done" on its iop2drv doorbell, then clear the doorbell
 * and notify the IOP that the interrupt was handled.
 * Returns true when the message completed, false on timeout.
 */
static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
{
 struct MessageUnit_B *reg = acb->pmuB;
 int i;

 for (i = 0; i < 2000; i++) {
  if (readl(reg->iop2drv_doorbell)
   & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
   writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
     reg->iop2drv_doorbell); /* clear the doorbell */
   writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
     reg->drv2iop_doorbell); /* tell IOP we are done */
   return true;
  }
  msleep(10);
 } /* max 20 seconds */

 return false;
}

/*
 * Wait (up to ~20 s, polling every 10 ms) for a type-C IOP to post
 * "message command done" on the outbound doorbell, then acknowledge it
 * via the dedicated doorbell-clear register.
 * Returns true when the message completed, false on timeout.
 */
static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
 int i;

 for (i = 0; i < 2000; i++) {
  if (readl(&phbcmu->outbound_doorbell)
    & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
   writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
    &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
   return true;
  }
  msleep(10);
 } /* max 20 seconds */

 return false;
}

/*
 * Wait (up to ~20 s, polling every 10 ms) for a type-D (ARC-1214) IOP to
 * post "message command done" on the outbound doorbell, then acknowledge
 * by writing the same bit back.
 * Returns true when the message completed, false on timeout.
 */
static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
 struct MessageUnit_D *reg = pACB->pmuD;
 int i;

 for (i = 0; i < 2000; i++) {
  if (readl(reg->outbound_doorbell)
   & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
   writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
    reg->outbound_doorbell); /* ack by writing the bit back */
   return true;
  }
  msleep(10);
 } /* max 20 seconds */
 return false;
}

/*
 * Type-E message-done handshake: the driver caches the last doorbell
 * value it observed (pACB->in_doorbell); completion is signalled by the
 * MESSAGE_CMD_DONE bit differing (toggling) between the current iobound
 * doorbell value and that cached copy.  Poll up to ~20 s (2000 * 10 ms).
 * Returns true when the toggle is seen, false on timeout.
 */
static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
 int i;
 uint32_t read_doorbell;
 struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;

 for (i = 0; i < 2000; i++) {
  read_doorbell = readl(&phbcmu->iobound_doorbell);
  if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
   writel(0, &phbcmu->host_int_status); /*clear interrupt*/
   pACB->in_doorbell = read_doorbell; /* remember the new doorbell state */
   return true;
  }
  msleep(10);
 } /* max 20 seconds */
 return false;
}

/*
 * Ask a type-A IOP to flush its adapter cache and wait for completion.
 * Retries up to 30 times; each wait itself polls for up to 20 s.
 */
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
 struct MessageUnit_A __iomem *reg = acb->pmuA;
 int retry_count = 30;

 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 do {
  if (arcmsr_hbaA_wait_msgint_ready(acb))
   break;
  retry_count--;
  printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' timeout, retry count down = %d\n",
   acb->host->host_no, retry_count);
 } while (retry_count != 0);
}

/*
 * Ask a type-B IOP to flush its adapter cache and wait for completion.
 * Retries up to 30 times; each wait itself polls for up to 20 s.
 * (The timeout message was previously split with a backslash line
 * continuation inside the string literal, embedding stray whitespace.)
 */
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
{
 struct MessageUnit_B *reg = acb->pmuB;
 int retry_count = 30;

 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
 do {
  if (arcmsr_hbaB_wait_msgint_ready(acb))
   break;
  retry_count--;
  printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' timeout, retry count down = %d\n",
   acb->host->host_no, retry_count);
 } while (retry_count != 0);
}

/*
 * Ask a type-C IOP to flush its adapter cache: post the message, ring the
 * inbound doorbell, then wait for completion.  Retries up to 30 times
 * (enlarged wait for flushing the adapter cache: ~10 minutes total).
 */
static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
{
 struct MessageUnit_C __iomem *reg = pACB->pmuC;
 int retry_count = 30;

 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 do {
  if (arcmsr_hbaC_wait_msgint_ready(pACB))
   break;
  retry_count--;
  printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' timeout, retry count down = %d\n",
   pACB->host->host_no, retry_count);
 } while (retry_count != 0);
}

/*
 * Ask a type-D (ARC-1214) IOP to flush its adapter cache and wait for
 * completion.  Retries up to 15 times; each wait polls for up to 20 s.
 */
static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
{
 int retry_count = 15;
 struct MessageUnit_D *reg = pACB->pmuD;

 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
 do {
  if (arcmsr_hbaD_wait_msgint_ready(pACB))
   break;

  retry_count--;
  pr_notice("arcmsr%d: wait 'flush adapter "
   "cache' timeout, retry count down = %d\n",
   pACB->host->host_no, retry_count);
 } while (retry_count != 0);
}

/*
 * Ask a type-E/F IOP to flush its adapter cache: post the message, toggle
 * the message-done bit in the cached outbound doorbell state, ring the
 * doorbell, then wait for the IOP's answering toggle.  Retries up to 30
 * times; each wait polls for up to 20 s.
 */
static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
{
 int retry_count = 30;
 struct MessageUnit_E __iomem *reg = pACB->pmuE;

 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
 writel(pACB->out_doorbell, &reg->iobound_doorbell);
 do {
  if (arcmsr_hbaE_wait_msgint_ready(pACB))
   break;
  retry_count--;
  pr_notice("arcmsr%d: wait 'flush adapter "
   "cache' timeout, retry count down = %d\n",
   pACB->host->host_no, retry_count);
 } while (retry_count != 0);
}

/*
 * Dispatch the "flush adapter cache" request to the adapter-specific
 * helper.  Type F shares the type-E message protocol.
 */
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
 if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
  arcmsr_hbaA_flush_cache(acb);
 else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
  arcmsr_hbaB_flush_cache(acb);
 else if (acb->adapter_type == ACB_ADAPTER_TYPE_C)
  arcmsr_hbaC_flush_cache(acb);
 else if (acb->adapter_type == ACB_ADAPTER_TYPE_D)
  arcmsr_hbaD_flush_cache(acb);
 else if (acb->adapter_type == ACB_ADAPTER_TYPE_E ||
   acb->adapter_type == ACB_ADAPTER_TYPE_F)
  arcmsr_hbaE_flush_cache(acb);
}

/*
 * Fill in the type-B MessageUnit register pointers.  The ARC-1203 uses a
 * different set of doorbell offsets in the first mapped region than the
 * other type-B boards; the message buffers always come from the second
 * mapped region (MEM_BASE1).
 */
static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
{
 struct MessageUnit_B *reg = acb->pmuB;

 if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
  reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
  reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
  reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
  reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
 } else {
  reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
  reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
  reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
  reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
 }
 reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
 reg->message_rbuffer =  MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
 reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
}

/*
 * Fill in the type-D (ARC-1214) MessageUnit_D register pointers.  Every
 * register is located in the single mapped region via MEM_BASE0 at the
 * ARC1214-specific offsets.
 */
static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
{
 struct MessageUnit_D *reg = acb->pmuD;

 reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
 reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
 reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
 reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
 reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
 reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
 reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
 reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
 reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
 reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
 reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
 reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
 reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
 reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
 reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
 reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
 reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
 reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
 reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
 reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
 reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
 reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
 reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
 reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
 reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
 reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}

/*
 * Type-F (ARC-1886): carve the message write/read and msgcode buffers out
 * of the coherent DMA area that follows the completion queue, then program
 * the IOP with the buffer's bus address via the inbound message registers.
 */
static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
{
 dma_addr_t host_buffer_dma;
 struct MessageUnit_F __iomem *pmuF;

 memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
 /* message buffers start right after the completion queue, 4-byte aligned */
 acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
  acb->completeQ_size, 4);
 acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
 acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
 memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
 host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
 pmuF = acb->pmuF;
 /* host buffer low address, bit0:1 all buffer active */
 writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
 /* host buffer high address */
 writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
 /* set host buffer physical address */
 writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
}

/*
 * Allocate the coherent DMA area used for the adapter's message unit /
 * completion queue (adapter types B, D, E, F; types A and C need none)
 * and perform the type-specific queue setup.  Returns true on success,
 * false on allocation failure.
 */
static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
{
 bool rtn = true;
 void *dma_coherent;
 dma_addr_t dma_coherent_handle;
 struct pci_dev *pdev = acb->pdev;

 switch (acb->adapter_type) {
 case ACB_ADAPTER_TYPE_B: {
  /* Type B: the MessageUnit_B itself lives in DMA memory. */
  acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
  dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
   &dma_coherent_handle, GFP_KERNEL);
  if (!dma_coherent) {
   pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
   return false;
  }
  acb->dma_coherent_handle2 = dma_coherent_handle;
  acb->dma_coherent2 = dma_coherent;
  acb->pmuB = (struct MessageUnit_B *)dma_coherent;
  arcmsr_hbaB_assign_regAddr(acb);
  }
  break;
 case ACB_ADAPTER_TYPE_D: {
  /* Type D: same scheme with MessageUnit_D. */
  acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
  dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
   &dma_coherent_handle, GFP_KERNEL);
  if (!dma_coherent) {
   pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
   return false;
  }
  acb->dma_coherent_handle2 = dma_coherent_handle;
  acb->dma_coherent2 = dma_coherent;
  acb->pmuD = (struct MessageUnit_D *)dma_coherent;
  arcmsr_hbaD_assign_regAddr(acb);
  }
  break;
 case ACB_ADAPTER_TYPE_E: {
  /* Type E: allocate the fixed-size completion queue. */
  uint32_t completeQ_size;
  completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
  acb->ioqueue_size = roundup(completeQ_size, 32);
  dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
   &dma_coherent_handle, GFP_KERNEL);
  if (!dma_coherent){
   pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
   return false;
  }
  acb->dma_coherent_handle2 = dma_coherent_handle;
  acb->dma_coherent2 = dma_coherent;
  acb->pCompletionQ = dma_coherent;
  acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
  acb->doneq_index = 0;
  }
  break;
 case ACB_ADAPTER_TYPE_F: {
  /*
   * Type F: the firmware reports its queue depth (encoded in the
   * low 3 bits of outbound_msgaddr1, indexing depthTbl); the
   * completion queue and the message buffers share one area.
   */
  uint32_t QueueDepth;
  uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};

  arcmsr_wait_firmware_ready(acb);
  QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
  acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
  acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
  dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
   &dma_coherent_handle, GFP_KERNEL);
  if (!dma_coherent) {
   pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
   return false;
  }
  acb->dma_coherent_handle2 = dma_coherent_handle;
  acb->dma_coherent2 = dma_coherent;
  acb->pCompletionQ = dma_coherent;
  acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
  acb->doneq_index = 0;
  arcmsr_hbaF_assign_regAddr(acb);
  }
  break;
 default:
  break;
 }
 return rtn;
}

static int arcmsr_alloc_xor_buffer(struct AdapterControlBlock *acb)
{
 int rc = 0;
 struct pci_dev *pdev = acb->pdev;
 void *dma_coherent;
 dma_addr_t dma_coherent_handle;
 int i, xor_ram;
 struct Xor_sg *pXorPhys;
 void **pXorVirt;
 struct HostRamBuf *pRamBuf;

 // allocate 1 MB * N physically continuous memory for XOR engine.
 xor_ram = (acb->firm_PicStatus >> 24) & 0x0f;
 acb->xor_mega = (xor_ram - 1) * 32 + 128 + 3;
 acb->init2cfg_size = sizeof(struct HostRamBuf) +
  (sizeof(struct XorHandle) * acb->xor_mega);
 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->init2cfg_size,
  &dma_coherent_handle, GFP_KERNEL);
 acb->xorVirt = dma_coherent;
 acb->xorPhys = dma_coherent_handle;
 pXorPhys = (struct Xor_sg *)((unsigned long)dma_coherent +
  sizeof(struct HostRamBuf));
 acb->xorVirtOffset = sizeof(struct HostRamBuf) +
  (sizeof(struct Xor_sg) * acb->xor_mega);
 pXorVirt = (void **)((unsigned long)dma_coherent +
  (unsigned long)acb->xorVirtOffset);
 for (i = 0; i < acb->xor_mega; i++) {
  dma_coherent = dma_alloc_coherent(&pdev->dev,
   ARCMSR_XOR_SEG_SIZE,
   &dma_coherent_handle, GFP_KERNEL);
  if (dma_coherent) {
   pXorPhys->xorPhys = dma_coherent_handle;
   pXorPhys->xorBufLen = ARCMSR_XOR_SEG_SIZE;
   *pXorVirt = dma_coherent;
   pXorPhys++;
   pXorVirt++;
  } else {
   pr_info("arcmsr%d: alloc max XOR buffer = 0x%x MB\n",
    acb->host->host_no, i);
   rc = -ENOMEM;
   break;
  }
 }
 pRamBuf = (struct HostRamBuf *)acb->xorVirt;
 pRamBuf->hrbSignature = 0x53425248; //HRBS
 pRamBuf->hrbSize = i * ARCMSR_XOR_SEG_SIZE;
 pRamBuf->hrbRes[0] = 0;
 pRamBuf->hrbRes[1] = 0;
 return rc;
}

/*
 * Allocate and initialize the pool of command control blocks (CCBs) in one
 * coherent DMA area, then (for types B/D/E) set up the message unit /
 * completion queue that shares the tail of that area, and finally the XOR
 * buffers if the firmware advertises XOR RAM.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
 struct pci_dev *pdev = acb->pdev;
 void *dma_coherent;
 dma_addr_t dma_coherent_handle;
 struct CommandControlBlock *ccb_tmp;
 int i = 0, j = 0;
 unsigned long cdb_phyaddr, next_ccb_phy;
 unsigned long roundup_ccbsize;
 unsigned long max_xfer_len;
 unsigned long max_sg_entrys;
 uint32_t  firm_config_version, curr_phy_upper32;

 /* until the device map arrives, every target/LUN is considered gone */
 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
  for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
   acb->devstate[i][j] = ARECA_RAID_GONE;

 /* firmware config version >= 3 reports a larger SG page length */
 max_xfer_len = ARCMSR_MAX_XFER_LEN;
 max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
 firm_config_version = acb->firm_cfg_version;
 if((firm_config_version & 0xFF) >= 3){
  max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
  max_sg_entrys = (max_xfer_len/4096);
 }
 acb->host->max_sectors = max_xfer_len/512;
 acb->host->sg_tablesize = max_sg_entrys;
 /* CCB size includes the variable SG list; 32-byte aligned */
 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
 acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
 if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
  acb->uncache_size += acb->ioqueue_size;
 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
 if(!dma_coherent){
  printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
  return -ENOMEM;
 }
 acb->dma_coherent = dma_coherent;
 acb->dma_coherent_handle = dma_coherent_handle;
 memset(dma_coherent, 0, acb->uncache_size);
 acb->ccbsize = roundup_ccbsize;
 ccb_tmp = dma_coherent;
 curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
 /* offset used to translate CCB virtual addresses back to bus addresses */
 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
 for(i = 0; i < acb->maxFreeCCB; i++){
  cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
  switch (acb->adapter_type) {
  case ACB_ADAPTER_TYPE_A:
  case ACB_ADAPTER_TYPE_B:
   /* types A/B post the CDB address pre-shifted by 5 (32-byte units) */
   ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
   break;
  case ACB_ADAPTER_TYPE_C:
  case ACB_ADAPTER_TYPE_D:
  case ACB_ADAPTER_TYPE_E:
  case ACB_ADAPTER_TYPE_F:
   ccb_tmp->cdb_phyaddr = cdb_phyaddr;
   break;
  }
  acb->pccb_pool[i] = ccb_tmp;
  ccb_tmp->acb = acb;
  ccb_tmp->smid = (u32)i << 16;
  INIT_LIST_HEAD(&ccb_tmp->list);
  next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
  if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
   /* pool must not straddle a 4 GiB boundary: stop here and shrink */
   acb->maxFreeCCB = i;
   acb->host->can_queue = i;
   break;
  }
  else
   list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
  ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
  dma_coherent_handle = next_ccb_phy;
 }
 if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
  /* the message unit / completion queue follows the CCB pool */
  acb->dma_coherent_handle2 = dma_coherent_handle;
  acb->dma_coherent2 = ccb_tmp;
 }
 switch (acb->adapter_type) {
 case ACB_ADAPTER_TYPE_B:
  acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
  arcmsr_hbaB_assign_regAddr(acb);
  break;
 case ACB_ADAPTER_TYPE_D:
  acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
  arcmsr_hbaD_assign_regAddr(acb);
  break;
 case ACB_ADAPTER_TYPE_E:
  acb->pCompletionQ = acb->dma_coherent2;
  acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
  acb->doneq_index = 0;
  break;
 }
 /* nonzero XOR RAM size in firm_PicStatus means the IOP wants XOR buffers */
 if ((acb->firm_PicStatus >> 24) & 0x0f) {
  if (arcmsr_alloc_xor_buffer(acb))
   return -ENOMEM;
 }
 return 0;
}

static void arcmsr_message_isr_bh_fn(struct work_struct *work) 
{
 struct AdapterControlBlock *acb = container_of(work,
  struct AdapterControlBlock, arcmsr_do_message_isr_bh);
 char *acb_dev_map = (char *)acb->device_map;
 uint32_t __iomem *signature = NULL;
 char __iomem *devicemap = NULL;
 int target, lun;
 struct scsi_device *psdev;
 char diff, temp;

 switch (acb->adapter_type) {
 case ACB_ADAPTER_TYPE_A: {
  struct MessageUnit_A __iomem *reg  = acb->pmuA;

  signature = (uint32_t __iomem *)(®->message_rwbuffer[0]);
  devicemap = (char __iomem *)(®->message_rwbuffer[21]);
  break;
 }
 case ACB_ADAPTER_TYPE_B: {
  struct MessageUnit_B *reg  = acb->pmuB;

  signature = (uint32_t __iomem *)(®->message_rwbuffer[0]);
  devicemap = (char __iomem *)(®->message_rwbuffer[21]);
  break;
 }
 case ACB_ADAPTER_TYPE_C: {
  struct MessageUnit_C __iomem *reg  = acb->pmuC;

  signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
  devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
  break;
 }
 case ACB_ADAPTER_TYPE_D: {
  struct MessageUnit_D *reg  = acb->pmuD;

  signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
  devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
  break;
 }
 case ACB_ADAPTER_TYPE_E: {
  struct MessageUnit_E __iomem *reg  = acb->pmuE;

  signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
  devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
  break;
  }
 case ACB_ADAPTER_TYPE_F: {
  signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
  devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
  break;
  }
 }
 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
  return;
 for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
  target++) {
  temp = readb(devicemap);
  diff = (*acb_dev_map) ^ temp;
  if (diff != 0) {
   *acb_dev_map = temp;
   for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
    lun++) {
    if ((diff & 0x01) == 1 &&
     (temp & 0x01) == 1) {
     scsi_add_device(acb->host,
      0, target, lun);
    } else if ((diff & 0x01) == 1
     && (temp & 0x01) == 0) {
     psdev = scsi_device_lookup(acb->host,
      0, target, lun);
     if (psdev != NULL) {
      scsi_remove_device(psdev);
      scsi_device_put(psdev);
     }
    }
    temp >>= 1;
    diff >>= 1;
   }
  }
  devicemap++;
  acb_dev_map++;
 }
 acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
}

/*
 * arcmsr_request_irq - allocate interrupt vectors and install the ISR.
 *
 * Tries MSI-X first (when the msix_enable module parameter allows it),
 * then MSI, then legacy INTx.  Shared-IRQ flags are only needed for the
 * MSI/INTx fallback path.  Returns SUCCESS or FAILED (SCSI-style codes).
 *
 * Note the goto into the else-branch: msi_int0 is the entry point for
 * the non-MSI-X path, msi_int1 skips the INTx allocation once MSI
 * succeeded.  Keep the label placement intact.
 */
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
	unsigned long flags;
	int nvec, i;

	if (msix_enable == 0)
		goto msi_int0;
	nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
			PCI_IRQ_MSIX);
	if (nvec > 0) {
		pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
		flags = 0;	/* exclusive vectors: no IRQF_SHARED */
	} else {
msi_int0:
		if (msi_enable == 1) {
			nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
			if (nvec == 1) {
				dev_info(&pdev->dev, "msi enabled\n");
				goto msi_int1;
			}
		}
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (nvec < 1)
			return FAILED;
msi_int1:
		flags = IRQF_SHARED;
	}

	acb->vector_count = nvec;
	/* One handler per vector, all sharing the same adapter cookie. */
	for (i = 0; i < nvec; i++) {
		if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
				flags, "arcmsr", acb)) {
			pr_warn("arcmsr%d: request_irq =%d failed!\n",
				acb->host->host_no, pci_irq_vector(pdev, i));
			goto out_free_irq;
		}
	}

	return SUCCESS;
out_free_irq:
	/* Unwind only the vectors that were successfully requested. */
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), acb);
	pci_free_irq_vectors(pdev);
	return FAILED;
}

/*
 * arcmsr_init_get_devmap_timer - start the periodic device-map poll.
 *
 * Initializes the bottom-half work item and arms the "eternal" timer that
 * periodically requests the device map from the firmware
 * (arcmsr_request_device_map).  Must run before interrupts can queue the
 * work item.  The timer must be armed (expires set) before add_timer().
 */
static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
{
	INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	pacb->fw_flag = FW_NORMAL;
	timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
	/* NOTE(review): 6 * HZ passed as milliseconds — first fire is
	 * 6*HZ ms, not 6 s; matches long-standing driver behavior. */
	pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	add_timer(&pacb->eternal_timer);
}

/*
 * arcmsr_init_set_datetime_timer - start the periodic date/time refresh.
 *
 * Arms the timer that pushes the host's date/time to the controller
 * (arcmsr_set_iop_datetime), first firing 60 seconds from now.  Only
 * called when the set_date_time module parameter is enabled.
 */
static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
{
	timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
	pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);
	add_timer(&pacb->refresh_timer);
}

/*
 * arcmsr_set_dma_mask - pick a 64-bit or 32-bit DMA mask for the adapter.
 *
 * On 64-bit capable builds, tries a 64-bit streaming mask first; type A
 * adapters can be forced to 32-bit via the dma_mask_64 module parameter.
 * Types A/B stop after the streaming mask (their coherent buffers stay
 * 32-bit addressable); newer types also set a 64-bit coherent mask.
 * Falls back to full 32-bit masks via the dma32 label (goto into the
 * else-branch — keep the label placement).  Returns 0 or -ENXIO.
 */
static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
{
	struct pci_dev *pcidev = acb->pdev;

	if (IS_DMA64) {
		if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
		    dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
			goto dma32;
		if (acb->adapter_type <= ACB_ADAPTER_TYPE_B)
			return 0;
		if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
		    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
			printk("arcmsr: set DMA 64 mask failed\n");
			return -ENXIO;
		}
	} else {
dma32:
		if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
		    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
			printk("arcmsr: set DMA 32-bit mask failed\n");
			return -ENXIO;
		}
	}
	return 0;
}

/*
 * arcmsr_probe - PCI probe: bring up one Areca RAID adapter.
 *
 * Enables the PCI device, allocates the Scsi_Host with the adapter
 * control block as hostdata, sets DMA masks, maps BARs, allocates the
 * I/O queue and CCB pool, registers with the SCSI midlayer, hooks up
 * interrupts, initializes the IOP, and starts the housekeeping timers.
 * Errors unwind through the goto ladder at the bottom in strict reverse
 * order of acquisition.  Returns 0 on success, -ENODEV on any failure.
 *
 * NOTE(review): arcmsr_remap_pciregion(), arcmsr_alloc_io_queue() and
 * arcmsr_get_firmware_spec() use "nonzero == success" here (failure is
 * "if (!error)"), the opposite of arcmsr_alloc_ccb_pool()/scsi_add_host().
 */
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
		goto pci_disable_dev;
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	/* driver_data carries the ACB_ADAPTER_TYPE_* for this PCI id. */
	acb->adapter_type = id->driver_data;
	if (arcmsr_set_dma_mask(acb))
		goto scsi_host_release;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;	 			/*this is issue of 64bit LBA ,over 2T byte*/
	/* Clamp module parameters into their supported ranges. */
	if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
		host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
	host->can_queue = host_can_queue;	/* max simultaneous cmds */
	if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
		cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	spin_lock_init(&acb->postq_lock);
	spin_lock_init(&acb->doneq_lock);
	spin_lock_init(&acb->rqbuffer_lock);
	spin_lock_init(&acb->wqbuffer_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_alloc_io_queue(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto free_hbb_mu;
	}
	/* Type F keeps its io queue; others reallocate it inside the
	 * CCB pool region, so free the standalone one first. */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto unmap_pci_region;
	}
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto free_ccb_pool;
	}
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto scsi_host_remove;
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	scsi_scan_host(host);
	return 0;
out_free_sysfs:
	/* sysfs failed after full bring-up: stop timers/work/firmware first. */
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	timer_delete_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_irq(pdev, acb);
scsi_host_remove:
	scsi_remove_host(host);
free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
	goto unmap_pci_region;
free_hbb_mu:
	arcmsr_free_io_queue(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}

/*
 * arcmsr_free_irq - release all requested IRQ lines and their vectors.
 *
 * Counterpart of arcmsr_request_irq(): frees every handler that was
 * installed (acb->vector_count of them) and then gives the MSI-X/MSI/INTx
 * vectors back to the PCI core.
 */
static void arcmsr_free_irq(struct pci_dev *pdev,
		struct AdapterControlBlock *acb)
{
	int vec;

	for (vec = acb->vector_count - 1; vec >= 0; vec--)
		free_irq(pci_irq_vector(pdev, vec), acb);
	pci_free_irq_vectors(pdev);
}

/*
 * arcmsr_suspend - PM suspend hook: quiesce the adapter.
 *
 * Masks outbound interrupts, tears down IRQ handlers, stops both
 * housekeeping timers and the message bottom-half, then asks the
 * firmware to stop background rebuild and flush its cache.
 */
static int __maybe_unused arcmsr_suspend(struct device *dev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pcidev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)shost->hostdata;

	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pcidev, acb);
	timer_delete_sync(&acb->eternal_timer);
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	return 0;
}

/*
 * arcmsr_resume - PM resume hook: re-initialize the adapter.
 *
 * Re-applies the DMA mask, re-requests interrupts, resets the
 * adapter-type-specific queue/doorbell state that was lost across
 * suspend, then re-runs IOP init and restarts the timers.  On failure
 * the controller is unregistered entirely (returns -ENODEV).
 */
static int __maybe_unused arcmsr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	if (arcmsr_set_dma_mask(acb))
		goto controller_unregister;
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto controller_stop;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B: {
		/* Type B: clear the host-memory post/done queues. */
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t i;
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			reg->post_qbuffer[i] = 0;
			reg->done_qbuffer[i] = 0;
		}
		reg->postq_index = 0;
		reg->doneq_index = 0;
		break;
		}
	case ACB_ADAPTER_TYPE_E:
		/* Type E: resync doorbell handshake state with the IOP. */
		writel(0, &acb->pmuE->host_int_status);
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		break;
	case ACB_ADAPTER_TYPE_F:
		/* Type F: same doorbell resync plus register re-assignment. */
		writel(0, &acb->pmuF->host_int_status);
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		arcmsr_hbaF_assign_regAddr(acb);
		break;
	}
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	return 0;
controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
controller_unregister:
	scsi_remove_host(host);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	scsi_host_put(host);
	return -ENODEV;
}

/*
 * arcmsr_hbaA_abort_allcmd - ask a type A IOP to abort all outstanding cmds.
 *
 * Posts ABORT_CMD to inbound message register 0 and waits for the message
 * interrupt acknowledgment.  Returns true on ack, false on timeout.
 * (Fixes "®->" extraction mojibake back to "&reg->".)
 */
static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
			, acb->host->host_no);
		return false;
	}
	return true;
}

/*
 * arcmsr_hbaB_abort_allcmd - ask a type B IOP to abort all outstanding cmds.
 *
 * Rings the driver-to-IOP doorbell with the abort message and waits for
 * the acknowledgment.  Returns true on ack, false on timeout.
 */
static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *mu = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, mu->drv2iop_doorbell);
	if (arcmsr_hbaB_wait_msgint_ready(acb))
		return true;
	printk(KERN_NOTICE
		"arcmsr%d: wait 'abort all outstanding command' timeout\n"
		, acb->host->host_no);
	return false;
}
/*
 * arcmsr_hbaC_abort_allcmd - ask a type C IOP to abort all outstanding cmds.
 *
 * Type C needs the message register write followed by an explicit
 * doorbell kick before the IOP processes it.  Returns true on ack,
 * false on timeout.  (Fixes "®->" extraction mojibake back to "&reg->".)
 */
static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
			, pACB->host->host_no);
		return false;
	}
	return true;
}

/*
 * arcmsr_hbaD_abort_allcmd - ask a type D IOP to abort all outstanding cmds.
 *
 * Posts ABORT_CMD to the inbound message register and waits for the
 * acknowledgment.  Returns true on ack, false on timeout.
 */
static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *mu = pACB->pmuD;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, mu->inbound_msgaddr0);
	if (arcmsr_hbaD_wait_msgint_ready(pACB))
		return true;
	pr_notice("arcmsr%d: wait 'abort all outstanding "
		"command' timeout\n", pACB->host->host_no);
	return false;
}

/*
 * arcmsr_hbaE_abort_allcmd - ask a type E/F IOP to abort all outstanding cmds.
 *
 * Type E uses a toggling doorbell bit: flip MESSAGE_CMD_DONE in the
 * cached out_doorbell value and write it back so the IOP sees an edge.
 * Returns true on ack, false on timeout.
 * (Fixes "®->" extraction mojibake back to "&reg->".)
 */
static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'abort all outstanding "
			"command' timeout\n", pACB->host->host_no);
		return false;
	}
	return true;
}

/*
 * arcmsr_abort_allcmd - dispatch "abort all commands" to the right HBA type.
 *
 * Returns the per-type helper's result (true = IOP acknowledged),
 * or 0 for an unknown adapter type.
 */
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return arcmsr_hbaA_abort_allcmd(acb);
	case ACB_ADAPTER_TYPE_B:
		return arcmsr_hbaB_abort_allcmd(acb);
	case ACB_ADAPTER_TYPE_C:
		return arcmsr_hbaC_abort_allcmd(acb);
	case ACB_ADAPTER_TYPE_D:
		return arcmsr_hbaD_abort_allcmd(acb);
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		/* Types E and F share the doorbell-toggle protocol. */
		return arcmsr_hbaE_abort_allcmd(acb);
	default:
		return 0;
	}
}

/*
 * arcmsr_ccb_complete - finish a CCB and hand the command back to the midlayer.
 *
 * Drops the outstanding count, unmaps DMA, marks the CCB done, returns it
 * to the free list under ccblist_lock, then completes the SCSI command.
 */
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long lock_flags;

	atomic_dec(&acb->ccboutstandingcount);
	scsi_dma_unmap(pcmd);
	ccb->startdone = ARCMSR_CCB_DONE;

	spin_lock_irqsave(&acb->ccblist_lock, lock_flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, lock_flags);

	scsi_done(pcmd);
}

static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
 struct scsi_cmnd *pcmd = ccb->pcmd;

 pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
 if (pcmd->sense_buffer) {
  struct SENSE_DATA *sensebuffer;

  memcpy_and_pad(pcmd->sense_buffer,
          SCSI_SENSE_BUFFERSIZE,
          ccb->arcmsr_cdb.SenseData,
          sizeof(ccb->arcmsr_cdb.SenseData),
          0);

  sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
  sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
  sensebuffer->Valid = 1;
 }
}

static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
 u32 orig_mask = 0;
 switch (acb->adapter_type) { 
 case ACB_ADAPTER_TYPE_A : {
  struct MessageUnit_A __iomem *reg = acb->pmuA;
  orig_mask = readl(®->outbound_intmask);
  writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
      ®->outbound_intmask);
  }
  break;
 case ACB_ADAPTER_TYPE_B : {
  struct MessageUnit_B *reg = acb->pmuB;
  orig_mask = readl(reg->iop2drv_doorbell_mask);
  writel(0, reg->iop2drv_doorbell_mask);
  }
  break;
 case ACB_ADAPTER_TYPE_C:{
  struct MessageUnit_C __iomem *reg = acb->pmuC;
  /* disable all outbound interrupt */
  orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */
  writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
  }
  break;
 case ACB_ADAPTER_TYPE_D: {
  struct MessageUnit_D *reg = acb->pmuD;
  /* disable all outbound interrupt */
  writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
  }
  break;
 case ACB_ADAPTER_TYPE_E:
 case ACB_ADAPTER_TYPE_F: {
  struct MessageUnit_E __iomem *reg = acb->pmuE;
  orig_mask = readl(®->host_int_mask);
  writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, ®->host_int_mask);
  readl(®->host_int_mask); /* Dummy readl to force pci flush */
  }
  break;
 }
 return orig_mask;
}

/*
 * arcmsr_report_ccb_state - translate firmware CCB status to a SCSI result.
 *
 * On success, marks the device good again (it may have been flagged GONE)
 * and completes with DID_OK.  On error, maps the firmware DeviceStatus to
 * the appropriate host byte and updates the per-target/LUN devstate table.
 * Every path ends in arcmsr_ccb_complete().
 */
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	}else{
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			/* device did not respond: treat as disconnected */
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_ABORTED:
			/* fallthrough: aborted and init-fail map the same */
		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			/* device is fine; pass the sense data upward */
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;

		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
				but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				ccb->pcmd->result = DID_NO_CONNECT << 16;
				arcmsr_ccb_complete(ccb);
			break;
		}
	}
}

/*
 * arcmsr_drain_donequeue - validate and complete one CCB from the done queue.
 *
 * Rejects CCBs that belong to a different adapter or are not in the
 * START state: an ABORTED CCB is completed with DID_ABORT, anything else
 * is logged as illegal and dropped.  Valid CCBs are handed to
 * arcmsr_report_ccb_state() with the per-entry error flag.
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
				acb->host->host_no, pCCB);
			}
			return;
		}
		/* Neither in flight nor aborted: stale/corrupt entry. */
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
			done acb = '0x%p'"
			"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
			" ccboutstandingcount = %d \n"
			, acb->host->host_no
			, acb
			, pCCB
			, pCCB->acb
			, pCCB->startdone
			, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}

static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
 int i = 0;
 uint32_t flag_ccb;
 struct ARCMSR_CDB *pARCMSR_CDB;
 bool error;
 struct CommandControlBlock *pCCB;
 unsigned long ccb_cdb_phy;

 switch (acb->adapter_type) {

 case ACB_ADAPTER_TYPE_A: {
  struct MessageUnit_A __iomem *reg = acb->pmuA;
  uint32_t outbound_intstatus;
  outbound_intstatus = readl(®->outbound_intstatus) &
     acb->outbound_int_enable;
  /*clear and abort all outbound posted Q*/
  writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
  while(((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF)
    && (i++ < acb->maxOutstanding)) {
   ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
   if (acb->cdb_phyadd_hipart)
    ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
   pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
   pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
   error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
   arcmsr_drain_donequeue(acb, pCCB, error);
  }
  }
  break;

 case ACB_ADAPTER_TYPE_B: {
  struct MessageUnit_B *reg = acb->pmuB;
  /*clear all outbound posted Q*/
  writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
  for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
   flag_ccb = reg->done_qbuffer[i];
   if (flag_ccb != 0) {
    reg->done_qbuffer[i] = 0;
    ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
    if (acb->cdb_phyadd_hipart)
     ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
    pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
    pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
    error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
    arcmsr_drain_donequeue(acb, pCCB, error);
   }
   reg->post_qbuffer[i] = 0;
  }
  reg->doneq_index = 0;
  reg->postq_index = 0;
  }
  break;
 case ACB_ADAPTER_TYPE_C: {
  struct MessageUnit_C __iomem *reg = acb->pmuC;
  while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
   /*need to do*/
   flag_ccb = readl(®->outbound_queueport_low);
   ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
   if (acb->cdb_phyadd_hipart)
    ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
   pARCMSR_CDB = (struct  ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
   pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
   error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
   arcmsr_drain_donequeue(acb, pCCB, error);
  }
  }
  break;
 case ACB_ADAPTER_TYPE_D: {
  struct MessageUnit_D  *pmu = acb->pmuD;
  uint32_t outbound_write_pointer;
  uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
  unsigned long flags;

  residual = atomic_read(&acb->ccboutstandingcount);
  for (i = 0; i < residual; i++) {
   spin_lock_irqsave(&acb->doneq_lock, flags);
   outbound_write_pointer =
    pmu->done_qbuffer[0].addressLow + 1;
   doneq_index = pmu->doneq_index;
   if ((doneq_index & 0xFFF) !=
    (outbound_write_pointer & 0xFFF)) {
    toggle = doneq_index & 0x4000;
    index_stripped = (doneq_index & 0xFFF) + 1;
    index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
    pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
     ((toggle ^ 0x4000) + 1);
    doneq_index = pmu->doneq_index;
    spin_unlock_irqrestore(&acb->doneq_lock, flags);
    addressLow = pmu->done_qbuffer[doneq_index &
     0xFFF].addressLow;
    ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
    if (acb->cdb_phyadd_hipart)
     ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
    pARCMSR_CDB = (struct  ARCMSR_CDB *)
     (acb->vir2phy_offset + ccb_cdb_phy);
    pCCB = container_of(pARCMSR_CDB,
     struct CommandControlBlock, arcmsr_cdb);
    error = (addressLow &
     ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
     true : false;
    arcmsr_drain_donequeue(acb, pCCB, error);
    writel(doneq_index,
     pmu->outboundlist_read_pointer);
   } else {
    spin_unlock_irqrestore(&acb->doneq_lock, flags);
    mdelay(10);
   }
  }
  pmu->postq_index = 0;
  pmu->doneq_index = 0x40FF;
  }
  break;
 case ACB_ADAPTER_TYPE_E:
  arcmsr_hbaE_postqueue_isr(acb);
  break;
 case ACB_ADAPTER_TYPE_F:
  arcmsr_hbaF_postqueue_isr(acb);
  break;
 }
}

static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
{
 char *acb_dev_map = (char *)acb->device_map;
 int target, lun, i;
 struct scsi_device *psdev;
 struct CommandControlBlock *ccb;
 char temp;

 for (i = 0; i < acb->maxFreeCCB; i++) {
  ccb = acb->pccb_pool[i];
  if (ccb->startdone == ARCMSR_CCB_START) {
   ccb->pcmd->result = DID_NO_CONNECT << 16;
   scsi_dma_unmap(ccb->pcmd);
   scsi_done(ccb->pcmd);
  }
 }
 for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
  temp = *acb_dev_map;
  if (temp) {
   for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
    if (temp & 1) {
     psdev = scsi_device_lookup(acb->host,
      0, target, lun);
     if (psdev != NULL) {
      scsi_remove_device(psdev);
      scsi_device_put(psdev);
     }
    }
    temp >>= 1;
   }
   *acb_dev_map = 0;
  }
  acb_dev_map++;
 }
}

/*
 * arcmsr_free_pcidev - release every host resource for a vanished adapter.
 *
 * Removal path that skips all hardware handshakes (the device is gone):
 * sysfs, SCSI host, work/timers, IRQs, CCB pool, io queue (type F only),
 * BAR mappings, PCI regions, host reference, and finally the PCI device.
 */
static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
{
	struct Scsi_Host *shost = acb->host;
	struct pci_dev *pcidev;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(shost);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	timer_delete_sync(&acb->eternal_timer);
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);

	pcidev = acb->pdev;
	arcmsr_free_irq(pcidev, acb);
	arcmsr_free_ccb_pool(acb);
	/* Type F allocates its io queue separately from the CCB pool. */
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pcidev);
	scsi_host_put(shost);
	pci_disable_device(pcidev);
}

/*
 * arcmsr_remove - PCI remove hook: orderly (or surprise) adapter teardown.
 *
 * Reading 0xffff from PCI config space means the device is physically
 * gone; in that case skip all hardware handshakes and just fail back
 * commands and free host resources.  Otherwise stop timers/work, mask
 * interrupts, stop background rebuild, flush the cache, poll for the
 * outstanding commands to drain, force-abort whatever remains, and then
 * release all resources in reverse order of acquisition.
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;
	uint16_t dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	if (dev_id == 0xffff) {
		/* surprise removal: device no longer on the bus */
		acb->acb_flags &= ~ACB_F_IOP_INITED;
		acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
		arcmsr_remove_scsi_devices(acb);
		arcmsr_free_pcidev(acb);
		return;
	}
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	timer_delete_sync(&acb->eternal_timer);
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	/* Poll completions until the outstanding count drains (25 ms/step). */
	for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		/* Drain stalled: force-abort and fail remaining CCBs. */
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < acb->maxFreeCCB; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}

/*
 * arcmsr_shutdown - PCI shutdown hook: quiesce firmware before power-off.
 *
 * No-op if the adapter was already surprise-removed.  Otherwise stops
 * timers, masks and releases interrupts, flushes pending work, and asks
 * the firmware to stop background rebuild and flush its cache.
 */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)shost->hostdata;

	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return;

	timer_delete_sync(&acb->eternal_timer);
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

/*
 * arcmsr_module_init - module entry: register the PCI driver.
 *
 * Returns pci_register_driver()'s result directly.
 */
static int __init arcmsr_module_init(void)
{
	return pci_register_driver(&arcmsr_pci_driver);
}

/*
 * arcmsr_module_exit - module exit: unregister the PCI driver, which
 * triggers arcmsr_remove() for every bound adapter.
 */
static void __exit arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);

/*
 * arcmsr_enable_outbound_ints - unmask the IOP-to-host interrupt sources.
 *
 * Counterpart of arcmsr_disable_outbound_ints(): @intmask_org is the mask
 * value that function returned, and each adapter type combines it with
 * its own enable bits.  Also caches the resulting enabled-source set in
 * acb->outbound_int_enable for types A/B/C.
 * (Fixes "®->" extraction mojibake back to "&reg->".)
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
					u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		/* clearing a mask bit enables that interrupt source */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
					ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
					ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		/* type B: setting a doorbell-mask bit enables the source */
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		break;
		}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		writel(intmask_org & mask, &reg->host_int_mask);
		break;
		}
	}
}

/*
 * arcmsr_build_ccb - fill an ARCMSR_CDB from a SCSI command, incl. SG list.
 *
 * Maps the command for DMA and appends one SG32ENTRY (32-bit address) or
 * SG64ENTRY (64-bit, tagged with IS_SG64_ADDR) per scatter element into
 * the CDB's variable-length tail, tracking the total CDB size.  Sets the
 * SGL_BSIZE flag when the CDB exceeds 256 bytes and the WRITE flag for
 * host-to-device transfers.  Returns SUCCESS, or FAILED when the mapping
 * fails or yields more segments than the host supports.
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed CDB header size before the SG list */
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;
	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	/* nseg < 0: mapping error; nseg too large: can't describe it */
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			/* fits in 32 bits: use the compact entry */
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			/* IS_SG64_ADDR in the length word marks a 64-bit entry */
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	/* CDB size in 256-byte pages, rounded up */
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}

/*
 * arcmsr_post_ccb - hand a built CCB to the IOP using the type's protocol.
 *
 * Bumps the outstanding count, marks the CCB in flight, then posts it:
 *  A: write (phys>>5) to the MMIO queue port, tagging large SG lists;
 *  B: slot into the circular host-memory post queue and ring the doorbell;
 *  C: 64-bit address pair with an encoded CDB size in the low bits;
 *  D: fill an InBound_SRB slot under postq_lock, advance the toggled index;
 *  E/F: post smid plus an encoded CDB-size field to the queue port.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
			&reg->inbound_queueport);
		else
			writel(cdb_phyaddr, &reg->inbound_queueport);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		/* pre-clear the slot after ours so the IOP sees the end */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
		} else {
			reg->post_qbuffer[index] = cdb_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		/* encode (size-1)>>6 plus a valid bit into the low address bits */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
		writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
		writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D  *pmu = acb->pmuD;
		u16 index_stripped;
		u16 postq_index, toggle;
		unsigned long flags;
		struct InBound_SRB *pinbound_srb;

		spin_lock_irqsave(&acb->postq_lock, flags);
		postq_index = pmu->postq_index;
		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
		pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
		pinbound_srb->addressLow = cdb_phyaddr;
		pinbound_srb->length = ccb->arc_cdb_size >> 2;	/* in dwords */
		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
		/* advance the index, preserving/flipping the 0x4000 wrap toggle */
		toggle = postq_index & 0x4000;
		index_stripped = postq_index + 1;
		index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
			(toggle ^ 0x4000);
		writel(postq_index, pmu->inboundlist_write_pointer);
		spin_unlock_irqrestore(&acb->postq_lock, flags);
		break;
		}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *pmu = acb->pmuE;
		u32 ccb_post_stamp, arc_cdb_size;

		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
		}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *pmu = acb->pmuF;
		u32 ccb_post_stamp, arc_cdb_size;

		/* type F size encoding differs for small vs large CDBs */
		if (ccb->arc_cdb_size <= 0x300)
			arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
		else {
			arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
			if (arc_cdb_size > 0xF)
				arc_cdb_size = 0xF;
			arc_cdb_size = (arc_cdb_size << 1) | 1;
		}
		ccb_post_stamp = (ccb->smid | arc_cdb_size);
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
		}
	}
}

/*
 * arcmsr_hbaA_stop_bgrb - stop background rebuild on a Type-A adapter
 * @acb: per-adapter control block
 *
 * Clears the driver-side "background rebuild started" flag and posts the
 * STOP_BGRB message to the controller's inbound message register, then
 * waits (polled) for the firmware's message-interrupt acknowledgement.
 * On timeout only a notice is logged; there is no recovery action here.
 */
static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	/*
	 * writel() takes the address of the MMIO register; the source had
	 * "&reg" garbled into "®" by HTML-entity mangling — restored here.
	 */
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
			, acb->host->host_no);
	}
}

static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
{
 struct MessageUnit_B *reg = acb->pmuB;
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=99 H=90 G=94

¤ Dauer der Verarbeitung: 0.24 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.