/*
 * Set to enable driver to expose unconfigured disk to kernel
 */
static int megaraid_expose_unconf_disks = 0;
module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
MODULE_PARM_DESC(unconf_disks, "Set to expose unconfigured disks to kernel (default=0)");

/*
 * driver wait time if the adapter's mailbox is busy
 */
static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
MODULE_PARM_DESC(busy_wait, "Max wait for mailbox in microseconds if busy (default=10)");

/*
 * number of sectors per IO command
 */
static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
module_param_named(max_sectors, megaraid_max_sectors, int, 0);
MODULE_PARM_DESC(max_sectors, "Maximum number of sectors per IO command (default=128)");

/*
 * number of commands per logical unit
 */
static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
MODULE_PARM_DESC(cmd_per_lun, "Maximum number of commands per logical unit (default=64)");

/*
 * Fast driver load option, skip scanning for physical devices during load.
 * This would result in non-disk devices being skipped during driver load
 * time. These can be later added though, using /proc/scsi/scsi
 */
static unsigned int megaraid_fast_load;
module_param_named(fast_load, megaraid_fast_load, int, 0);
MODULE_PARM_DESC(fast_load, "Faster loading of the driver, skips physical devices! (default=0)");

/*
 * mraid_debug level - threshold for amount of information to be displayed by
 * the driver. This level can be changed through modules parameters, ioctl or
 * sysfs/proc interface. By default, print the announcement messages only.
 */
int mraid_debug_level = CL_ANN;
module_param_named(debug_level, mraid_debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
/** * megaraid_init - module load hook * * We register ourselves as hotplug enabled module and let PCI subsystem * discover our adapters.
*/ staticint __init
megaraid_init(void)
{ int rval;
// Announce the driver version
con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
MEGARAID_EXT_VERSION));
// check validity of module parameters if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
con_log(CL_ANN, (KERN_WARNING "megaraid mailbox: max commands per lun reset to %d\n",
MBOX_MAX_SCSI_CMDS));
megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
}
// register as a PCI hot-plug driver module
rval = pci_register_driver(&megaraid_pci_driver); if (rval < 0) {
con_log(CL_ANN, (KERN_WARNING "megaraid: could not register hotplug support.\n"));
}
return rval;
}
/** * megaraid_exit - driver unload entry point * * We simply unwrap the megaraid_init routine here.
*/ staticvoid __exit
megaraid_exit(void)
{
con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
// unregister as PCI hotplug driver
pci_unregister_driver(&megaraid_pci_driver);
return;
}
/** * megaraid_probe_one - PCI hotplug entry point * @pdev : handle to this controller's PCI configuration space * @id : pci device id of the class of controllers * * This routine should be called whenever a new adapter is detected by the * PCI hotplug susbsystem.
*/ staticint
megaraid_probe_one(struct pci_dev *pdev, conststruct pci_device_id *id)
{
adapter_t *adapter;
// detected a new controller
con_log(CL_ANN, (KERN_INFO "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
pdev->vendor, pdev->device, pdev->subsystem_vendor,
pdev->subsystem_device));
if (pci_enable_device(pdev)) {
con_log(CL_ANN, (KERN_WARNING "megaraid: pci_enable_device failed\n"));
return -ENODEV;
}
// Enable bus-mastering on this controller
pci_set_master(pdev);
// Allocate the per driver initialization structure
adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
if (adapter == NULL) {
con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d.\n", __func__, __LINE__));
goto out_probe_one;
}
// set up PCI related soft state and other pre-known parameters
adapter->unique_id = pci_dev_id(pdev);
adapter->irq = pdev->irq;
adapter->pdev = pdev;
atomic_set(&adapter->being_detached, 0);
// Setup the default DMA mask. This would be changed later on // depending on hardware capabilities if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING "megaraid: dma_set_mask failed:%d\n", __LINE__));
goto out_free_adapter;
}
// Initialize the synchronization lock for kernel and LLD
spin_lock_init(&adapter->lock);
// Initialize the command queues: the list of free SCBs and the list // of pending SCBs.
INIT_LIST_HEAD(&adapter->kscb_pool);
spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
// Start the mailbox based controller if (megaraid_init_mbox(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING "megaraid: mailbox adapter did not initialize\n"));
goto out_free_adapter;
}
// Register with LSI Common Management Module if (megaraid_cmm_register(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING "megaraid: could not register with management module\n"));
goto out_fini_mbox;
}
// setup adapter handle in PCI soft state
pci_set_drvdata(pdev, adapter);
// attach with scsi mid-layer if (megaraid_io_attach(adapter) != 0) {
/** * megaraid_detach_one - release framework resources and call LLD release routine * @pdev : handle for our PCI configuration space * * This routine is called during driver unload. We free all the allocated * resources and call the corresponding LLD so that it can also release all * its resources. * * This routine is also called from the PCI hotplug system.
*/ staticvoid
megaraid_detach_one(struct pci_dev *pdev)
{
adapter_t *adapter; struct Scsi_Host *host;
// Start a rollback on this adapter
adapter = pci_get_drvdata(pdev);
if (!adapter) {
con_log(CL_ANN, (KERN_CRIT "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
pdev->vendor, pdev->device, pdev->subsystem_vendor,
pdev->subsystem_device));
// do not allow any more requests from the management module for this // adapter. // FIXME: How do we account for the request which might still be // pending with us?
atomic_set(&adapter->being_detached, 1);
// detach from the IO sub-system
megaraid_io_detach(adapter);
// Unregister from common management module // // FIXME: this must return success or failure for conditions if there // is a command pending with LLD or not.
megaraid_cmm_unregister(adapter);
// finalize the mailbox based controller and release all resources
megaraid_fini_mbox(adapter);
/** * megaraid_io_attach - attach a device with the IO subsystem * @adapter : controller's soft state * * Attach this device with the IO subsystem.
*/ staticint
megaraid_io_attach(adapter_t *adapter)
{ struct Scsi_Host *host;
/** * megaraid_io_detach - detach a device from the IO subsystem * @adapter : controller's soft state * * Detach this device from the IO subsystem.
*/ staticvoid
megaraid_io_detach(adapter_t *adapter)
{ struct Scsi_Host *host;
/* * Allocate and initialize the init data structure for mailbox * controllers
*/
raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL); if (raid_dev == NULL) return -1;
/* * Attach the adapter soft state to raid device soft state
*/
adapter->raid_device = (caddr_t)raid_dev;
raid_dev->fast_load = megaraid_fast_load;
// Product info if (megaraid_mbox_product_info(adapter) != 0) goto out_free_irq;
// Do we support extended CDBs
adapter->max_cdb_sz = 10; if (megaraid_mbox_extended_cdb(adapter) == 0) {
adapter->max_cdb_sz = 16;
}
/* * Do we support cluster environment, if we do, what is the initiator * id. * NOTE: In a non-cluster aware firmware environment, the LLD should * return 7 as initiator id.
*/
adapter->ha = 0;
adapter->init_id = -1; if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
adapter->ha = 1;
}
/* * Prepare the device ids array to have the mapping between the kernel * device address and megaraid device address. * We export the physical devices on their actual addresses. The * logical drives are exported on a virtual SCSI channel
*/
megaraid_mbox_setup_device_map(adapter);
// If the firmware supports random deletion, update the device id map if (megaraid_mbox_support_random_del(adapter)) {
// Change the logical drives numbers in device_ids array one // slot in device_ids is reserved for target id, that's why // "<=" below for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
adapter->device_ids[adapter->max_channel][i] += 0x80;
}
adapter->device_ids[adapter->max_channel][adapter->init_id] =
0xFF;
raid_dev->random_del_supported = 1;
}
/* * find out the maximum number of scatter-gather elements supported by * this firmware
*/
adapter->sglen = megaraid_mbox_get_max_sg(adapter);
// enumerate RAID and SCSI channels so that all devices on SCSI // channels can later be exported, including disk devices
megaraid_mbox_enum_raid_scsi(adapter);
/* * Other parameters required by upper layer * * maximum number of sectors per IO command
*/
adapter->max_sectors = megaraid_max_sectors;
/* * number of queued commands per LUN.
*/
adapter->cmd_per_lun = megaraid_cmd_per_lun;
/* * Allocate resources required to issue FW calls, when sysfs is * accessed
*/ if (megaraid_sysfs_alloc_resources(adapter) != 0) goto out_free_irq;
// Set the DMA mask to 64-bit. All supported controllers as capable of // DMA in this range
pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
// flush all caches
megaraid_mbox_flush_cache(adapter);
tasklet_kill(&adapter->dpc_h);
megaraid_sysfs_free_resources(adapter);
megaraid_free_cmd_packets(adapter);
free_irq(adapter->irq, adapter);
iounmap(raid_dev->baseaddr);
pci_release_regions(adapter->pdev);
kfree(raid_dev);
return;
}
/** * megaraid_alloc_cmd_packets - allocate shared mailbox * @adapter : soft state of the raid controller * * Allocate and align the shared mailbox. This mailbox is used to issue * all the commands. For IO based controllers, the mailbox is also registered * with the FW. Allocate memory for all commands as well. * This is our big allocator.
*/ staticint
megaraid_alloc_cmd_packets(adapter_t *adapter)
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); struct pci_dev *pdev; unsignedlong align;
scb_t *scb;
mbox_ccb_t *ccb; struct mraid_pci_blk *epthru_pci_blk; struct mraid_pci_blk *sg_pci_blk; struct mraid_pci_blk *mbox_pci_blk; int i;
pdev = adapter->pdev;
/* * Setup the mailbox * Allocate the common 16-byte aligned memory for the handshake * mailbox.
*/
raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
&raid_dev->una_mbox64_dma,
GFP_KERNEL);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__,
__LINE__)); return -1;
}
/* * Align the mailbox at 16-byte boundary
*/
raid_dev->mbox = &raid_dev->una_mbox64->mbox32;
// Allocate memory for commands issued internally
adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
&adapter->ibuf_dma_h, GFP_KERNEL); if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_common_mbox;
}
// Allocate memory for our SCSI Command Blocks and their associated // memory
/* * Allocate memory for the base list of scb. Later allocate memory for * CCBs and embedded components of each CCB and point the pointers in * scb to the allocated components * NOTE: The code to allocate SCB will be duplicated in all the LLD * since the calling routine does not yet know the number of available * commands.
*/
adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
if (adapter->kscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__,
__LINE__)); goto out_free_ibuf;
}
// memory allocation for our command packets if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__,
__LINE__)); goto out_free_scb_list;
}
// Adjust the scb pointers and link in the free pool
epthru_pci_blk = raid_dev->epthru_pool;
sg_pci_blk = raid_dev->sg_pool;
mbox_pci_blk = raid_dev->mbox_pool;
for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
scb = adapter->kscb_list + i;
ccb = raid_dev->ccb_list + i;
if (raid_dev->mbox_pool_handle == NULL) { goto fail_setup_dma_pool;
}
mbox_pci_blk = raid_dev->mbox_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
mbox_pci_blk[i].vaddr = dma_pool_alloc(
raid_dev->mbox_pool_handle,
GFP_KERNEL,
&mbox_pci_blk[i].dma_addr); if (!mbox_pci_blk[i].vaddr) { goto fail_setup_dma_pool;
}
}
/* * Allocate memory for each embedded passthru strucuture pointer * Request for a 128 bytes aligned structure for each passthru command * structure * Since passthru and extended passthru commands are exclusive, they * share common memory pool. Passthru structures piggyback on memory * allocated to extended passthru since passthru is smaller of the two
*/
raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru",
&adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0);
if (raid_dev->epthru_pool_handle == NULL) { goto fail_setup_dma_pool;
}
epthru_pci_blk = raid_dev->epthru_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
epthru_pci_blk[i].vaddr = dma_pool_alloc(
raid_dev->epthru_pool_handle,
GFP_KERNEL,
&epthru_pci_blk[i].dma_addr); if (!epthru_pci_blk[i].vaddr) { goto fail_setup_dma_pool;
}
}
// Allocate memory for each scatter-gather list. Request for 512 bytes // alignment for each sg list
raid_dev->sg_pool_handle = dma_pool_create("megaraid mbox sg",
&adapter->pdev->dev, sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
512, 0);
if (raid_dev->sg_pool_handle == NULL) { goto fail_setup_dma_pool;
}
sg_pci_blk = raid_dev->sg_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
sg_pci_blk[i].vaddr = dma_pool_alloc(
raid_dev->sg_pool_handle,
GFP_KERNEL,
&sg_pci_blk[i].dma_addr); if (!sg_pci_blk[i].vaddr) { goto fail_setup_dma_pool;
}
}
/** * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets * @adapter : HBA soft state * * Teardown the dma pool for mailbox, passthru and extended passthru * structures, and scatter-gather lists.
*/ staticvoid
megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); struct mraid_pci_blk *epthru_pci_blk; struct mraid_pci_blk *sg_pci_blk; struct mraid_pci_blk *mbox_pci_blk; int i;
sg_pci_blk = raid_dev->sg_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
sg_pci_blk[i].dma_addr);
}
dma_pool_destroy(raid_dev->sg_pool_handle);
epthru_pci_blk = raid_dev->epthru_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
dma_pool_free(raid_dev->epthru_pool_handle,
epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
}
dma_pool_destroy(raid_dev->epthru_pool_handle);
mbox_pci_blk = raid_dev->mbox_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
dma_pool_free(raid_dev->mbox_pool_handle,
mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
}
dma_pool_destroy(raid_dev->mbox_pool_handle);
return;
}
/** * megaraid_alloc_scb - detach and return a scb from the free list * @adapter : controller's soft state * @scp : pointer to the scsi command to be executed * * Return the scb from the head of the free list. %NULL if there are none * available.
*/ static scb_t *
megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
{ struct list_head *head = &adapter->kscb_pool;
scb_t *scb = NULL; unsignedlong flags;
// detach scb from free pool
spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
if (list_empty(head)) {
spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); return NULL;
}
/** * megaraid_dealloc_scb - return the scb to the free pool * @adapter : controller's soft state * @scb : scb to be freed * * Return the scb back to the free list of scbs. The caller must 'flush' the * SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc. * NOTE NOTE: Make sure the scb is not on any list before calling this * routine.
*/ staticinlinevoid
megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
{ unsignedlong flags;
// put scb in the free pool
scb->state = SCB_FREE;
scb->scp = NULL;
spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
/** * megaraid_queue_command_lck - generic queue entry point for all LLDs * @scp : pointer to the scsi command to be executed * * Queue entry point for mailbox based controllers.
*/ staticint megaraid_queue_command_lck(struct scsi_cmnd *scp)
{ void (*done)(struct scsi_cmnd *) = scsi_done;
adapter_t *adapter;
scb_t *scb; int if_busy;
adapter = SCP2ADAPTER(scp);
scp->result = 0;
/* * Allocate and build a SCB request * if_busy flag will be set if megaraid_mbox_build_cmd() command could * not allocate scb. We will return non-zero status in that case. * NOTE: scb can be null even though certain commands completed * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, it would * return 0 in that case, and we would do the callback right away.
*/
if_busy = 0;
scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy); if (!scb) { // command already completed
done(scp); return 0;
}
/** * megaraid_mbox_build_cmd - transform the mid-layer scsi commands * @adapter : controller's soft state * @scp : mid-layer scsi command pointer * @busy : set if request could not be completed because of lack of * resources * * Transform the mid-layer scsi command to megaraid firmware lingua. * Convert the command issued by mid-layer to format understood by megaraid * firmware. We also complete certain commands without sending them to firmware.
*/ static scb_t *
megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
{
mraid_device_t *rdev = ADAP2RAIDDEV(adapter); int channel; int target; int islogical;
mbox_ccb_t *ccb;
mraid_passthru_t *pthru;
mbox64_t *mbox64;
mbox_t *mbox;
scb_t *scb; char skip[] = "skipping"; char scan[] = "scanning"; char *ss;
/* * Get the appropriate device map for the device this command is * intended for
*/
MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
/* * Logical drive commands
*/ if (islogical) { switch (scp->cmnd[0]) { case TEST_UNIT_READY: /* * Do we support clustering and is the support enabled * If no, return success always
*/ if (!adapter->ha) {
scp->result = (DID_OK << 16); return NULL;
}
/* * The command id will be provided by the command * issuance routine
*/
ccb->raw_mbox[0] = CLUSTER_CMD;
ccb->raw_mbox[2] = RESERVATION_STATUS;
ccb->raw_mbox[3] = target;
return scb;
case MODE_SENSE:
{ struct scatterlist *sgl;
caddr_t vaddr;
sgl = scsi_sglist(scp); if (sg_page(sgl)) {
vaddr = (caddr_t) sg_virt(&sgl[0]);
case INQUIRY: /* * Display the channel scan for logical drives * Do not display scan for a channel if already done.
*/ if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
case READ_CAPACITY: /* * Do not allow LUN > 0 for logical drives and * requests for more than 40 logical drives
*/ if (SCP2LUN(scp)) {
scp->result = (DID_BAD_TARGET << 16); return NULL;
} if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
scp->result = (DID_BAD_TARGET << 16); return NULL;
}
/* Allocate a SCB and initialize passthru */ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
scp->result = (DID_ERROR << 16);
*busy = 1; return NULL;
}
/* * A little HACK: 2nd bit is zero for all scsi read * commands and is set for all scsi write commands
*/
mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64:
MBOXCMD_LREAD64 ;
case RESERVE_6: case RELEASE_6: /* * Do we support clustering and is the support enabled
*/ if (!adapter->ha) {
scp->result = (DID_BAD_TARGET << 16); return NULL;
}
/* * Allocate a SCB and initialize mailbox
*/ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
scp->result = (DID_ERROR << 16);
*busy = 1; return NULL;
}
// Do not allow access to target id > 15 or LUN > 7 if (target > 15 || SCP2LUN(scp) > 7) {
scp->result = (DID_BAD_TARGET << 16); return NULL;
}
// if fast load option was set and scan for last device is // over, reset the fast_load flag so that during a possible // next scan, devices can be made available if (rdev->fast_load && (target == 15) &&
(SCP2CHANNEL(scp) == adapter->max_channel -1)) {
/** * megaraid_mbox_runpendq - execute commands queued in the pending queue * @adapter : controller's soft state * @scb_q : SCB to be queued in the pending list * * Scan the pending list for commands which are not yet issued and try to * post to the controller. The SCB can be a null pointer, which would indicate * no SCB to be queue, just try to execute the ones in the pending list. * * NOTE: We do not actually traverse the pending list. The SCBs are plucked * out from the head of the pending list. If it is successfully issued, the * next SCB is at the head now.
*/ staticvoid
megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
{
scb_t *scb; unsignedlong flags;
if (scb_q) {
scb_q->state = SCB_PENDQ;
list_add_tail(&scb_q->list, &adapter->pend_list);
}
// if the adapter in not in quiescent mode, post the commands to FW if (adapter->quiescent) {
spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); return;
}
/** * megaraid_mbox_prepare_epthru - prepare a command for physical devices * @adapter : pointer to controller's soft state * @scb : scsi control block * @scp : scsi command from the mid-layer * * Prepare a command for the scsi physical devices. This routine prepares * commands for devices which can take extended CDBs (>10 bytes).
*/ staticvoid
megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *scp)
{
mbox_ccb_t *ccb;
mraid_epassthru_t *epthru;
uint8_t channel;
uint8_t target;
/** * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs * @adapter : controller's soft state * * Interrupt acknowledgement sequence for memory mapped HBAs. Find out the * completed command and put them on the completed list for later processing. * * Returns: 1 if the interrupt is valid, 0 otherwise
*/ staticint
megaraid_ack_sequence(adapter_t *adapter)
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
mbox_t *mbox;
scb_t *scb;
uint8_t nstatus;
uint8_t completed[MBOX_MAX_FIRMWARE_STATUS]; struct list_head clist; int handled;
uint32_t dword; unsignedlong flags; int i, j;
mbox = raid_dev->mbox;
// move the SCBs from the firmware completed array to our local list
INIT_LIST_HEAD(&clist);
// loop till F/W has more commands for us to complete
handled = 0;
spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); do { /* * Check if a valid interrupt is pending. If found, force the * interrupt line low.
*/
dword = RDOUTDOOR(raid_dev); if (dword != 0x10001234) break;
handled = 1;
WROUTDOOR(raid_dev, 0x10001234);
nstatus = 0; // wait for valid numstatus to post for (i = 0; i < 0xFFFFF; i++) { if (mbox->numstatus != 0xFF) {
nstatus = mbox->numstatus; break;
}
rmb();
}
mbox->numstatus = 0xFF;
adapter->outstanding_cmds -= nstatus;
for (i = 0; i < nstatus; i++) {
// wait for valid command index to post for (j = 0; j < 0xFFFFF; j++) { if (mbox->completed[i] != 0xFF) break;
rmb();
}
completed[i] = mbox->completed[i];
mbox->completed[i] = 0xFF;
// Get SCB associated with this command id if (completed[i] >= MBOX_MAX_SCSI_CMDS) { // a cmm command
scb = adapter->uscb_list + (completed[i] -
MBOX_MAX_SCSI_CMDS);
} else { // an os command
scb = adapter->kscb_list + completed[i];
}
// put the completed commands in the completed list. DPC would // complete these commands later
spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
// schedule the DPC if there is some work for it if (handled)
tasklet_schedule(&adapter->dpc_h);
return handled;
}
/** * megaraid_isr - isr for memory based mailbox based controllers * @irq : irq * @devp : pointer to our soft state * * Interrupt service routine for memory-mapped mailbox controllers.
*/ static irqreturn_t
megaraid_isr(int irq, void *devp)
{
adapter_t *adapter = devp; int handled;
handled = megaraid_ack_sequence(adapter);
/* Loop through any pending requests */ if (!adapter->quiescent) {
megaraid_mbox_runpendq(adapter, NULL);
}
return IRQ_RETVAL(handled);
}
/** * megaraid_mbox_dpc - the tasklet to complete the commands from completed list * @devp : pointer to HBA soft state * * Pick up the commands from the completed list and send back to the owners. * This is a reentrant function and does not assume any locks are held while * it is being called.
*/ staticvoid
megaraid_mbox_dpc(unsignedlong devp)
{
adapter_t *adapter = (adapter_t *)devp;
mraid_device_t *raid_dev; struct list_head clist; struct scatterlist *sgl;
scb_t *scb;
scb_t *tmp; struct scsi_cmnd *scp;
mraid_passthru_t *pthru;
mraid_epassthru_t *epthru;
mbox_ccb_t *ccb; int islogical; int pdev_index; int pdev_state;
mbox_t *mbox; unsignedlong flags;
uint8_t c; int status;
uioc_t *kioc;
if (!adapter) return;
raid_dev = ADAP2RAIDDEV(adapter);
// move the SCBs from the completed list to our local list
INIT_LIST_HEAD(&clist);
// Make sure f/w has completed a valid command if (scb->state != SCB_ISSUED) {
con_log(CL_ANN, (KERN_CRIT "megaraid critical err: invalid command %d:%d:%p\n",
scb->sno, scb->state, scp));
BUG(); continue; // Must never happen!
}
// check for the management command and complete it right away if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
scb->state = SCB_FREE;
scb->status = status;
// remove from local clist
list_del_init(&scb->list);
kioc = (uioc_t *)scb->gp;
kioc->status = 0;
megaraid_mbox_mm_done(adapter, scb);
continue;
}
// Was an abort issued for this command earlier if (scb->state & SCB_ABORT) {
con_log(CL_ANN, (KERN_NOTICE "megaraid: aborted cmd [%x] completed\n",
scb->sno));
}
/* * If the inquiry came of a disk drive which is not part of * any RAID array, expose it to the kernel. For this to be * enabled, user must set the "megaraid_expose_unconf_disks" * flag to 1 by specifying it on module parameter list. * This would enable data migration off drives from other * configurations.
*/
islogical = MRAID_IS_LOGICAL(adapter, scp); if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
sgl = scsi_sglist(scp); if (sg_page(sgl)) {
c = *(unsignedchar *) sg_virt(&sgl[0]);
} else {
con_log(CL_ANN, (KERN_WARNING "megaraid mailbox: invalid sg:%d\n",
__LINE__));
c = 0;
}
// print a debug message for all failed commands if (status) {
megaraid_mbox_display_scb(adapter, scb);
}
scsi_dma_unmap(scp);
// remove from local clist
list_del_init(&scb->list);
// put back in free list
megaraid_dealloc_scb(adapter, scb);
// send the scsi packet back to kernel
scsi_done(scp);
}
return;
}
/** * megaraid_abort_handler - abort the scsi command * @scp : command to be aborted * * Abort a previous SCSI request. Only commands on the pending list can be * aborted. All the commands issued to the F/W must complete.
**/ staticint
megaraid_abort_handler(struct scsi_cmnd *scp)
{
adapter_t *adapter;
mraid_device_t *raid_dev;
scb_t *scb;
scb_t *tmp; int found; unsignedlong flags; int i;
// If FW has stopped responding, simply return failure if (raid_dev->hw_error) {
con_log(CL_ANN, (KERN_NOTICE "megaraid: hw error, not aborting\n")); return FAILED;
}
// There might a race here, where the command was completed by the // firmware and now it is on the completed list. Before we could // complete the command to the kernel in dpc, the abort came. // Find out if this is the case to avoid the race.
scb = NULL;
spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
// Find out if this command is still on the pending list. If it is and // was never issued, abort and return success. If the command is owned // by the firmware, we must wait for it to complete by the FW.
spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
// Check do we even own this command, in which case this would be // owned by the firmware. The only way to locate the FW scb is to // traverse through the list of all SCB, since driver does not // maintain these SCBs on any list
found = 0;
spin_lock_irq(&adapter->lock); for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
scb = adapter->kscb_list + i;
if (!found) {
con_log(CL_ANN, (KERN_WARNING "megaraid abort: do now own\n"));
// FIXME: Should there be a callback for this command? return SUCCESS;
}
// We cannot actually abort a command owned by firmware, return // failure and wait for reset. In host reset handler, we will find out // if the HBA is still live return FAILED;
}
/** * megaraid_reset_handler - device reset handler for mailbox based driver * @scp : reference command * * Reset handler for the mailbox based controller. First try to find out if * the FW is still live, in which case the outstanding commands counter mut go * down to 0. If that happens, also issue the reservation reset command to * relinquish (possible) reservations on the logical drives connected to this * host.
**/ staticint
megaraid_reset_handler(struct scsi_cmnd *scp)
{
adapter_t *adapter;
scb_t *scb;
scb_t *tmp;
mraid_device_t *raid_dev; unsignedlong flags;
uint8_t raw_mbox[sizeof(mbox_t)]; int rval; int recovery_window; int i;
uioc_t *kioc;
// return failure if adapter is not responding if (raid_dev->hw_error) {
con_log(CL_ANN, (KERN_NOTICE "megaraid: hw error, cannot reset\n")); return FAILED;
}
// Under exceptional conditions, FW can take up to 3 minutes to // complete command processing. Wait for additional 2 minutes for the // pending commands counter to go down to 0. If it doesn't, let the // controller be marked offline // Also, reset all the commands currently owned by the driver
spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
list_del_init(&scb->list); // from pending list
if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
con_log(CL_ANN, (KERN_WARNING "megaraid: IOCTL packet with %d[%d:%d] being reset\n",
scb->sno, scb->dev_channel, scb->dev_target));
scb->status = -1;
kioc = (uioc_t *)scb->gp;
kioc->status = -EFAULT;
megaraid_mbox_mm_done(adapter, scb);
} else { if (scb->scp == scp) { // Found command
con_log(CL_ANN, (KERN_WARNING "megaraid: %d[%d:%d], reset from pending list\n",
scb->sno, scb->dev_channel, scb->dev_target));
} else {
con_log(CL_ANN, (KERN_WARNING "megaraid: IO packet with %d[%d:%d] being reset\n",
scb->sno, scb->dev_channel, scb->dev_target));
}
// print a message once every 5 seconds only if (!(i % 5)) {
con_log(CL_ANN, ( "megaraid mbox: Wait for %d commands to complete:%d\n",
adapter->outstanding_cmds,
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.