/* * linux/drivers/scsi/esas2r/esas2r_init.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:linuxdrivers@attotech.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA.
*/
/*
 * NOTE(review): extraction residue — this span is a fragment.  The
 * alignment comment and the "unalign" computation appear to belong to a
 * memory-descriptor free/alloc helper, while the ioremap of PCI BAR 0 into
 * a->data_window looks like the tail of a region-mapping routine
 * (presumably esas2r_map_regions — confirm against the full file).  The
 * enclosing function headers are not visible in this chunk, so the code
 * below is left byte-identical.
 */
/* * Careful! phys_addr and virt_addr may have been adjusted from the * original allocation in order to return the desired alignment. That * means we have to use the original address (in esas2r_data) and size * (esas2r_param) and calculate the original physical address based on * the difference between the requested and actual allocation size.
*/ if (mem_desc->phys_addr) { int unalign = ((u8 *)mem_desc->virt_addr) -
((u8 *)mem_desc->esas2r_data);
/* Map BAR 0 so the driver can reach the adapter's register window. */
a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
0),
pci_resource_len(a->pcid, 0)); if (a->data_window == NULL) {
esas2r_log(ESAS2R_LOG_CRIT, "ioremap failed for data_window mem region\n");
/* Undo any mappings made so far before reporting failure. */
esas2r_unmap_regions(a); return -EFAULT;
}
return 0;
}
/*
 * Configure the adapter's interrupt delivery mode.
 *
 * @a:         adapter being configured
 * @intr_mode: requested mode (INTR_MODE_LEGACY or INTR_MODE_MSI); any
 *             unrecognized value falls back to legacy INTx interrupts.
 *
 * If pci_enable_msi() fails, a warning is logged and the adapter falls
 * back to legacy interrupts rather than failing initialization.
 *
 * Fix: the original had the fused token "staticvoid" (missing space),
 * which does not compile.
 */
static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
{
	int i;

	/* Set up interrupt mode based on the requested value */
	switch (intr_mode) {
	case INTR_MODE_LEGACY:
use_legacy_interrupts:
		a->intr_mode = INTR_MODE_LEGACY;
		break;

	case INTR_MODE_MSI:
		i = pci_enable_msi(a->pcid);
		if (i != 0) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "failed to enable MSI for adapter %d, "
				   "falling back to legacy interrupts "
				   "(err=%d)", a->index,
				   i);
			goto use_legacy_interrupts;
		}
		a->intr_mode = INTR_MODE_MSI;
		set_bit(AF2_MSI_ENABLED, &a->flags2);
		break;

	default:
		/*
		 * interrupt_mode is presumably the module-load parameter
		 * declared elsewhere in this file — confirm against the
		 * full source.
		 */
		esas2r_log(ESAS2R_LOG_WARN,
			   "unknown interrupt_mode %d requested, "
			   "falling back to legacy interrupt",
			   interrupt_mode);
		goto use_legacy_interrupts;
	}
}
/*
 * NOTE(review): fragment — the enclosing function header is not visible.
 * The first part (VDA request allocation, region mapping, flag setup,
 * hardware init, IRQ claim) matches an adapter-initialization routine;
 * the tail (timer/tasklet teardown and power down when init completed)
 * reads like adapter-shutdown code spliced in by the extraction.  Code
 * left byte-identical.
 */
/* Allocate a VDA request for each entry past the first. */
for (last_request = first_request, i = 1; i < num_requests;
last_request++, i++) {
INIT_LIST_HEAD(&last_request->req_list);
list_add_tail(&last_request->comp_list, &a->avail_request); if (!alloc_vda_req(a, last_request)) {
esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate a VDA request!");
esas2r_kill_adapter(index); return 0;
}
}
esas2r_debug("requests: %p to %p (%d, %d)", first_request,
last_request, sizeof(*first_request),
num_requests);
if (esas2r_map_regions(a) != 0) {
esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
esas2r_kill_adapter(index); return 0;
}
a->index = index;
/* interrupts will be disabled until we are done with init */
atomic_inc(&a->dis_ints_cnt);
atomic_inc(&a->disable_cnt);
/* Mark reset/discovery pending so first init runs the full path. */
set_bit(AF_CHPRST_PENDING, &a->flags);
set_bit(AF_DISC_PENDING, &a->flags);
set_bit(AF_FIRST_INIT, &a->flags);
set_bit(AF_LEGACY_SGE_MODE, &a->flags);
/* * Disable chip interrupts to prevent spurious interrupts * until we claim the IRQ.
*/
esas2r_disable_chip_interrupts(a);
esas2r_check_adapter(a);
if (!esas2r_init_adapter_hw(a, true)) {
esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
} else {
esas2r_debug("esas2r_init_adapter ok");
}
esas2r_claim_interrupts(a);
/* Only enable chip interrupts once a system IRQ is actually claimed. */
if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
esas2r_enable_chip_interrupts(a);
set_bit(AF2_INIT_DONE, &a->flags2); if (!test_bit(AF_DEGRADED_MODE, &a->flags))
esas2r_kickoff_timer(a);
esas2r_debug("esas2r_init_adapter done for %p (%d)",
a, a->disable_cnt);
/*
 * NOTE(review): from here on this looks like shutdown/teardown code from
 * a different function (timer/tasklet kill + power down) — verify
 * against the full file.
 */
if ((test_bit(AF2_INIT_DONE, &a->flags2))
&& (!test_bit(AF_DEGRADED_MODE, &a->flags))) { if (!power_management) {
timer_delete_sync(&a->timer);
tasklet_kill(&a->tasklet);
}
esas2r_power_down(a);
/* * There are versions of firmware that do not handle the sync * cache command correctly. Stall here to ensure that the * cache is lazily flushed.
*/
mdelay(500);
esas2r_debug("chip halted");
}
/*
 * NOTE(review): fragment — re-initialization path (re-map PCI regions,
 * restore interrupt mode, power the chip back up, re-claim the IRQ).
 * Uses `rez` and the `error_exit` label, which are declared in the
 * enclosing function outside this chunk — presumably a resume handler;
 * confirm against the full file.  Code left byte-identical.
 */
if (esas2r_map_regions(a) != 0) {
esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
rez = -ENOMEM; goto error_exit;
}
/* Set up interrupt mode */
esas2r_setup_interrupts(a, a->intr_mode);
/* * Disable chip interrupts to prevent spurious interrupts until we * claim the IRQ.
*/
esas2r_disable_chip_interrupts(a); if (!esas2r_power_up(a, true)) {
esas2r_debug("yikes, esas2r_power_up failed");
rez = -ENOMEM; goto error_exit;
}
esas2r_claim_interrupts(a);
if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { /* * Now that system interrupt(s) are claimed, we can enable * chip interrupts.
*/
esas2r_enable_chip_interrupts(a);
esas2r_kickoff_timer(a);
} else {
esas2r_debug("yikes, unable to claim IRQ");
esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
rez = -ENOMEM; goto error_exit;
}
/*
 * NOTE(review): fragment — the enclosing function header is not visible.
 * This span carves fixed structures (NVRAM image, discovery buffer,
 * outbound-copy word) out of a caller-provided uncached memory area and
 * allocates the S/G lists and inbound/outbound communication lists.
 * Fused tokens ("returnfalse"/"returntrue") are extraction damage; code
 * left byte-identical.
 */
if (a->req_table == NULL) {
esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate memory for the request table"); returnfalse;
}
/* initialize PCI configuration space */
esas2r_init_pci_cfg_space(a);
/* * the thunder_stream boards all have a serial flash part that has a * different base address on the AHB bus.
*/ if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
&& (a->pcid->subsystem_device & ATTO_SSDID_TBT))
a->flags2 |= AF2_THUNDERBOLT;
if (test_bit(AF2_THUNDERBOLT, &a->flags2))
a->flags2 |= AF2_SERIAL_FLASH;
if (a->pcid->subsystem_device == ATTO_TLSH_1068)
a->flags2 |= AF2_THUNDERLINK;
/* Uncached Area */
high = (u8 *)*uncached_area;
/* initialize the scatter/gather table pages */
for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
sgl->size = sgl_page_size;
if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) { /* Allow the driver to load if the minimum count met. */ if (i < NUM_SGL_MIN) returnfalse; break;
}
}
/* compute the size of the lists */
a->list_size = num_requests + ESAS2R_LIST_EXTRA;
/* allocate the inbound list */
a->inbound_list_md.size = a->list_size * sizeof(struct
esas2r_inbound_list_source_entry);
if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
esas2r_hdebug("failed to allocate IB list"); returnfalse;
}
/* allocate the outbound list */
a->outbound_list_md.size = a->list_size * sizeof(struct atto_vda_ob_rsp);
/*
 * NOTE(review): the failure message below says "IB list" but this is the
 * outbound (OB) list — looks like copy/paste; fixing it would change a
 * runtime string, so it is only flagged here.
 */
if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
ESAS2R_LIST_ALIGN)) {
esas2r_hdebug("failed to allocate IB list"); returnfalse;
}
/* allocate the NVRAM structure */
a->nvram = (struct esas2r_sas_nvram *)high;
high += sizeof(struct esas2r_sas_nvram);
/* allocate the discovery buffer */
a->disc_buffer = high;
high += ESAS2R_DISC_BUF_LEN;
high = PTR_ALIGN(high, 8);
/* allocate the outbound list copy pointer */
a->outbound_copy = (u32 volatile *)high;
high += sizeof(u32);
if (!test_bit(AF_NVR_VALID, &a->flags))
esas2r_nvram_set_defaults(a);
/* update the caller's uncached memory area pointer */
*uncached_area = (void *)high;
/* initialize the allocated memory */ if (test_bit(AF_FIRST_INIT, &a->flags)) {
esas2r_targ_db_initialize(a);
/* prime parts of the inbound list */
element =
(struct esas2r_inbound_list_source_entry *)a->
inbound_list_md.
virt_addr;
for (i = 0; i < a->list_size; i++) {
element->address = 0;
element->reserved = 0;
element->length = cpu_to_le32(HWILSE_INTERFACE_F0
| (sizeof(union
atto_vda_req)
/ sizeof(u32)));
element++;
}
/* init the AE requests */ for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
i++) {
INIT_LIST_HEAD(&rq->req_list); if (!alloc_vda_req(a, rq)) {
esas2r_hdebug( "failed to allocate a VDA request!"); returnfalse;
}
esas2r_rq_init_request(rq, a);
/* override the completion function */
rq->comp_cb = esas2r_ae_complete;
}
}
returntrue;
}
/*
 * NOTE(review): this function is truncated by the extraction: the first
 * firmware-ready wait loop is never closed (its break/timeout path is
 * missing), so the braces do not balance, and several tokens are fused
 * ("elseif", "returntrue").  A safe rewrite requires the full upstream
 * source; code left byte-identical with comments only.
 */
/* This code will verify that the chip is operational. */ bool esas2r_check_adapter(struct esas2r_adapter *a)
{
u32 starttime;
u32 doorbell;
u64 ppaddr;
u32 dw;
/* * if the chip reset detected flag is set, we can bypass a bunch of * stuff.
*/ if (test_bit(AF_CHPRST_DETECTED, &a->flags)) goto skip_chip_reset;
/* * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver * may have left them enabled or we may be recovering from a fault.
*/
esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
/* * wait for the firmware to become ready by forcing an interrupt and * waiting for a response.
*/
starttime = jiffies_to_msecs(jiffies);
while (true) {
esas2r_force_interrupt(a);
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell == 0xFFFFFFFF) { /* * Give the firmware up to two seconds to enable * register access after a reset.
*/ if ((jiffies_to_msecs(jiffies) - starttime) > 2000) return esas2r_set_degraded_mode(a, "unable to access registers");
} elseif (doorbell & DRBL_FORCE_INT) {
u32 ver = (doorbell & DRBL_FW_VER_MSK);
/* * This driver supports version 0 and version 1 of * the API
*/
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell);
/*
 * NOTE(review): the body of this version check appears spliced from a
 * timeout path elsewhere — a "firmware start has timed out" degraded-mode
 * return for a recognized version 0 is suspicious; verify upstream.
 */
if (ver == DRBL_FW_VER_0) {
set_bit(AF_LEGACY_SGE_MODE, &a->flags);
return esas2r_set_degraded_mode(a, "firmware start has timed out");
}
}
/* purge any asynchronous events since we will repost them later */
esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
starttime = jiffies_to_msecs(jiffies);
/* Wait (up to 3s) for firmware to acknowledge the interface-down request. */
while (true) {
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_DOWN) {
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell); break;
}
if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
esas2r_hdebug("timeout waiting for interface down"); break;
}
}
skip_chip_reset: /* * first things first, before we go changing any of these registers * disable the communication lists.
*/
dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
dw &= ~MU_ILC_ENABLE;
esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
dw &= ~MU_OLC_ENABLE;
esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
/* * notify the firmware that we're done setting up the communication * list registers. wait here until the firmware is done configuring * its lists. it will signal that it is done by enabling the lists.
*/
esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
starttime = jiffies_to_msecs(jiffies);
while (true) {
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_INIT) {
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell); break;
}
if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
esas2r_hdebug( "timeout waiting for communication list init");
esas2r_bugon(); return esas2r_set_degraded_mode(a, "timeout waiting for communication list init");
}
}
/* * flag whether the firmware supports the power down doorbell. we * determine this by reading the inbound doorbell enable mask.
*/
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); if (doorbell & DRBL_POWER_DOWN)
set_bit(AF2_VDA_POWER_DOWN, &a->flags2); else
clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
/* * enable assertion of outbound queue and doorbell interrupts in the * main interrupt cause register.
*/
esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK); returntrue;
}
/*
 * NOTE(review): mismatch — the signature says esas2r_format_init_msg, but
 * the body references `init_poll` and `rslt`, which are not declared here
 * and look like a parameter/local of a different init routine; the locals
 * `msg` and `ci` are never used below; and the function is never closed in
 * this chunk.  This span appears to be two functions spliced by the
 * extraction.  Code left byte-identical; fused token "staticbool" is
 * extraction damage.
 */
/* Process the initialization message just completed and format the next one. */ staticbool esas2r_format_init_msg(struct esas2r_adapter *a, struct esas2r_request *rq)
{
u32 msg = a->init_msg; struct atto_vda_cfg_init *ci;
/* * wait for the device wait time to expire here if requested. this is * usually requested during initial driver load and possibly when * resuming from a low power state. deferred device waiting will use * interrupts. chip reset recovery always defers device waiting to * avoid being in a TASKLET too long.
*/ if (init_poll) {
u32 currtime = a->disc_start_time;
u32 nexttick = 100;
u32 deltatime;
/* * Block Tasklets from getting scheduled and indicate this is * polled discovery.
*/
set_bit(AF_TASKLET_SCHEDULED, &a->flags);
set_bit(AF_DISC_POLLED, &a->flags);
/* * Temporarily bring the disable count to zero to enable * deferred processing. Note that the count is already zero * after the first initialization.
*/ if (test_bit(AF_FIRST_INIT, &a->flags))
atomic_dec(&a->disable_cnt);
/* Polled discovery loop: simulate timer ticks until discovery completes. */
while (test_bit(AF_DISC_PENDING, &a->flags)) {
schedule_timeout_interruptible(msecs_to_jiffies(100));
/* * Determine the need for a timer tick based on the * delta time between this and the last iteration of * this loop. We don't use the absolute time because * then we would have to worry about when nexttick * wraps and currtime hasn't yet.
*/
deltatime = jiffies_to_msecs(jiffies) - currtime;
currtime += deltatime;
/* * Process any waiting discovery as long as the chip is * up. If a chip reset happens during initial polling, * we have to make sure the timer tick processes the * doorbell indicating the firmware is ready.
*/ if (!test_bit(AF_CHPRST_PENDING, &a->flags))
esas2r_disc_check_for_work(a);
/* Simulate a timer tick. */ if (nexttick <= deltatime) {
/* Time for a timer tick */
nexttick += 100;
esas2r_timer_tick(a);
}
if (nexttick > deltatime)
nexttick -= deltatime;
/* Do any deferred processing */ if (esas2r_is_tasklet_pending(a))
esas2r_do_tasklet_tasks(a);
}
if (test_bit(AF_FIRST_INIT, &a->flags))
atomic_inc(&a->disable_cnt);
/* * For cases where (a) the initialization messages processing may * handle an interrupt for a port event and a discovery is waiting, but * we are not waiting for devices, or (b) the device wait time has been * exhausted but there is still discovery pending, start any leftover * discovery in interrupt driven mode.
*/
esas2r_disc_start_waiting(a);
exit: /* * Regardless of whether initialization was successful, certain things * need to get done before we exit.
*/
if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
test_bit(AF_FIRST_INIT, &a->flags)) { /* * Reinitialization was performed during the first * initialization. Only clear the chip reset flag so the * original device polling is not cancelled.
*/ if (!rslt)
clear_bit(AF_CHPRST_PENDING, &a->flags);
} else { /* First initialization or a subsequent re-init is complete. */ if (!rslt) {
clear_bit(AF_CHPRST_PENDING, &a->flags);
clear_bit(AF_DISC_PENDING, &a->flags);
}
/* Enable deferred processing after the first initialization. */ if (test_bit(AF_FIRST_INIT, &a->flags)) {
clear_bit(AF_FIRST_INIT, &a->flags);
if (atomic_dec_return(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a);
}
}
/*
 * Hard-reset the adapter chip.
 *
 * @a: adapter to reset; a no-op if the adapter is not present.
 *
 * Any available VDA firmware coredump (upper 512KB of onchip SRAM) is
 * saved first, unless a previously captured crash is already stored.
 */
void esas2r_reset_chip(struct esas2r_adapter *a)
{
	if (!esas2r_is_adapter_present(a))
		return;

	/*
	 * Before we reset the chip, save off the VDA core dump.  The VDA
	 * core dump is located in the upper 512KB of the onchip SRAM.
	 * Make sure to not overwrite a previous crash that was saved.
	 */
	if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
	    !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
		esas2r_read_mem_block(a,
				      a->fw_coredump_buff,
				      MW_DATA_ADDR_SRAM + 0x80000,
				      ESAS2R_FWCOREDUMP_SZ);
		set_bit(AF2_COREDUMP_SAVED, &a->flags2);
	}

	clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);

	/* Reset the chip; B2-revision parts use a different register. */
	if (a->pcid->revision != MVR_FREY_B2)
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
					    MU_CTL_IN_FULL_RST);
	else
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
					    MU_CTL_IN_FULL_RST2);

	/* Stall a little while to let the reset condition clear */
	mdelay(10);
}
/*
 * NOTE(review): orphan tail of a wait loop (30s power-down timeout) —
 * the owning function starts before this chunk and is not visible.
 * Code left byte-identical.
 */
if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
esas2r_hdebug("Timeout waiting for power down"); break;
}
}
}
/*
 * NOTE(review): esas2r_power_down is truncated — the function body is not
 * closed within this chunk (the brace at the end closes only the
 * !AF_DEGRADED_MODE branch), so the remainder lives outside the visible
 * span.  Code left byte-identical.
 */
/* * Perform power management processing including managing device states, adapter * states, interrupts, and I/O.
*/ void esas2r_power_down(struct esas2r_adapter *a)
{
set_bit(AF_POWER_MGT, &a->flags);
set_bit(AF_POWER_DOWN, &a->flags);
if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
u32 starttime;
u32 doorbell;
/* * We are currently running OK and will be reinitializing later. * increment the disable count to coordinate with * esas2r_init_adapter. We don't have to do this in degraded * mode since we never enabled interrupts in the first place.
*/
esas2r_disable_chip_interrupts(a);
esas2r_disable_heartbeat(a);
/* wait for any VDA activity to clear before continuing */
esas2r_write_register_dword(a, MU_DOORBELL_IN,
DRBL_MSG_IFC_DOWN);
starttime = jiffies_to_msecs(jiffies);
/* Wait (up to 3s) for firmware to acknowledge the interface-down request. */
while (true) {
doorbell =
esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_DOWN) {
esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell); break;
}
if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
esas2r_hdebug( "timeout waiting for interface down"); break;
}
}
/* * For versions of firmware that support it tell them the driver * is powering down.
*/ if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
esas2r_power_down_notify_firmware(a);
}
/*
 * NOTE(review): the following German website disclaimer is web-scrape
 * residue appended to the source and is not part of the driver.
 * Translation: "The information on this website was compiled carefully
 * to the best of our knowledge.  However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */