/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/circ_buf.h>
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"
/* The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
/*
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
/*
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
	)
/*
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
/*
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
/*
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
		 const struct sci_base_state *state_table, u32 initial_state)
{
	sci_state_transition_t handler;

	/* Record the state table and starting state before running the
	 * entry action.  NOTE(review): these assignments were missing in
	 * the garbled source even though the parameters were otherwise
	 * unused; restored so the state machine is actually initialized.
	 */
	sm->initial_state_id = initial_state;
	sm->previous_state_id = initial_state;
	sm->current_state_id = initial_state;
	sm->state_table = state_table;

	handler = sm->state_table[initial_state].enter_state;
	if (handler)
		handler(sm);
}
/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
	sci_state_transition_t handler;

	/* Run the exit action of the state being left, if any. */
	handler = sm->state_table[sm->current_state_id].exit_state;
	if (handler)
		handler(sm);

	/* NOTE(review): the garbled source never assigned next_state and
	 * contained a fragment of an unrelated function here; restored the
	 * documented exit -> update -> enter sequence.
	 */
	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	/* Run the entry action of the new state, if any. */
	handler = sm->state_table[sm->current_state_id].enter_state;
	if (handler)
		handler(sm);
}
staticbool sci_controller_isr(struct isci_host *ihost)
{ if (sci_controller_completion_queue_has_entries(ihost)) returntrue;
/* we have a spurious interrupt it could be that we have already * emptied the completion queue from a previous interrupt * FIXME: really!?
*/
writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
/* There is a race in the hardware that could cause us not to be * notified of an interrupt completion if we do not take this * step. We will mask then unmask the interrupts so if there is * another interrupt pending the clearing of the interrupt * source we get the next interrupt message.
*/
spin_lock(&ihost->scic_lock); if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
writel(0, &ihost->smu_registers->interrupt_mask);
}
spin_unlock(&ihost->scic_lock);
if (interrupt_status != 0) { /* * There is an error interrupt pending so let it through and handle
* in the callback */ returntrue;
}
/* * There is a race in the hardware that could cause us not to be notified * of an interrupt completion if we do not take this step. We will mask * then unmask the error interrupts so if there was another interrupt * pending we will be notified.
* Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
writel(0xff, &ihost->smu_registers->interrupt_mask);
writel(0, &ihost->smu_registers->interrupt_mask);
/* Make sure that we really want to process this IO request */ if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) /* Yep this is a valid io request pass it along to the * io request handler
*/
sci_io_request_tc_completion(ireq, ent);
}
switch (scu_get_command_request_type(ent)) { case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
ireq = ihost->reqs[index];
dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
__func__, ent, ireq); /* @todo For a post TC operation we need to fail the IO * request
*/ break; case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
idev = ihost->device_table[index];
dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
__func__, ent, idev); /* @todo For a port RNC operation we need to fail the * device
*/ break; default:
dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
__func__, ent); break;
}
}
if (SCU_GET_FRAME_ERROR(ent)) { /* * / @todo If the IAF frame or SIGNATURE FIS frame has an error will * / this cause a problem? We expect the phy initialization will
* / fail if there is an error in the frame. */
sci_controller_release_frame(ihost, frame_index); return;
}
if (frame_header->is_address_frame) {
index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
iphy = &ihost->phys[index];
result = sci_phy_frame_handler(iphy, frame_index);
} else {
index = SCU_GET_COMPLETION_INDEX(ent);
if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { /* * This is a signature fis or a frame from a direct attached SATA * device that has not yet been created. In either case forwared
* the frame to the PE and let it take care of the frame data. */
index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
iphy = &ihost->phys[index];
result = sci_phy_frame_handler(iphy, frame_index);
} else { if (index < ihost->remote_node_entries)
idev = ihost->device_table[index]; else
idev = NULL;
if (idev != NULL)
result = sci_remote_device_frame_handler(idev, frame_index); else
sci_controller_release_frame(ihost, frame_index);
}
}
if (result != SCI_SUCCESS) { /* * / @todo Is there any reason to report some additional error message
* / when we get this failure notifiction? */
}
}
switch (scu_get_event_type(ent)) { case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: /* / @todo The driver did something wrong and we need to fix the condtion. */
dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received SMU command error " "0x%x\n",
__func__,
ihost,
ent); break;
case SCU_EVENT_TYPE_SMU_PCQ_ERROR: case SCU_EVENT_TYPE_SMU_ERROR: case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR: /* * / @todo This is a hardware failure and its likely that we want to
* / reset the controller. */
dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received fatal controller " "event 0x%x\n",
__func__,
ihost,
ent); break;
case SCU_EVENT_TYPE_TRANSPORT_ERROR:
ireq = ihost->reqs[index];
sci_io_request_event_handler(ireq, ent); break;
case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: switch (scu_get_event_specifier(ent)) { case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
ireq = ihost->reqs[index]; if (ireq != NULL)
sci_io_request_event_handler(ireq, ent); else
dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for io request object " "that doesn't exist.\n",
__func__,
ihost,
ent);
break;
case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
idev = ihost->device_table[index]; if (idev != NULL)
sci_remote_device_event_handler(idev, ent); else
dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for remote device object " "that doesn't exist.\n",
__func__,
ihost,
ent);
break;
} break;
case SCU_EVENT_TYPE_BROADCAST_CHANGE: /* * direct the broadcast change event to the phy first and then let
* the phy redirect the broadcast change to the port object */ case SCU_EVENT_TYPE_ERR_CNT_EVENT: /* * direct error counter event to the phy object since that is where
* we get the event notification. This is a type 4 event. */ case SCU_EVENT_TYPE_OSSP_EVENT:
index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
iphy = &ihost->phys[index];
sci_phy_event_handler(iphy, ent); break;
case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: case SCU_EVENT_TYPE_RNC_OPS_MISC: if (index < ihost->remote_node_entries) {
idev = ihost->device_table[index];
if (idev != NULL)
sci_remote_device_event_handler(idev, ent);
} else
dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received event 0x%x " "for remote device object 0x%0x that doesn't " "exist.\n",
__func__,
ihost,
ent,
index);
/* Get the component parts of the completion queue */
get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
while (
NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
== COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
) {
completion_count++;
ent = ihost->completion_queue[get_index];
/* increment the get pointer and check for rollover to toggle the cycle bit */
get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
(SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
sci_controller_event_completion(ihost, ent); break;
} default:
dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown " "completion type %x\n",
__func__,
ent); break;
}
}
/* Update the get register if we completed one or more entries */ if (completion_count > 0) {
ihost->completion_queue_get =
SMU_CQGR_GEN_BIT(ENABLE) |
SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
event_cycle |
SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
get_cycle |
SMU_CQGR_GEN_VAL(POINTER, get_index);
/* If we dont process any completions I am not sure that we want to do this. * We are in the middle of a hardware fault and should probably be reset.
*/
writel(0, &ihost->smu_registers->interrupt_mask);
}
if (sci_controller_error_isr(ihost))
sci_controller_error_handler(ihost);
return IRQ_HANDLED;
}
/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @ihost: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 */
static void isci_host_start_complete(struct isci_host *ihost,
				     enum sci_status completion_status)
{
	/* A failed start is reported but not treated as fatal; the host
	 * continues with whatever did come up.
	 */
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			 "controller start timed out, continuing...\n");
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}
if (test_bit(IHOST_START_PENDING, &ihost->flags)) return 0;
sas_drain_work(ha);
return 1;
}
/** * sci_controller_get_suggested_start_timeout() - This method returns the * suggested sci_controller_start() timeout amount. The user is free to * use any timeout value, but this method provides the suggested minimum * start timeout value. The returned value is based upon empirical * information determined as a result of interoperability testing. * @ihost: the handle to the controller object for which to return the * suggested start timeout. * * This method returns the number of milliseconds for the suggested start * operation timeout.
*/ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
{ /* Validate the user supplied parameters. */ if (!ihost) return 0;
/* * The suggested minimum timeout value for a controller start operation: * * Signature FIS Timeout * + Phy Start Timeout * + Number of Phy Spin Up Intervals * --------------------------------- * Number of milliseconds for the controller start operation. * * NOTE: The number of phy spin up intervals will be equivalent * to the number of phys divided by the number phys allowed * per interval - 1 (once OEM parameters are supported).
* Currently we assume only 1 phy per interval. */
/* Set the completion queue get pointer and enable the queue */
completion_queue_get_value = (
(SMU_CQGR_GEN_VAL(POINTER, 0))
| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
| (SMU_CQGR_GEN_BIT(ENABLE))
| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
);
/* Initialize the cycle bit of the completion queue entries */ for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { /* * If get.cycle_bit != completion_queue.cycle_bit * its not a valid completion queue entry
* so at system start all entries are invalid */
ihost->completion_queue[index] = 0x80000000;
}
}
/* Setup the get pointer for the unsolicited frame queue */
frame_queue_get_value = (
SCU_UFQGP_GEN_VAL(POINTER, 0)
| SCU_UFQGP_GEN_BIT(ENABLE_BIT)
);
writel(frame_queue_get_value,
&ihost->scu_registers->sdma.unsolicited_frame_get_pointer); /* Setup the put pointer for the unsolicited frame queue */
frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
writel(frame_queue_put_value,
&ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}
void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
{ if (ihost->sm.current_state_id == SCIC_STARTING) { /* * We move into the ready state, because some of the phys/ports * may be up and operational.
*/
sci_change_state(&ihost->sm, SCIC_READY);
state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_STARTING: case SCI_PHY_SUB_INITIAL: case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: case SCI_PHY_SUB_AWAIT_IAF_UF: case SCI_PHY_SUB_AWAIT_SAS_POWER: case SCI_PHY_SUB_AWAIT_SATA_POWER: case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: case SCI_PHY_SUB_AWAIT_OSSP_EN: case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: case SCI_PHY_SUB_FINAL: returntrue; default: returnfalse;
}
}
/*
 * is_controller_start_complete() - check whether every relevant phy has
 * finished (or legitimately skipped) link training and the port agent's
 * ready/configured masks agree.
 */
bool is_controller_start_complete(struct isci_host *ihost)
{
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = &ihost->phys[i];
		u32 state = iphy->sm.current_state_id;

		/* in apc mode we need to check every phy, in
		 * mpc mode we only need to check phys that have
		 * been configured into a port
		 */
		if (is_port_config_apc(ihost))
			/* pass */;
		else if (!phy_get_non_dummy_port(iphy))
			continue;

		/* The controller start operation is complete iff:
		 * - all links have been given an opportunity to start
		 * - have no indication of a connected device
		 * - have an indication of a connected device and it has
		 *   finished the link training process.
		 */
		if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
		    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
		    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
		    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
			return false;
	}

	return true;
}
/** * sci_controller_start_next_phy - start phy * @ihost: controller * * If all the phys have been started, then attempt to transition the * controller to the READY state and inform the user * (sci_cb_controller_start_complete()).
*/ staticenum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
{ struct sci_oem_params *oem = &ihost->oem_parameters; struct isci_phy *iphy; enum sci_status status;
status = SCI_SUCCESS;
if (ihost->phy_startup_timer_pending) return status;
if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { if (phy_get_non_dummy_port(iphy) == NULL) {
ihost->next_phy_to_start++;
/* Caution recursion ahead be forwarned * * The PHY was never added to a PORT in MPC mode * so start the next phy in sequence This phy * will never go link up and will not draw power * the OEM parameters either configured the phy * incorrectly for the PORT or it was never * assigned to a PORT
*/ return sci_controller_start_next_phy(ihost);
}
}
status = sci_phy_start(iphy);
if (status == SCI_SUCCESS) {
sci_mod_timer(&ihost->phy_timer,
SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
ihost->phy_startup_timer_pending = true;
} else {
dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed " "to stop phy %d because of status " "%d.\n",
__func__,
ihost->phys[ihost->next_phy_to_start].phy_index,
status);
}
/* Build the TCi free pool */
BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
ihost->tci_head = 0;
ihost->tci_tail = 0; for (index = 0; index < ihost->task_context_entries; index++)
isci_tci_free(ihost, index);
/* Build the RNi free pool */
sci_remote_node_table_initialize(&ihost->available_remote_nodes,
ihost->remote_node_entries);
/* * Before anything else lets make sure we will not be * interrupted by the hardware.
*/
sci_controller_disable_interrupts(ihost);
/* Enable the port task scheduler */
sci_controller_enable_port_task_scheduler(ihost);
/* Assign all the task entries to ihost physical function */
sci_controller_assign_task_entries(ihost);
/* Now initialize the completion queue */
sci_controller_initialize_completion_queue(ihost);
/* Initialize the unsolicited frame queue for use */
sci_controller_initialize_unsolicited_frame_queue(ihost);
/* Start all of the ports on this controller */ for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index];
result = sci_port_start(iport); if (result) return result;
}
/*
 * sci_controller_completion_handler() - drain the completion queue, then
 * acknowledge the completion interrupt and re-enable all interrupts.
 */
static void sci_controller_completion_handler(struct isci_host *ihost)
{
	/* Empty out the completion queue */
	if (sci_controller_completion_queue_has_entries(ihost))
		sci_controller_process_completions(ihost);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);
}
if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
wake_up_all(&ihost->eventq);
if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
isci_free_tag(ihost, ireq->io_tag);
} /** * isci_host_completion_routine() - This function is the delayed service * routine that calls the sci core library's completion handler. It's * scheduled as a tasklet from the interrupt service routine when interrupts * in use, or set as the timeout function in polled mode. * @data: This parameter specifies the ISCI host object *
*/
void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *ihost = (struct isci_host *)data;
	u16 active;

	/* NOTE(review): the doc comment above says this routine calls the
	 * core completion handler, but that call was lost in the garbled
	 * source; restored under the controller lock — verify against the
	 * original driver.
	 */
	spin_lock_irq(&ihost->scic_lock);
	sci_controller_completion_handler(ihost);
	spin_unlock_irq(&ihost->scic_lock);

	/*
	 * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
	 * issued for hardware issue workaround
	 */
	active = isci_tci_active(ihost) - SCI_MAX_PORTS;

	/*
	 * the coalesence timeout doubles at each encoding step, so
	 * update it based on the ilog2 value of the outstanding requests
	 */
	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
	       &ihost->smu_registers->interrupt_coalesce_control);
}
/** * sci_controller_stop() - This method will stop an individual controller * object.This method will invoke the associated user callback upon * completion. The completion callback is called when the following * conditions are met: -# the method return status is SCI_SUCCESS. -# the * controller has been quiesced. This method will ensure that all IO * requests are quiesced, phys are stopped, and all additional operation by * the hardware is halted. * @ihost: the handle to the controller object to stop. * @timeout: This parameter specifies the number of milliseconds in which the * stop operation should complete. * * The controller must be in the STARTED or STOPPED state. Indicate if the * controller stop method succeeded or failed in some way. SCI_SUCCESS if the * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the * controller is not either in the STARTED or STOPPED states.
*/ staticenum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{ if (ihost->sm.current_state_id != SCIC_READY) {
dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
__func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE;
}
/** * sci_controller_reset() - This method will reset the supplied core * controller regardless of the state of said controller. This operation is * considered destructive. In other words, all current operations are wiped * out. No IO completions for outstanding devices occur. Outstanding IO * requests are not aborted or completed at the actual remote device. * @ihost: the handle to the controller object to reset. * * Indicate if the controller reset method succeeded or failed in some way. * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if * the controller reset operation is unable to complete.
*/ staticenum sci_status sci_controller_reset(struct isci_host *ihost)
{ switch (ihost->sm.current_state_id) { case SCIC_RESET: case SCIC_READY: case SCIC_STOPPING: case SCIC_FAILED: /* * The reset operation is not a graceful cleanup, just * perform the state transition.
*/
sci_change_state(&ihost->sm, SCIC_RESETTING); return SCI_SUCCESS; default:
dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
__func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE;
}
}
for (index = 0; index < SCI_MAX_PHYS; index++) {
phy_status = sci_phy_stop(&ihost->phys[index]);
if (phy_status != SCI_SUCCESS &&
phy_status != SCI_FAILURE_INVALID_STATE) {
status = SCI_FAILURE;
dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed to stop " "phy %d because of status %d.\n",
__func__,
ihost->phys[index].phy_index, phy_status);
}
}
return status;
}
/**
 * isci_host_deinit - shutdown frame reception and dma
 * @ihost: host to take down
 *
 * This is called in either the driver shutdown or the suspend path. In
 * the shutdown case libsas went through port teardown and normal device
 * removal (i.e. physical links stayed up to service scsi_device removal
 * commands). In the suspend case we disable the hardware without
 * notifying libsas of the link down events since we want libsas to
 * remember the domain across the suspend/resume cycle
 */
void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	/* disable output data selects */
	for (i = 0; i < isci_gpio_count(ihost); i++)
		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);

	/* phy stop is after controller stop to allow port and device to
	 * go idle before shutting down the phys, but the expectation is
	 * that i/o has been shut off well before we reach this
	 * function.
	 *
	 * NOTE(review): the controller stop/wait sequence this comment
	 * refers to is not visible in this (garbled) source, and the
	 * function body appears truncated below — confirm against the
	 * original driver before relying on this routine.
	 */
	sci_controller_stop_phys(ihost);

	/* disable sgpio: where the above wait should give time for the
	 * enclosure to sample the gpios going inactive
	 */
	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
/** * sci_controller_set_interrupt_coalescence() - This method allows the user to * configure the interrupt coalescence. * @ihost: This parameter represents the handle to the controller object * for which its interrupt coalesce register is overridden. * @coalesce_number: Used to control the number of entries in the Completion * Queue before an interrupt is generated. If the number of entries exceed * this number, an interrupt will be generated. The valid range of the input * is [0, 256]. A setting of 0 results in coalescing being disabled. * @coalesce_timeout: Timeout value in microseconds. The valid range of the * input is [0, 2700000] . A setting of 0 is allowed and results in no * interrupt coalescing timeout. * * Indicate if the user successfully set the interrupt coalesce parameters. * SCI_SUCCESS The user successfully updated the interrutp coalescence. * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
*/ staticenum sci_status
sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
u32 coalesce_number,
u32 coalesce_timeout)
{
u8 timeout_encode = 0;
u32 min = 0;
u32 max = 0;
/* Check if the input parameters fall in the range. */ if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX) return SCI_FAILURE_INVALID_PARAMETER_VALUE;
/* * Use the table above to decide the encode of interrupt coalescing timeout
* value for register writing. */ if (coalesce_timeout == 0)
timeout_encode = 0; else{ /* make the timeout value in unit of (10 ns). */
coalesce_timeout = coalesce_timeout * 100;
min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
/* get the encode of timeout for register writing. */ for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
timeout_encode++) { if (min <= coalesce_timeout && max > coalesce_timeout) break; elseif (coalesce_timeout >= max && coalesce_timeout < min * 2
&& coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) { if ((coalesce_timeout - max) < (2 * min - coalesce_timeout)) break; else{
timeout_encode++; break;
}
} else {
max = max * 2;
min = min * 2;
}
}
if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1) /* the value is out of range. */ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
/* enable clock gating for power control of the scu unit */
val = readl(&ihost->smu_registers->clock_gating_control);
val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
writel(val, &ihost->smu_registers->clock_gating_control);
/* set the default interrupt coalescence number and timeout value. */
sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}
for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index];
port_status = sci_port_stop(iport);
if ((port_status != SCI_SUCCESS) &&
(port_status != SCI_FAILURE_INVALID_STATE)) {
status = SCI_FAILURE;
dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed to " "stop port %d because of status %d.\n",
__func__,
iport->logical_port_index,
port_status);
}
}
for (index = 0; index < ihost->remote_node_entries; index++) { if (ihost->device_table[index] != NULL) { /* / @todo What timeout value do we want to provide to this request? */
device_status = sci_remote_device_stop(ihost->device_table[index], 0);
/*
 * sci_controller_reset_hardware() - soft-reset the SCU and clear the
 * completion/unsolicited-frame queue pointers and all interrupt status.
 */
static void sci_controller_reset_hardware(struct isci_host *ihost)
{
	/* Disable interrupts so we dont take any spurious interrupts */
	sci_controller_disable_interrupts(ihost);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);

	/* Delay for 1ms to before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &ihost->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);

	/* clear all interrupts */
	writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
}
if (sm->current_state_id == SCIC_STARTING)
sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); elseif (sm->current_state_id == SCIC_STOPPING) {
sci_change_state(sm, SCIC_FAILED);
isci_host_stop_complete(ihost);
} else/* / @todo Now what do we want to do in this case? */
dev_err(&ihost->pdev->dev, "%s: Controller timer fired when controller was not " "in a state being timed.\n",
__func__);
/* Construct the ports for this controller */ for (i = 0; i < SCI_MAX_PORTS; i++)
sci_port_construct(&ihost->ports[i], i, ihost);
sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
/* Construct the phys for this controller */ for (i = 0; i < SCI_MAX_PHYS; i++) { /* Add all the PHYs to the dummy port */
sci_phy_construct(&ihost->phys[i],
&ihost->ports[SCI_MAX_PORTS], i);
}
/* * Search the power_control queue to see if there are other phys * attached to the same remote device. If found, take all of * them out of await_sas_power state.
*/ if (requester != NULL && requester != iphy) {
u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
iphy->frame_rcvd.iaf.sas_addr, sizeof(requester->frame_rcvd.iaf.sas_addr));
/* * It doesn't matter if the power list is empty, we need to start the * timer in case another phy becomes ready.
*/
sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
ihost->power_control.timer_started = true;
if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
ihost->power_control.phys_granted_power++;
sci_phy_consume_power_handler(iphy);
/* * stop and start the power_control timer. When the timer fires, the * no_of_phys_granted_power will be set to 0
*/ if (ihost->power_control.timer_started)
sci_del_timer(&ihost->power_control.timer);
} else { /* * There are phys, attached to the same sas address as this phy, are * already in READY state, this phy don't need wait.
*/
u8 i; struct isci_phy *current_phy;
for (i = 0; i < SCI_MAX_PHYS; i++) {
u8 other;
current_phy = &ihost->phys[i];
other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
iphy->frame_rcvd.iaf.sas_addr, sizeof(current_phy->frame_rcvd.iaf.sas_addr));
if (current_phy->sm.current_state_id == SCI_PHY_READY &&
current_phy->protocol == SAS_PROTOCOL_SSP &&
other == 0) {
sci_phy_consume_power_handler(iphy); break;
}
}
if (i == SCI_MAX_PHYS) { /* Add the phy in the waiting list */
ihost->power_control.requesters[iphy->phy_index] = iphy;
ihost->power_control.phys_waiting++;
}
}
}
/* Wait for the PLL to lock */ do {
afe_status = readl(&afe->afe_common_block_status);
udelay(AFE_REGISTER_WRITE_DELAY);
} while ((afe_status & 0x00001000) == 0);
if (is_a2(pdev)) { /* Shorten SAS SNW lock time (RxLock timer value from 76 * us to 50 us)
*/
writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id]; conststruct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; int cable_length_long =
is_long_cable(phy_id, cable_selection_mask); int cable_length_medium =
is_medium_cable(phy_id, cable_selection_mask);
if (is_a2(pdev)) { /* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800)
*/
writel(0x00004512, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800)
*/
writel(0x00014500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
} elseif (is_c1(pdev)) { /* Configure transmitter SSC parameters */
writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
/* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800)
*/
writel(0x0001C500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Power up TX and RX out from power down (PWRDNTX and * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
*/ if (is_a2(pdev))
writel(0x000003F0, &xcvr->afe_channel_control); elseif (is_b0(pdev)) {
writel(0x000003D7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
/* * There is nothing to do here for B0 since we do not have to * program the AFE registers. * / @todo The AFE settings are supposed to be correct for the B0 but
* / presently they seem to be wrong. */
sci_controller_afe_initialization(ihost);
/* Take the hardware out of reset */
writel(0, &ihost->smu_registers->soft_reset_control);
/* * / @todo Provide meaningfull error code for hardware failure
* result = SCI_FAILURE_CONTROLLER_HARDWARE; */ for (i = 100; i >= 1; i--) {
u32 status;
/* Loop until the hardware reports success */
udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
status = readl(&ihost->smu_registers->control_status);
if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) break;
} if (i == 0) goto out;
/* * Determine what are the actaul device capacities that the
* hardware will support */
val = readl(&ihost->smu_registers->device_context_capacity);
/* Record the smaller of the two capacity values */
/*
 * NOTE(review): the remainder of this file was lost during extraction
 * ("maximum size reached"); unrelated trailing web-page text removed.
 * Restore the missing tail from the original source before building.
 */