/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* * This method is will fill in the SCU Task Context for any type of SSP request.
*/ staticvoid scu_ssp_request_construct_task_context( struct isci_request *ireq, struct scu_task_context *task_context)
{
dma_addr_t dma_addr; struct isci_remote_device *idev; struct isci_port *iport;
/** * scu_ssp_task_request_construct_task_context() - This method will fill in * the SCU Task Context for a SSP Task request. The following important * settings are utilized: -# priority == SCU_TASK_PRIORITY_HIGH. This * ensures that the task request is issued ahead of other task destined * for the same Remote Node. -# task_type == SCU_TASK_TYPE_IOREAD. This * simply indicates that a normal request type (i.e. non-raw frame) is * being utilized to perform task management. -#control_frame == 1. This * ensures that the proper endianness is set so that the bytes are * transmitted in the right order for a task frame. * @ireq: This parameter specifies the task request object being constructed.
*/ staticvoid scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{ struct scu_task_context *task_context = ireq->tc;
/** * scu_sata_request_construct_task_context() * This method is will fill in the SCU Task Context for any type of SATA * request. This is called from the various SATA constructors. * @ireq: The general IO request object which is to be used in * constructing the SCU task context. * @task_context: The buffer pointer for the SCU task context which is being * constructed. * * The general io request construction is complete. The buffer assignment for * the command buffer is complete. none Revisit task context construction to * determine what is common for SSP/SMP/STP task context structures.
*/ staticvoid scu_sata_request_construct_task_context( struct isci_request *ireq, struct scu_task_context *task_context)
{
dma_addr_t dma_addr; struct isci_remote_device *idev; struct isci_port *iport;
/* Set the first word of the H2D REG FIS */
task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
(ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
(iport->physical_port_index <<
SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context. We must offset the command buffer by 4 bytes because the * first 4 bytes are transfered in the body of the TC.
*/
dma_addr = sci_io_request_get_dma_addr(ireq,
((char *) &ireq->stp.cmd) + sizeof(u32));
if (copy_rx_frame) {
sci_request_build_sgl(ireq);
stp_req->sgl.index = 0;
} else { /* The user does not want the data copied to the SGL buffer location */
stp_req->sgl.index = -1;
}
return SCI_SUCCESS;
}
/*
 * sci_stp_optimized_request_construct()
 * @ireq: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
 *    value of 1 indicates NCQ.
 * @len: number of bytes to be transferred.
 * @dir: DMA direction of the transfer (DMA_TO_DEVICE selects the write
 *    task type).
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						u8 optimized_task_type,
						u32 len,
						enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transfered */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task type
		 * values are consistent with the difference between FPDMA READ
		 * and FPDMA WRITE values.  Add the supplied task type parameter
		 * to this difference to set the task type properly for this
		 * DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}
/* To simplify the implementation we take advantage of the * silicon's partial acceleration of atapi protocol (dma data * transfers), so we promote all commands to dma protocol. This * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
*/
h2d_fis->features |= ATAPI_PKT_DMA;
scu_stp_raw_request_construct_task_context(ireq);
task = isci_request_access_task(ireq); if (task->data_dir == DMA_NONE)
task->total_xfer_len = 0;
/* clear the response so we can detect arrivial of an * unsolicited h2d fis
*/
ireq->stp.rsp.fis_type = 0;
}
/* check for management protocols */ if (test_bit(IREQ_TMF, &ireq->flags)) { struct isci_tmf *tmf = isci_request_access_tmf(ireq);
dev_err(&ireq->owning_controller->pdev->dev, "%s: Request 0x%p received un-handled SAT " "management protocol 0x%x.\n",
__func__, ireq, tmf->tmf_code);
return SCI_FAILURE;
}
if (!sas_protocol_ata(task->task_proto)) {
dev_err(&ireq->owning_controller->pdev->dev, "%s: Non-ATA protocol in SATA path: 0x%x\n",
__func__,
task->task_proto); return SCI_FAILURE;
status = sci_io_request_construct_sata(ireq,
task->total_xfer_len,
task->data_dir,
copy);
if (status == SCI_SUCCESS)
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
return status;
}
#define SCU_TASK_CONTEXT_SRAM 0x200000 /** * sci_req_tx_bytes - bytes transferred when reply underruns request * @ireq: request that was terminated early
*/ static u32 sci_req_tx_bytes(struct isci_request *ireq)
{ struct isci_host *ihost = ireq->owning_controller;
u32 ret_val = 0;
if (readl(&ihost->smu_registers->address_modifier) == 0) { void __iomem *scu_reg_base = ihost->scu_registers;
/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where * BAR1 is the scu_registers * 0x20002C = 0x200000 + 0x2c * = start of task context SRAM + offset of (type.ssp.data_offset) * TCi is the io_tag of struct sci_request
*/
ret_val = readl(scu_reg_base +
(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
}
state = ireq->sm.current_state_id; if (state != SCI_REQ_CONSTRUCTED) {
dev_warn(&ihost->pdev->dev, "%s: SCIC IO Request requested to start while in wrong " "state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE;
}
tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
switch (tc->protocol_type) { case SCU_TASK_CONTEXT_PROTOCOL_SMP: case SCU_TASK_CONTEXT_PROTOCOL_SSP: /* SSP/SMP Frame */
tc->type.ssp.tag = ireq->io_tag;
tc->type.ssp.target_port_transfer_tag = 0xFFFF; break;
switch (state) { case SCI_REQ_CONSTRUCTED: /* Set to make sure no HW terminate posting is done: */
set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_STARTED: case SCI_REQ_TASK_WAIT_TC_COMP: case SCI_REQ_SMP_WAIT_RESP: case SCI_REQ_SMP_WAIT_TC_COMP: case SCI_REQ_STP_UDMA_WAIT_TC_COMP: case SCI_REQ_STP_UDMA_WAIT_D2H: case SCI_REQ_STP_NON_DATA_WAIT_H2D: case SCI_REQ_STP_NON_DATA_WAIT_D2H: case SCI_REQ_STP_PIO_WAIT_H2D: case SCI_REQ_STP_PIO_WAIT_FRAME: case SCI_REQ_STP_PIO_DATA_IN: case SCI_REQ_STP_PIO_DATA_OUT: case SCI_REQ_ATAPI_WAIT_H2D: case SCI_REQ_ATAPI_WAIT_PIO_SETUP: case SCI_REQ_ATAPI_WAIT_D2H: case SCI_REQ_ATAPI_WAIT_TC_COMP: /* Fall through and change state to ABORTING... */ case SCI_REQ_TASK_WAIT_TC_RESP: /* The task frame was already confirmed to have been * sent by the SCU HW. Since the state machine is * now only waiting for the task response itself, * abort the request and complete it immediately * and don't wait for the task response.
*/
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
fallthrough; /* and handle like ABORTING */ case SCI_REQ_ABORTING: if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
set_bit(IREQ_PENDING_ABORT, &ireq->flags); else
clear_bit(IREQ_PENDING_ABORT, &ireq->flags); /* If the request is only waiting on the remote device * suspension, return SUCCESS so the caller will wait too.
*/ return SCI_SUCCESS; case SCI_REQ_COMPLETED: default:
dev_warn(&ireq->owning_controller->pdev->dev, "%s: SCIC IO Request requested to abort while in wrong " "state %d\n", __func__, ireq->sm.current_state_id); break;
}
state = ireq->sm.current_state_id; if (WARN_ONCE(state != SCI_REQ_COMPLETED, "isci: request completion from wrong state (%s)\n",
req_state_name(state))) return SCI_FAILURE_INVALID_STATE;
if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
sci_controller_release_frame(ihost,
ireq->saved_rx_frame_index);
/* XXX can we just stop the machine and remove the 'final' state? */
sci_change_state(&ireq->sm, SCI_REQ_FINAL); return SCI_SUCCESS;
}
if (state != SCI_REQ_STP_PIO_DATA_IN) {
dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
__func__, event_code, req_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
switch (scu_get_event_specifier(event_code)) { case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: /* We are waiting for data and the SCU has R_ERR the data frame. * Go back to waiting for the D2H Register FIS
*/
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); return SCI_SUCCESS; default:
dev_err(&ihost->pdev->dev, "%s: pio request unexpected event %#x\n",
__func__, event_code);
/* TODO Should we fail the PIO request when we get an * unexpected event?
*/ return SCI_FAILURE;
}
}
/* * This function copies response data for requests returning response data * instead of sense data. * @sci_req: This parameter specifies the request object for which to copy * the response data.
*/ staticvoid sci_io_request_copy_response(struct isci_request *ireq)
{ void *resp_buf;
u32 len; struct ssp_response_iu *ssp_response; struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
ssp_response = &ireq->ssp.rsp;
resp_buf = &isci_tmf->resp.resp_iu;
len = min_t(u32,
SSP_RESP_IU_MAX_SIZE,
be32_to_cpu(ssp_response->response_data_len));
/* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 * to determine SDMA status
*/ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS; break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { /* There are times when the SCU hardware will return an early * response because the io request specified more data than is * returned by the target device (mode pages, inquiry data, * etc.). We must check the response stats to see if this is * truly a failed request or a good request that just got * completed early.
*/ struct ssp_response_iu *resp = &ireq->ssp.rsp;
ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame * guaranteed to be received before this completion status is * posted?
*/
resp_iu = &ireq->ssp.rsp;
datapres = resp_iu->datapres;
if (datapres == SAS_DATAPRES_RESPONSE_DATA ||
datapres == SAS_DATAPRES_SENSE_DATA) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
} break; /* only stp device gets suspended. */ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): if (ireq->protocol == SAS_PROTOCOL_STP) {
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
} else {
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
} break;
/* both stp/ssp device gets suspended */ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; break;
/* neither ssp nor stp gets suspended. */ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): default:
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; break;
}
/* * TODO: This is probably wrong for ACK/NAK timeout conditions
*/
/* In all cases we will treat this as the completion of the IO req. */
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS;
}
default: /* Unless we get some strange error wait for the task abort to complete * TODO: Should there be a state change for this completion?
*/ break;
}
return SCI_SUCCESS;
}
staticenum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
u32 completion_code)
{ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): /* Currently, the decision is to simply allow the task request * to timeout if the task IU wasn't received successfully. * There is a potential for receiving multiple task responses if * we decide to send the task IU again.
*/
dev_warn(&ireq->owning_controller->pdev->dev, "%s: TaskRequest:0x%p CompletionCode:%x - " "ACK/NAK timeout\n", __func__, ireq,
completion_code);
sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; default: /* * All other completion status cause the IO to be complete. * If a NAK was received, then it is up to the user to retry * the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break;
}
return SCI_SUCCESS;
}
staticenum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
u32 completion_code)
{ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): /* In the AWAIT RESPONSE state, any TC completion is * unexpected. but if the TC has success status, we * complete the IO anyway.
*/
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): /* These status has been seen in a specific LSI * expander, which sometimes is not able to send smp * response within 2 ms. This causes our hardware break * the connection and set TC completion with one of * these SMP_XXX_XX_ERR status. For these type of error, * we ask ihost user to retry the request.
*/
ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: /* All other completion status cause the IO to be complete. If a NAK * was received, then it is up to the user to retry the request
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break;
}
return SCI_SUCCESS;
}
staticenum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
u32 completion_code)
{ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: /* All other completion status cause the IO to be * complete. If a NAK was received, then it is up to * the user to retry the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break;
}
default: /* All other completion status cause the IO to be * complete. If a NAK was received, then it is up to * the user to retry the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break;
}
return SCI_SUCCESS;
}
#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
/* transmit DATA_FIS from (current sgl + offset) for input * parameter length. current sgl and offset is alreay stored in the IO request
*/ staticenum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( struct isci_request *ireq,
u32 length)
{ struct isci_stp_request *stp_req = &ireq->stp.req; struct scu_task_context *task_context = ireq->tc; struct scu_sgl_element_pair *sgl_pair; struct scu_sgl_element *current_sgl;
/* Recycle the TC and reconstruct it for sending out DATA FIS containing * for the data from current_sgl+offset for the input length
*/
sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
current_sgl = &sgl_pair->A; else
current_sgl = &sgl_pair->B;
if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
sgl = &sgl_pair->A;
len = sgl_pair->A.length - offset;
} else {
sgl = &sgl_pair->B;
len = sgl_pair->B.length - offset;
}
if (stp_req->pio_len == 0) return SCI_SUCCESS;
if (stp_req->pio_len >= len) {
status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); if (status != SCI_SUCCESS) return status;
stp_req->pio_len -= len;
/* update the current sgl, offset and save for future */
sgl = pio_sgl_next(stp_req);
offset = 0;
} elseif (stp_req->pio_len < len) {
sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
/* Sgl offset will be adjusted and saved for future */
offset += stp_req->pio_len;
sgl->address_lower += stp_req->pio_len;
stp_req->pio_len = 0;
}
stp_req->sgl.offset = offset;
return status;
}
/** * sci_stp_request_pio_data_in_copy_data_buffer() * @stp_req: The request that is used for the SGL processing. * @data_buf: The buffer of data to be copied. * @len: The length of the data transfer. * * Copy the data from the buffer for the length specified to the IO request SGL * specified data region. enum sci_status
*/ staticenum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
u8 *data_buf, u32 len)
{ struct isci_request *ireq;
u8 *src_addr; int copy_len; struct sas_task *task; struct scatterlist *sg; void *kaddr; int total_len = len;
/** * sci_stp_request_pio_data_in_copy_data() * @stp_req: The PIO DATA IN request that is to receive the data. * @data_buffer: The buffer to copy from. * * Copy the data buffer to the io request data region. enum sci_status
*/ staticenum sci_status sci_stp_request_pio_data_in_copy_data( struct isci_stp_request *stp_req,
u8 *data_buffer)
{ enum sci_status status;
/* * If there is less than 1K remaining in the transfer request
* copy just the data for the transfer */ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
status = sci_stp_request_pio_data_in_copy_data_buffer(
stp_req, data_buffer, stp_req->pio_len);
if (status == SCI_SUCCESS)
stp_req->pio_len = 0;
} else { /* We are transfering the whole frame so copy */
status = sci_stp_request_pio_data_in_copy_data_buffer(
stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
if (status == SCI_SUCCESS)
stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
}
default: /* All other completion status cause the IO to be * complete. If a NAK was received, then it is up to * the user to retry the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break;
}
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): /* Transmit data */ if (stp_req->pio_len != 0) {
status = sci_stp_request_pio_data_out_transmit_data(ireq); if (status == SCI_SUCCESS) { if (stp_req->pio_len == 0)
all_frames_transferred = true;
}
} elseif (stp_req->pio_len == 0) { /* * this will happen if the all data is written at the * first time after the pio setup fis is received
*/
all_frames_transferred = true;
}
/* all data transferred. */ if (all_frames_transferred) { /* * Change the state to SCI_REQ_STP_PIO_DATA_IN
* and wait for PIO_SETUP fis / or D2H REg fis. */
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
} break;
default: /* * All other completion status cause the IO to be complete. * If a NAK was received, then it is up to the user to retry * the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break;
}
status = process_unsolicited_fis(ireq, frame_index);
if (status == SCI_SUCCESS) { if (ireq->stp.rsp.status & ATA_ERR)
status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
status = SCI_FAILURE_IO_RESPONSE_VALID;
}
/* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame * type. The TC for previous Packet fis was already there, we only need to * change the H2D fis content.
*/
memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
task_context->type.stp.fis_type = FIS_DATA;
task_context->transfer_length_bytes = dev->cdb_len;
}
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
} else { /* * This was not a response frame why did it get * forwarded?
*/
dev_err(&ihost->pdev->dev, "%s: SCIC SMP Request 0x%p received unexpected " "frame %d type 0x%02x\n",
__func__,
ireq,
frame_index,
rsp[0]);
case SCI_REQ_STP_UDMA_WAIT_TC_COMP: return sci_stp_request_udma_general_frame_handler(ireq,
frame_index);
case SCI_REQ_STP_UDMA_WAIT_D2H: /* Use the general frame handler to copy the resposne data */
status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
case SCI_REQ_STP_NON_DATA_WAIT_D2H: { struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n",
__func__,
stp_req,
frame_index,
status);
return status;
}
switch (frame_header->fis_type) { case FIS_REGD2H:
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n",
__func__, stp_req, frame_index, status); return status;
}
switch (frame_header->fis_type) { case FIS_PIO_SETUP: /* Get from the frame buffer the PIO Setup Data */
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
/* Get the data from the PIO Setup The SCU Hardware * returns first word in the frame_header and the rest * of the data is in the frame buffer so we need to * back up one dword
*/
/* transfer_count: first 16bits in the 4th dword */
stp_req->pio_len = frame_buffer[3] & 0xffff;
/* status: 4th byte in the 3rd dword */
stp_req->status = (frame_buffer[2] >> 24) & 0xff;
/* The next state is dependent on whether the * request was PIO Data-in or Data out
*/ if (task->data_dir == DMA_FROM_DEVICE) {
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
} elseif (task->data_dir == DMA_TO_DEVICE) { /* Transmit data */
status = sci_stp_request_pio_data_out_transmit_data(ireq); if (status != SCI_SUCCESS) break;
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
} break;
case FIS_SETDEVBITS:
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); break;
case FIS_REGD2H: if (frame_header->status & ATA_BUSY) { /* * Now why is the drive sending a D2H Register * FIS when it is still busy? Do nothing since * we are still in the right state.
*/
dev_dbg(&ihost->pdev->dev, "%s: SCIC PIO Request 0x%p received " "D2H Register FIS with BSY status " "0x%x\n",
__func__,
stp_req,
frame_header->status); break;
}
default: /* FIXME: what do we do here? */ break;
}
/* Frame is decoded return it to the controller */
sci_controller_release_frame(ihost, frame_index);
return status;
}
case SCI_REQ_STP_PIO_DATA_IN: { struct dev_to_host_fis *frame_header; struct sata_fis_data *frame_buffer;
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n",
__func__,
stp_req,
frame_index,
status); return status;
}
if (frame_header->fis_type != FIS_DATA) {
dev_err(&ihost->pdev->dev, "%s: SCIC PIO Request 0x%p received frame %d " "with fis type 0x%02x when expecting a data " "fis.\n",
__func__,
stp_req,
frame_index,
frame_header->fis_type);
status = sci_stp_request_pio_data_in_copy_data(stp_req,
(u8 *)frame_buffer);
/* Frame is decoded return it to the controller */
sci_controller_release_frame(ihost, frame_index);
}
/* Check for the end of the transfer, are there more * bytes remaining for this data transfer
*/ if (status != SCI_SUCCESS || stp_req->pio_len != 0) return status;
sci_controller_continue_io(ireq); return SCI_SUCCESS;
} case SCI_REQ_ATAPI_WAIT_D2H: return atapi_d2h_reg_frame_handler(ireq, frame_index); case SCI_REQ_ABORTING: /* * TODO: Is it even possible to get an unsolicited frame in the * aborting state?
*/
sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS;
default:
dev_warn(&ihost->pdev->dev, "%s: SCIC IO Request given unexpected frame %x while " "in state %d\n",
__func__,
frame_index,
state);
staticenum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
u32 completion_code)
{ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): /* We must check ther response buffer to see if the D2H * Register FIS was received before we got the TC * completion.
*/ if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
sci_remote_device_suspend(ireq->target_device,
SCI_SW_SUSPEND_NORMAL);
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
} else { /* If we have an error completion status for the * TC then we can expect a D2H register FIS from * the device so we must change state to wait * for it
*/
sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
} break;
/* TODO Check to see if any of these completion status need to * wait for the device to host register fis.
*/ /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR * - this comes only for B0
*/ default: /* All other completion status cause the IO to be complete. */
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break;
}
/* NOTE(review): the following trailing text was not C source but a German
 * website disclaimer ("the information on this page was compiled to the best
 * of our knowledge; no guarantee of completeness, correctness or quality is
 * given; the syntax highlighting and measurement are still experimental").
 * It is extraction residue and should be deleted from this file entirely.
 */